From db48a539269f798b52699869d311eee180991388 Mon Sep 17 00:00:00 2001 From: Noah Meyerhans Date: Mon, 4 Oct 2021 09:33:37 -0700 Subject: [PATCH] New upstream version 1.21.53+repack --- MANIFEST.in | 1 + NOTICE | 86 + PKG-INFO | 230 +- botocore/__init__.py | 2 +- .../data/account/2021-02-01/paginators-1.json | 3 + .../data/account/2021-02-01/service-2.json | 275 ++ .../data/amp/2020-08-01/paginators-1.json | 6 + botocore/data/amp/2020-08-01/service-2.json | 712 +++++ botocore/data/amp/2020-08-01/waiters-2.json | 43 + .../appintegrations/2020-07-29/service-2.json | 426 ++- .../data/apprunner/2020-05-15/service-2.json | 118 +- .../data/appsync/2017-07-25/service-2.json | 59 +- .../cloudcontrol/2021-09-30/paginators-1.json | 3 + .../cloudcontrol/2021-09-30/service-2.json | 854 ++++++ .../cloudcontrol/2021-09-30/waiters-2.json | 31 + .../data/connect/2017-08-08/service-2.json | 104 +- .../dataexchange/2017-07-25/paginators-1.json | 6 + .../dataexchange/2017-07-25/service-2.json | 511 +++- botocore/data/ec2/2016-11-15/service-2.json | 23 +- botocore/data/elbv2/2015-12-01/service-2.json | 22 +- botocore/data/endpoints.json | 174 ++ .../imagebuilder/2019-12-02/service-2.json | 52 +- .../data/lambda/2015-03-31/service-2.json | 96 +- .../license-manager/2018-08-01/service-2.json | 4 + .../data/macie2/2020-01-01/service-2.json | 59 +- .../mediaconvert/2017-08-29/service-2.json | 325 +- .../2020-11-12/service-2.json | 108 +- .../data/pinpoint/2016-12-01/service-2.json | 39 +- botocore/data/sesv2/2019-09-27/service-2.json | 267 +- botocore/data/ssm/2014-11-06/service-2.json | 70 +- botocore/data/ssm/2014-11-06/waiters-2.json | 5 + .../data/synthetics/2017-10-11/service-2.json | 63 + .../data/transfer/2018-11-05/service-2.json | 32 +- .../voice-id/2021-09-27/paginators-1.json | 3 + .../data/voice-id/2021-09-27/service-2.json | 1949 ++++++++++++ .../data/wisdom/2020-10-19/paginators-1.json | 46 + .../data/wisdom/2020-10-19/service-2.json | 2648 +++++++++++++++++ 
.../data/workmail/2017-10-01/service-2.json | 265 ++ .../data/workspaces/2015-04-08/service-2.json | 142 +- botocore/docs/method.py | 9 +- botocore/utils.py | 4 +- docs/source/conf.py | 2 +- pyproject.toml | 4 + requirements-dev.txt | 11 + setup.py | 2 + tests/__init__.py | 21 +- tests/acceptance/features/steps/base.py | 3 +- tests/functional/csm/test_monitoring.py | 16 +- .../docs/test_shared_example_config.py | 12 +- tests/functional/test_alias.py | 14 +- tests/functional/test_client_class_names.py | 16 +- tests/functional/test_cognito_idp.py | 160 +- tests/functional/test_endpoints.py | 116 +- tests/functional/test_event_alias.py | 39 +- tests/functional/test_h2_required.py | 30 +- tests/functional/test_model_backcompat.py | 31 +- tests/functional/test_model_completeness.py | 36 +- tests/functional/test_paginate.py | 20 +- tests/functional/test_paginator_config.py | 12 +- tests/functional/test_public_apis.py | 39 +- tests/functional/test_regions.py | 83 +- tests/functional/test_response_shadowing.py | 46 +- tests/functional/test_s3.py | 372 ++- tests/functional/test_s3_control_redirects.py | 34 +- tests/functional/test_service_alias.py | 27 +- tests/functional/test_service_names.py | 55 +- tests/functional/test_six_imports.py | 10 +- tests/functional/test_waiter_config.py | 15 +- tests/integration/test_ec2.py | 2 - tests/integration/test_emr.py | 36 +- tests/integration/test_s3.py | 14 +- tests/integration/test_smoke.py | 76 +- tests/integration/test_utils.py | 35 +- tests/integration/test_waiters.py | 7 +- tests/unit/auth/test_sigv4.py | 38 +- tests/unit/crt/auth/test_crt_sigv4.py | 52 +- .../response_parsing/test_response_parsing.py | 174 +- tests/unit/retries/test_special.py | 2 - tests/unit/retries/test_standard.py | 81 +- tests/unit/test_compat.py | 62 +- tests/unit/test_config_provider.py | 29 +- tests/unit/test_eventstream.py | 109 +- tests/unit/test_exceptions.py | 6 +- .../test_http_client_exception_mapping.py | 34 +- tests/unit/test_http_session.py | 
13 +- tests/unit/test_model.py | 29 +- tests/unit/test_parsers.py | 46 +- tests/unit/test_protocols.py | 51 +- tests/unit/test_session.py | 24 +- tests/unit/test_session_legacy.py | 24 +- 90 files changed, 10440 insertions(+), 1605 deletions(-) create mode 100644 NOTICE create mode 100644 botocore/data/account/2021-02-01/paginators-1.json create mode 100644 botocore/data/account/2021-02-01/service-2.json create mode 100644 botocore/data/amp/2020-08-01/waiters-2.json create mode 100644 botocore/data/cloudcontrol/2021-09-30/paginators-1.json create mode 100644 botocore/data/cloudcontrol/2021-09-30/service-2.json create mode 100644 botocore/data/cloudcontrol/2021-09-30/waiters-2.json create mode 100644 botocore/data/voice-id/2021-09-27/paginators-1.json create mode 100644 botocore/data/voice-id/2021-09-27/service-2.json create mode 100644 botocore/data/wisdom/2020-10-19/paginators-1.json create mode 100644 botocore/data/wisdom/2020-10-19/service-2.json create mode 100644 pyproject.toml create mode 100644 requirements-dev.txt diff --git a/MANIFEST.in b/MANIFEST.in index 92173752..7ad59012 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,7 @@ include README.rst include LICENSE.txt include requirements.txt +include requirements-dev.txt include botocore/cacert.pem include botocore/vendored/requests/cacert.pem recursive-include botocore/data *.json diff --git a/NOTICE b/NOTICE new file mode 100644 index 00000000..b86fb93e --- /dev/null +++ b/NOTICE @@ -0,0 +1,86 @@ +Botocore +Copyright 2012-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +---- + +Botocore includes a vendorized copy of the requests python library to ease installation. + +Requests License +================ + +Copyright 2013 Kenneth Reitz + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +The requests library also includes some vendorized python libraries to ease installation. + +Urllib3 License +=============== + +This is the MIT license: http://www.opensource.org/licenses/mit-license.php + +Copyright 2008-2011 Andrey Petrov and contributors (see CONTRIBUTORS.txt), +Modifications copyright 2012 Kenneth Reitz. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this +software and associated documentation files (the "Software"), to deal in the Software +without restriction, including without limitation the rights to use, copy, modify, merge, +publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons +to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or +substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR +PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE +FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR +OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +DEALINGS IN THE SOFTWARE. 
+ +Chardet License +=============== + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +02110-1301 USA + +Bundle of CA Root Certificates +============================== + +This library is free software; you can redistribute it and/or +modify it under the terms of the GNU Lesser General Public +License as published by the Free Software Foundation; either +version 2.1 of the License, or (at your option) any later version. + +This library is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +Lesser General Public License for more details. + +You should have received a copy of the GNU Lesser General Public +License along with this library; if not, write to the Free Software +Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA +02110-1301 diff --git a/PKG-INFO b/PKG-INFO index 4266424b..fa6d77bf 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,121 +1,10 @@ Metadata-Version: 2.1 Name: botocore -Version: 1.21.46 +Version: 1.21.53 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services License: Apache License 2.0 -Description: botocore - ======== - - .. 
image:: https://codecov.io/github/boto/botocore/coverage.svg?branch=develop - :target: https://codecov.io/github/boto/botocore?branch=develop - - - A low-level interface to a growing number of Amazon Web Services. The - botocore package is the foundation for the - `AWS CLI `__ as well as - `boto3 `__. - - On 01/15/2021 deprecation for Python 2.7 was announced and support was dropped - on 07/15/2021. To avoid disruption, customers using Botocore on Python 2.7 may - need to upgrade their version of Python or pin the version of Botocore. For - more information, see this `blog post `__. - - On 10/29/2020 deprecation for Python 3.4 and Python 3.5 was announced and support - was dropped on 02/01/2021. To avoid disruption, customers using Botocore - on Python 3.4 or 3.5 may need to upgrade their version of Python or pin the - version of Botocore. For more information, see - this `blog post `__. - - Getting Started - --------------- - Assuming that you have Python and ``virtualenv`` installed, set up your environment and install the required dependencies like this or you can install the library using ``pip``: - - .. code-block:: sh - - $ git clone https://github.com/boto/botocore.git - $ cd botocore - $ virtualenv venv - ... - $ . venv/bin/activate - $ pip install -r requirements.txt - $ pip install -e . - - .. code-block:: sh - - $ pip install botocore - - Using Botocore - ~~~~~~~~~~~~~~ - After installing botocore - - Next, set up credentials (in e.g. ``~/.aws/credentials``): - - .. code-block:: ini - - [default] - aws_access_key_id = YOUR_KEY - aws_secret_access_key = YOUR_SECRET - - Then, set up a default region (in e.g. ``~/.aws/config``): - - .. code-block:: ini - - [default] - region=us-east-1 - - Other credentials configuration method can be found `here `__ - - Then, from a Python interpreter: - - .. 
code-block:: python - - >>> import botocore.session - >>> session = botocore.session.get_session() - >>> client = session.create_client('ec2') - >>> print(client.describe_instances()) - - - Getting Help - ------------ - - We use GitHub issues for tracking bugs and feature requests and have limited - bandwidth to address them. Please use these community resources for getting - help. Please note many of the same resources available for ``boto3`` are - applicable for ``botocore``: - - * Ask a question on `Stack Overflow `__ and tag it with `boto3 `__ - * Come join the AWS Python community chat on `gitter `__ - * Open a support ticket with `AWS Support `__ - * If it turns out that you may have found a bug, please `open an issue `__ - - - Contributing - ------------ - - We value feedback and contributions from our community. Whether it's a bug report, new feature, correction, or additional documentation, we welcome your issues and pull requests. Please read through this `CONTRIBUTING `__ document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your contribution. - - - Maintenance and Support for SDK Major Versions - ---------------------------------------------- - - Botocore was made generally available on 06/22/2015 and is currently in the full support phase of the availability life cycle. 
- - For information about maintenance and support for SDK major versions and their underlying dependencies, see the following in the AWS SDKs and Tools Shared Configuration and Credentials Reference Guide: - - * `AWS SDKs and Tools Maintenance Policy `__ - * `AWS SDKs and Tools Version Support Matrix `__ - - - More Resources - -------------- - - * `NOTICE `__ - * `Changelog `__ - * `License `__ - - Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Intended Audience :: Developers @@ -127,5 +16,122 @@ Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 Requires-Python: >= 3.6 Provides-Extra: crt +License-File: LICENSE.txt +License-File: NOTICE + +botocore +======== + +.. image:: https://codecov.io/github/boto/botocore/coverage.svg?branch=develop + :target: https://codecov.io/github/boto/botocore?branch=develop + + +A low-level interface to a growing number of Amazon Web Services. The +botocore package is the foundation for the +`AWS CLI `__ as well as +`boto3 `__. + +On 01/15/2021 deprecation for Python 2.7 was announced and support was dropped +on 07/15/2021. To avoid disruption, customers using Botocore on Python 2.7 may +need to upgrade their version of Python or pin the version of Botocore. For +more information, see this `blog post `__. + +On 10/29/2020 deprecation for Python 3.4 and Python 3.5 was announced and support +was dropped on 02/01/2021. To avoid disruption, customers using Botocore +on Python 3.4 or 3.5 may need to upgrade their version of Python or pin the +version of Botocore. For more information, see +this `blog post `__. 
+ +Getting Started +--------------- +Assuming that you have Python and ``virtualenv`` installed, set up your environment and install the required dependencies like this or you can install the library using ``pip``: + +.. code-block:: sh + + $ git clone https://github.com/boto/botocore.git + $ cd botocore + $ virtualenv venv + ... + $ . venv/bin/activate + $ pip install -r requirements.txt + $ pip install -e . + +.. code-block:: sh + + $ pip install botocore + +Using Botocore +~~~~~~~~~~~~~~ +After installing botocore + +Next, set up credentials (in e.g. ``~/.aws/credentials``): + +.. code-block:: ini + + [default] + aws_access_key_id = YOUR_KEY + aws_secret_access_key = YOUR_SECRET + +Then, set up a default region (in e.g. ``~/.aws/config``): + +.. code-block:: ini + + [default] + region=us-east-1 + +Other credentials configuration method can be found `here `__ + +Then, from a Python interpreter: + +.. code-block:: python + + >>> import botocore.session + >>> session = botocore.session.get_session() + >>> client = session.create_client('ec2') + >>> print(client.describe_instances()) + + +Getting Help +------------ + +We use GitHub issues for tracking bugs and feature requests and have limited +bandwidth to address them. Please use these community resources for getting +help. Please note many of the same resources available for ``boto3`` are +applicable for ``botocore``: + +* Ask a question on `Stack Overflow `__ and tag it with `boto3 `__ +* Come join the AWS Python community chat on `gitter `__ +* Open a support ticket with `AWS Support `__ +* If it turns out that you may have found a bug, please `open an issue `__ + + +Contributing +------------ + +We value feedback and contributions from our community. Whether it's a bug report, new feature, correction, or additional documentation, we welcome your issues and pull requests. 
Please read through this `CONTRIBUTING `__ document before submitting any issues or pull requests to ensure we have all the necessary information to effectively respond to your contribution. + + +Maintenance and Support for SDK Major Versions +---------------------------------------------- + +Botocore was made generally available on 06/22/2015 and is currently in the full support phase of the availability life cycle. + +For information about maintenance and support for SDK major versions and their underlying dependencies, see the following in the AWS SDKs and Tools Shared Configuration and Credentials Reference Guide: + +* `AWS SDKs and Tools Maintenance Policy `__ +* `AWS SDKs and Tools Version Support Matrix `__ + + +More Resources +-------------- + +* `NOTICE `__ +* `Changelog `__ +* `License `__ + + + diff --git a/botocore/__init__.py b/botocore/__init__.py index faa3e4e6..8f64790c 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.21.46' +__version__ = '1.21.53' class NullHandler(logging.Handler): diff --git a/botocore/data/account/2021-02-01/paginators-1.json b/botocore/data/account/2021-02-01/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/account/2021-02-01/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/account/2021-02-01/service-2.json b/botocore/data/account/2021-02-01/service-2.json new file mode 100644 index 00000000..a0555882 --- /dev/null +++ b/botocore/data/account/2021-02-01/service-2.json @@ -0,0 +1,275 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2021-02-01", + "endpointPrefix":"account", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"AWS Account", + "serviceId":"Account", + "signatureVersion":"v4", + "signingName":"account", + "uid":"account-2021-02-01" + }, + "operations":{ + "DeleteAlternateContact":{ + "name":"DeleteAlternateContact", + 
"http":{ + "method":"POST", + "requestUri":"/deleteAlternateContact", + "responseCode":200 + }, + "input":{"shape":"DeleteAlternateContactRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes the specified alternate contact from an Amazon Web Services account.

For complete details about how to use the alternate contact operations, see Access or updating the alternate contacts.

", + "idempotent":true + }, + "GetAlternateContact":{ + "name":"GetAlternateContact", + "http":{ + "method":"POST", + "requestUri":"/getAlternateContact", + "responseCode":200 + }, + "input":{"shape":"GetAlternateContactRequest"}, + "output":{"shape":"GetAlternateContactResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Retrieves the specified alternate contact attached to an Amazon Web Services account.

For complete details about how to use the alternate contact operations, see Access or updating the alternate contacts.

" + }, + "PutAlternateContact":{ + "name":"PutAlternateContact", + "http":{ + "method":"POST", + "requestUri":"/putAlternateContact", + "responseCode":200 + }, + "input":{"shape":"PutAlternateContactRequest"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"TooManyRequestsException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Modifies the specified alternate contact attached to an Amazon Web Services account.

For complete details about how to use the alternate contact operations, see Access or updating the alternate contacts.

", + "idempotent":true + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The operation failed because the calling identity doesn't have the minimum required permissions.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AccountId":{ + "type":"string", + "pattern":"^\\d{12}$" + }, + "AlternateContact":{ + "type":"structure", + "members":{ + "AlternateContactType":{ + "shape":"AlternateContactType", + "documentation":"

The type of alternate contact.

" + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

The email address associated with this alternate contact.

" + }, + "Name":{ + "shape":"Name", + "documentation":"

The name associated with this alternate contact.

" + }, + "PhoneNumber":{ + "shape":"PhoneNumber", + "documentation":"

The phone number associated with this alternate contact.

" + }, + "Title":{ + "shape":"Title", + "documentation":"

The title associated with this alternate contact.

" + } + }, + "documentation":"

A structure that contains the details of an alternate contact associated with an Amazon Web Services account

" + }, + "AlternateContactType":{ + "type":"string", + "enum":[ + "BILLING", + "OPERATIONS", + "SECURITY" + ] + }, + "DeleteAlternateContactRequest":{ + "type":"structure", + "required":["AlternateContactType"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

Specifies the 12 digit account ID number of the Amazon Web Services account that you want to access or modify with this operation.

If you do not specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation.

To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId; it must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, then don't specify this parameter, and call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + }, + "AlternateContactType":{ + "shape":"AlternateContactType", + "documentation":"

Specifies which of the alternate contacts to delete.

" + } + } + }, + "EmailAddress":{ + "type":"string", + "max":64, + "min":1, + "pattern":"[\\w+=,.-]+@[\\w.-]+\\.[\\w]+", + "sensitive":true + }, + "GetAlternateContactRequest":{ + "type":"structure", + "required":["AlternateContactType"], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

Specifies the 12 digit account ID number of the Amazon Web Services account that you want to access or modify with this operation.

If you do not specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation.

To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId; it must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, then don't specify this parameter, and call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + }, + "AlternateContactType":{ + "shape":"AlternateContactType", + "documentation":"

Specifies which alternate contact you want to retrieve.

" + } + } + }, + "GetAlternateContactResponse":{ + "type":"structure", + "members":{ + "AlternateContact":{ + "shape":"AlternateContact", + "documentation":"

A structure that contains the details for the specified alternate contact.

" + } + } + }, + "InternalServerException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The operation failed because of an error internal to Amazon Web Services. Try your operation again later.

", + "error":{"httpStatusCode":500}, + "exception":true, + "fault":true, + "retryable":{"throttling":false} + }, + "Name":{ + "type":"string", + "max":64, + "min":1, + "sensitive":true + }, + "PhoneNumber":{ + "type":"string", + "max":25, + "min":1, + "pattern":"^[\\s0-9()+-]+$", + "sensitive":true + }, + "PutAlternateContactRequest":{ + "type":"structure", + "required":[ + "AlternateContactType", + "EmailAddress", + "Name", + "PhoneNumber", + "Title" + ], + "members":{ + "AccountId":{ + "shape":"AccountId", + "documentation":"

Specifies the 12 digit account ID number of the Amazon Web Services account that you want to access or modify with this operation.

If you do not specify this parameter, it defaults to the Amazon Web Services account of the identity used to call the operation.

To use this parameter, the caller must be an identity in the organization's management account or a delegated administrator account, and the specified account ID must be a member account in the same organization. The organization must have all features enabled, and the organization must have trusted access enabled for the Account Management service, and optionally a delegated admin account assigned.

The management account can't specify its own AccountId; it must call the operation in standalone context by not including the AccountId parameter.

To call this operation on an account that is not a member of an organization, then don't specify this parameter, and call the operation using an identity belonging to the account whose contacts you wish to retrieve or modify.

" + }, + "AlternateContactType":{ + "shape":"AlternateContactType", + "documentation":"

Specifies which alternate contact you want to create or update.

" + }, + "EmailAddress":{ + "shape":"EmailAddress", + "documentation":"

Specifies an email address for the alternate contact.

" + }, + "Name":{ + "shape":"Name", + "documentation":"

Specifies a name for the alternate contact.

" + }, + "PhoneNumber":{ + "shape":"PhoneNumber", + "documentation":"

Specifies a phone number for the alternate contact.

" + }, + "Title":{ + "shape":"Title", + "documentation":"

Specifies a title for the alternate contact.

" + } + } + }, + "ResourceNotFoundException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The operation failed because it specified a resource that can't be found.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "String":{"type":"string"}, + "Title":{ + "type":"string", + "max":50, + "min":1, + "sensitive":true + }, + "TooManyRequestsException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The operation failed because it was called too frequently and exceeded a throttle limit.

", + "error":{ + "httpStatusCode":429, + "senderFault":true + }, + "exception":true, + "retryable":{"throttling":true} + }, + "ValidationException":{ + "type":"structure", + "required":["message"], + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The operation failed because one of the input parameters was invalid.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + } + }, + "documentation":"

Operations for Amazon Web Services Account Management

" +} diff --git a/botocore/data/amp/2020-08-01/paginators-1.json b/botocore/data/amp/2020-08-01/paginators-1.json index 8a0ae8e2..a93e744d 100644 --- a/botocore/data/amp/2020-08-01/paginators-1.json +++ b/botocore/data/amp/2020-08-01/paginators-1.json @@ -5,6 +5,12 @@ "output_token": "nextToken", "limit_key": "maxResults", "result_key": "workspaces" + }, + "ListRuleGroupsNamespaces": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "ruleGroupsNamespaces" } } } diff --git a/botocore/data/amp/2020-08-01/service-2.json b/botocore/data/amp/2020-08-01/service-2.json index 0eea1fff..019f21cc 100644 --- a/botocore/data/amp/2020-08-01/service-2.json +++ b/botocore/data/amp/2020-08-01/service-2.json @@ -12,6 +12,48 @@ "uid":"amp-2020-08-01" }, "operations":{ + "CreateAlertManagerDefinition":{ + "name":"CreateAlertManagerDefinition", + "http":{ + "method":"POST", + "requestUri":"/workspaces/{workspaceId}/alertmanager/definition", + "responseCode":202 + }, + "input":{"shape":"CreateAlertManagerDefinitionRequest"}, + "output":{"shape":"CreateAlertManagerDefinitionResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Create an alert manager definition.

", + "idempotent":true + }, + "CreateRuleGroupsNamespace":{ + "name":"CreateRuleGroupsNamespace", + "http":{ + "method":"POST", + "requestUri":"/workspaces/{workspaceId}/rulegroupsnamespaces", + "responseCode":202 + }, + "input":{"shape":"CreateRuleGroupsNamespaceRequest"}, + "output":{"shape":"CreateRuleGroupsNamespaceResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Create a rule group namespace.

", + "idempotent":true + }, "CreateWorkspace":{ "name":"CreateWorkspace", "http":{ @@ -32,6 +74,44 @@ "documentation":"

Creates a new AMP workspace.

", "idempotent":true }, + "DeleteAlertManagerDefinition":{ + "name":"DeleteAlertManagerDefinition", + "http":{ + "method":"DELETE", + "requestUri":"/workspaces/{workspaceId}/alertmanager/definition", + "responseCode":202 + }, + "input":{"shape":"DeleteAlertManagerDefinitionRequest"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Deletes an alert manager definition.

", + "idempotent":true + }, + "DeleteRuleGroupsNamespace":{ + "name":"DeleteRuleGroupsNamespace", + "http":{ + "method":"DELETE", + "requestUri":"/workspaces/{workspaceId}/rulegroupsnamespaces/{name}", + "responseCode":202 + }, + "input":{"shape":"DeleteRuleGroupsNamespaceRequest"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Delete a rule groups namespace.

", + "idempotent":true + }, "DeleteWorkspace":{ "name":"DeleteWorkspace", "http":{ @@ -42,6 +122,7 @@ "input":{"shape":"DeleteWorkspaceRequest"}, "errors":[ {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, {"shape":"ValidationException"}, {"shape":"ResourceNotFoundException"}, {"shape":"AccessDeniedException"}, @@ -50,6 +131,42 @@ "documentation":"

Deletes an AMP workspace.

", "idempotent":true }, + "DescribeAlertManagerDefinition":{ + "name":"DescribeAlertManagerDefinition", + "http":{ + "method":"GET", + "requestUri":"/workspaces/{workspaceId}/alertmanager/definition", + "responseCode":200 + }, + "input":{"shape":"DescribeAlertManagerDefinitionRequest"}, + "output":{"shape":"DescribeAlertManagerDefinitionResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Describes an alert manager definition.

" + }, + "DescribeRuleGroupsNamespace":{ + "name":"DescribeRuleGroupsNamespace", + "http":{ + "method":"GET", + "requestUri":"/workspaces/{workspaceId}/rulegroupsnamespaces/{name}", + "responseCode":200 + }, + "input":{"shape":"DescribeRuleGroupsNamespaceRequest"}, + "output":{"shape":"DescribeRuleGroupsNamespaceResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Describe a rule groups namespace.

" + }, "DescribeWorkspace":{ "name":"DescribeWorkspace", "http":{ @@ -68,6 +185,24 @@ ], "documentation":"

Describes an existing AMP workspace.

" }, + "ListRuleGroupsNamespaces":{ + "name":"ListRuleGroupsNamespaces", + "http":{ + "method":"GET", + "requestUri":"/workspaces/{workspaceId}/rulegroupsnamespaces", + "responseCode":200 + }, + "input":{"shape":"ListRuleGroupsNamespacesRequest"}, + "output":{"shape":"ListRuleGroupsNamespacesResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"} + ], + "documentation":"

Lists rule groups namespaces.

" + }, "ListTagsForResource":{ "name":"ListTagsForResource", "http":{ @@ -103,6 +238,48 @@ ], "documentation":"

Lists all AMP workspaces, including workspaces being created or deleted.

" }, + "PutAlertManagerDefinition":{ + "name":"PutAlertManagerDefinition", + "http":{ + "method":"PUT", + "requestUri":"/workspaces/{workspaceId}/alertmanager/definition", + "responseCode":202 + }, + "input":{"shape":"PutAlertManagerDefinitionRequest"}, + "output":{"shape":"PutAlertManagerDefinitionResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Update an alert manager definition.

", + "idempotent":true + }, + "PutRuleGroupsNamespace":{ + "name":"PutRuleGroupsNamespace", + "http":{ + "method":"PUT", + "requestUri":"/workspaces/{workspaceId}/rulegroupsnamespaces/{name}", + "responseCode":202 + }, + "input":{"shape":"PutRuleGroupsNamespaceRequest"}, + "output":{"shape":"PutRuleGroupsNamespaceResponse"}, + "errors":[ + {"shape":"ThrottlingException"}, + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InternalServerException"}, + {"shape":"ServiceQuotaExceededException"} + ], + "documentation":"

Update a rule groups namespace.

", + "idempotent":true + }, "TagResource":{ "name":"TagResource", "http":{ @@ -178,6 +355,65 @@ }, "exception":true }, + "AlertManagerDefinitionData":{ + "type":"blob", + "documentation":"

The alert manager definition data.

" + }, + "AlertManagerDefinitionDescription":{ + "type":"structure", + "required":[ + "createdAt", + "data", + "modifiedAt", + "status" + ], + "members":{ + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The time when the alert manager definition was created.

" + }, + "data":{ + "shape":"AlertManagerDefinitionData", + "documentation":"

The alert manager definition.

" + }, + "modifiedAt":{ + "shape":"Timestamp", + "documentation":"

The time when the alert manager definition was modified.

" + }, + "status":{ + "shape":"AlertManagerDefinitionStatus", + "documentation":"

The status of alert manager definition.

" + } + }, + "documentation":"

Represents the properties of an alert manager definition.

" + }, + "AlertManagerDefinitionStatus":{ + "type":"structure", + "required":["statusCode"], + "members":{ + "statusCode":{ + "shape":"AlertManagerDefinitionStatusCode", + "documentation":"

Status code of this definition.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The reason for failure if any.

" + } + }, + "documentation":"

Represents the status of a definition.

" + }, + "AlertManagerDefinitionStatusCode":{ + "type":"string", + "documentation":"

State of an alert manager definition.

", + "enum":[ + "CREATING", + "ACTIVE", + "UPDATING", + "DELETING", + "CREATION_FAILED", + "UPDATE_FAILED" + ] + }, "ConflictException":{ "type":"structure", "required":[ @@ -206,6 +442,103 @@ }, "exception":true }, + "CreateAlertManagerDefinitionRequest":{ + "type":"structure", + "required":[ + "data", + "workspaceId" + ], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "data":{ + "shape":"AlertManagerDefinitionData", + "documentation":"

The alert manager definition data.

" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace in which to create the alert manager definition.

", + "location":"uri", + "locationName":"workspaceId" + } + }, + "documentation":"

Represents the input of a CreateAlertManagerDefinition operation.

" + }, + "CreateAlertManagerDefinitionResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"AlertManagerDefinitionStatus", + "documentation":"

The status of alert manager definition.

" + } + }, + "documentation":"

Represents the output of a CreateAlertManagerDefinition operation.

" + }, + "CreateRuleGroupsNamespaceRequest":{ + "type":"structure", + "required":[ + "data", + "name", + "workspaceId" + ], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "data":{ + "shape":"RuleGroupsNamespaceData", + "documentation":"

The namespace data that define the rule groups.

" + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

The rule groups namespace name.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

Optional, user-provided tags for this rule groups namespace.

" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace in which to create the rule group namespace.

", + "location":"uri", + "locationName":"workspaceId" + } + }, + "documentation":"

Represents the input of a CreateRuleGroupsNamespace operation.

" + }, + "CreateRuleGroupsNamespaceResponse":{ + "type":"structure", + "required":[ + "arn", + "name", + "status" + ], + "members":{ + "arn":{ + "shape":"RuleGroupsNamespaceArn", + "documentation":"

The Amazon Resource Name (ARN) of this rule groups namespace.

" + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

The rule groups namespace name.

" + }, + "status":{ + "shape":"RuleGroupsNamespaceStatus", + "documentation":"

The status of rule groups namespace.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags of this rule groups namespace.

" + } + }, + "documentation":"

Represents the output of a CreateRuleGroupsNamespace operation.

" + }, "CreateWorkspaceRequest":{ "type":"structure", "members":{ @@ -252,6 +585,55 @@ }, "documentation":"

Represents the output of a CreateWorkspace operation.

" }, + "DeleteAlertManagerDefinitionRequest":{ + "type":"structure", + "required":["workspaceId"], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace in which to delete the alert manager definition.

", + "location":"uri", + "locationName":"workspaceId" + } + }, + "documentation":"

Represents the input of a DeleteAlertManagerDefinition operation.

" + }, + "DeleteRuleGroupsNamespaceRequest":{ + "type":"structure", + "required":[ + "name", + "workspaceId" + ], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", + "idempotencyToken":true, + "location":"querystring", + "locationName":"clientToken" + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

The rule groups namespace name.

", + "location":"uri", + "locationName":"name" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace from which to delete the rule groups namespace.

", + "location":"uri", + "locationName":"workspaceId" + } + }, + "documentation":"

Represents the input of a DeleteRuleGroupsNamespace operation.

" + }, "DeleteWorkspaceRequest":{ "type":"structure", "required":["workspaceId"], @@ -272,6 +654,63 @@ }, "documentation":"

Represents the input of a DeleteWorkspace operation.

" }, + "DescribeAlertManagerDefinitionRequest":{ + "type":"structure", + "required":["workspaceId"], + "members":{ + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace to describe.

", + "location":"uri", + "locationName":"workspaceId" + } + }, + "documentation":"

Represents the input of a DescribeAlertManagerDefinition operation.

" + }, + "DescribeAlertManagerDefinitionResponse":{ + "type":"structure", + "required":["alertManagerDefinition"], + "members":{ + "alertManagerDefinition":{ + "shape":"AlertManagerDefinitionDescription", + "documentation":"

The properties of the selected workspace's alert manager definition.

" + } + }, + "documentation":"

Represents the output of a DescribeAlertManagerDefinition operation.

" + }, + "DescribeRuleGroupsNamespaceRequest":{ + "type":"structure", + "required":[ + "name", + "workspaceId" + ], + "members":{ + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

The rule groups namespace.

", + "location":"uri", + "locationName":"name" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace to describe.

", + "location":"uri", + "locationName":"workspaceId" + } + }, + "documentation":"

Represents the input of a DescribeRuleGroupsNamespace operation.

" + }, + "DescribeRuleGroupsNamespaceResponse":{ + "type":"structure", + "required":["ruleGroupsNamespace"], + "members":{ + "ruleGroupsNamespace":{ + "shape":"RuleGroupsNamespaceDescription", + "documentation":"

The selected rule groups namespace.

" + } + }, + "documentation":"

Represents the output of a DescribeRuleGroupsNamespace operation.

" + }, "DescribeWorkspaceRequest":{ "type":"structure", "required":["workspaceId"], @@ -328,6 +767,58 @@ "fault":true, "retryable":{"throttling":false} }, + "ListRuleGroupsNamespacesRequest":{ + "type":"structure", + "required":["workspaceId"], + "members":{ + "maxResults":{ + "shape":"ListRuleGroupsNamespacesRequestMaxResultsInteger", + "documentation":"

Maximum results to return in response (default=100, maximum=1000).

", + "location":"querystring", + "locationName":"maxResults" + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

Optional filter for rule groups namespace name. Only the rule groups namespaces that begin with this value will be returned.

", + "location":"querystring", + "locationName":"name" + }, + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Pagination token to request the next page in a paginated list. This token is obtained from the output of the previous ListRuleGroupsNamespaces request.

", + "location":"querystring", + "locationName":"nextToken" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace.

", + "location":"uri", + "locationName":"workspaceId" + } + }, + "documentation":"

Represents the input of a ListRuleGroupsNamespaces operation.

" + }, + "ListRuleGroupsNamespacesRequestMaxResultsInteger":{ + "type":"integer", + "box":true, + "max":1000, + "min":1 + }, + "ListRuleGroupsNamespacesResponse":{ + "type":"structure", + "required":["ruleGroupsNamespaces"], + "members":{ + "nextToken":{ + "shape":"PaginationToken", + "documentation":"

Pagination token to use when requesting the next page in this list.

" + }, + "ruleGroupsNamespaces":{ + "shape":"RuleGroupsNamespaceSummaryList", + "documentation":"

The list of the selected rule groups namespaces.

" + } + }, + "documentation":"

Represents the output of a ListRuleGroupsNamespaces operation.

" + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["resourceArn"], @@ -395,6 +886,101 @@ "type":"string", "documentation":"

A token used to access the next page in a paginated result set.

" }, + "PutAlertManagerDefinitionRequest":{ + "type":"structure", + "required":[ + "data", + "workspaceId" + ], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "data":{ + "shape":"AlertManagerDefinitionData", + "documentation":"

The alert manager definition data.

" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace in which to update the alert manager definition.

", + "location":"uri", + "locationName":"workspaceId" + } + }, + "documentation":"

Represents the input of a PutAlertManagerDefinition operation.

" + }, + "PutAlertManagerDefinitionResponse":{ + "type":"structure", + "required":["status"], + "members":{ + "status":{ + "shape":"AlertManagerDefinitionStatus", + "documentation":"

The status of alert manager definition.

" + } + }, + "documentation":"

Represents the output of a PutAlertManagerDefinition operation.

" + }, + "PutRuleGroupsNamespaceRequest":{ + "type":"structure", + "required":[ + "data", + "name", + "workspaceId" + ], + "members":{ + "clientToken":{ + "shape":"IdempotencyToken", + "documentation":"

Optional, unique, case-sensitive, user-provided identifier to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "data":{ + "shape":"RuleGroupsNamespaceData", + "documentation":"

The namespace data that define the rule groups.

" + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

The rule groups namespace name.

", + "location":"uri", + "locationName":"name" + }, + "workspaceId":{ + "shape":"WorkspaceId", + "documentation":"

The ID of the workspace in which to update the rule group namespace.

", + "location":"uri", + "locationName":"workspaceId" + } + }, + "documentation":"

Represents the input of a PutRuleGroupsNamespace operation.

" + }, + "PutRuleGroupsNamespaceResponse":{ + "type":"structure", + "required":[ + "arn", + "name", + "status" + ], + "members":{ + "arn":{ + "shape":"RuleGroupsNamespaceArn", + "documentation":"

The Amazon Resource Name (ARN) of this rule groups namespace.

" + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

The rule groups namespace name.

" + }, + "status":{ + "shape":"RuleGroupsNamespaceStatus", + "documentation":"

The status of rule groups namespace.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags of this rule groups namespace.

" + } + }, + "documentation":"

Represents the output of a PutRuleGroupsNamespace operation.

" + }, "ResourceNotFoundException":{ "type":"structure", "required":[ @@ -423,6 +1009,132 @@ }, "exception":true }, + "RuleGroupsNamespaceArn":{ + "type":"string", + "documentation":"

An ARN identifying a rule groups namespace.

" + }, + "RuleGroupsNamespaceData":{ + "type":"blob", + "documentation":"

The rule groups namespace data.

" + }, + "RuleGroupsNamespaceDescription":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "data", + "modifiedAt", + "name", + "status" + ], + "members":{ + "arn":{ + "shape":"RuleGroupsNamespaceArn", + "documentation":"

The Amazon Resource Name (ARN) of this rule groups namespace.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The time when the rule groups namespace was created.

" + }, + "data":{ + "shape":"RuleGroupsNamespaceData", + "documentation":"

The rule groups namespace data.

" + }, + "modifiedAt":{ + "shape":"Timestamp", + "documentation":"

The time when the rule groups namespace was modified.

" + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

The rule groups namespace name.

" + }, + "status":{ + "shape":"RuleGroupsNamespaceStatus", + "documentation":"

The status of rule groups namespace.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags of this rule groups namespace.

" + } + }, + "documentation":"

Represents a description of the rule groups namespace.

" + }, + "RuleGroupsNamespaceName":{ + "type":"string", + "documentation":"

The namespace name that the rule groups belong to.

", + "max":64, + "min":1, + "pattern":"[0-9A-Za-z][-.0-9A-Z_a-z]*" + }, + "RuleGroupsNamespaceStatus":{ + "type":"structure", + "required":["statusCode"], + "members":{ + "statusCode":{ + "shape":"RuleGroupsNamespaceStatusCode", + "documentation":"

Status code of this namespace.

" + }, + "statusReason":{ + "shape":"String", + "documentation":"

The reason for failure if any.

" + } + }, + "documentation":"

Represents the status of a namespace.

" + }, + "RuleGroupsNamespaceStatusCode":{ + "type":"string", + "documentation":"

State of a namespace.

", + "enum":[ + "CREATING", + "ACTIVE", + "UPDATING", + "DELETING", + "CREATION_FAILED", + "UPDATE_FAILED" + ] + }, + "RuleGroupsNamespaceSummary":{ + "type":"structure", + "required":[ + "arn", + "createdAt", + "modifiedAt", + "name", + "status" + ], + "members":{ + "arn":{ + "shape":"RuleGroupsNamespaceArn", + "documentation":"

The Amazon Resource Name (ARN) of this rule groups namespace.

" + }, + "createdAt":{ + "shape":"Timestamp", + "documentation":"

The time when the rule groups namespace was created.

" + }, + "modifiedAt":{ + "shape":"Timestamp", + "documentation":"

The time when the rule groups namespace was modified.

" + }, + "name":{ + "shape":"RuleGroupsNamespaceName", + "documentation":"

The rule groups namespace name.

" + }, + "status":{ + "shape":"RuleGroupsNamespaceStatus", + "documentation":"

The status of rule groups namespace.

" + }, + "tags":{ + "shape":"TagMap", + "documentation":"

The tags of this rule groups namespace.

" + } + }, + "documentation":"

Represents a summary of the rule groups namespace.

" + }, + "RuleGroupsNamespaceSummaryList":{ + "type":"list", + "member":{"shape":"RuleGroupsNamespaceSummary"}, + "documentation":"

A list of rule groups namespace summary.

" + }, "ServiceQuotaExceededException":{ "type":"structure", "required":[ diff --git a/botocore/data/amp/2020-08-01/waiters-2.json b/botocore/data/amp/2020-08-01/waiters-2.json new file mode 100644 index 00000000..db06c33b --- /dev/null +++ b/botocore/data/amp/2020-08-01/waiters-2.json @@ -0,0 +1,43 @@ +{ + "version" : 2, + "waiters" : { + "WorkspaceActive" : { + "description" : "Wait until a workspace reaches ACTIVE status", + "delay" : 2, + "maxAttempts" : 60, + "operation" : "DescribeWorkspace", + "acceptors" : [ { + "matcher" : "path", + "argument" : "workspace.status.statusCode", + "state" : "success", + "expected" : "ACTIVE" + }, { + "matcher" : "path", + "argument" : "workspace.status.statusCode", + "state" : "retry", + "expected" : "UPDATING" + }, { + "matcher" : "path", + "argument" : "workspace.status.statusCode", + "state" : "retry", + "expected" : "CREATING" + } ] + }, + "WorkspaceDeleted" : { + "description" : "Wait until a workspace reaches DELETED status", + "delay" : 2, + "maxAttempts" : 60, + "operation" : "DescribeWorkspace", + "acceptors" : [ { + "matcher" : "error", + "state" : "success", + "expected" : "ResourceNotFoundException" + }, { + "matcher" : "path", + "argument" : "workspace.status.statusCode", + "state" : "retry", + "expected" : "DELETING" + } ] + } + } +} \ No newline at end of file diff --git a/botocore/data/appintegrations/2020-07-29/service-2.json b/botocore/data/appintegrations/2020-07-29/service-2.json index ca3d4c29..b7be0723 100644 --- a/botocore/data/appintegrations/2020-07-29/service-2.json +++ b/botocore/data/appintegrations/2020-07-29/service-2.json @@ -12,6 +12,24 @@ "uid":"appintegrations-2020-07-29" }, "operations":{ + "CreateDataIntegration":{ + "name":"CreateDataIntegration", + "http":{ + "method":"POST", + "requestUri":"/dataIntegrations" + }, + "input":{"shape":"CreateDataIntegrationRequest"}, + "output":{"shape":"CreateDataIntegrationResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + 
{"shape":"ResourceQuotaExceededException"}, + {"shape":"DuplicateResourceException"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates and persists a DataIntegration resource.

You cannot create a DataIntegration association for a DataIntegration that has been previously associated. Use a different DataIntegration, or recreate the DataIntegration using the CreateDataIntegration API.

" + }, "CreateEventIntegration":{ "name":"CreateEventIntegration", "http":{ @@ -30,6 +48,23 @@ ], "documentation":"

Creates an EventIntegration, given a specified name, description, and a reference to an Amazon EventBridge bus in your account and a partner event source that pushes events to that bus. No objects are created in the your account, only metadata that is persisted on the EventIntegration control plane.

" }, + "DeleteDataIntegration":{ + "name":"DeleteDataIntegration", + "http":{ + "method":"DELETE", + "requestUri":"/dataIntegrations/{Identifier}" + }, + "input":{"shape":"DeleteDataIntegrationRequest"}, + "output":{"shape":"DeleteDataIntegrationResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes the DataIntegration. Only DataIntegrations that don't have any DataIntegrationAssociations can be deleted. Deleting a DataIntegration also deletes the underlying Amazon AppFlow flow and service linked role.

You cannot create a DataIntegration association for a DataIntegration that has been previously associated. Use a different DataIntegration, or recreate the DataIntegration using the CreateDataIntegration API.

" + }, "DeleteEventIntegration":{ "name":"DeleteEventIntegration", "http":{ @@ -47,6 +82,23 @@ ], "documentation":"

Deletes the specified existing event integration. If the event integration is associated with clients, the request is rejected.

" }, + "GetDataIntegration":{ + "name":"GetDataIntegration", + "http":{ + "method":"GET", + "requestUri":"/dataIntegrations/{Identifier}" + }, + "input":{"shape":"GetDataIntegrationRequest"}, + "output":{"shape":"GetDataIntegrationResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Returns information about the DataIntegration.

You cannot create a DataIntegration association for a DataIntegration that has been previously associated. Use a different DataIntegration, or recreate the DataIntegration using the CreateDataIntegration API.

" + }, "GetEventIntegration":{ "name":"GetEventIntegration", "http":{ @@ -62,7 +114,40 @@ {"shape":"InvalidRequestException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Return information about the event integration.

" + "documentation":"

Returns information about the event integration.

" + }, + "ListDataIntegrationAssociations":{ + "name":"ListDataIntegrationAssociations", + "http":{ + "method":"GET", + "requestUri":"/dataIntegrations/{Identifier}/associations" + }, + "input":{"shape":"ListDataIntegrationAssociationsRequest"}, + "output":{"shape":"ListDataIntegrationAssociationsResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Returns a paginated list of DataIntegration associations in the account.

You cannot create a DataIntegration association for a DataIntegration that has been previously associated. Use a different DataIntegration, or recreate the DataIntegration using the CreateDataIntegration API.

" + }, + "ListDataIntegrations":{ + "name":"ListDataIntegrations", + "http":{ + "method":"GET", + "requestUri":"/dataIntegrations" + }, + "input":{"shape":"ListDataIntegrationsRequest"}, + "output":{"shape":"ListDataIntegrationsResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Returns a paginated list of DataIntegrations in the account.

You cannot create a DataIntegration association for a DataIntegration that has been previously associated. Use a different DataIntegration, or recreate the DataIntegration using the CreateDataIntegration API.

" }, "ListEventIntegrationAssociations":{ "name":"ListEventIntegrationAssociations", @@ -145,6 +230,23 @@ ], "documentation":"

Removes the specified tags from the specified resource.

" }, + "UpdateDataIntegration":{ + "name":"UpdateDataIntegration", + "http":{ + "method":"PATCH", + "requestUri":"/dataIntegrations/{Identifier}" + }, + "input":{"shape":"UpdateDataIntegrationRequest"}, + "output":{"shape":"UpdateDataIntegrationResponse"}, + "errors":[ + {"shape":"InternalServiceError"}, + {"shape":"ThrottlingException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"InvalidRequestException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Updates the description of a DataIntegration.

You cannot create a DataIntegration association for a DataIntegration that has been previously associated. Use a different DataIntegration, or recreate the DataIntegration using the CreateDataIntegration API.

" + }, "UpdateEventIntegration":{ "name":"UpdateEventIntegration", "http":{ @@ -190,6 +292,82 @@ "min":1, "pattern":".*" }, + "CreateDataIntegrationRequest":{ + "type":"structure", + "required":["Name"], + "members":{ + "Name":{ + "shape":"Name", + "documentation":"

The name of the DataIntegration.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the DataIntegration.

" + }, + "KmsKey":{ + "shape":"NonBlankString", + "documentation":"

The KMS key for the DataIntegration.

" + }, + "SourceURI":{ + "shape":"NonBlankString", + "documentation":"

The URI of the data source.

" + }, + "ScheduleConfig":{ + "shape":"ScheduleConfiguration", + "documentation":"

The name of the data and how often it should be pulled from the source.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

One or more tags.

" + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + } + } + }, + "CreateDataIntegrationResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the DataIntegration.

" + }, + "Id":{ + "shape":"UUID", + "documentation":"

A unique identifier.

" + }, + "Name":{ + "shape":"Name", + "documentation":"

The name of the DataIntegration.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the DataIntegration.

" + }, + "KmsKey":{ + "shape":"NonBlankString", + "documentation":"

The KMS key for the DataIntegration.

" + }, + "SourceURI":{ + "shape":"NonBlankString", + "documentation":"

The URI of the data source.

" + }, + "ScheduleConfiguration":{ + "shape":"ScheduleConfiguration", + "documentation":"

The name of the data and how often it should be pulled from the source.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

One or more tags.

" + }, + "ClientToken":{ + "shape":"IdempotencyToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

" + } + } + }, "CreateEventIntegrationRequest":{ "type":"structure", "required":[ @@ -234,6 +412,71 @@ } } }, + "DataIntegrationAssociationSummary":{ + "type":"structure", + "members":{ + "DataIntegrationAssociationArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the DataIntegration association.

" + }, + "DataIntegrationArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the DataIntegration.

" + }, + "ClientId":{ + "shape":"ClientId", + "documentation":"

The identifier for the client that is associated with the DataIntegration association.

" + } + }, + "documentation":"

Summary information about the DataIntegration association.

" + }, + "DataIntegrationAssociationsList":{ + "type":"list", + "member":{"shape":"DataIntegrationAssociationSummary"}, + "max":50, + "min":1 + }, + "DataIntegrationSummary":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the DataIntegration.

" + }, + "Name":{ + "shape":"Name", + "documentation":"

The name of the DataIntegration.

" + }, + "SourceURI":{ + "shape":"NonBlankString", + "documentation":"

The URI of the data source.

" + } + }, + "documentation":"

Summary information about the DataIntegration.

" + }, + "DataIntegrationsList":{ + "type":"list", + "member":{"shape":"DataIntegrationSummary"}, + "max":50, + "min":1 + }, + "DeleteDataIntegrationRequest":{ + "type":"structure", + "required":["DataIntegrationIdentifier"], + "members":{ + "DataIntegrationIdentifier":{ + "shape":"Identifier", + "documentation":"

A unique identifier for the DataIntegration.

", + "location":"uri", + "locationName":"Identifier" + } + } + }, + "DeleteDataIntegrationResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteEventIntegrationRequest":{ "type":"structure", "required":["Name"], @@ -361,6 +604,55 @@ "max":50, "min":1 }, + "GetDataIntegrationRequest":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"Identifier", + "documentation":"

A unique identifier.

", + "location":"uri", + "locationName":"Identifier" + } + } + }, + "GetDataIntegrationResponse":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the DataIntegration.

" + }, + "Id":{ + "shape":"UUID", + "documentation":"

A unique identifier.

" + }, + "Name":{ + "shape":"Name", + "documentation":"

The name of the DataIntegration.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the DataIntegration.

" + }, + "KmsKey":{ + "shape":"NonBlankString", + "documentation":"

The KMS key for the DataIntegration.

" + }, + "SourceURI":{ + "shape":"NonBlankString", + "documentation":"

The URI of the data source.

" + }, + "ScheduleConfiguration":{ + "shape":"ScheduleConfiguration", + "documentation":"

The name of the data and how often it should be pulled from the source.

" + }, + "Tags":{ + "shape":"TagMap", + "documentation":"

One or more tags.

" + } + } + }, "GetEventIntegrationRequest":{ "type":"structure", "required":["Name"], @@ -408,6 +700,12 @@ "min":1, "pattern":".*" }, + "Identifier":{ + "type":"string", + "max":255, + "min":1, + "pattern":".*\\S.*" + }, "InternalServiceError":{ "type":"structure", "members":{ @@ -423,10 +721,77 @@ "members":{ "Message":{"shape":"Message"} }, - "documentation":"

The request is not valid.

", + "documentation":"

The request is not valid.

", "error":{"httpStatusCode":400}, "exception":true }, + "ListDataIntegrationAssociationsRequest":{ + "type":"structure", + "required":["DataIntegrationIdentifier"], + "members":{ + "DataIntegrationIdentifier":{ + "shape":"Identifier", + "documentation":"

A unique identifier for the DataIntegration.

", + "location":"uri", + "locationName":"Identifier" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListDataIntegrationAssociationsResponse":{ + "type":"structure", + "members":{ + "DataIntegrationAssociations":{ + "shape":"DataIntegrationAssociationsList", + "documentation":"

The Amazon Resource Name (ARN) and unique ID of the DataIntegration association.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListDataIntegrationsRequest":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"maxResults" + } + } + }, + "ListDataIntegrationsResponse":{ + "type":"structure", + "members":{ + "DataIntegrations":{ + "shape":"DataIntegrationsList", + "documentation":"

The DataIntegrations associated with this account.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, "ListEventIntegrationAssociationsRequest":{ "type":"structure", "required":["EventIntegrationName"], @@ -539,6 +904,12 @@ "min":1, "pattern":".*\\S.*" }, + "Object":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9\\/\\._\\-]+$" + }, "ResourceNotFoundException":{ "type":"structure", "members":{ @@ -557,6 +928,30 @@ "error":{"httpStatusCode":429}, "exception":true }, + "Schedule":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9\\/\\._\\-]+$" + }, + "ScheduleConfiguration":{ + "type":"structure", + "members":{ + "FirstExecutionFrom":{ + "shape":"NonBlankString", + "documentation":"

The start date for objects to import in the first flow run.

" + }, + "Object":{ + "shape":"Object", + "documentation":"

The name of the object to pull from the data source.

" + }, + "ScheduleExpression":{ + "shape":"Schedule", + "documentation":"

How often the data should be pulled from data source.

" + } + }, + "documentation":"

The name of the data and how often it should be pulled from the source.

" + }, "Source":{ "type":"string", "max":256, @@ -649,6 +1044,31 @@ "members":{ } }, + "UpdateDataIntegrationRequest":{ + "type":"structure", + "required":["Identifier"], + "members":{ + "Identifier":{ + "shape":"Identifier", + "documentation":"

A unique identifier for the DataIntegration.

", + "location":"uri", + "locationName":"Identifier" + }, + "Name":{ + "shape":"Name", + "documentation":"

The name of the DataIntegration.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

A description of the DataIntegration.

" + } + } + }, + "UpdateDataIntegrationResponse":{ + "type":"structure", + "members":{ + } + }, "UpdateEventIntegrationRequest":{ "type":"structure", "required":["Name"], @@ -671,5 +1091,5 @@ } } }, - "documentation":"

The Amazon AppIntegrations service enables you to configure and reuse connections to external applications.

For information about how you can use external applications with Amazon Connect, see Set up pre-built integrations in the Amazon Connect Administrator Guide.

" + "documentation":"

The Amazon AppIntegrations service enables you to configure and reuse connections to external applications.

For information about how you can use external applications with Amazon Connect, see Set up pre-built integrations and Deliver information to agents using Amazon Connect Wisdom in the Amazon Connect Administrator Guide.

" } diff --git a/botocore/data/apprunner/2020-05-15/service-2.json b/botocore/data/apprunner/2020-05-15/service-2.json index eb4ac5a2..7b36b8ac 100644 --- a/botocore/data/apprunner/2020-05-15/service-2.json +++ b/botocore/data/apprunner/2020-05-15/service-2.json @@ -26,7 +26,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"InvalidStateException"} ], - "documentation":"

Associate your own domain name with the AWS App Runner subdomain URL of your App Runner service.

After you call AssociateCustomDomain and receive a successful response, use the information in the CustomDomain record that's returned to add CNAME records to your Domain Name System (DNS). For each mapped domain name, add a mapping to the target App Runner subdomain and one or more certificate validation records. App Runner then performs DNS validation to verify that you own or control the domain name that you associated. App Runner tracks domain validity in a certificate stored in AWS Certificate Manager (ACM).

" + "documentation":"

Associate your own domain name with the App Runner subdomain URL of your App Runner service.

After you call AssociateCustomDomain and receive a successful response, use the information in the CustomDomain record that's returned to add CNAME records to your Domain Name System (DNS). For each mapped domain name, add a mapping to the target App Runner subdomain and one or more certificate validation records. App Runner then performs DNS validation to verify that you own or control the domain name that you associated. App Runner tracks domain validity in a certificate stored in AWS Certificate Manager (ACM).

" }, "CreateAutoScalingConfiguration":{ "name":"CreateAutoScalingConfiguration", @@ -41,7 +41,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Create an AWS App Runner automatic scaling configuration resource. App Runner requires this resource when you create App Runner services that require non-default auto scaling settings. You can share an auto scaling configuration across multiple services.

Create multiple revisions of a configuration by using the same AutoScalingConfigurationName and different AutoScalingConfigurationRevision values. When you create a service, you can set it to use the latest active revision of an auto scaling configuration or a specific revision.

Configure a higher MinSize to increase the spread of your App Runner service over more Availability Zones in the AWS Region. The tradeoff is a higher minimal cost.

Configure a lower MaxSize to control your cost. The tradeoff is lower responsiveness during peak demand.

" + "documentation":"

Create an App Runner automatic scaling configuration resource. App Runner requires this resource when you create App Runner services that require non-default auto scaling settings. You can share an auto scaling configuration across multiple services.

Create multiple revisions of a configuration by using the same AutoScalingConfigurationName and different AutoScalingConfigurationRevision values. When you create a service, you can set it to use the latest active revision of an auto scaling configuration or a specific revision.

Configure a higher MinSize to increase the spread of your App Runner service over more Availability Zones in the Amazon Web Services Region. The tradeoff is a higher minimal cost.

Configure a lower MaxSize to control your cost. The tradeoff is lower responsiveness during peak demand.

" }, "CreateConnection":{ "name":"CreateConnection", @@ -56,7 +56,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Create an AWS App Runner connection resource. App Runner requires a connection resource when you create App Runner services that access private repositories from certain third-party providers. You can share a connection across multiple services.

A connection resource is needed to access GitHub repositories. GitHub requires a user interface approval process through the App Runner console before you can use the connection.

" + "documentation":"

Create an App Runner connection resource. App Runner requires a connection resource when you create App Runner services that access private repositories from certain third-party providers. You can share a connection across multiple services.

A connection resource is needed to access GitHub repositories. GitHub requires a user interface approval process through the App Runner console before you can use the connection.

" }, "CreateService":{ "name":"CreateService", @@ -71,7 +71,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"ServiceQuotaExceededException"} ], - "documentation":"

Create an AWS App Runner service. After the service is created, the action also automatically starts a deployment.

This is an asynchronous operation. On a successful call, you can use the returned OperationId and the ListOperations call to track the operation's progress.

" + "documentation":"

Create an App Runner service. After the service is created, the action also automatically starts a deployment.

This is an asynchronous operation. On a successful call, you can use the returned OperationId and the ListOperations call to track the operation's progress.

" }, "DeleteAutoScalingConfiguration":{ "name":"DeleteAutoScalingConfiguration", @@ -86,7 +86,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Delete an AWS App Runner automatic scaling configuration resource. You can delete a specific revision or the latest active revision. You can't delete a configuration that's used by one or more App Runner services.

" + "documentation":"

Delete an App Runner automatic scaling configuration resource. You can delete a specific revision or the latest active revision. You can't delete a configuration that's used by one or more App Runner services.

" }, "DeleteConnection":{ "name":"DeleteConnection", @@ -101,7 +101,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceErrorException"} ], - "documentation":"

Delete an AWS App Runner connection. You must first ensure that there are no running App Runner services that use this connection. If there are any, the DeleteConnection action fails.

" + "documentation":"

Delete an App Runner connection. You must first ensure that there are no running App Runner services that use this connection. If there are any, the DeleteConnection action fails.

" }, "DeleteService":{ "name":"DeleteService", @@ -117,7 +117,7 @@ {"shape":"InvalidStateException"}, {"shape":"InternalServiceErrorException"} ], - "documentation":"

Delete an AWS App Runner service.

This is an asynchronous operation. On a successful call, you can use the returned OperationId and the ListOperations call to track the operation's progress.

" + "documentation":"

Delete an App Runner service.

This is an asynchronous operation. On a successful call, you can use the returned OperationId and the ListOperations call to track the operation's progress.

" }, "DescribeAutoScalingConfiguration":{ "name":"DescribeAutoScalingConfiguration", @@ -132,7 +132,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Return a full description of an AWS App Runner automatic scaling configuration resource.

" + "documentation":"

Return a full description of an App Runner automatic scaling configuration resource.

" }, "DescribeCustomDomains":{ "name":"DescribeCustomDomains", @@ -147,7 +147,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Return a description of custom domain names that are associated with an AWS App Runner service.

" + "documentation":"

Return a description of custom domain names that are associated with an App Runner service.

" }, "DescribeService":{ "name":"DescribeService", @@ -162,7 +162,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceErrorException"} ], - "documentation":"

Return a full description of an AWS App Runner service.

" + "documentation":"

Return a full description of an App Runner service.

" }, "DisassociateCustomDomain":{ "name":"DisassociateCustomDomain", @@ -178,7 +178,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidStateException"} ], - "documentation":"

Disassociate a custom domain name from an AWS App Runner service.

Certificates tracking domain validity are associated with a custom domain and are stored in AWS Certificate Manager (ACM). These certificates aren't deleted as part of this action. App Runner delays certificate deletion for 30 days after a domain is disassociated from your service.

" + "documentation":"

Disassociate a custom domain name from an App Runner service.

Certificates tracking domain validity are associated with a custom domain and are stored in AWS Certificate Manager (ACM). These certificates aren't deleted as part of this action. App Runner delays certificate deletion for 30 days after a domain is disassociated from your service.

" }, "ListAutoScalingConfigurations":{ "name":"ListAutoScalingConfigurations", @@ -192,7 +192,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceErrorException"} ], - "documentation":"

Returns a list of AWS App Runner automatic scaling configurations in your AWS account. You can query the revisions for a specific configuration name or the revisions for all configurations in your account. You can optionally query only the latest revision of each requested name.

" + "documentation":"

Returns a list of App Runner automatic scaling configurations in your Amazon Web Services account. You can query the revisions for a specific configuration name or the revisions for all configurations in your account. You can optionally query only the latest revision of each requested name.

" }, "ListConnections":{ "name":"ListConnections", @@ -206,7 +206,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceErrorException"} ], - "documentation":"

Returns a list of AWS App Runner connections that are associated with your AWS account.

" + "documentation":"

Returns a list of App Runner connections that are associated with your Amazon Web Services account.

" }, "ListOperations":{ "name":"ListOperations", @@ -221,7 +221,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Return a list of operations that occurred on an AWS App Runner service.

The resulting list of OperationSummary objects is sorted in reverse chronological order. The first object on the list represents the last started operation.

" + "documentation":"

Return a list of operations that occurred on an App Runner service.

The resulting list of OperationSummary objects is sorted in reverse chronological order. The first object on the list represents the last started operation.

" }, "ListServices":{ "name":"ListServices", @@ -235,7 +235,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServiceErrorException"} ], - "documentation":"

Returns a list of running AWS App Runner services in your AWS account.

" + "documentation":"

Returns a list of running App Runner services in your Amazon Web Services account.

" }, "ListTagsForResource":{ "name":"ListTagsForResource", @@ -251,7 +251,7 @@ {"shape":"InvalidRequestException"}, {"shape":"InvalidStateException"} ], - "documentation":"

List tags that are associated with for an AWS App Runner resource. The response contains a list of tag key-value pairs.

" + "documentation":"

List tags that are associated with an App Runner resource. The response contains a list of tag key-value pairs.

" }, "PauseService":{ "name":"PauseService", @@ -267,7 +267,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"InvalidStateException"} ], - "documentation":"

Pause an active AWS App Runner service. App Runner reduces compute capacity for the service to zero and loses state (for example, ephemeral storage is removed).

This is an asynchronous operation. On a successful call, you can use the returned OperationId and the ListOperations call to track the operation's progress.

" + "documentation":"

Pause an active App Runner service. App Runner reduces compute capacity for the service to zero and loses state (for example, ephemeral storage is removed).

This is an asynchronous operation. On a successful call, you can use the returned OperationId and the ListOperations call to track the operation's progress.

" }, "ResumeService":{ "name":"ResumeService", @@ -283,7 +283,7 @@ {"shape":"InternalServiceErrorException"}, {"shape":"InvalidStateException"} ], - "documentation":"

Resume an active AWS App Runner service. App Runner provisions compute capacity for the service.

This is an asynchronous operation. On a successful call, you can use the returned OperationId and the ListOperations call to track the operation's progress.

" + "documentation":"

Resume an active App Runner service. App Runner provisions compute capacity for the service.

This is an asynchronous operation. On a successful call, you can use the returned OperationId and the ListOperations call to track the operation's progress.

" }, "StartDeployment":{ "name":"StartDeployment", @@ -298,7 +298,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InternalServiceErrorException"} ], - "documentation":"

Initiate a manual deployment of the latest commit in a source code repository or the latest image in a source image repository to an AWS App Runner service.

For a source code repository, App Runner retrieves the commit and builds a Docker image. For a source image repository, App Runner retrieves the latest Docker image. In both cases, App Runner then deploys the new image to your service and starts a new container instance.

This is an asynchronous operation. On a successful call, you can use the returned OperationId and the ListOperations call to track the operation's progress.

" + "documentation":"

Initiate a manual deployment of the latest commit in a source code repository or the latest image in a source image repository to an App Runner service.

For a source code repository, App Runner retrieves the commit and builds a Docker image. For a source image repository, App Runner retrieves the latest Docker image. In both cases, App Runner then deploys the new image to your service and starts a new container instance.

This is an asynchronous operation. On a successful call, you can use the returned OperationId and the ListOperations call to track the operation's progress.

" }, "TagResource":{ "name":"TagResource", @@ -346,7 +346,7 @@ {"shape":"InvalidStateException"}, {"shape":"InternalServiceErrorException"} ], - "documentation":"

Update an AWS App Runner service. You can update the source configuration and instance configuration of the service. You can also update the ARN of the auto scaling configuration resource that's associated with the service. However, you can't change the name or the encryption configuration of the service. These can be set only when you create the service.

To update the tags applied to your service, use the separate actions TagResource and UntagResource.

This is an asynchronous operation. On a successful call, you can use the returned OperationId and the ListOperations call to track the operation's progress.

" + "documentation":"

Update an App Runner service. You can update the source configuration and instance configuration of the service. You can also update the ARN of the auto scaling configuration resource that's associated with the service. However, you can't change the name or the encryption configuration of the service. These can be set only when you create the service.

To update the tags applied to your service, use the separate actions TagResource and UntagResource.

This is an asynchronous operation. On a successful call, you can use the returned OperationId and the ListOperations call to track the operation's progress.

" } }, "shapes":{ @@ -472,7 +472,7 @@ "documentation":"

The time when the auto scaling configuration was deleted. It's in Unix time stamp format.

" } }, - "documentation":"

Describes an AWS App Runner automatic scaling configuration resource. Multiple revisions of a configuration have the same AutoScalingConfigurationName and different AutoScalingConfigurationRevision values.

A higher MinSize increases the spread of your App Runner service over more Availability Zones in the AWS Region. The tradeoff is a higher minimal cost.

A lower MaxSize controls your cost. The tradeoff is lower responsiveness during peak demand.

" + "documentation":"

Describes an App Runner automatic scaling configuration resource. Multiple revisions of a configuration have the same AutoScalingConfigurationName and different AutoScalingConfigurationRevision values.

A higher MinSize increases the spread of your App Runner service over more Availability Zones in the Amazon Web Services Region. The tradeoff is a higher minimal cost.

A lower MaxSize controls your cost. The tradeoff is lower responsiveness during peak demand.

" }, "AutoScalingConfigurationName":{ "type":"string", @@ -503,7 +503,7 @@ "documentation":"

The revision of this auto scaling configuration. It's unique among all the active configurations (\"Status\": \"ACTIVE\") with the same AutoScalingConfigurationName.

" } }, - "documentation":"

Provides summary information about an AWS App Runner automatic scaling configuration resource.

This type contains limited information about an auto scaling configuration. It includes only identification information, without configuration details. It's returned by the ListAutoScalingConfigurations action. Complete configuration information is returned by the CreateAutoScalingConfiguration, DescribeAutoScalingConfiguration, and DeleteAutoScalingConfiguration actions using the AutoScalingConfiguration type.

" + "documentation":"

Provides summary information about an App Runner automatic scaling configuration resource.

This type contains limited information about an auto scaling configuration. It includes only identification information, without configuration details. It's returned by the ListAutoScalingConfigurations action. Complete configuration information is returned by the CreateAutoScalingConfiguration, DescribeAutoScalingConfiguration, and DeleteAutoScalingConfiguration actions using the AutoScalingConfiguration type.

" }, "AutoScalingConfigurationSummaryList":{ "type":"list", @@ -561,7 +561,7 @@ "documentation":"

The basic configuration for building and running the App Runner service. Use it to quickly launch an App Runner service without providing a apprunner.yaml file in the source code repository (or ignoring the file if it exists).

" } }, - "documentation":"

Describes the configuration that AWS App Runner uses to build and run an App Runner service from a source code repository.

" + "documentation":"

Describes the configuration that App Runner uses to build and run an App Runner service from a source code repository.

" }, "CodeConfigurationValues":{ "type":"structure", @@ -588,7 +588,7 @@ "documentation":"

The environment variables that are available to your running App Runner service. An array of key-value pairs. Keys with a prefix of AWSAPPRUNNER are reserved for system use and aren't valid.

" } }, - "documentation":"

Describes the basic configuration needed for building and running an AWS App Runner service. This type doesn't support the full set of possible configuration options. Fur full configuration capabilities, use a apprunner.yaml file in the source code repository.

" + "documentation":"

Describes the basic configuration needed for building and running an App Runner service. This type doesn't support the full set of possible configuration options. For full configuration capabilities, use an apprunner.yaml file in the source code repository.

" }, "CodeRepository":{ "type":"structure", @@ -643,7 +643,7 @@ "documentation":"

The App Runner connection creation time, expressed as a Unix time stamp.

" } }, - "documentation":"

Describes an AWS App Runner connection resource.

" + "documentation":"

Describes an App Runner connection resource.

" }, "ConnectionName":{ "type":"string", @@ -684,7 +684,7 @@ "documentation":"

The App Runner connection creation time, expressed as a Unix time stamp.

" } }, - "documentation":"

Provides summary information about an AWS App Runner connection resource.

" + "documentation":"

Provides summary information about an App Runner connection resource.

" }, "ConnectionSummaryList":{ "type":"list", @@ -702,7 +702,7 @@ "members":{ "AutoScalingConfigurationName":{ "shape":"AutoScalingConfigurationName", - "documentation":"

A name for the auto scaling configuration. When you use it for the first time in an AWS Region, App Runner creates revision number 1 of this name. When you use the same name in subsequent calls, App Runner creates incremental revisions of the configuration.

" + "documentation":"

A name for the auto scaling configuration. When you use it for the first time in an Amazon Web Services Region, App Runner creates revision number 1 of this name. When you use the same name in subsequent calls, App Runner creates incremental revisions of the configuration.

" }, "MaxConcurrency":{ "shape":"ASConfigMaxConcurrency", @@ -741,7 +741,7 @@ "members":{ "ConnectionName":{ "shape":"ConnectionName", - "documentation":"

A name for the new connection. It must be unique across all App Runner connections for the AWS account in the AWS Region.

" + "documentation":"

A name for the new connection. It must be unique across all App Runner connections for the Amazon Web Services account in the Amazon Web Services Region.

" }, "ProviderType":{ "shape":"ProviderType", @@ -772,7 +772,7 @@ "members":{ "ServiceName":{ "shape":"ServiceName", - "documentation":"

A name for the new service. It must be unique across all the running App Runner services in your AWS account in the AWS Region.

" + "documentation":"

A name for the new service. It must be unique across all the running App Runner services in your Amazon Web Services account in the Amazon Web Services Region.

" }, "SourceConfiguration":{ "shape":"SourceConfiguration", @@ -788,11 +788,11 @@ }, "EncryptionConfiguration":{ "shape":"EncryptionConfiguration", - "documentation":"

An optional custom encryption key that App Runner uses to encrypt the copy of your source repository that it maintains and your service logs. By default, App Runner uses an AWS managed CMK.

" + "documentation":"

An optional custom encryption key that App Runner uses to encrypt the copy of your source repository that it maintains and your service logs. By default, App Runner uses an Amazon Web Services managed CMK.

" }, "HealthCheckConfiguration":{ "shape":"HealthCheckConfiguration", - "documentation":"

The settings for the health check that AWS App Runner performs to monitor the health of your service.

" + "documentation":"

The settings for the health check that App Runner performs to monitor the health of your service.

" }, "AutoScalingConfigurationArn":{ "shape":"AppRunnerResourceArn", @@ -842,7 +842,7 @@ "documentation":"

The current state of the domain name association.

" } }, - "documentation":"

Describes a custom domain that's associated with an AWS App Runner service.

" + "documentation":"

Describes a custom domain that's associated with an App Runner service.

" }, "CustomDomainAssociationStatus":{ "type":"string", @@ -1068,7 +1068,7 @@ "documentation":"

The ARN of the KMS key that's used for encryption.

" } }, - "documentation":"

Describes a custom encryption key that AWS App Runner uses to encrypt copies of the source repository and service logs.

" + "documentation":"

Describes a custom encryption key that App Runner uses to encrypt copies of the source repository and service logs.

" }, "ErrorMessage":{ "type":"string", @@ -1082,7 +1082,7 @@ "documentation":"

The IP protocol that App Runner uses to perform health checks for your service.

If you set Protocol to HTTP, App Runner sends health check requests to the HTTP path specified by Path.

Default: TCP

" }, "Path":{ - "shape":"String", + "shape":"HealthCheckPath", "documentation":"

The URL that health check requests are sent to.

Path is only applicable when you set Protocol to HTTP.

Default: \"/\"

" }, "Interval":{ @@ -1095,14 +1095,14 @@ }, "HealthyThreshold":{ "shape":"HealthCheckHealthyThreshold", - "documentation":"

The number of consecutive checks that must succeed before App Runner decides that the service is healthy.

Default: 3

" + "documentation":"

The number of consecutive checks that must succeed before App Runner decides that the service is healthy.

Default: 1

" }, "UnhealthyThreshold":{ "shape":"HealthCheckUnhealthyThreshold", - "documentation":"

The number of consecutive checks that must fail before App Runner decides that the service is unhealthy.

Default: 3

" + "documentation":"

The number of consecutive checks that must fail before App Runner decides that the service is unhealthy.

Default: 5

" } }, - "documentation":"

Describes the settings for the health check that AWS App Runner performs to monitor the health of a service.

" + "documentation":"

Describes the settings for the health check that App Runner performs to monitor the health of a service.

" }, "HealthCheckHealthyThreshold":{ "type":"integer", @@ -1114,6 +1114,10 @@ "max":20, "min":1 }, + "HealthCheckPath":{ + "type":"string", + "min":1 + }, "HealthCheckProtocol":{ "type":"string", "enum":[ @@ -1147,13 +1151,13 @@ "documentation":"

The port that your application listens to in the container.

Default: 8080

" } }, - "documentation":"

Describes the configuration that AWS App Runner uses to run an App Runner service using an image pulled from a source image repository.

" + "documentation":"

Describes the configuration that App Runner uses to run an App Runner service using an image pulled from a source image repository.

" }, "ImageIdentifier":{ "type":"string", "max":1024, "min":1, - "pattern":"([0-9]{12}.dkr.ecr.[a-z\\-]+-[0-9]{1}.amazonaws.com\\/.*)|(^public\\.ecr\\.aws\\/.+\\/.+)" + "pattern":"([0-9]{12}.dkr.ecr.[a-z\\-]+-[0-9]{1}.amazonaws.com\\/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*\\/)*[a-z0-9]+(?:[._-][a-z0-9]+)*)(:([\\w\\d+\\-=._:\\/@])+|@([\\w\\d\\:]+))?)|(^public\\.ecr\\.aws\\/.+\\/((?:[a-z0-9]+(?:[._-][a-z0-9]+)*\\/)*[a-z0-9]+(?:[._-][a-z0-9]+)*)(:([\\w\\d+\\-=._:\\/@])+|@([\\w\\d\\:]+))?)" }, "ImageRepository":{ "type":"structure", @@ -1197,10 +1201,10 @@ }, "InstanceRoleArn":{ "shape":"RoleArn", - "documentation":"

The Amazon Resource Name (ARN) of an IAM role that provides permissions to your App Runner service. These are permissions that your code needs when it calls any AWS APIs.

" + "documentation":"

The Amazon Resource Name (ARN) of an IAM role that provides permissions to your App Runner service. These are permissions that your code needs when it calls any Amazon Web Services APIs.

" } }, - "documentation":"

Describes the runtime configuration of an AWS App Runner service instance (scaling unit).

" + "documentation":"

Describes the runtime configuration of an App Runner service instance (scaling unit).

" }, "Integer":{"type":"integer"}, "InternalServiceErrorException":{ @@ -1444,7 +1448,7 @@ "documentation":"

The time when the operation was last updated. It's in the Unix time stamp format.

" } }, - "documentation":"

Provides summary information for an operation that occurred on an AWS App Runner service.

" + "documentation":"

Provides summary information for an operation that occurred on an App Runner service.

" }, "OperationSummaryList":{ "type":"list", @@ -1493,7 +1497,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

A resource doesn't exist for the specified Amazon Resource Name (ARN) in your AWS account.

", + "documentation":"

A resource doesn't exist for the specified Amazon Resource Name (ARN) in your Amazon Web Services account.

", "exception":true }, "ResumeServiceRequest":{ @@ -1522,9 +1526,9 @@ }, "RoleArn":{ "type":"string", - "max":102, + "max":1024, "min":29, - "pattern":"arn:(aws|aws-us-gov|aws-cn|aws-iso|aws-iso-b):iam::[0-9]{12}:role/[\\w+=,.@-]{1,64}" + "pattern":"arn:(aws|aws-us-gov|aws-cn|aws-iso|aws-iso-b):iam::[0-9]{12}:(role|role\\/service-role)\\/[\\w+=,.@\\-/]{1,1000}" }, "Runtime":{ "type":"string", @@ -1567,7 +1571,7 @@ }, "ServiceId":{ "shape":"ServiceId", - "documentation":"

An ID that App Runner generated for this service. It's unique within the AWS Region.

" + "documentation":"

An ID that App Runner generated for this service. It's unique within the Amazon Web Services Region.

" }, "ServiceArn":{ "shape":"AppRunnerResourceArn", @@ -1603,7 +1607,7 @@ }, "EncryptionConfiguration":{ "shape":"EncryptionConfiguration", - "documentation":"

The encryption key that App Runner uses to encrypt the service logs and the copy of the source repository that App Runner maintains for the service. It can be either a customer-provided encryption key or an AWS managed CMK.

" + "documentation":"

The encryption key that App Runner uses to encrypt the service logs and the copy of the source repository that App Runner maintains for the service. It can be either a customer-provided encryption key or an Amazon Web Services managed CMK.

" }, "HealthCheckConfiguration":{ "shape":"HealthCheckConfiguration", @@ -1614,7 +1618,7 @@ "documentation":"

Summary information for the App Runner automatic scaling configuration resource that's associated with this service.

" } }, - "documentation":"

Describes an AWS App Runner service. It can describe a service in any state, including deleted services.

This type contains the full information about a service, including configuration details. It's returned by the CreateService, DescribeService, and DeleteService actions. A subset of this information is returned by the ListServices action using the ServiceSummary type.

" + "documentation":"

Describes an App Runner service. It can describe a service in any state, including deleted services.

This type contains the full information about a service, including configuration details. It's returned by the CreateService, DescribeService, and DeleteService actions. A subset of this information is returned by the ListServices action using the ServiceSummary type.

" }, "ServiceId":{ "type":"string", @@ -1638,7 +1642,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

App Runner can't create this resource. You've reached your account quota for this resource type.

For App Runner per-resource quotas, see AWS App Runner endpoints and quotas in the AWS General Reference.

", + "documentation":"

App Runner can't create this resource. You've reached your account quota for this resource type.

For App Runner per-resource quotas, see App Runner endpoints and quotas in the Amazon Web Services General Reference.

", "exception":true }, "ServiceStatus":{ @@ -1661,7 +1665,7 @@ }, "ServiceId":{ "shape":"ServiceId", - "documentation":"

An ID that App Runner generated for this service. It's unique within the AWS Region.

" + "documentation":"

An ID that App Runner generated for this service. It's unique within the Amazon Web Services Region.

" }, "ServiceArn":{ "shape":"AppRunnerResourceArn", @@ -1684,7 +1688,7 @@ "documentation":"

The current state of the App Runner service. These particular values mean the following.

  • CREATE_FAILED – The service failed to create. Read the failure events and logs, change any parameters that need to be fixed, and retry the call to create the service.

    The failed service isn't usable, and still counts towards your service quota. When you're done analyzing the failure, delete the service.

  • DELETE_FAILED – The service failed to delete and can't be successfully recovered. Retry the service deletion call to ensure that all related resources are removed.

" } }, - "documentation":"

Provides summary information for an AWS App Runner service.

This type contains limited information about a service. It doesn't include configuration details. It's returned by the ListServices action. Complete service information is returned by the CreateService, DescribeService, and DeleteService actions using the Service type.

" + "documentation":"

Provides summary information for an App Runner service.

This type contains limited information about a service. It doesn't include configuration details. It's returned by the ListServices action. Complete service information is returned by the CreateService, DescribeService, and DeleteService actions using the Service type.

" }, "ServiceSummaryList":{ "type":"list", @@ -1706,7 +1710,7 @@ "documentation":"

A source code version.

For a git-based repository, a branch name maps to a specific version. App Runner uses the most recent commit to the branch.

" } }, - "documentation":"

Identifies a version of code that AWS App Runner refers to within a source code repository.

" + "documentation":"

Identifies a version of code that App Runner refers to within a source code repository.

" }, "SourceCodeVersionType":{ "type":"string", @@ -1725,14 +1729,14 @@ }, "AutoDeploymentsEnabled":{ "shape":"NullableBoolean", - "documentation":"

If true, continuous integration from the source repository is enabled for the App Runner service. Each repository change (source code commit or new image version) starts a deployment.

Default: true

" + "documentation":"

If true, continuous integration from the source repository is enabled for the App Runner service. Each repository change (including any source code commit or new image version) starts a deployment.

Default: App Runner sets to false for a source image that uses an ECR Public repository or an ECR repository that's in an Amazon Web Services account other than the one that the service is in. App Runner sets to true in all other cases (which currently include a source code repository or a source image using a same-account ECR repository).

" }, "AuthenticationConfiguration":{ "shape":"AuthenticationConfiguration", "documentation":"

Describes the resources that are needed to authenticate access to some source repositories.

" } }, - "documentation":"

Describes the source deployed to an AWS App Runner service. It can be a code or an image repository.

" + "documentation":"

Describes the source deployed to an App Runner service. It can be a code or an image repository.

" }, "StartCommand":{ "type":"string", @@ -1776,7 +1780,7 @@ "documentation":"

The value of the tag.

" } }, - "documentation":"

Describes a tag that is applied to an AWS App Runner resource. A tag is a metadata item consisting of a key-value pair.

" + "documentation":"

Describes a tag that is applied to an App Runner resource. A tag is a metadata item consisting of a key-value pair.

" }, "TagKey":{ "type":"string", @@ -1871,7 +1875,7 @@ }, "HealthCheckConfiguration":{ "shape":"HealthCheckConfiguration", - "documentation":"

The settings for the health check that AWS App Runner performs to monitor the health of your service.

" + "documentation":"

The settings for the health check that App Runner performs to monitor the health of your service.

" } } }, @@ -1893,5 +1897,5 @@ } } }, - "documentation":"AWS App Runner

AWS App Runner is an application service that provides a fast, simple, and cost-effective way to go directly from an existing container image or source code to a running service in the AWS cloud in seconds. You don't need to learn new technologies, decide which compute service to use, or understand how to provision and configure AWS resources.

App Runner connects directly to your container registry or source code repository. It provides an automatic delivery pipeline with fully managed operations, high performance, scalability, and security.

For more information about App Runner, see the AWS App Runner Developer Guide. For release information, see the AWS App Runner Release Notes.

To install the Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools that you can use to access the API, see Tools for Amazon Web Services.

Endpoints

For a list of Region-specific endpoints that App Runner supports, see AWS App Runner endpoints and quotas in the AWS General Reference.

" + "documentation":"App Runner

App Runner is an application service that provides a fast, simple, and cost-effective way to go directly from an existing container image or source code to a running service in the Amazon Web Services Cloud in seconds. You don't need to learn new technologies, decide which compute service to use, or understand how to provision and configure Amazon Web Services resources.

App Runner connects directly to your container registry or source code repository. It provides an automatic delivery pipeline with fully managed operations, high performance, scalability, and security.

For more information about App Runner, see the App Runner Developer Guide. For release information, see the App Runner Release Notes.

To install the Software Development Kits (SDKs), Integrated Development Environment (IDE) Toolkits, and command line tools that you can use to access the API, see Tools for Amazon Web Services.

Endpoints

For a list of Region-specific endpoints that App Runner supports, see App Runner endpoints and quotas in the Amazon Web Services General Reference.

" } diff --git a/botocore/data/appsync/2017-07-25/service-2.json b/botocore/data/appsync/2017-07-25/service-2.json index f0b58d78..b7c12217 100644 --- a/botocore/data/appsync/2017-07-25/service-2.json +++ b/botocore/data/appsync/2017-07-25/service-2.json @@ -716,7 +716,7 @@ "members":{ "authenticationType":{ "shape":"AuthenticationType", - "documentation":"

The authentication type: API key, Identity and Access Management, OIDC, or Amazon Cognito user pools.

" + "documentation":"

The authentication type: API key, Identity and Access Management, OIDC, Amazon Cognito user pools, or Amazon Web Services Lambda.

" }, "openIDConnectConfig":{ "shape":"OpenIDConnectConfig", @@ -728,7 +728,7 @@ }, "lambdaAuthorizerConfig":{ "shape":"LambdaAuthorizerConfig", - "documentation":"

Configuration for AWS Lambda function authorization.

" + "documentation":"

Configuration for Amazon Web Services Lambda function authorization.

" } }, "documentation":"

Describes an additional authentication provider.

" @@ -1094,7 +1094,11 @@ }, "elasticsearchConfig":{ "shape":"ElasticsearchDataSourceConfig", - "documentation":"

Amazon Elasticsearch Service settings.

" + "documentation":"

Amazon OpenSearch Service settings.

As of September 2021, Amazon Elasticsearch service is Amazon OpenSearch Service. This configuration is deprecated. For new data sources, use CreateDataSourceRequest$openSearchServiceConfig to create an OpenSearch data source.

" + }, + "openSearchServiceConfig":{ + "shape":"OpenSearchServiceDataSourceConfig", + "documentation":"

Amazon OpenSearch Service settings.

" }, "httpConfig":{ "shape":"HttpDataSourceConfig", @@ -1183,7 +1187,7 @@ }, "authenticationType":{ "shape":"AuthenticationType", - "documentation":"

The authentication type: API key, Identity and Access Management, OIDC, or Amazon Cognito user pools.

" + "documentation":"

The authentication type: API key, Identity and Access Management, OIDC, Amazon Cognito user pools, or Amazon Web Services Lambda.

" }, "userPoolConfig":{ "shape":"UserPoolConfig", @@ -1207,7 +1211,7 @@ }, "lambdaAuthorizerConfig":{ "shape":"LambdaAuthorizerConfig", - "documentation":"

Configuration for AWS Lambda function authorization.

" + "documentation":"

Configuration for Amazon Web Services Lambda function authorization.

" } } }, @@ -1333,7 +1337,7 @@ }, "type":{ "shape":"DataSourceType", - "documentation":"

The type of the data source.

  • AMAZON_DYNAMODB: The data source is an Amazon DynamoDB table.

  • AMAZON_ELASTICSEARCH: The data source is an Amazon Elasticsearch Service domain.

  • AWS_LAMBDA: The data source is an Amazon Web Services Lambda function.

  • NONE: There is no data source. This type is used when you wish to invoke a GraphQL operation without connecting to a data source, such as performing data transformation with resolvers or triggering a subscription to be invoked from a mutation.

  • HTTP: The data source is an HTTP endpoint.

  • RELATIONAL_DATABASE: The data source is a relational database.

" + "documentation":"

The type of the data source.

  • AWS_LAMBDA: The data source is an Amazon Web Services Lambda function.

  • AMAZON_DYNAMODB: The data source is an Amazon DynamoDB table.

  • AMAZON_ELASTICSEARCH: The data source is an Amazon OpenSearch Service domain.

  • AMAZON_OPENSEARCH_SERVICE: The data source is an Amazon OpenSearch Service domain.

  • NONE: There is no data source. This type is used when you wish to invoke a GraphQL operation without connecting to a data source, such as performing data transformation with resolvers or triggering a subscription to be invoked from a mutation.

  • HTTP: The data source is an HTTP endpoint.

  • RELATIONAL_DATABASE: The data source is a relational database.

" }, "serviceRoleArn":{ "shape":"String", @@ -1349,7 +1353,11 @@ }, "elasticsearchConfig":{ "shape":"ElasticsearchDataSourceConfig", - "documentation":"

Amazon Elasticsearch Service settings.

" + "documentation":"

Amazon OpenSearch Service settings.

" + }, + "openSearchServiceConfig":{ + "shape":"OpenSearchServiceDataSourceConfig", + "documentation":"

Amazon OpenSearch Service settings.

" }, "httpConfig":{ "shape":"HttpDataSourceConfig", @@ -1370,7 +1378,8 @@ "AMAZON_ELASTICSEARCH", "NONE", "HTTP", - "RELATIONAL_DATABASE" + "RELATIONAL_DATABASE", + "AMAZON_OPENSEARCH_SERVICE" ] }, "DataSources":{ @@ -1621,7 +1630,7 @@ "documentation":"

The Amazon Web Services Region.

" } }, - "documentation":"

Describes an Elasticsearch data source configuration.

" + "documentation":"

Describes an OpenSearch data source configuration.

As of September 2021, Amazon Elasticsearch service is Amazon OpenSearch Service. This configuration is deprecated. For new data sources, use OpenSearchServiceDataSourceConfig to specify an OpenSearch data source.

" }, "ErrorMessage":{"type":"string"}, "FieldLogLevel":{ @@ -2000,7 +2009,7 @@ }, "lambdaAuthorizerConfig":{ "shape":"LambdaAuthorizerConfig", - "documentation":"

Configuration for AWS Lambda function authorization.

" + "documentation":"

Configuration for Amazon Web Services Lambda function authorization.

" } }, "documentation":"

Describes a GraphQL API.

" @@ -2043,11 +2052,11 @@ }, "authorizerUri":{ "shape":"String", - "documentation":"

The ARN of the lambda function to be called for authorization. This may be a standard Lambda ARN, a version ARN (.../v3) or alias ARN.

Note: This Lambda function must have the following resource-based policy assigned to it. When configuring Lambda authorizers in the Console, this is done for you. To do so with the AWS CLI, run the following:

aws lambda add-permission --function-name \"arn:aws:lambda:us-east-2:111122223333:function:my-function\" --statement-id \"appsync\" --principal appsync.amazonaws.com --action lambda:InvokeFunction

" + "documentation":"

The ARN of the Lambda function to be called for authorization. This may be a standard Lambda ARN, a version ARN (.../v3) or alias ARN.

Note: This Lambda function must have the following resource-based policy assigned to it. When configuring Lambda authorizers in the Console, this is done for you. To do so with the Amazon Web Services CLI, run the following:

aws lambda add-permission --function-name \"arn:aws:lambda:us-east-2:111122223333:function:my-function\" --statement-id \"appsync\" --principal appsync.amazonaws.com --action lambda:InvokeFunction

" }, "identityValidationExpression":{ "shape":"String", - "documentation":"

A regular expression for validation of tokens before the Lambda Function is called.

" + "documentation":"

A regular expression for validation of tokens before the Lambda function is called.

" } }, "documentation":"

A LambdaAuthorizerConfig holds configuration on how to authorize AppSync API access when using the AWS_LAMBDA authorizer mode. Be aware that an AppSync API may have only one Lambda authorizer configured at a time.

" @@ -2452,6 +2461,24 @@ }, "documentation":"

Describes an OpenID Connect configuration.

" }, + "OpenSearchServiceDataSourceConfig":{ + "type":"structure", + "required":[ + "endpoint", + "awsRegion" + ], + "members":{ + "endpoint":{ + "shape":"String", + "documentation":"

The endpoint.

" + }, + "awsRegion":{ + "shape":"String", + "documentation":"

The Amazon Web Services Region.

" + } + }, + "documentation":"

Describes an OpenSearch data source configuration.

" + }, "OutputType":{ "type":"string", "enum":[ @@ -2893,7 +2920,11 @@ }, "elasticsearchConfig":{ "shape":"ElasticsearchDataSourceConfig", - "documentation":"

The new Elasticsearch Service configuration.

" + "documentation":"

The new OpenSearch configuration.

As of September 2021, Amazon Elasticsearch service is Amazon OpenSearch Service. This configuration is deprecated. Instead, use UpdateDataSourceRequest$openSearchServiceConfig to update an OpenSearch data source.

" + }, + "openSearchServiceConfig":{ + "shape":"OpenSearchServiceDataSourceConfig", + "documentation":"

The new OpenSearch configuration.

" }, "httpConfig":{ "shape":"HttpDataSourceConfig", @@ -3015,7 +3046,7 @@ }, "lambdaAuthorizerConfig":{ "shape":"LambdaAuthorizerConfig", - "documentation":"

Configuration for AWS Lambda function authorization.

" + "documentation":"

Configuration for Amazon Web Services Lambda function authorization.

" } } }, diff --git a/botocore/data/cloudcontrol/2021-09-30/paginators-1.json b/botocore/data/cloudcontrol/2021-09-30/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/cloudcontrol/2021-09-30/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/cloudcontrol/2021-09-30/service-2.json b/botocore/data/cloudcontrol/2021-09-30/service-2.json new file mode 100644 index 00000000..2be6e4f7 --- /dev/null +++ b/botocore/data/cloudcontrol/2021-09-30/service-2.json @@ -0,0 +1,854 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2021-09-30", + "endpointPrefix":"cloudcontrolapi", + "jsonVersion":"1.0", + "protocol":"json", + "serviceAbbreviation":"CloudControlApi", + "serviceFullName":"AWS Cloud Control API", + "serviceId":"CloudControl", + "signatureVersion":"v4", + "signingName":"cloudcontrolapi", + "targetPrefix":"CloudApiService", + "uid":"cloudcontrol-2021-09-30" + }, + "operations":{ + "CancelResourceRequest":{ + "name":"CancelResourceRequest", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CancelResourceRequestInput"}, + "output":{"shape":"CancelResourceRequestOutput"}, + "errors":[ + {"shape":"RequestTokenNotFoundException"}, + {"shape":"ConcurrentModificationException"} + ], + "documentation":"

Cancels the specified resource operation request. For more information, see Canceling resource operation requests in the Amazon Web Services Cloud Control API User Guide.

Only resource operations requests with a status of PENDING or IN_PROGRESS can be cancelled.

", + "idempotent":true + }, + "CreateResource":{ + "name":"CreateResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateResourceInput"}, + "output":{"shape":"CreateResourceOutput"}, + "errors":[ + {"shape":"ConcurrentOperationException"}, + {"shape":"ClientTokenConflictException"}, + {"shape":"UnsupportedActionException"}, + {"shape":"TypeNotFoundException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"GeneralServiceException"}, + {"shape":"HandlerInternalFailureException"}, + {"shape":"InvalidCredentialsException"}, + {"shape":"InvalidRequestException"}, + {"shape":"NetworkFailureException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotStabilizedException"}, + {"shape":"NotUpdatableException"}, + {"shape":"ResourceConflictException"}, + {"shape":"ServiceInternalErrorException"}, + {"shape":"ServiceLimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"PrivateTypeException"}, + {"shape":"HandlerFailureException"} + ], + "documentation":"

Creates the specified resource. For more information, see Creating a resource in the Amazon Web Services Cloud Control API User Guide.

After you have initiated a resource creation request, you can monitor the progress of your request by calling GetResourceRequestStatus using the RequestToken of the ProgressEvent type returned by CreateResource.

" + }, + "DeleteResource":{ + "name":"DeleteResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteResourceInput"}, + "output":{"shape":"DeleteResourceOutput"}, + "errors":[ + {"shape":"ConcurrentOperationException"}, + {"shape":"ClientTokenConflictException"}, + {"shape":"UnsupportedActionException"}, + {"shape":"TypeNotFoundException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"GeneralServiceException"}, + {"shape":"HandlerInternalFailureException"}, + {"shape":"InvalidCredentialsException"}, + {"shape":"InvalidRequestException"}, + {"shape":"NetworkFailureException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotStabilizedException"}, + {"shape":"NotUpdatableException"}, + {"shape":"ResourceConflictException"}, + {"shape":"ServiceInternalErrorException"}, + {"shape":"ServiceLimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"PrivateTypeException"}, + {"shape":"HandlerFailureException"} + ], + "documentation":"

Deletes the specified resource. For details, see Deleting a resource in the Amazon Web Services Cloud Control API User Guide.

After you have initiated a resource deletion request, you can monitor the progress of your request by calling GetResourceRequestStatus using the RequestToken of the ProgressEvent returned by DeleteResource.

" + }, + "GetResource":{ + "name":"GetResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetResourceInput"}, + "output":{"shape":"GetResourceOutput"}, + "errors":[ + {"shape":"UnsupportedActionException"}, + {"shape":"TypeNotFoundException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"GeneralServiceException"}, + {"shape":"HandlerInternalFailureException"}, + {"shape":"InvalidCredentialsException"}, + {"shape":"InvalidRequestException"}, + {"shape":"NetworkFailureException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotStabilizedException"}, + {"shape":"NotUpdatableException"}, + {"shape":"ResourceConflictException"}, + {"shape":"ServiceInternalErrorException"}, + {"shape":"ServiceLimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"PrivateTypeException"}, + {"shape":"HandlerFailureException"} + ], + "documentation":"

Returns information about the current state of the specified resource. For details, see Reading a resource's current state.

You can use this action to return information about an existing resource in your account and Amazon Web Services Region, whether or not those resources were provisioned using Cloud Control API.

" + }, + "GetResourceRequestStatus":{ + "name":"GetResourceRequestStatus", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetResourceRequestStatusInput"}, + "output":{"shape":"GetResourceRequestStatusOutput"}, + "errors":[ + {"shape":"RequestTokenNotFoundException"} + ], + "documentation":"

Returns the current status of a resource operation request. For more information, see Tracking the progress of resource operation requests in the Amazon Web Services Cloud Control API User Guide.

" + }, + "ListResourceRequests":{ + "name":"ListResourceRequests", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListResourceRequestsInput"}, + "output":{"shape":"ListResourceRequestsOutput"}, + "documentation":"

Returns existing resource operation requests. This includes requests of all status types. For more information, see Listing active resource operation requests in the Amazon Web Services Cloud Control API User Guide.

Resource operation requests expire after seven days.

" + }, + "ListResources":{ + "name":"ListResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListResourcesInput"}, + "output":{"shape":"ListResourcesOutput"}, + "errors":[ + {"shape":"UnsupportedActionException"}, + {"shape":"TypeNotFoundException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"GeneralServiceException"}, + {"shape":"HandlerInternalFailureException"}, + {"shape":"InvalidCredentialsException"}, + {"shape":"InvalidRequestException"}, + {"shape":"NetworkFailureException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotStabilizedException"}, + {"shape":"NotUpdatableException"}, + {"shape":"ResourceConflictException"}, + {"shape":"ServiceInternalErrorException"}, + {"shape":"ServiceLimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"PrivateTypeException"}, + {"shape":"HandlerFailureException"} + ], + "documentation":"

Returns information about the specified resources. For more information, see Discovering resources in the Amazon Web Services Cloud Control API User Guide.

You can use this action to return information about existing resources in your account and Amazon Web Services Region, whether or not those resources were provisioned using Cloud Control API.

" + }, + "UpdateResource":{ + "name":"UpdateResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateResourceInput"}, + "output":{"shape":"UpdateResourceOutput"}, + "errors":[ + {"shape":"ConcurrentOperationException"}, + {"shape":"ClientTokenConflictException"}, + {"shape":"UnsupportedActionException"}, + {"shape":"TypeNotFoundException"}, + {"shape":"AlreadyExistsException"}, + {"shape":"GeneralServiceException"}, + {"shape":"HandlerInternalFailureException"}, + {"shape":"InvalidCredentialsException"}, + {"shape":"InvalidRequestException"}, + {"shape":"NetworkFailureException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"NotStabilizedException"}, + {"shape":"NotUpdatableException"}, + {"shape":"ResourceConflictException"}, + {"shape":"ServiceInternalErrorException"}, + {"shape":"ServiceLimitExceededException"}, + {"shape":"ThrottlingException"}, + {"shape":"PrivateTypeException"}, + {"shape":"HandlerFailureException"} + ], + "documentation":"

Updates the specified property values in the resource.

You specify your resource property updates as a list of patch operations contained in a JSON patch document that adheres to the RFC 6902 - JavaScript Object Notation (JSON) Patch standard.

For details on how Cloud Control API performs resource update operations, see Updating a resource in the Amazon Web Services Cloud Control API User Guide.

After you have initiated a resource update request, you can monitor the progress of your request by calling GetResourceRequestStatus using the RequestToken of the ProgressEvent returned by UpdateResource.

For more information about the properties of a specific resource, refer to the related topic for the resource in the Resource and property types reference in the Amazon Web Services CloudFormation Users Guide.

" + } + }, + "shapes":{ + "AlreadyExistsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource with the name requested already exists.

", + "exception":true + }, + "CancelResourceRequestInput":{ + "type":"structure", + "required":["RequestToken"], + "members":{ + "RequestToken":{ + "shape":"RequestToken", + "documentation":"

The RequestToken of the ProgressEvent object returned by the resource operation request.

" + } + } + }, + "CancelResourceRequestOutput":{ + "type":"structure", + "members":{ + "ProgressEvent":{"shape":"ProgressEvent"} + } + }, + "ClientToken":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[-A-Za-z0-9+/=]+" + }, + "ClientTokenConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The specified client token has already been used in another resource request.

It is best practice for client tokens to be unique for each resource operation request. However, client token expire after 36 hours.

", + "exception":true + }, + "ConcurrentModificationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource is currently being modified by another operation.

", + "exception":true + }, + "ConcurrentOperationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Another resource operation is currently being performed on this resource.

", + "exception":true + }, + "CreateResourceInput":{ + "type":"structure", + "required":[ + "TypeName", + "DesiredState" + ], + "members":{ + "TypeName":{ + "shape":"TypeName", + "documentation":"

The name of the resource type.

" + }, + "TypeVersionId":{ + "shape":"TypeVersionId", + "documentation":"

For private resource types, the type version to use in this resource operation. If you do not specify a resource version, CloudFormation uses the default version.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) for Cloud Control API to use when performing this resource operation. The role specified must have the permissions required for this operation. The necessary permissions for each event handler are defined in the handlers section of the resource type definition schema.

If you do not specify a role, Cloud Control API uses a temporary session created using your Amazon Web Services user credentials.

For more information, see Specifying credentials in the Amazon Web Services Cloud Control API User Guide.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier to ensure the idempotency of the resource request. As a best practice, specify this token to ensure idempotency, so that Amazon Web Services Cloud Control API can accurately distinguish between request retries and new resource requests. You might retry a resource request to ensure that it was successfully received.

A client token is valid for 36 hours once used. After that, a resource request with the same client token is treated as a new request.

If you do not specify a client token, one is generated for inclusion in the request.

For more information, see Ensuring resource operation requests are unique in the Amazon Web Services Cloud Control API User Guide.

", + "idempotencyToken":true + }, + "DesiredState":{ + "shape":"Properties", + "documentation":"

Structured data format representing the desired state of the resource, consisting of that resource's properties and their desired values.

Cloud Control API currently supports JSON as a structured data format.

Specify the desired state as one of the following:

  • A JSON blob

  • A local path containing the desired state in JSON data format

For more information, see Composing the desired state of the resource in the Amazon Web Services Cloud Control API User Guide.

For more information about the properties of a specific resource, refer to the related topic for the resource in the Resource and property types reference in the Amazon Web Services CloudFormation User Guide.

" + } + } + }, + "CreateResourceOutput":{ + "type":"structure", + "members":{ + "ProgressEvent":{ + "shape":"ProgressEvent", + "documentation":"

Represents the current status of the resource creation request.

After you have initiated a resource creation request, you can monitor the progress of your request by calling GetResourceRequestStatus using the RequestToken of the ProgressEvent returned by CreateResource.

" + } + } + }, + "DeleteResourceInput":{ + "type":"structure", + "required":[ + "TypeName", + "Identifier" + ], + "members":{ + "TypeName":{ + "shape":"TypeName", + "documentation":"

The name of the resource type.

" + }, + "TypeVersionId":{ + "shape":"TypeVersionId", + "documentation":"

For private resource types, the type version to use in this resource operation. If you do not specify a resource version, CloudFormation uses the default version.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) for Cloud Control API to use when performing this resource operation. The role specified must have the permissions required for this operation. The necessary permissions for each event handler are defined in the handlers section of the resource type definition schema.

If you do not specify a role, Cloud Control API uses a temporary session created using your Amazon Web Services user credentials.

For more information, see Specifying credentials in the Amazon Web Services Cloud Control API User Guide.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier to ensure the idempotency of the resource request. As a best practice, specify this token to ensure idempotency, so that Amazon Web Services Cloud Control API can accurately distinguish between request retries and new resource requests. You might retry a resource request to ensure that it was successfully received.

A client token is valid for 36 hours once used. After that, a resource request with the same client token is treated as a new request.

If you do not specify a client token, one is generated for inclusion in the request.

For more information, see Ensuring resource operation requests are unique in the Amazon Web Services Cloud Control API User Guide.

", + "idempotencyToken":true + }, + "Identifier":{ + "shape":"Identifier", + "documentation":"

The identifier for the resource.

You can specify the primary identifier, or any secondary identifier defined for the resource type in its resource schema. You can only specify one identifier. Primary identifiers can be specified as a string or JSON; secondary identifiers must be specified as JSON.

For compound primary identifiers (that is, one that consists of multiple resource properties strung together), to specify the primary identifier as a string, list the property values in the order they are specified in the primary identifier definition, separated by |.

For more information, see Identifying resources in the Amazon Web Services Cloud Control API User Guide.

" + } + } + }, + "DeleteResourceOutput":{ + "type":"structure", + "members":{ + "ProgressEvent":{ + "shape":"ProgressEvent", + "documentation":"

Represents the current status of the resource deletion request.

After you have initiated a resource deletion request, you can monitor the progress of your request by calling GetResourceRequestStatus using the RequestToken of the ProgressEvent returned by DeleteResource.

" + } + } + }, + "ErrorMessage":{ + "type":"string", + "max":1024, + "min":1 + }, + "GeneralServiceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource handler has returned that the downstream service generated an error that does not map to any other handler error code.

", + "exception":true + }, + "GetResourceInput":{ + "type":"structure", + "required":[ + "TypeName", + "Identifier" + ], + "members":{ + "TypeName":{ + "shape":"TypeName", + "documentation":"

The name of the resource type.

" + }, + "TypeVersionId":{ + "shape":"TypeVersionId", + "documentation":"

For private resource types, the type version to use in this resource operation. If you do not specify a resource version, CloudFormation uses the default version.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) for Cloud Control API to use when performing this resource operation. The role specified must have the permissions required for this operation. The necessary permissions for each event handler are defined in the handlers section of the resource type definition schema.

If you do not specify a role, Cloud Control API uses a temporary session created using your Amazon Web Services user credentials.

For more information, see Specifying credentials in the Amazon Web Services Cloud Control API User Guide.

" + }, + "Identifier":{ + "shape":"Identifier", + "documentation":"

The identifier for the resource.

You can specify the primary identifier, or any secondary identifier defined for the resource type in its resource schema. You can only specify one identifier. Primary identifiers can be specified as a string or JSON; secondary identifiers must be specified as JSON.

For compound primary identifiers (that is, one that consists of multiple resource properties strung together), to specify the primary identifier as a string, list the property values in the order they are specified in the primary identifier definition, separated by |.

For more information, see Identifying resources in the Amazon Web Services Cloud Control API User Guide.

" + } + } + }, + "GetResourceOutput":{ + "type":"structure", + "members":{ + "TypeName":{ + "shape":"TypeName", + "documentation":"

The name of the resource type.

" + }, + "ResourceDescription":{"shape":"ResourceDescription"} + } + }, + "GetResourceRequestStatusInput":{ + "type":"structure", + "required":["RequestToken"], + "members":{ + "RequestToken":{ + "shape":"RequestToken", + "documentation":"

A unique token used to track the progress of the resource operation request.

Request tokens are included in the ProgressEvent type returned by a resource operation request.

" + } + } + }, + "GetResourceRequestStatusOutput":{ + "type":"structure", + "members":{ + "ProgressEvent":{ + "shape":"ProgressEvent", + "documentation":"

Represents the current status of the resource operation request.

" + } + } + }, + "HandlerErrorCode":{ + "type":"string", + "enum":[ + "NotUpdatable", + "InvalidRequest", + "AccessDenied", + "InvalidCredentials", + "AlreadyExists", + "NotFound", + "ResourceConflict", + "Throttling", + "ServiceLimitExceeded", + "NotStabilized", + "GeneralServiceException", + "ServiceInternalError", + "ServiceTimeout", + "NetworkFailure", + "InternalFailure" + ] + }, + "HandlerFailureException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource handler has failed without returning a more specific error code. This can include timeouts.

", + "exception":true + }, + "HandlerInternalFailureException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource handler has returned that an unexpected error occurred within the resource handler.

", + "exception":true + }, + "HandlerNextToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":".+" + }, + "Identifier":{ + "type":"string", + "max":1024, + "min":1, + "pattern":".+" + }, + "InvalidCredentialsException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource handler has returned that the credentials provided by the user are invalid.

", + "exception":true + }, + "InvalidRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource handler has returned that invalid input from the user has generated a generic exception.

", + "exception":true + }, + "ListResourceRequestsInput":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

The default is 20.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

" + }, + "ResourceRequestStatusFilter":{ + "shape":"ResourceRequestStatusFilter", + "documentation":"

The filter criteria to apply to the requests returned.

" + } + } + }, + "ListResourceRequestsOutput":{ + "type":"structure", + "members":{ + "ResourceRequestStatusSummaries":{ + "shape":"ResourceRequestStatusSummaries", + "documentation":"

The requests that match the specified filter criteria.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the request doesn't return all of the remaining results, NextToken is set to a token. To retrieve the next set of results, call ListResources again and assign that token to the request object's NextToken parameter. If the request returns all results, NextToken is set to null.

" + } + } + }, + "ListResourcesInput":{ + "type":"structure", + "required":["TypeName"], + "members":{ + "TypeName":{ + "shape":"TypeName", + "documentation":"

The name of the resource type.

" + }, + "TypeVersionId":{ + "shape":"TypeVersionId", + "documentation":"

For private resource types, the type version to use in this resource operation. If you do not specify a resource version, CloudFormation uses the default version.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) for Cloud Control API to use when performing this resource operation. The role specified must have the permissions required for this operation. The necessary permissions for each event handler are defined in the handlers section of the resource type definition schema.

If you do not specify a role, Cloud Control API uses a temporary session created using your Amazon Web Services user credentials.

For more information, see Specifying credentials in the Amazon Web Services Cloud Control API User Guide.

" + }, + "NextToken":{ + "shape":"HandlerNextToken", + "documentation":"

If the previous paginated request didn't return all of the remaining results, the response object's NextToken parameter value is set to a token. To retrieve the next set of results, call this action again and assign that token to the request object's NextToken parameter. If there are no remaining results, the previous response object's NextToken parameter is set to null.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to be returned with a single call. If the number of available results exceeds this maximum, the response includes a NextToken value that you can assign to the NextToken request parameter to get the next set of results.

The default is 20.

" + }, + "ResourceModel":{ + "shape":"Properties", + "documentation":"

The resource model to use to select the resources to return.

" + } + } + }, + "ListResourcesOutput":{ + "type":"structure", + "members":{ + "TypeName":{ + "shape":"TypeName", + "documentation":"

The name of the resource type.

" + }, + "ResourceDescriptions":{ + "shape":"ResourceDescriptions", + "documentation":"

Information about the specified resources, including primary identifier and resource model.

" + }, + "NextToken":{ + "shape":"HandlerNextToken", + "documentation":"

If the request doesn't return all of the remaining results, NextToken is set to a token. To retrieve the next set of results, call ListResources again and assign that token to the request object's NextToken parameter. If the request returns all results, NextToken is set to null.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "NetworkFailureException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource handler has returned that the request could not be completed due to networking issues, such as a failure to receive a response from the server.

", + "exception":true + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"[-A-Za-z0-9+/=]+" + }, + "NotStabilizedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource handler has returned that the downstream resource failed to complete all of its ready-state checks.

", + "exception":true + }, + "NotUpdatableException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

One or more properties included in this resource operation are defined as create-only, and therefore cannot be updated.

", + "exception":true + }, + "Operation":{ + "type":"string", + "enum":[ + "CREATE", + "DELETE", + "UPDATE" + ] + }, + "OperationStatus":{ + "type":"string", + "enum":[ + "PENDING", + "IN_PROGRESS", + "SUCCESS", + "FAILED", + "CANCEL_IN_PROGRESS", + "CANCEL_COMPLETE" + ] + }, + "OperationStatuses":{ + "type":"list", + "member":{"shape":"OperationStatus"} + }, + "Operations":{ + "type":"list", + "member":{"shape":"Operation"} + }, + "PatchDocument":{ + "type":"string", + "max":65536, + "min":1, + "pattern":"(.|\\s)*", + "sensitive":true + }, + "PrivateTypeException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Cloud Control API has not received a valid response from the resource handler, due to a configuration error. This includes issues such as the resource handler returning an invalid response, or timing out.

", + "exception":true + }, + "ProgressEvent":{ + "type":"structure", + "members":{ + "TypeName":{ + "shape":"TypeName", + "documentation":"

The name of the resource type used in the operation.

" + }, + "Identifier":{ + "shape":"Identifier", + "documentation":"

The primary identifier for the resource.

In some cases, the resource identifier may be available before the resource operation has reached a status of SUCCESS.

" + }, + "RequestToken":{ + "shape":"RequestToken", + "documentation":"

The unique token representing this resource operation request.

Use the RequestToken with GetResourceRequestStatus to return the current status of a resource operation request.

" + }, + "Operation":{ + "shape":"Operation", + "documentation":"

The resource operation type.

" + }, + "OperationStatus":{ + "shape":"OperationStatus", + "documentation":"

The current status of the resource operation request.

  • PENDING: The resource operation has not yet started.

  • IN_PROGRESS: The resource operation is currently in progress.

  • SUCCESS: The resource operation has successfully completed.

  • FAILED: The resource operation has failed. Refer to the error code and status message for more information.

  • CANCEL_IN_PROGRESS: The resource operation is in the process of being canceled.

  • CANCEL_COMPLETE: The resource operation has been canceled.

" + }, + "EventTime":{ + "shape":"Timestamp", + "documentation":"

When the resource operation request was initiated.

" + }, + "ResourceModel":{ + "shape":"Properties", + "documentation":"

A JSON string containing the resource model, consisting of each resource property and its current value.

" + }, + "StatusMessage":{ + "shape":"StatusMessage", + "documentation":"

Any message explaining the current status.

" + }, + "ErrorCode":{ + "shape":"HandlerErrorCode", + "documentation":"

For requests with a status of FAILED, the associated error code.

For error code definitions, see Handler error codes in the CloudFormation Command Line Interface User Guide for Extension Development.

" + }, + "RetryAfter":{ + "shape":"Timestamp", + "documentation":"

When to next request the status of this resource operation request.

" + } + }, + "documentation":"

Represents the current status of a resource operation request. For more information, see Managing resource operation requests in the Amazon Web Services Cloud Control API User Guide.

" + }, + "Properties":{ + "type":"string", + "max":16384, + "min":1, + "pattern":"(.|\\s)*", + "sensitive":true + }, + "RequestToken":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[-A-Za-z0-9+/=]+" + }, + "RequestTokenNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

A resource operation with the specified request token cannot be found.

", + "exception":true + }, + "ResourceConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource is temporarily unavailable to be acted upon. For example, if the resource is currently undergoing an operation and cannot be acted upon until that operation is finished.

", + "exception":true + }, + "ResourceDescription":{ + "type":"structure", + "members":{ + "Identifier":{ + "shape":"Identifier", + "documentation":"

The primary identifier for the resource.

For more information, see Identifying resources in the Amazon Web Services Cloud Control API User Guide.

" + }, + "Properties":{ + "shape":"Properties", + "documentation":"

A list of the resource properties and their current values.

" + } + }, + "documentation":"

Represents information about a provisioned resource.

" + }, + "ResourceDescriptions":{ + "type":"list", + "member":{"shape":"ResourceDescription"} + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

A resource with the specified identifier cannot be found.

", + "exception":true + }, + "ResourceRequestStatusFilter":{ + "type":"structure", + "members":{ + "Operations":{ + "shape":"Operations", + "documentation":"

The operation types to include in the filter.

" + }, + "OperationStatuses":{ + "shape":"OperationStatuses", + "documentation":"

The operation statuses to include in the filter.

  • PENDING: The operation has been requested, but not yet initiated.

  • IN_PROGRESS: The operation is currently in progress.

  • SUCCESS: The operation has successfully completed.

  • FAILED: The operation has failed.

  • CANCEL_IN_PROGRESS: The operation is currently in the process of being canceled.

  • CANCEL_COMPLETE: The operation has been canceled.

" + } + }, + "documentation":"

The filter criteria to use in determining the requests returned.

" + }, + "ResourceRequestStatusSummaries":{ + "type":"list", + "member":{"shape":"ProgressEvent"} + }, + "RoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"arn:.+:iam::[0-9]{12}:role/.+" + }, + "ServiceInternalErrorException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource handler has returned that the downstream service returned an internal error, typically with a 5XX HTTP status code.

", + "exception":true + }, + "ServiceLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The resource handler has returned that a non-transient resource limit was reached on the service side.

", + "exception":true + }, + "StatusMessage":{ + "type":"string", + "max":1024, + "pattern":"(.|\\s)*" + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The request was denied due to request throttling.

", + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "TypeName":{ + "type":"string", + "max":196, + "min":10, + "pattern":"[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}::[A-Za-z0-9]{2,64}" + }, + "TypeNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The specified extension does not exist in the CloudFormation registry.

", + "exception":true + }, + "TypeVersionId":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[A-Za-z0-9-]+" + }, + "UnsupportedActionException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The specified resource does not support this resource operation.

", + "exception":true + }, + "UpdateResourceInput":{ + "type":"structure", + "required":[ + "TypeName", + "Identifier", + "PatchDocument" + ], + "members":{ + "TypeName":{ + "shape":"TypeName", + "documentation":"

The name of the resource type.

" + }, + "TypeVersionId":{ + "shape":"TypeVersionId", + "documentation":"

For private resource types, the type version to use in this resource operation. If you do not specify a resource version, CloudFormation uses the default version.

" + }, + "RoleArn":{ + "shape":"RoleArn", + "documentation":"

The Amazon Resource Name (ARN) of the Identity and Access Management (IAM) for Cloud Control API to use when performing this resource operation. The role specified must have the permissions required for this operation. The necessary permissions for each event handler are defined in the handlers section of the resource type definition schema.

If you do not specify a role, Cloud Control API uses a temporary session created using your Amazon Web Services user credentials.

For more information, see Specifying credentials in the Amazon Web Services Cloud Control API User Guide.

" + }, + "ClientToken":{ + "shape":"ClientToken", + "documentation":"

A unique identifier to ensure the idempotency of the resource request. As a best practice, specify this token to ensure idempotency, so that Amazon Web Services Cloud Control API can accurately distinguish between request retries and new resource requests. You might retry a resource request to ensure that it was successfully received.

A client token is valid for 36 hours once used. After that, a resource request with the same client token is treated as a new request.

If you do not specify a client token, one is generated for inclusion in the request.

For more information, see Ensuring resource operation requests are unique in the Amazon Web Services Cloud Control API User Guide.

", + "idempotencyToken":true + }, + "Identifier":{ + "shape":"Identifier", + "documentation":"

The identifier for the resource.

You can specify the primary identifier, or any secondary identifier defined for the resource type in its resource schema. You can only specify one identifier. Primary identifiers can be specified as a string or JSON; secondary identifiers must be specified as JSON.

For compound primary identifiers (that is, one that consists of multiple resource properties strung together), to specify the primary identifier as a string, list the property values in the order they are specified in the primary identifier definition, separated by |.

For more information, see Identifying resources in the Amazon Web Services Cloud Control API User Guide.

" + }, + "PatchDocument":{ + "shape":"PatchDocument", + "documentation":"

A JavaScript Object Notation (JSON) document listing the patch operations that represent the updates to apply to the current resource properties. For details, see Composing the patch document in the Amazon Web Services Cloud Control API User Guide.

" + } + } + }, + "UpdateResourceOutput":{ + "type":"structure", + "members":{ + "ProgressEvent":{ + "shape":"ProgressEvent", + "documentation":"

Represents the current status of the resource update request.

Use the RequestToken of the ProgressEvent with GetResourceRequestStatus to return the current status of a resource operation request.

" + } + } + } + }, + "documentation":"

Use Amazon Web Services Cloud Control API to create, read, update, delete, and list (CRUD-L) your cloud resources that belong to a wide range of services--both Amazon Web Services and third-party. With the Cloud Control API standardized set of application programming interfaces (APIs), you can perform CRUD-L operations on any supported resources in your Amazon Web Services account. Using Cloud Control API, you won't have to generate code or scripts specific to each individual service responsible for those resources.

For more information about Amazon Web Services Cloud Control API, see the Amazon Web Services Cloud Control API User Guide.

" +} diff --git a/botocore/data/cloudcontrol/2021-09-30/waiters-2.json b/botocore/data/cloudcontrol/2021-09-30/waiters-2.json new file mode 100644 index 00000000..be633a92 --- /dev/null +++ b/botocore/data/cloudcontrol/2021-09-30/waiters-2.json @@ -0,0 +1,31 @@ +{ + "version": 2, + "waiters": { + "ResourceRequestSuccess": { + "description": "Wait until resource operation request is successful", + "operation": "GetResourceRequestStatus", + "delay": 5, + "maxAttempts": 720, + "acceptors": [ + { + "state": "success", + "matcher": "path", + "argument": "ProgressEvent.OperationStatus", + "expected": "SUCCESS" + }, + { + "state": "failure", + "matcher": "path", + "argument": "ProgressEvent.OperationStatus", + "expected": "FAILED" + }, + { + "state": "failure", + "matcher": "path", + "argument": "ProgressEvent.OperationStatus", + "expected": "CANCEL_COMPLETE" + } + ] + } + } +} \ No newline at end of file diff --git a/botocore/data/connect/2017-08-08/service-2.json b/botocore/data/connect/2017-08-08/service-2.json index a78fedb0..d3561eb7 100644 --- a/botocore/data/connect/2017-08-08/service-2.json +++ b/botocore/data/connect/2017-08-08/service-2.json @@ -245,7 +245,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Create an AppIntegration association with an Amazon Connect instance.

" + "documentation":"

Creates an AWS resource association with an Amazon Connect instance.

" }, "CreateQueue":{ "name":"CreateQueue", @@ -319,7 +319,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Creates a use case for an AppIntegration association.

" + "documentation":"

Creates a use case for an integration association.

" }, "CreateUser":{ "name":"CreateUser", @@ -402,7 +402,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Deletes an AppIntegration association from an Amazon Connect instance. The association must not have any use cases associated with it.

" + "documentation":"

Deletes an AWS resource association from an Amazon Connect instance. The association must not have any use cases associated with it.

" }, "DeleteQuickConnect":{ "name":"DeleteQuickConnect", @@ -433,7 +433,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Deletes a use case from an AppIntegration association.

" + "documentation":"

Deletes a use case from an integration association.

" }, "DeleteUser":{ "name":"DeleteUser", @@ -1011,7 +1011,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Provides summary information about the AppIntegration associations for the specified Amazon Connect instance.

" + "documentation":"

Provides summary information about the AWS resource associations for the specified Amazon Connect instance.

" }, "ListLambdaFunctions":{ "name":"ListLambdaFunctions", @@ -1231,7 +1231,7 @@ {"shape":"InvalidRequestException"}, {"shape":"ThrottlingException"} ], - "documentation":"

Lists the use cases.

" + "documentation":"

Lists the use cases for the integration association.

" }, "ListUserHierarchyGroups":{ "name":"ListUserHierarchyGroups", @@ -1332,7 +1332,7 @@ {"shape":"DestinationNotAllowedException"}, {"shape":"OutboundContactNotPermittedException"} ], - "documentation":"

Places an outbound call to a contact, and then initiates the contact flow. It performs the actions in the contact flow that's specified (in ContactFlowId).

Agents do not initiate the outbound API, which means that they do not dial the contact. If the contact flow places an outbound call to a contact, and then puts the contact in queue, the call is then routed to the agent, like any other inbound case.

There is a 60-second dialing timeout for this operation. If the call is not connected after 60 seconds, it fails.

UK numbers with a 447 prefix are not allowed by default. Before you can dial these UK mobile numbers, you must submit a service quota increase request. For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide.

" + "documentation":"

Places an outbound call to a contact, and then initiates the contact flow. It performs the actions in the contact flow that's specified (in ContactFlowId).

Agents do not initiate the outbound API, which means that they do not dial the contact. If the contact flow places an outbound call to a contact, and then puts the contact in queue, the call is then routed to the agent, like any other inbound case.

There is a 60-second dialing timeout for this operation. If the call is not connected after 60 seconds, it fails.

UK numbers with a 447 prefix are not allowed by default. Before you can dial these UK mobile numbers, you must submit a service quota increase request. For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide.

Campaign calls are not allowed by default. Before you can make a call with TrafficType = CAMPAIGN, you must submit a service quota increase request. For more information, see Amazon Connect Service Quotas in the Amazon Connect Administrator Guide.

" }, "StartTaskContact":{ "name":"StartTaskContact", @@ -1967,6 +1967,20 @@ "max":100, "min":1 }, + "AnswerMachineDetectionConfig":{ + "type":"structure", + "members":{ + "EnableAnswerMachineDetection":{ + "shape":"Boolean", + "documentation":"

The flag to indicate if answer machine detection analysis needs to be performed for a voice call. If set to true, TrafficType must be set as CAMPAIGN.

" + }, + "AwaitAnswerMachinePrompt":{ + "shape":"Boolean", + "documentation":"

Wait for the answering machine prompt.

" + } + }, + "documentation":"

Configuration of the answering machine detection.

" + }, "AssociateApprovedOriginRequest":{ "type":"structure", "required":[ @@ -2203,6 +2217,11 @@ "max":128, "min":1 }, + "CampaignId":{ + "type":"string", + "max":100, + "min":1 + }, "Channel":{ "type":"string", "enum":[ @@ -2584,10 +2603,7 @@ "required":[ "InstanceId", "IntegrationType", - "IntegrationArn", - "SourceApplicationUrl", - "SourceApplicationName", - "SourceType" + "IntegrationArn" ], "members":{ "InstanceId":{ @@ -2606,15 +2622,15 @@ }, "SourceApplicationUrl":{ "shape":"URI", - "documentation":"

The URL for the external application.

" + "documentation":"

The URL for the external application. This field is only required for the EVENT integration type.

" }, "SourceApplicationName":{ "shape":"SourceApplicationName", - "documentation":"

The name of the external application.

" + "documentation":"

The name of the external application. This field is only required for the EVENT integration type.

" }, "SourceType":{ "shape":"SourceType", - "documentation":"

The type of the data source.

" + "documentation":"

The type of the data source. This field is only required for the EVENT integration type.

" }, "Tags":{ "shape":"TagMap", @@ -2627,7 +2643,7 @@ "members":{ "IntegrationAssociationId":{ "shape":"IntegrationAssociationId", - "documentation":"

The identifier for the association.

" + "documentation":"

The identifier for the integration association.

" }, "IntegrationAssociationArn":{ "shape":"ARN", @@ -2809,13 +2825,13 @@ }, "IntegrationAssociationId":{ "shape":"IntegrationAssociationId", - "documentation":"

The identifier for the AppIntegration association.

", + "documentation":"

The identifier for the integration association.

", "location":"uri", "locationName":"IntegrationAssociationId" }, "UseCaseType":{ "shape":"UseCaseType", - "documentation":"

The type of use case to associate to the AppIntegration association. Each AppIntegration association can have only one of each use case type.

" + "documentation":"

The type of use case to associate to the integration association. Each integration association can have only one of each use case type.

" }, "Tags":{ "shape":"TagMap", @@ -3088,7 +3104,7 @@ }, "IntegrationAssociationId":{ "shape":"IntegrationAssociationId", - "documentation":"

The identifier for the AppIntegration association.

", + "documentation":"

The identifier for the integration association.

", "location":"uri", "locationName":"IntegrationAssociationId" } @@ -3131,7 +3147,7 @@ }, "IntegrationAssociationId":{ "shape":"IntegrationAssociationId", - "documentation":"

The identifier for the AppIntegration association.

", + "documentation":"

The identifier for the integration association.

", "location":"uri", "locationName":"IntegrationAssociationId" }, @@ -3803,7 +3819,7 @@ }, "KeyId":{ "shape":"KeyId", - "documentation":"

The identifier of the encryption key.

" + "documentation":"

The full ARN of the encryption key.

Be sure to provide the full ARN of the encryption key, not just the ID.

" } }, "documentation":"

The encryption configuration.

" @@ -3889,7 +3905,7 @@ }, "CurrentMetrics":{ "shape":"CurrentMetrics", - "documentation":"

The metrics to retrieve. Specify the name and unit for each metric. The following metrics are available. For a description of all the metrics, see Real-time Metrics Definitions in the Amazon Connect Administrator Guide.

AGENTS_AFTER_CONTACT_WORK

Unit: COUNT

Name in real-time metrics report: ACW

AGENTS_AVAILABLE

Unit: COUNT

Name in real-time metrics report: Available

AGENTS_ERROR

Unit: COUNT

Name in real-time metrics report: Error

AGENTS_NON_PRODUCTIVE

Unit: COUNT

Name in real-time metrics report: NPT (Non-Productive Time)

AGENTS_ON_CALL

Unit: COUNT

Name in real-time metrics report: On contact

AGENTS_ON_CONTACT

Unit: COUNT

Name in real-time metrics report: On contact

AGENTS_ONLINE

Unit: COUNT

Name in real-time metrics report: Online

AGENTS_STAFFED

Unit: COUNT

Name in real-time metrics report: Staffed

CONTACTS_IN_QUEUE

Unit: COUNT

Name in real-time metrics report: In queue

CONTACTS_SCHEDULED

Unit: COUNT

Name in real-time metrics report: Scheduled

OLDEST_CONTACT_AGE

Unit: SECONDS

When you use groupings, Unit says SECONDS but the Value is returned in MILLISECONDS. For example, if you get a response like this:

{ \"Metric\": { \"Name\": \"OLDEST_CONTACT_AGE\", \"Unit\": \"SECONDS\" }, \"Value\": 24113.0 }

The actual OLDEST_CONTACT_AGE is 24 seconds.

Name in real-time metrics report: Oldest

SLOTS_ACTIVE

Unit: COUNT

Name in real-time metrics report: Active

SLOTS_AVAILABLE

Unit: COUNT

Name in real-time metrics report: Availability

" + "documentation":"

The metrics to retrieve. Specify the name and unit for each metric. The following metrics are available. For a description of all the metrics, see Real-time Metrics Definitions in the Amazon Connect Administrator Guide.

AGENTS_AFTER_CONTACT_WORK

Unit: COUNT

Name in real-time metrics report: ACW

AGENTS_AVAILABLE

Unit: COUNT

Name in real-time metrics report: Available

AGENTS_ERROR

Unit: COUNT

Name in real-time metrics report: Error

AGENTS_NON_PRODUCTIVE

Unit: COUNT

Name in real-time metrics report: NPT (Non-Productive Time)

AGENTS_ON_CALL

Unit: COUNT

Name in real-time metrics report: On contact

AGENTS_ON_CONTACT

Unit: COUNT

Name in real-time metrics report: On contact

AGENTS_ONLINE

Unit: COUNT

Name in real-time metrics report: Online

AGENTS_STAFFED

Unit: COUNT

Name in real-time metrics report: Staffed

CONTACTS_IN_QUEUE

Unit: COUNT

Name in real-time metrics report: In queue

CONTACTS_SCHEDULED

Unit: COUNT

Name in real-time metrics report: Scheduled

OLDEST_CONTACT_AGE

Unit: SECONDS

When you use groupings, Unit says SECONDS and the Value is returned in SECONDS.

When you do not use groupings, Unit says SECONDS but the Value is returned in MILLISECONDS. For example, if you get a response like this:

{ \"Metric\": { \"Name\": \"OLDEST_CONTACT_AGE\", \"Unit\": \"SECONDS\" }, \"Value\": 24113.0 }

The actual OLDEST_CONTACT_AGE is 24 seconds.

Name in real-time metrics report: Oldest

SLOTS_ACTIVE

Unit: COUNT

Name in real-time metrics report: Active

SLOTS_AVAILABLE

Unit: COUNT

Name in real-time metrics report: Availability

" }, "NextToken":{ "shape":"NextToken", @@ -4628,7 +4644,13 @@ }, "IntegrationType":{ "type":"string", - "enum":["EVENT"] + "enum":[ + "EVENT", + "VOICE_ID", + "PINPOINT_APP", + "WISDOM_ASSISTANT", + "WISDOM_KNOWLEDGE_BASE" + ] }, "InternalServiceException":{ "type":"structure", @@ -5129,6 +5151,12 @@ "location":"uri", "locationName":"InstanceId" }, + "IntegrationType":{ + "shape":"IntegrationType", + "documentation":"

", + "location":"querystring", + "locationName":"integrationType" + }, "NextToken":{ "shape":"NextToken", "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", @@ -5149,7 +5177,7 @@ "members":{ "IntegrationAssociationSummaryList":{ "shape":"IntegrationAssociationSummaryList", - "documentation":"

The AppIntegration associations.

" + "documentation":"

The associations.

" }, "NextToken":{ "shape":"NextToken", @@ -5669,7 +5697,7 @@ "locationName":"maxResults" } }, - "documentation":"

Provides summary information about the use cases for the specified Amazon Connect AppIntegration association.

" + "documentation":"

Provides summary information about the use cases for the specified integration association.

" }, "ListUseCasesResponse":{ "type":"structure", @@ -7015,6 +7043,18 @@ "Attributes":{ "shape":"Attributes", "documentation":"

A custom key-value pair using an attribute map. The attributes are standard Amazon Connect attributes, and can be accessed in contact flows just like any other contact attributes.

There can be up to 32,768 UTF-8 bytes across all key-value pairs per contact. Attribute keys can include only alphanumeric, dash, and underscore characters.

" + }, + "AnswerMachineDetectionConfig":{ + "shape":"AnswerMachineDetectionConfig", + "documentation":"

Configuration of the answering machine detection for this outbound call.

" + }, + "CampaignId":{ + "shape":"CampaignId", + "documentation":"

The campaign identifier of the outbound communication.

" + }, + "TrafficType":{ + "shape":"TrafficType", + "documentation":"

Denotes the class of traffic. Calls with different traffic types are handled differently by Amazon Connect. The default value is GENERAL. Use CAMPAIGN if EnableAnswerMachineDetection is set to true. For all other cases, use GENERAL.

" } } }, @@ -7242,6 +7282,13 @@ }, "TimeZone":{"type":"string"}, "Timestamp":{"type":"timestamp"}, + "TrafficType":{ + "type":"string", + "enum":[ + "GENERAL", + "CAMPAIGN" + ] + }, "URI":{ "type":"string", "max":2000, @@ -7463,7 +7510,7 @@ }, "AttributeType":{ "shape":"InstanceAttributeType", - "documentation":"

The type of attribute.

", + "documentation":"

The type of attribute.

Only allowlisted customers can consume USE_CUSTOM_TTS_VOICES. To access this feature, contact AWS Support for allowlisting.

", "location":"uri", "locationName":"AttributeType" }, @@ -7990,7 +8037,7 @@ }, "UseCaseType":{ "shape":"UseCaseType", - "documentation":"

The type of use case to associate to the AppIntegration association. Each AppIntegration association can have only one of each use case type.

" + "documentation":"

The type of use case to associate to the integration association. Each integration association can have only one of each use case type.

" } }, "documentation":"

Contains the use case.

" @@ -8006,7 +8053,10 @@ }, "UseCaseType":{ "type":"string", - "enum":["RULES_EVALUATION"] + "enum":[ + "RULES_EVALUATION", + "CONNECT_CAMPAIGNS" + ] }, "User":{ "type":"structure", diff --git a/botocore/data/dataexchange/2017-07-25/paginators-1.json b/botocore/data/dataexchange/2017-07-25/paginators-1.json index 4e42083b..1f3ae6a0 100644 --- a/botocore/data/dataexchange/2017-07-25/paginators-1.json +++ b/botocore/data/dataexchange/2017-07-25/paginators-1.json @@ -23,6 +23,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Assets" + }, + "ListEventActions": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "EventActions" } } } diff --git a/botocore/data/dataexchange/2017-07-25/service-2.json b/botocore/data/dataexchange/2017-07-25/service-2.json index b62c05d2..e0199932 100644 --- a/botocore/data/dataexchange/2017-07-25/service-2.json +++ b/botocore/data/dataexchange/2017-07-25/service-2.json @@ -83,6 +83,44 @@ ], "documentation": "

This operation creates a data set.

" }, + "CreateEventAction": { + "name": "CreateEventAction", + "http": { + "method": "POST", + "requestUri": "/v1/event-actions", + "responseCode": 201 + }, + "input": { + "shape": "CreateEventActionRequest" + }, + "output": { + "shape": "CreateEventActionResponse", + "documentation": "

201 response

" + }, + "errors": [ + { + "shape": "ThrottlingException", + "documentation": "

429 response

" + }, + { + "shape": "ValidationException", + "documentation": "

400 response

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response

" + }, + { + "shape": "ServiceLimitExceededException", + "documentation": "

402 response

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response

" + } + ], + "documentation": "

This operation creates an event action.

" + }, "CreateJob": { "name": "CreateJob", "http": { @@ -98,14 +136,6 @@ "documentation": "

201 response

" }, "errors": [ - { - "shape": "ResourceNotFoundException", - "documentation": "

404 response

" - }, - { - "shape": "ThrottlingException", - "documentation": "

429 response

" - }, { "shape": "ValidationException", "documentation": "

400 response

" @@ -117,6 +147,14 @@ { "shape": "AccessDeniedException", "documentation": "

403 response

" + }, + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response

" } ], "documentation": "

This operation creates a job.

" @@ -235,6 +273,36 @@ ], "documentation": "

This operation deletes a data set.

" }, + "DeleteEventAction": { + "name": "DeleteEventAction", + "http": { + "method": "DELETE", + "requestUri": "/v1/event-actions/{EventActionId}", + "responseCode": 204 + }, + "input": { + "shape": "DeleteEventActionRequest" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response

" + }, + { + "shape": "ValidationException", + "documentation": "

400 response

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response

" + } + ], + "documentation": "

This operation deletes the event action.

" + }, "DeleteRevision": { "name": "DeleteRevision", "http": { @@ -341,6 +409,40 @@ ], "documentation": "

This operation returns information about a data set.

" }, + "GetEventAction": { + "name": "GetEventAction", + "http": { + "method": "GET", + "requestUri": "/v1/event-actions/{EventActionId}", + "responseCode": 200 + }, + "input": { + "shape": "GetEventActionRequest" + }, + "output": { + "shape": "GetEventActionResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response

" + }, + { + "shape": "ValidationException", + "documentation": "

400 response

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response

" + } + ], + "documentation": "

This operation retrieves information about an event action.

" + }, "GetJob": { "name": "GetJob", "http": { @@ -477,6 +579,40 @@ ], "documentation": "

This operation lists your data sets. When listing by origin OWNED, results are sorted by CreatedAt in descending order. When listing by origin ENTITLED, there is no order and the maxResults parameter is ignored.

" }, + "ListEventActions": { + "name": "ListEventActions", + "http": { + "method": "GET", + "requestUri": "/v1/event-actions", + "responseCode": 200 + }, + "input": { + "shape": "ListEventActionsRequest" + }, + "output": { + "shape": "ListEventActionsResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response

" + }, + { + "shape": "ValidationException", + "documentation": "

400 response

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response

" + } + ], + "documentation": "

This operation lists your event actions.

" + }, "ListJobs": { "name": "ListJobs", "http": { @@ -710,6 +846,44 @@ ], "documentation": "

This operation updates a data set.

" }, + "UpdateEventAction": { + "name": "UpdateEventAction", + "http": { + "method": "PATCH", + "requestUri": "/v1/event-actions/{EventActionId}", + "responseCode": 200 + }, + "input": { + "shape": "UpdateEventActionRequest" + }, + "output": { + "shape": "UpdateEventActionResponse", + "documentation": "

200 response

" + }, + "errors": [ + { + "shape": "ResourceNotFoundException", + "documentation": "

404 response

" + }, + { + "shape": "ThrottlingException", + "documentation": "

429 response

" + }, + { + "shape": "ValidationException", + "documentation": "

400 response

" + }, + { + "shape": "InternalServerException", + "documentation": "

500 response

" + }, + { + "shape": "AccessDeniedException", + "documentation": "

403 response

" + } + ], + "documentation": "

This operation updates the event action.

" + }, "UpdateRevision": { "name": "UpdateRevision", "http": { @@ -771,6 +945,14 @@ "httpStatusCode": 403 } }, + "Action": { + "type": "structure", + "members": { + "ExportRevisionToS3": { + "shape": "AutoExportRevisionToS3RequestDetails" + } + } + }, "Arn": { "type": "string", "documentation": "

An Amazon Resource Name (ARN) that uniquely identifies an AWS resource.

" @@ -891,6 +1073,38 @@ "S3_SNAPSHOT" ] }, + "AutoExportRevisionDestinationEntry": { + "type": "structure", + "members": { + "Bucket": { + "shape": "__string", + "documentation": "

The S3 bucket that is the destination for the event action.

" + }, + "KeyPattern": { + "shape": "__string", + "documentation": "

A string representing the pattern for generated names of the individual assets in the revision. For more information about key patterns, see Key patterns when exporting revisions.

" + } + }, + "documentation": "

A revision destination is the Amazon S3 bucket folder destination to where the export will be sent.

", + "required": [ + "Bucket" + ] + }, + "AutoExportRevisionToS3RequestDetails": { + "type": "structure", + "members": { + "Encryption": { + "shape": "ExportServerSideEncryption" + }, + "RevisionDestination": { + "shape": "AutoExportRevisionDestinationEntry" + } + }, + "documentation": "

Details of the operation to be performed by the job.

", + "required": [ + "RevisionDestination" + ] + }, "CancelJobRequest": { "type": "structure", "members": { @@ -1018,6 +1232,53 @@ } } }, + "CreateEventActionRequest": { + "type": "structure", + "members": { + "Action": { + "shape": "Action", + "documentation": "

What occurs after a certain event.

" + }, + "Event": { + "shape": "Event", + "documentation": "

What occurs to start an action.

" + } + }, + "documentation": "

The request body for CreateEventAction.

", + "required": [ + "Action", + "Event" + ] + }, + "CreateEventActionResponse": { + "type": "structure", + "members": { + "Action": { + "shape": "Action", + "documentation": "

What occurs after a certain event.

" + }, + "Arn": { + "shape": "Arn", + "documentation": "

The ARN for the event action.

" + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

The date and time that the event action was created, in ISO 8601 format.

" + }, + "Event": { + "shape": "Event", + "documentation": "

What occurs to start an action.

" + }, + "Id": { + "shape": "Id", + "documentation": "

The unique identifier for the event action.

" + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

The date and time that the event action was last updated, in ISO 8601 format.

" + } + } + }, "CreateJobRequest": { "type": "structure", "members": { @@ -1235,6 +1496,20 @@ "DataSetId" ] }, + "DeleteEventActionRequest": { + "type": "structure", + "members": { + "EventActionId": { + "shape": "__string", + "location": "uri", + "locationName": "EventActionId", + "documentation": "

The unique identifier for the event action.

" + } + }, + "required": [ + "EventActionId" + ] + }, "DeleteRevisionRequest": { "type": "structure", "members": { @@ -1271,6 +1546,59 @@ } } }, + "Event": { + "type": "structure", + "members": { + "RevisionPublished": { + "shape": "RevisionPublished" + } + } + }, + "EventActionEntry": { + "type": "structure", + "members": { + "Action": { + "shape": "Action", + "documentation": "

What occurs after a certain event.

" + }, + "Arn": { + "shape": "Arn", + "documentation": "

The ARN for the event action.

" + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

The date and time that the event action was created, in ISO 8601 format.

" + }, + "Event": { + "shape": "Event", + "documentation": "

What occurs to start an action.

" + }, + "Id": { + "shape": "Id", + "documentation": "

The unique identifier for the event action.

" + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

The date and time that the event action was last updated, in ISO 8601 format.

" + } + }, + "documentation": "

An event action is an object that defines the relationship between a specific event and an automated action that will be taken on behalf of the customer.

", + "required": [ + "Action", + "CreatedAt", + "Event", + "Id", + "Arn", + "UpdatedAt" + ] + }, + "ExceptionCause": { + "type": "string", + "enum": [ + "InsufficientS3BucketPolicy", + "S3AccessDenied" + ] + }, "ExportAssetToSignedUrlRequestDetails": { "type": "structure", "members": { @@ -1428,14 +1756,14 @@ "members": { "KmsKeyArn": { "shape": "__string", - "documentation": "

The Amazon Resource Name (ARN) of the the AWS KMS key you want to use to encrypt the Amazon S3 objects. This parameter is required if you choose aws:kms as an encryption type.

" + "documentation": "

The Amazon Resource Name (ARN) of the AWS KMS key you want to use to encrypt the Amazon S3 objects. This parameter is required if you choose aws:kms as an encryption type.

" }, "Type": { "shape": "ServerSideEncryptionTypes", "documentation": "

The type of server side encryption used for encrypting the objects in Amazon S3.

" } }, - "documentation": "

Encryption configuration of the export job. Includes the encryption type as well as the AWS KMS key. The KMS key is only necessary if you chose the KMS encryption type.

", + "documentation": "

Encryption configuration of the export job. Includes the encryption type in addition to the AWS KMS key. The KMS key is only necessary if you chose the KMS encryption type.

", "required": [ "Type" ] @@ -1576,6 +1904,49 @@ } } }, + "GetEventActionRequest": { + "type": "structure", + "members": { + "EventActionId": { + "shape": "__string", + "location": "uri", + "locationName": "EventActionId", + "documentation": "

The unique identifier for the event action.

" + } + }, + "required": [ + "EventActionId" + ] + }, + "GetEventActionResponse": { + "type": "structure", + "members": { + "Action": { + "shape": "Action", + "documentation": "

What occurs after a certain event.

" + }, + "Arn": { + "shape": "Arn", + "documentation": "

The ARN for the event action.

" + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

The date and time that the event action was created, in ISO 8601 format.

" + }, + "Event": { + "shape": "Event", + "documentation": "

What occurs to start an action.

" + }, + "Id": { + "shape": "Id", + "documentation": "

The unique identifier for the event action.

" + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

The date and time that the event action was last updated, in ISO 8601 format.

" + } + } + }, "GetJobRequest": { "type": "structure", "members": { @@ -1728,8 +2099,8 @@ "required": [ "DataSetId", "Md5Hash", - "RevisionId", - "AssetName" + "AssetName", + "RevisionId" ] }, "ImportAssetFromSignedUrlResponseDetails": { @@ -1927,7 +2298,8 @@ "documentation": "The types of resource which the job error can apply to.", "enum": [ "REVISION", - "ASSET" + "ASSET", + "DATA_SET" ] }, "LimitName": { @@ -1944,7 +2316,10 @@ "Concurrent in progress jobs to import assets from Amazon S3", "Concurrent in progress jobs to import assets from a signed URL", "Concurrent in progress jobs to export assets to Amazon S3", - "Concurrent in progress jobs to export assets to a signed URL" + "Concurrent in progress jobs to export assets to a signed URL", + "Concurrent in progress jobs to export revisions to Amazon S3", + "Event actions per account", + "Auto export event actions per data set" ] }, "ListDataSetRevisionsRequest": { @@ -2022,6 +2397,42 @@ } } }, + "ListEventActionsRequest": { + "type": "structure", + "members": { + "EventSourceId": { + "shape": "__string", + "location": "querystring", + "locationName": "eventSourceId", + "documentation": "

The unique identifier for the event source.

" + }, + "MaxResults": { + "shape": "MaxResults", + "location": "querystring", + "locationName": "maxResults", + "documentation": "

The maximum number of results returned by a single call.

" + }, + "NextToken": { + "shape": "__string", + "location": "querystring", + "locationName": "nextToken", + "documentation": "

The token value retrieved from a previous call to access the next page of results.

" + } + } + }, + "ListEventActionsResponse": { + "type": "structure", + "members": { + "EventActions": { + "shape": "ListOfEventActionEntry", + "documentation": "

The event action objects listed by the request.

" + }, + "NextToken": { + "shape": "NextToken", + "documentation": "

The token value retrieved from a previous call to access the next page of results.

" + } + } + }, "ListJobsRequest": { "type": "structure", "members": { @@ -2244,7 +2655,8 @@ "DATA_SET", "REVISION", "ASSET", - "JOB" + "JOB", + "EVENT_ACTION" ] }, "ResponseDetails": { @@ -2340,6 +2752,17 @@ "UpdatedAt" ] }, + "RevisionPublished": { + "type": "structure", + "members": { + "DataSetId": { + "shape": "Id" + } + }, + "required": [ + "DataSetId" + ] + }, "S3SnapshotAsset": { "type": "structure", "members": { @@ -2636,6 +3059,54 @@ } } }, + "UpdateEventActionRequest": { + "type": "structure", + "members": { + "Action": { + "shape": "Action", + "documentation": "

What occurs after a certain event.

" + }, + "EventActionId": { + "shape": "__string", + "location": "uri", + "locationName": "EventActionId", + "documentation": "

The unique identifier for the event action.

" + } + }, + "documentation": "

The request body for UpdateEventAction.

", + "required": [ + "EventActionId" + ] + }, + "UpdateEventActionResponse": { + "type": "structure", + "members": { + "Action": { + "shape": "Action", + "documentation": "

What occurs after a certain event.

" + }, + "Arn": { + "shape": "Arn", + "documentation": "

The ARN for the event action.

" + }, + "CreatedAt": { + "shape": "Timestamp", + "documentation": "

The date and time that the event action was created, in ISO 8601 format.

" + }, + "Event": { + "shape": "Event", + "documentation": "

What occurs to start an action.

" + }, + "Id": { + "shape": "Id", + "documentation": "

The unique identifier for the event action.

" + }, + "UpdatedAt": { + "shape": "Timestamp", + "documentation": "

The date and time that the event action was last updated, in ISO 8601 format.

" + } + } + }, "UpdateRevisionRequest": { "type": "structure", "members": { @@ -2709,6 +3180,10 @@ "Message": { "shape": "__string", "documentation": "

The message that informs you about what was invalid about the request.

" + }, + "ExceptionCause": { + "shape": "ExceptionCause", + "documentation": "

The message that informs you about what the exception was.

" } }, "documentation": "

The request was invalid.

", @@ -2741,6 +3216,12 @@ "shape": "DataSetEntry" } }, + "ListOfEventActionEntry": { + "type": "list", + "member": { + "shape": "EventActionEntry" + } + }, "ListOfJobEntry": { "type": "list", "member": { diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index b69e133a..1585d725 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -3800,7 +3800,7 @@ }, "input":{"shape":"ModifyInstancePlacementRequest"}, "output":{"shape":"ModifyInstancePlacementResult"}, - "documentation":"

Modifies the placement attributes for a specified instance. You can do the following:

  • Modify the affinity between an instance and a Dedicated Host. When affinity is set to host and the instance is not associated with a specific Dedicated Host, the next time the instance is launched, it is automatically associated with the host on which it lands. If the instance is restarted or rebooted, this relationship persists.

  • Change the Dedicated Host with which an instance is associated.

  • Change the instance tenancy of an instance from host to dedicated, or from dedicated to host.

  • Move an instance to or from a placement group.

At least one attribute for affinity, host ID, tenancy, or placement group name must be specified in the request. Affinity and tenancy can be modified in the same request.

To modify the host ID, tenancy, placement group, or partition for an instance, the instance must be in the stopped state.

" + "documentation":"

Modifies the placement attributes for a specified instance. You can do the following:

  • Modify the affinity between an instance and a Dedicated Host. When affinity is set to host and the instance is not associated with a specific Dedicated Host, the next time the instance is launched, it is automatically associated with the host on which it lands. If the instance is restarted or rebooted, this relationship persists.

  • Change the Dedicated Host with which an instance is associated.

  • Change the instance tenancy of an instance.

  • Move an instance to or from a placement group.

At least one attribute for affinity, host ID, tenancy, or placement group name must be specified in the request. Affinity and tenancy can be modified in the same request.

To modify the host ID, tenancy, placement group, or partition for an instance, the instance must be in the stopped state.

" }, "ModifyLaunchTemplate":{ "name":"ModifyLaunchTemplate", @@ -25769,7 +25769,7 @@ }, "Licenses":{ "shape":"LicenseList", - "documentation":"

The license configurations.

", + "documentation":"

The license configurations for the instance.

", "locationName":"licenseSet" }, "MetadataOptions":{ @@ -25786,6 +25786,21 @@ "shape":"BootModeValues", "documentation":"

The boot mode of the instance. For more information, see Boot modes in the Amazon EC2 User Guide.

", "locationName":"bootMode" + }, + "PlatformDetails":{ + "shape":"String", + "documentation":"

The platform details value for the instance. For more information, see AMI billing information fields in the Amazon EC2 User Guide.

", + "locationName":"platformDetails" + }, + "UsageOperation":{ + "shape":"String", + "documentation":"

The usage operation value for the instance. For more information, see AMI billing information fields in the Amazon EC2 User Guide.

", + "locationName":"usageOperation" + }, + "UsageOperationUpdateTime":{ + "shape":"MillisecondDateTime", + "documentation":"

The time that the usage operation was last updated.

", + "locationName":"usageOperationUpdateTime" } }, "documentation":"

Describes an instance.

" @@ -30871,7 +30886,7 @@ }, "Tenancy":{ "shape":"HostTenancy", - "documentation":"

The tenancy for the instance.

For T3 instances, you can't change the tenancy from dedicated to host, or from host to dedicated. Attempting to make one of these unsupported tenancy changes results in the InvalidTenancy error code.

", + "documentation":"

The tenancy for the instance.

For T3 instances, you can't change the tenancy from dedicated to host, or from host to dedicated. Attempting to make one of these unsupported tenancy changes results in the InvalidTenancy error code.

", "locationName":"tenancy" }, "PartitionNumber":{ @@ -33660,7 +33675,7 @@ "locationName":"affinity" }, "GroupName":{ - "shape":"String", + "shape":"PlacementGroupName", "documentation":"

The name of the placement group the instance is in.

", "locationName":"groupName" }, diff --git a/botocore/data/elbv2/2015-12-01/service-2.json b/botocore/data/elbv2/2015-12-01/service-2.json index 64b595f1..ffa568ec 100644 --- a/botocore/data/elbv2/2015-12-01/service-2.json +++ b/botocore/data/elbv2/2015-12-01/service-2.json @@ -45,7 +45,9 @@ {"shape":"DuplicateTagKeysException"}, {"shape":"TooManyTagsException"}, {"shape":"LoadBalancerNotFoundException"}, - {"shape":"TargetGroupNotFoundException"} + {"shape":"TargetGroupNotFoundException"}, + {"shape":"ListenerNotFoundException"}, + {"shape":"RuleNotFoundException"} ], "documentation":"

Adds the specified tags to the specified Elastic Load Balancing resource. You can tag your Application Load Balancers, Network Load Balancers, Gateway Load Balancers, target groups, listeners, and rules.

Each tag consists of a key and an optional value. If a resource already has a tag with the same key, AddTags updates its value.

" }, @@ -171,7 +173,8 @@ "resultWrapper":"DeleteListenerResult" }, "errors":[ - {"shape":"ListenerNotFoundException"} + {"shape":"ListenerNotFoundException"}, + {"shape":"ResourceInUseException"} ], "documentation":"

Deletes the specified listener.

Alternatively, your listener is deleted when you delete the load balancer to which it is attached.

" }, @@ -1223,7 +1226,7 @@ }, "HealthCheckEnabled":{ "shape":"HealthCheckEnabled", - "documentation":"

Indicates whether health checks are enabled. If the target type is lambda, health checks are disabled by default but can be enabled. If the target type is instance or ip, health checks are always enabled and cannot be disabled.

" + "documentation":"

Indicates whether health checks are enabled. If the target type is lambda, health checks are disabled by default but can be enabled. If the target type is instance, ip, or alb, health checks are always enabled and cannot be disabled.

" }, "HealthCheckPath":{ "shape":"Path", @@ -1251,7 +1254,7 @@ }, "TargetType":{ "shape":"TargetTypeEnum", - "documentation":"

The type of target that you must specify when registering targets with this target group. You can't specify targets for a target group using more than one target type.

  • instance - Register targets by instance ID. This is the default value.

  • ip - Register targets by IP address. You can specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). You can't specify publicly routable IP addresses.

  • lambda - Register a single Lambda function as a target.

" + "documentation":"

The type of target that you must specify when registering targets with this target group. You can't specify targets for a target group using more than one target type.

  • instance - Register targets by instance ID. This is the default value.

  • ip - Register targets by IP address. You can specify IP addresses from the subnets of the virtual private cloud (VPC) for the target group, the RFC 1918 range (10.0.0.0/8, 172.16.0.0/12, and 192.168.0.0/16), and the RFC 6598 range (100.64.0.0/10). You can't specify publicly routable IP addresses.

  • lambda - Register a single Lambda function as a target.

  • alb - Register a single Application Load Balancer as a target.

" }, "Tags":{ "shape":"TagList", @@ -2987,15 +2990,15 @@ "members":{ "Id":{ "shape":"TargetId", - "documentation":"

The ID of the target. If the target type of the target group is instance, specify an instance ID. If the target type is ip, specify an IP address. If the target type is lambda, specify the ARN of the Lambda function.

" + "documentation":"

The ID of the target. If the target type of the target group is instance, specify an instance ID. If the target type is ip, specify an IP address. If the target type is lambda, specify the ARN of the Lambda function. If the target type is alb, specify the ARN of the Application Load Balancer target.

" }, "Port":{ "shape":"Port", - "documentation":"

The port on which the target is listening. If the target group protocol is GENEVE, the supported port is 6081. Not used if the target is a Lambda function.

" + "documentation":"

The port on which the target is listening. If the target group protocol is GENEVE, the supported port is 6081. If the target type is alb, the targeted Application Load Balancer must have at least one listener whose port matches the target group port. Not used if the target is a Lambda function.

" }, "AvailabilityZone":{ "shape":"ZoneName", - "documentation":"

An Availability Zone or all. This determines whether the target receives traffic from the load balancer nodes in the specified Availability Zone or from all enabled Availability Zones for the load balancer.

This parameter is not supported if the target type of the target group is instance.

If the target type is ip and the IP address is in a subnet of the VPC for the target group, the Availability Zone is automatically detected and this parameter is optional. If the IP address is outside the VPC, this parameter is required.

With an Application Load Balancer, if the target type is ip and the IP address is outside the VPC for the target group, the only supported value is all.

If the target type is lambda, this parameter is optional and the only supported value is all.

" + "documentation":"

An Availability Zone or all. This determines whether the target receives traffic from the load balancer nodes in the specified Availability Zone or from all enabled Availability Zones for the load balancer.

This parameter is not supported if the target type of the target group is instance or alb.

If the target type is ip and the IP address is in a subnet of the VPC for the target group, the Availability Zone is automatically detected and this parameter is optional. If the IP address is outside the VPC, this parameter is required.

With an Application Load Balancer, if the target type is ip and the IP address is outside the VPC for the target group, the only supported value is all.

If the target type is lambda, this parameter is optional and the only supported value is all.

" } }, "documentation":"

Information about a target.

" @@ -3069,7 +3072,7 @@ }, "TargetType":{ "shape":"TargetTypeEnum", - "documentation":"

The type of target that you must specify when registering targets with this target group. The possible values are instance (register targets by instance ID), ip (register targets by IP address), or lambda (register a single Lambda function as a target).

" + "documentation":"

The type of target that you must specify when registering targets with this target group. The possible values are instance (register targets by instance ID), ip (register targets by IP address), lambda (register a single Lambda function as a target), or alb (register a single Application Load Balancer as a target).

" }, "ProtocolVersion":{ "shape":"ProtocolVersion", @@ -3249,7 +3252,8 @@ "enum":[ "instance", "ip", - "lambda" + "lambda", + "alb" ] }, "TooManyActionsException":{ diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 92fb7046..85ebfc99 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -135,6 +135,18 @@ "us-west-2" : { } } }, + "account" : { + "endpoints" : { + "aws-global" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "account.us-east-1.amazonaws.com" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-global" + }, "acm" : { "endpoints" : { "af-south-1" : { }, @@ -842,7 +854,11 @@ "protocols" : [ "https" ] }, "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, "eu-central-1" : { }, + "eu-north-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1116,6 +1132,61 @@ "us-west-2" : { } } }, + "cloudcontrolapi" : { + "endpoints" : { + "af-south-1" : { }, + "ap-east-1" : { }, + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-northeast-3" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-north-1" : { }, + "eu-south-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "eu-west-3" : { }, + "fips-ca-central-1" : { + "credentialScope" : { + "region" : "ca-central-1" + }, + "hostname" : "cloudcontrolapi-fips.ca-central-1.amazonaws.com" + }, + "fips-us-east-1" : { + "credentialScope" : { + "region" : "us-east-1" + }, + "hostname" : "cloudcontrolapi-fips.us-east-1.amazonaws.com" + }, + "fips-us-east-2" : { + "credentialScope" : { + "region" : "us-east-2" + }, + "hostname" : "cloudcontrolapi-fips.us-east-2.amazonaws.com" + }, + "fips-us-west-1" : { + "credentialScope" : { + "region" : "us-west-1" + }, + "hostname" : "cloudcontrolapi-fips.us-west-1.amazonaws.com" + }, + "fips-us-west-2" : { + "credentialScope" : { + "region" : 
"us-west-2" + }, + "hostname" : "cloudcontrolapi-fips.us-west-2.amazonaws.com" + }, + "me-south-1" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, "clouddirectory" : { "endpoints" : { "ap-southeast-1" : { }, @@ -1216,6 +1287,7 @@ "ap-east-1" : { }, "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-northeast-3" : { }, "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, @@ -3803,6 +3875,7 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "eu-central-1" : { }, @@ -3827,6 +3900,12 @@ }, "hostname" : "data.iotevents.ap-northeast-2.amazonaws.com" }, + "ap-south-1" : { + "credentialScope" : { + "region" : "ap-south-1" + }, + "hostname" : "data.iotevents.ap-south-1.amazonaws.com" + }, "ap-southeast-1" : { "credentialScope" : { "region" : "ap-southeast-1" @@ -4680,7 +4759,9 @@ }, "models-v2-lex" : { "endpoints" : { + "af-south-1" : { }, "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, @@ -5406,6 +5487,24 @@ "us-west-2" : { } } }, + "quicksight" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-northeast-2" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "api" : { }, + "ca-central-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-2" : { }, + "sa-east-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "ram" : { "endpoints" : { "af-south-1" : { }, @@ -5743,7 +5842,9 @@ }, "runtime-v2-lex" : { "endpoints" : { + "af-south-1" : { }, "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, @@ -7402,6 +7503,17 @@ } } }, + "voiceid" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-2" : { }, + "us-east-1" : { 
}, + "us-west-2" : { } + } + }, "waf" : { "endpoints" : { "aws-fips" : { @@ -7811,6 +7923,18 @@ "cn-northwest-1" : { } } }, + "account" : { + "endpoints" : { + "aws-cn-global" : { + "credentialScope" : { + "region" : "cn-northwest-1" + }, + "hostname" : "account.cn-northwest-1.amazonaws.com.cn" + } + }, + "isRegionalized" : false, + "partitionEndpoint" : "aws-cn-global" + }, "acm" : { "endpoints" : { "cn-north-1" : { }, @@ -8921,6 +9045,24 @@ "us-gov-west-1" : { } } }, + "cloudcontrolapi" : { + "endpoints" : { + "fips-us-gov-east-1" : { + "credentialScope" : { + "region" : "us-gov-east-1" + }, + "hostname" : "cloudcontrolapi-fips.us-gov-east-1.amazonaws.com" + }, + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "cloudcontrolapi-fips.us-gov-west-1.amazonaws.com" + }, + "us-gov-east-1" : { }, + "us-gov-west-1" : { } + } + }, "clouddirectory" : { "endpoints" : { "us-gov-west-1" : { } @@ -9603,6 +9745,17 @@ "isRegionalized" : false, "partitionEndpoint" : "aws-us-gov-global" }, + "identitystore" : { + "endpoints" : { + "fips-us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "identitystore.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1" : { } + } + }, "inspector" : { "endpoints" : { "fips-us-gov-east-1" : { @@ -9644,6 +9797,21 @@ "us-gov-west-1" : { } } }, + "iotevents" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, + "ioteventsdata" : { + "endpoints" : { + "us-gov-west-1" : { + "credentialScope" : { + "region" : "us-gov-west-1" + }, + "hostname" : "data.iotevents.us-gov-west-1.amazonaws.com" + } + } + }, "iotsecuredtunneling" : { "endpoints" : { "fips-us-gov-east-1" : { @@ -9948,6 +10116,12 @@ "us-gov-west-1" : { } } }, + "quicksight" : { + "endpoints" : { + "api" : { }, + "us-gov-west-1" : { } + } + }, "ram" : { "endpoints" : { "us-gov-east-1" : { diff --git a/botocore/data/imagebuilder/2019-12-02/service-2.json b/botocore/data/imagebuilder/2019-12-02/service-2.json 
index 8dfdb2b3..be13a8ba 100644 --- a/botocore/data/imagebuilder/2019-12-02/service-2.json +++ b/botocore/data/imagebuilder/2019-12-02/service-2.json @@ -560,7 +560,7 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"} ], - "documentation":"

Returns the list of component build versions for the specified semantic version.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" + "documentation":"

Returns the list of component build versions for the specified semantic version.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" }, "ListComponents":{ "name":"ListComponents", @@ -579,7 +579,7 @@ {"shape":"ForbiddenException"}, {"shape":"CallRateLimitExceededException"} ], - "documentation":"

Returns the list of component build versions for the specified semantic version.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" + "documentation":"

Returns the list of component build versions for the specified semantic version.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" }, "ListContainerRecipes":{ "name":"ListContainerRecipes", @@ -982,7 +982,7 @@ }, "userDataOverride":{ "shape":"UserDataOverride", - "documentation":"

Use this property to provide commands or a command script to run when you launch your build instance.

The userDataOverride property replaces any commands that Image Builder might have added to ensure that Systems Manager is installed on your Linux build instance. If you override the user data, make sure that you add commands to install Systems Manager, if it is not pre-installed on your source image.

" + "documentation":"

Use this property to provide commands or a command script to run when you launch your build instance.

The userDataOverride property replaces any commands that Image Builder might have added to ensure that Systems Manager is installed on your Linux build instance. If you override the user data, make sure that you add commands to install Systems Manager, if it is not pre-installed on your base image.

" } }, "documentation":"

In addition to your infrastruction configuration, these settings provide an extra layer of control over your build instances. For instances where Image Builder installs the Systems Manager agent, you can choose whether to keep it for the AMI that you create. You can also specify commands to run on launch for all of your build instances.

" @@ -1019,7 +1019,7 @@ "members":{ "name":{ "shape":"AmiNameString", - "documentation":"

The name of the distribution configuration.

" + "documentation":"

The name of the output AMI.

" }, "description":{ "shape":"NonEmptyString", @@ -1147,7 +1147,7 @@ }, "supportedOsVersions":{ "shape":"OsVersionList", - "documentation":"

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

" + "documentation":"

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the base image OS version during image recipe creation.

" }, "state":{ "shape":"ComponentState", @@ -1332,7 +1332,7 @@ }, "supportedOsVersions":{ "shape":"OsVersionList", - "documentation":"

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

" + "documentation":"

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the base image OS version during image recipe creation.

" }, "state":{ "shape":"ComponentState", @@ -1389,7 +1389,7 @@ }, "version":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the component.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" + "documentation":"

The semantic version of the component.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" }, "description":{ "shape":"NonEmptyString", @@ -1401,7 +1401,7 @@ }, "supportedOsVersions":{ "shape":"OsVersionList", - "documentation":"

he operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

" + "documentation":"

he operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the base image OS version during image recipe creation.

" }, "type":{ "shape":"ComponentType", @@ -1496,7 +1496,7 @@ }, "version":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the container recipe.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" + "documentation":"

The semantic version of the container recipe.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" }, "components":{ "shape":"ComponentConfigurationList", @@ -1520,7 +1520,7 @@ }, "parentImage":{ "shape":"NonEmptyString", - "documentation":"

The source image for the container recipe.

" + "documentation":"

The base image for the container recipe.

" }, "dateCreated":{ "shape":"DateTime", @@ -1570,7 +1570,7 @@ }, "parentImage":{ "shape":"NonEmptyString", - "documentation":"

The source image for the container recipe.

" + "documentation":"

The base image for the container recipe.

" }, "dateCreated":{ "shape":"DateTime", @@ -1626,7 +1626,7 @@ }, "supportedOsVersions":{ "shape":"OsVersionList", - "documentation":"

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the parent image OS version during image recipe creation.

" + "documentation":"

The operating system (OS) version supported by the component. If the OS information is available, a prefix match is performed against the base image OS version during image recipe creation.

" }, "data":{ "shape":"InlineComponentData", @@ -1714,15 +1714,15 @@ }, "platformOverride":{ "shape":"Platform", - "documentation":"

Specifies the operating system platform when you use a custom source image.

" + "documentation":"

Specifies the operating system platform when you use a custom base image.

" }, "imageOsVersionOverride":{ "shape":"NonEmptyString", - "documentation":"

Specifies the operating system version for the source image.

" + "documentation":"

Specifies the operating system version for the base image.

" }, "parentImage":{ "shape":"NonEmptyString", - "documentation":"

The source image for the container recipe.

" + "documentation":"

The base image for the container recipe.

" }, "tags":{ "shape":"TagMap", @@ -1916,7 +1916,7 @@ }, "parentImage":{ "shape":"NonEmptyString", - "documentation":"

The parent image of the image recipe. The value of the string can be the ARN of the parent image or an AMI ID. The format for the ARN follows this example: arn:aws:imagebuilder:us-west-2:aws:image/windows-server-2016-english-full-base-x86/x.x.x. You can provide the specific version that you want to use, or you can use a wildcard in all of the fields. If you enter an AMI ID for the string value, you must have access to the AMI, and the AMI must be in the same Region in which you are using Image Builder.

" + "documentation":"

The base image of the image recipe. The value of the string can be the ARN of the base image or an AMI ID. The format for the ARN follows this example: arn:aws:imagebuilder:us-west-2:aws:image/windows-server-2016-english-full-base-x86/x.x.x. You can provide the specific version that you want to use, or you can use a wildcard in all of the fields. If you enter an AMI ID for the string value, you must have access to the AMI, and the AMI must be in the same Region in which you are using Image Builder.

" }, "blockDeviceMappings":{ "shape":"InstanceBlockDeviceMappings", @@ -2035,7 +2035,7 @@ }, "instanceTypes":{ "shape":"InstanceTypeList", - "documentation":"

The instance metadata options that you can set for the HTTP requests that pipeline builds use to launch EC2 build and test instances. For more information about instance metadata options, see one of the following links:

" + "documentation":"

The instance types of the infrastructure configuration. You can specify one or more instance types to use for this build. The service will pick one of these instance types based on availability.

" }, "instanceProfileName":{ "shape":"InstanceProfileNameType", @@ -2821,7 +2821,7 @@ }, "version":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the image.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" + "documentation":"

The semantic version of the image.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" }, "platform":{ "shape":"Platform", @@ -3027,7 +3027,7 @@ }, "parentImage":{ "shape":"NonEmptyString", - "documentation":"

The parent image of the image recipe.

" + "documentation":"

The base image of the image recipe.

" }, "blockDeviceMappings":{ "shape":"InstanceBlockDeviceMappings", @@ -3077,7 +3077,7 @@ }, "parentImage":{ "shape":"NonEmptyString", - "documentation":"

The parent image of the image recipe.

" + "documentation":"

The base image of the image recipe.

" }, "dateCreated":{ "shape":"DateTime", @@ -3221,7 +3221,7 @@ }, "version":{ "shape":"VersionNumber", - "documentation":"

Details for a specific version of an Image Builder image. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" + "documentation":"

Details for a specific version of an Image Builder image. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Assignment: For the first three nodes you can assign any positive integer value, including zero, with an upper limit of 2^30-1, or 1073741823 for each node. Image Builder automatically assigns the build number to the fourth node.

Patterns: You can use any numeric pattern that adheres to the assignment requirements for the nodes that you can assign. For example, you might choose a software version pattern, such as 1.0.0, or a date, such as 2021.01.01.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" }, "platform":{ "shape":"Platform", @@ -3271,7 +3271,7 @@ }, "semanticVersion":{ "shape":"VersionNumber", - "documentation":"

The semantic version of the component. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" + "documentation":"

The semantic version of the component. This version follows the semantic version syntax.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" }, "description":{ "shape":"NonEmptyString", @@ -3482,7 +3482,7 @@ }, "noDevice":{ "shape":"EmptyString", - "documentation":"

Use to remove a mapping from the parent image.

" + "documentation":"

Use to remove a mapping from the base image.

" } }, "documentation":"

Defines block device mappings for the instance used to configure your image.

" @@ -3503,7 +3503,7 @@ "documentation":"

Defines the block devices to attach for building an instance from this Image Builder AMI.

" } }, - "documentation":"

Defines a custom source AMI and block device mapping configurations of an instance used for building and testing container images.

" + "documentation":"

Defines a custom base AMI and block device mapping configurations of an instance used for building and testing container images.

" }, "InstanceMetadataOptions":{ "type":"structure", @@ -4019,7 +4019,7 @@ }, "imageVersionList":{ "shape":"ImageVersionList", - "documentation":"

The list of image semantic versions.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the source image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" + "documentation":"

The list of image semantic versions.

The semantic version has four nodes: <major>.<minor>.<patch>/<build>. You can assign values for the first three, and can filter on all of them.

Filtering: With semantic versioning, you have the flexibility to use wildcards (x) to specify the most recent versions or nodes when selecting the base image or components for your recipe. When you use a wildcard in any node, all nodes to the right of the first wildcard must also be wildcards.

" }, "nextToken":{ "shape":"PaginationToken", @@ -4365,7 +4365,7 @@ }, "pipelineExecutionStartCondition":{ "shape":"PipelineExecutionStartCondition", - "documentation":"

The condition configures when the pipeline should trigger a new image build. When the pipelineExecutionStartCondition is set to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE, and you use semantic version filters on the source image or components in your image recipe, EC2 Image Builder will build a new image only when there are new versions of the image or components in your recipe that match the semantic version filter. When it is set to EXPRESSION_MATCH_ONLY, it will build a new image every time the CRON expression matches the current time. For semantic version syntax, see CreateComponent in the EC2 Image Builder API Reference.

" + "documentation":"

The condition configures when the pipeline should trigger a new image build. When the pipelineExecutionStartCondition is set to EXPRESSION_MATCH_AND_DEPENDENCY_UPDATES_AVAILABLE, and you use semantic version filters on the base image or components in your image recipe, EC2 Image Builder will build a new image only when there are new versions of the image or components in your recipe that match the semantic version filter. When it is set to EXPRESSION_MATCH_ONLY, it will build a new image every time the CRON expression matches the current time. For semantic version syntax, see CreateComponent in the EC2 Image Builder API Reference.

" } }, "documentation":"

A schedule configures how often and when a pipeline will automatically create a new image.

" diff --git a/botocore/data/lambda/2015-03-31/service-2.json b/botocore/data/lambda/2015-03-31/service-2.json index 88ec34ac..6c2c0443 100644 --- a/botocore/data/lambda/2015-03-31/service-2.json +++ b/botocore/data/lambda/2015-03-31/service-2.json @@ -48,7 +48,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"PreconditionFailedException"} ], - "documentation":"

Grants an Amazon Web Services service or another account permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function.

To grant permission to another account, specify the account ID as the Principal. For Amazon Web Services services, the principal is a domain-style identifier defined by the service, like s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Services services, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function.

This action adds a statement to a resource-based permissions policy for the function. For more information about function policies, see Lambda Function Policies.

" + "documentation":"

Grants an Amazon Web Services service or another account permission to use a function. You can apply the policy at the function level, or specify a qualifier to restrict access to a single version or alias. If you use a qualifier, the invoker must use the full Amazon Resource Name (ARN) of that version or alias to invoke the function. Note: Lambda does not support adding policies to version $LATEST.

To grant permission to another account, specify the account ID as the Principal. For Amazon Web Services services, the principal is a domain-style identifier defined by the service, like s3.amazonaws.com or sns.amazonaws.com. For Amazon Web Services services, you can also specify the ARN of the associated resource as the SourceArn. If you grant permission to a service principal without specifying the source, other accounts could potentially configure resources in their account to invoke your Lambda function.

This action adds a statement to a resource-based permissions policy for the function. For more information about function policies, see Lambda Function Policies.

" }, "CreateAlias":{ "name":"CreateAlias", @@ -99,7 +99,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and triggers the function.

For details about each event source type, see the following topics. In particular, each of the topics describes the required and optional parameters for the specific event source.

The following error handling options are only available for stream sources (DynamoDB and Kinesis):

  • BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry.

  • DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic.

  • MaximumRecordAgeInSeconds - Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires

  • MaximumRetryAttempts - Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

  • ParallelizationFactor - Process multiple batches from each shard concurrently.

" + "documentation":"

Creates a mapping between an event source and an Lambda function. Lambda reads items from the event source and triggers the function.

For details about each event source type, see the following topics.

The following error handling options are only available for stream sources (DynamoDB and Kinesis):

  • BisectBatchOnFunctionError - If the function returns an error, split the batch in two and retry.

  • DestinationConfig - Send discarded records to an Amazon SQS queue or Amazon SNS topic.

  • MaximumRecordAgeInSeconds - Discard records older than the specified age. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires

  • MaximumRetryAttempts - Discard records after the specified number of retries. The default value is infinite (-1). When set to infinite (-1), failed records are retried until the record expires.

  • ParallelizationFactor - Process multiple batches from each shard concurrently.

" }, "CreateFunction":{ "name":"CreateFunction", @@ -121,7 +121,7 @@ {"shape":"InvalidCodeSignatureException"}, {"shape":"CodeSigningConfigNotFoundException"} ], - "documentation":"

Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a .zip file archive or container image that contains your function code. The execution role grants the function permission to use Amazon Web Services services, such as Amazon CloudWatch Logs for log streaming and X-Ray for request tracing.

You set the package type to Image if the deployment package is a container image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties.

You set the package type to Zip if the deployment package is a .zip file archive. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties.

When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Function States.

A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration.

The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency).

You can use code signing if your deployment package is a .zip file archive. To enable code signing for this function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes set set of signing profiles, which define the trusted publishers for this function.

If another account or an Amazon Web Services service invokes your function, use AddPermission to grant permission by creating a resource-based IAM policy. You can grant permissions at the function level, on a version, or on an alias.

To invoke your function directly, use Invoke. To invoke your function in response to events in other Amazon Web Services services, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Functions.

" + "documentation":"

Creates a Lambda function. To create a function, you need a deployment package and an execution role. The deployment package is a .zip file archive or container image that contains your function code. The execution role grants the function permission to use Amazon Web Services services, such as Amazon CloudWatch Logs for log streaming and X-Ray for request tracing.

You set the package type to Image if the deployment package is a container image. For a container image, the code property must include the URI of a container image in the Amazon ECR registry. You do not need to specify the handler and runtime properties.

You set the package type to Zip if the deployment package is a .zip file archive. For a .zip file archive, the code property specifies the location of the .zip file. You must also specify the handler and runtime properties. The code in the deployment package must be compatible with the target instruction set architecture of the function (x86-64 or arm64). If you do not specify the architecture, the default value is x86-64.

When you create a function, Lambda provisions an instance of the function and its supporting resources. If your function connects to a VPC, this process can take a minute or so. During this time, you can't invoke or modify the function. The State, StateReason, and StateReasonCode fields in the response from GetFunctionConfiguration indicate when the function is ready to invoke. For more information, see Function States.

A function has an unpublished version, and can have published versions and aliases. The unpublished version changes when you update your function's code and configuration. A published version is a snapshot of your function code and configuration that can't be changed. An alias is a named resource that maps to a version, and can be changed to map to a different version. Use the Publish parameter to create version 1 of your function from its initial configuration.

The other parameters let you configure version-specific and function-level settings. You can modify version-specific settings later with UpdateFunctionConfiguration. Function-level settings apply to both the unpublished and published versions of the function, and include tags (TagResource) and per-function concurrency limits (PutFunctionConcurrency).

You can use code signing if your deployment package is a .zip file archive. To enable code signing for this function, specify the ARN of a code-signing configuration. When a user attempts to deploy a code package with UpdateFunctionCode, Lambda checks that the code package has a valid signature from a trusted publisher. The code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function.

If another account or an Amazon Web Services service invokes your function, use AddPermission to grant permission by creating a resource-based IAM policy. You can grant permissions at the function level, on a version, or on an alias.

To invoke your function directly, use Invoke. To invoke your function in response to events in other Amazon Web Services services, create an event source mapping (CreateEventSourceMapping), or configure a function trigger in the other service. For more information, see Invoking Functions.

" }, "DeleteAlias":{ "name":"DeleteAlias", @@ -680,7 +680,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists the versions of an Lambda layer. Versions that have been deleted aren't listed. Specify a runtime identifier to list only versions that indicate that they're compatible with that runtime.

" + "documentation":"

Lists the versions of an Lambda layer. Versions that have been deleted aren't listed. Specify a runtime identifier to list only versions that indicate that they're compatible with that runtime. Specify a compatible architecture to include only layer versions that are compatible with that architecture.

" }, "ListLayers":{ "name":"ListLayers", @@ -696,7 +696,7 @@ {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Lists Lambda layers and shows information about the latest version of each. Specify a runtime identifier to list only layers that indicate that they're compatible with that runtime.

" + "documentation":"

Lists Lambda layers and shows information about the latest version of each. Specify a runtime identifier to list only layers that indicate that they're compatible with that runtime. Specify a compatible architecture to include only layers that are compatible with that instruction set architecture.

" }, "ListProvisionedConcurrencyConfigs":{ "name":"ListProvisionedConcurrencyConfigs", @@ -1178,7 +1178,7 @@ }, "SourceArn":{ "shape":"Arn", - "documentation":"

For Amazon Web Services services, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.

" + "documentation":"

For Amazon Web Services services, the ARN of the Amazon Web Services resource that invokes the function. For example, an Amazon S3 bucket or Amazon SNS topic.

Note that Lambda configures the comparison using the StringLike operator.

" }, "SourceAccount":{ "shape":"SourceOwner", @@ -1281,6 +1281,19 @@ }, "documentation":"

List of signing profiles that can sign a code package.

" }, + "Architecture":{ + "type":"string", + "enum":[ + "x86_64", + "arm64" + ] + }, + "ArchitecturesList":{ + "type":"list", + "member":{"shape":"Architecture"}, + "max":1, + "min":1 + }, "Arn":{ "type":"string", "pattern":"arn:(aws[a-zA-Z0-9-]*):([a-zA-Z0-9\\-])+:([a-z]{2}(-gov)?-[a-z]+-\\d{1})?:(\\d{12})?:(.*)" @@ -1400,6 +1413,11 @@ "error":{"httpStatusCode":400}, "exception":true }, + "CompatibleArchitectures":{ + "type":"list", + "member":{"shape":"Architecture"}, + "max":2 + }, "CompatibleRuntimes":{ "type":"list", "member":{"shape":"Runtime"}, @@ -1410,7 +1428,7 @@ "members":{ "ReservedConcurrentExecutions":{ "shape":"ReservedConcurrentExecutions", - "documentation":"

The number of concurrent executions that are reserved for this function. For more information, see Managing Concurrency.

" + "documentation":"

The number of concurrent executions that are reserved for this function. For more information, see Managing Concurrency.

" } } }, @@ -1488,15 +1506,15 @@ }, "Enabled":{ "shape":"Enabled", - "documentation":"

If true, the event source mapping is active. Set to false to pause polling and invocation.

" + "documentation":"

When true, the event source mapping is active. When false, Lambda pauses polling and invocation.

Default: True

" }, "BatchSize":{ "shape":"BatchSize", - "documentation":"

The maximum number of items to retrieve in a single batch.

  • Amazon Kinesis - Default 100. Max 10,000.

  • Amazon DynamoDB Streams - Default 100. Max 1,000.

  • Amazon Simple Queue Service - Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.

  • Amazon Managed Streaming for Apache Kafka - Default 100. Max 10,000.

  • Self-Managed Apache Kafka - Default 100. Max 10,000.

" + "documentation":"

The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).

  • Amazon Kinesis - Default 100. Max 10,000.

  • Amazon DynamoDB Streams - Default 100. Max 1,000.

  • Amazon Simple Queue Service - Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.

  • Amazon Managed Streaming for Apache Kafka - Default 100. Max 10,000.

  • Self-Managed Apache Kafka - Default 100. Max 10,000.

" }, "MaximumBatchingWindowInSeconds":{ "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

(Streams and SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds.

" + "documentation":"

(Streams and Amazon SQS standard queues) The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function.

Default: 0

Related setting: When you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

" }, "ParallelizationFactor":{ "shape":"ParallelizationFactor", @@ -1639,6 +1657,10 @@ "CodeSigningConfigArn":{ "shape":"CodeSigningConfigArn", "documentation":"

To enable code signing for this function, specify the ARN of a code-signing configuration. A code-signing configuration includes a set of signing profiles, which define the trusted publishers for this function.

" + }, + "Architectures":{ + "shape":"ArchitecturesList", + "documentation":"

The instruction set architecture that the function supports. Enter a string array with one of the valid values. The default value is x86_64.

" } } }, @@ -1867,7 +1889,7 @@ "Type":{"shape":"String"}, "Message":{"shape":"String"} }, - "documentation":"

An error occured when reading from or writing to a connected file system.

", + "documentation":"

An error occurred when reading from or writing to a connected file system.

", "error":{"httpStatusCode":410}, "exception":true }, @@ -2005,11 +2027,11 @@ }, "BatchSize":{ "shape":"BatchSize", - "documentation":"

The maximum number of items to retrieve in a single batch.

" + "documentation":"

The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).

Default value: Varies by service. For Amazon SQS, the default is 10. For all other services, the default is 100.

Related setting: When you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

" }, "MaximumBatchingWindowInSeconds":{ "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

(Streams and Amazon SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds. The default value is zero.

" + "documentation":"

(Streams and Amazon SQS standard queues) The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function.

Default: 0

Related setting: When you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

" }, "ParallelizationFactor":{ "shape":"ParallelizationFactor", @@ -2310,6 +2332,10 @@ "SigningJobArn":{ "shape":"Arn", "documentation":"

The ARN of the signing job.

" + }, + "Architectures":{ + "shape":"ArchitecturesList", + "documentation":"

The instruction set architecture that the function supports. Architecture is a string array with one of the valid values. The default architecture value is x86_64.

" } }, "documentation":"

Details about a function's configuration.

" @@ -2666,6 +2692,10 @@ "LicenseInfo":{ "shape":"LicenseInfo", "documentation":"

The layer's software license.

" + }, + "CompatibleArchitectures":{ + "shape":"CompatibleArchitectures", + "documentation":"

A list of compatible instruction set architectures.

" } } }, @@ -2903,7 +2933,7 @@ }, "LogType":{ "shape":"LogType", - "documentation":"

Set to Tail to include the execution log in the response.

", + "documentation":"

Set to Tail to include the execution log in the response. Applies to synchronously invoked functions only.

", "location":"header", "locationName":"X-Amz-Log-Type" }, @@ -3200,6 +3230,10 @@ "LicenseInfo":{ "shape":"LicenseInfo", "documentation":"

The layer's open-source license.

" + }, + "CompatibleArchitectures":{ + "shape":"CompatibleArchitectures", + "documentation":"

A list of compatible instruction set architectures.

" } }, "documentation":"

Details about a version of an Lambda layer.

" @@ -3493,6 +3527,12 @@ "documentation":"

The maximum number of versions to return.

", "location":"querystring", "locationName":"MaxItems" + }, + "CompatibleArchitecture":{ + "shape":"Architecture", + "documentation":"

The compatible instruction set architecture.

", + "location":"querystring", + "locationName":"CompatibleArchitecture" } } }, @@ -3529,6 +3569,12 @@ "documentation":"

The maximum number of layers to return.

", "location":"querystring", "locationName":"MaxItems" + }, + "CompatibleArchitecture":{ + "shape":"Architecture", + "documentation":"

The compatible instruction set architecture.

", + "location":"querystring", + "locationName":"CompatibleArchitecture" } } }, @@ -3588,7 +3634,7 @@ "members":{ "Resource":{ "shape":"FunctionArn", - "documentation":"

The function's Amazon Resource Name (ARN).

", + "documentation":"

The function's Amazon Resource Name (ARN). Note: Lambda does not support adding tags to aliases or versions.

", "location":"uri", "locationName":"ARN" } @@ -3882,6 +3928,10 @@ "LicenseInfo":{ "shape":"LicenseInfo", "documentation":"

The layer's software license. It can be any of the following:

  • An SPDX license identifier. For example, MIT.

  • The URL of a license hosted on the internet. For example, https://opensource.org/licenses/MIT.

  • The full text of the license.

" + }, + "CompatibleArchitectures":{ + "shape":"CompatibleArchitectures", + "documentation":"

A list of compatible instruction set architectures.

" } } }, @@ -3919,6 +3969,10 @@ "LicenseInfo":{ "shape":"LicenseInfo", "documentation":"

The layer's software license.

" + }, + "CompatibleArchitectures":{ + "shape":"CompatibleArchitectures", + "documentation":"

A list of compatible instruction set architectures.

" } } }, @@ -4653,15 +4707,15 @@ }, "Enabled":{ "shape":"Enabled", - "documentation":"

If true, the event source mapping is active. Set to false to pause polling and invocation.

" + "documentation":"

When true, the event source mapping is active. When false, Lambda pauses polling and invocation.

Default: True

" }, "BatchSize":{ "shape":"BatchSize", - "documentation":"

The maximum number of items to retrieve in a single batch.

  • Amazon Kinesis - Default 100. Max 10,000.

  • Amazon DynamoDB Streams - Default 100. Max 1,000.

  • Amazon Simple Queue Service - Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.

  • Amazon Managed Streaming for Apache Kafka - Default 100. Max 10,000.

  • Self-Managed Apache Kafka - Default 100. Max 10,000.

" + "documentation":"

The maximum number of records in each batch that Lambda pulls from your stream or queue and sends to your function. Lambda passes all of the records in the batch to the function in a single call, up to the payload limit for synchronous invocation (6 MB).

  • Amazon Kinesis - Default 100. Max 10,000.

  • Amazon DynamoDB Streams - Default 100. Max 1,000.

  • Amazon Simple Queue Service - Default 10. For standard queues the max is 10,000. For FIFO queues the max is 10.

  • Amazon Managed Streaming for Apache Kafka - Default 100. Max 10,000.

  • Self-Managed Apache Kafka - Default 100. Max 10,000.

" }, "MaximumBatchingWindowInSeconds":{ "shape":"MaximumBatchingWindowInSeconds", - "documentation":"

(Streams and SQS standard queues) The maximum amount of time to gather records before invoking the function, in seconds.

" + "documentation":"

(Streams and Amazon SQS standard queues) The maximum amount of time, in seconds, that Lambda spends gathering records before invoking the function.

Default: 0

Related setting: When you set BatchSize to a value greater than 10, you must set MaximumBatchingWindowInSeconds to at least 1.

" }, "DestinationConfig":{ "shape":"DestinationConfig", @@ -4738,6 +4792,10 @@ "RevisionId":{ "shape":"String", "documentation":"

Only update the function if the revision ID matches the ID that's specified. Use this option to avoid modifying a function that has changed since you last read it.

" + }, + "Architectures":{ + "shape":"ArchitecturesList", + "documentation":"

The instruction set architecture that the function supports. Enter a string array with one of the valid values. The default value is x86_64.

" } } }, @@ -4809,7 +4867,7 @@ }, "ImageConfig":{ "shape":"ImageConfig", - "documentation":"

Container image configuration values that override the values in the container image Dockerfile.

" + "documentation":"

Container image configuration values that override the values in the container image Docker file.

" } } }, diff --git a/botocore/data/license-manager/2018-08-01/service-2.json b/botocore/data/license-manager/2018-08-01/service-2.json index 6d72ae6b..7f33eadc 100644 --- a/botocore/data/license-manager/2018-08-01/service-2.json +++ b/botocore/data/license-manager/2018-08-01/service-2.json @@ -1167,6 +1167,10 @@ "Expiration":{ "shape":"ISO8601DateTime", "documentation":"

Date and time at which the license checkout expires.

" + }, + "LicenseArn":{ + "shape":"String", + "documentation":"

Amazon Resource Name (ARN) of the checkout license.

" } } }, diff --git a/botocore/data/macie2/2020-01-01/service-2.json b/botocore/data/macie2/2020-01-01/service-2.json index e1378c41..a2db6faa 100644 --- a/botocore/data/macie2/2020-01-01/service-2.json +++ b/botocore/data/macie2/2020-01-01/service-2.json @@ -2840,7 +2840,7 @@ "kmsManaged": { "shape": "__long", "locationName": "kmsManaged", - "documentation": "

The total number of buckets that use an Key Management Service (KMS) customer master key (CMK) to encrypt new objects by default. These buckets use Amazon Web Services managed KMS encryption (AWS-KMS) or customer managed KMS encryption (SSE-KMS) by default.

" + "documentation": "

The total number of buckets that use a KMS key to encrypt new objects by default, either an Amazon Web Services managed key or a customer managed key. These buckets use KMS encryption (SSE-KMS) by default.

" }, "s3Managed": { "shape": "__long", @@ -2923,7 +2923,7 @@ "eq": { "shape": "__listOf__string", "locationName": "eq", - "documentation": "

The value for the property matches (equals) the specified value. If you specify multiple values, Macie uses OR logic to join the values.

" + "documentation": "

The value for the property matches (equals) the specified value. If you specify multiple values, Amazon Macie uses OR logic to join the values.

" }, "gt": { "shape": "__long", @@ -3017,6 +3017,16 @@ "locationName": "classifiableSizeInBytes", "documentation": "

The total storage size, in bytes, of the objects that Amazon Macie can analyze in the bucket. These objects use a supported storage class and have a file name extension for a supported file or storage format.

If versioning is enabled for the bucket, Macie calculates this value based on the size of the latest version of each applicable object in the bucket. This value doesn't reflect the storage size of all versions of each applicable object in the bucket.

" }, + "errorCode": { + "shape": "BucketMetadataErrorCode", + "locationName": "errorCode", + "documentation": "

Specifies the error code for an error that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. If this value is ACCESS_DENIED, Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request. If this value is null, Macie was able to retrieve and process the information.

" + }, + "errorMessage": { + "shape": "__string", + "locationName": "errorMessage", + "documentation": "

A brief description of the error (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information.

" + }, "jobDetails": { "shape": "JobDetails", "locationName": "jobDetails", @@ -3070,7 +3080,7 @@ "sizeInBytesCompressed": { "shape": "__long", "locationName": "sizeInBytesCompressed", - "documentation": "

The total storage size, in bytes, of the objects that are compressed (.gz, .gzip, .zip) files in the bucket.

If versioning is enabled for the bucket, Macie calculates this value based on the size of the latest version of each applicable object in the bucket. This value doesn't reflect the storage size of all versions of each applicable object in the bucket.

" + "documentation": "

The total storage size, in bytes, of the objects that are compressed (.gz, .gzip, .zip) files in the bucket.

If versioning is enabled for the bucket, Amazon Macie calculates this value based on the size of the latest version of each applicable object in the bucket. This value doesn't reflect the storage size of all versions of each applicable object in the bucket.

" }, "tags": { "shape": "__listOfKeyValuePair", @@ -3093,7 +3103,14 @@ "documentation": "

Specifies whether versioning is enabled for the bucket.

" } }, - "documentation": "

Provides information about an S3 bucket that Amazon Macie monitors and analyzes.

" + "documentation": "

Provides statistical data and other information about an S3 bucket that Amazon Macie monitors and analyzes for your account. If an error occurs when Macie attempts to retrieve and process information about the bucket or the bucket's objects, the value for most of these properties is null. Exceptions are accountId, bucketArn, bucketCreatedAt, bucketName, lastUpdated, and region. To identify the cause of the error, refer to the errorCode and errorMessage values.

" + }, + "BucketMetadataErrorCode": { + "type": "string", + "documentation": "

The error code for an error that prevented Amazon Macie from retrieving and processing information about an S3 bucket and the bucket's objects.

", + "enum": [ + "ACCESS_DENIED" + ] }, "BucketPermissionConfiguration": { "type": "structure", @@ -3149,12 +3166,12 @@ "kmsMasterKeyId": { "shape": "__string", "locationName": "kmsMasterKeyId", - "documentation": "

The Amazon Resource Name (ARN) or unique identifier (key ID) for the Key Management Service (KMS) customer master key (CMK) that's used by default to encrypt objects that are added to the bucket. This value is null if the bucket uses an Amazon S3 managed key to encrypt new objects or the bucket doesn't encrypt new objects by default.

" + "documentation": "

The Amazon Resource Name (ARN) or unique identifier (key ID) for the KMS key that's used by default to encrypt objects that are added to the bucket. This value is null if the bucket uses an Amazon S3 managed key to encrypt new objects or the bucket doesn't encrypt new objects by default.

" }, "type": { "shape": "Type", "locationName": "type", - "documentation": "

The type of server-side encryption that's used by default when storing new objects in the bucket. Possible values are:

  • AES256 - New objects are encrypted with an Amazon S3 managed key and use Amazon S3 managed encryption (SSE-S3).

  • aws:kms - New objects are encrypted with an KMS CMK, specified by the kmsMasterKeyId property, and use Amazon Web Services managed KMS encryption (AWS-KMS) or customer managed KMS encryption (SSE-KMS).

  • NONE - New objects aren't encrypted by default. Default encryption is disabled for the bucket.

" + "documentation": "

The type of server-side encryption that's used by default when storing new objects in the bucket. Possible values are:

  • AES256 - New objects are encrypted with an Amazon S3 managed key. They use SSE-S3 encryption.

  • aws:kms - New objects are encrypted with a KMS key (kmsMasterKeyId), either an Amazon Web Services managed key or a customer managed key. They use SSE-KMS encryption.

  • NONE - New objects aren't encrypted by default. Default encryption is disabled for the bucket.

" } }, "documentation": "

Provides information about the default server-side encryption settings for an S3 bucket. For detailed information about these settings, see Setting default server-side encryption behavior for Amazon S3 buckets in the Amazon Simple Storage Service User Guide.

" @@ -4583,12 +4600,12 @@ "sizeInBytes": { "shape": "__long", "locationName": "sizeInBytes", - "documentation": "

The total storage size, in bytes, of the buckets.

If versioning is enabled for any of the buckets, Macie calculates this value based on the size of the latest version of each object in those buckets. This value doesn't reflect the storage size of all versions of the objects in the buckets.

" + "documentation": "

The total storage size, in bytes, of the buckets.

If versioning is enabled for any of the buckets, Amazon Macie calculates this value based on the size of the latest version of each object in those buckets. This value doesn't reflect the storage size of all versions of the objects in the buckets.

" }, "sizeInBytesCompressed": { "shape": "__long", "locationName": "sizeInBytesCompressed", - "documentation": "

The total storage size, in bytes, of the objects that are compressed (.gz, .gzip, .zip) files in the buckets.

If versioning is enabled for any of the buckets, Macie calculates this value based on the size of the latest version of each applicable object in those buckets. This value doesn't reflect the storage size of all versions of the applicable objects in the buckets.

" + "documentation": "

The total storage size, in bytes, of the objects that are compressed (.gz, .gzip, .zip) files in the buckets.

If versioning is enabled for any of the buckets, Amazon Macie calculates this value based on the size of the latest version of each applicable object in those buckets. This value doesn't reflect the storage size of all versions of the applicable objects in the buckets.

" }, "unclassifiableObjectCount": { "shape": "ObjectLevelStatistics", @@ -5863,6 +5880,16 @@ "locationName": "classifiableSizeInBytes", "documentation": "

The total storage size, in bytes, of the objects that Amazon Macie can analyze in the bucket. These objects use a supported storage class and have a file name extension for a supported file or storage format.

If versioning is enabled for the bucket, Macie calculates this value based on the size of the latest version of each applicable object in the bucket. This value doesn't reflect the storage size of all versions of each applicable object in the bucket.

" }, + "errorCode": { + "shape": "BucketMetadataErrorCode", + "locationName": "errorCode", + "documentation": "

Specifies the error code for an error that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. If this value is ACCESS_DENIED, Macie doesn't have permission to retrieve the information. For example, the bucket has a restrictive bucket policy and Amazon S3 denied the request. If this value is null, Macie was able to retrieve and process the information.

" + }, + "errorMessage": { + "shape": "__string", + "locationName": "errorMessage", + "documentation": "

A brief description of the error (errorCode) that prevented Amazon Macie from retrieving and processing information about the bucket and the bucket's objects. This value is null if Macie was able to retrieve and process the information.

" + }, "jobDetails": { "shape": "JobDetails", "locationName": "jobDetails", @@ -5886,7 +5913,7 @@ "sizeInBytesCompressed": { "shape": "__long", "locationName": "sizeInBytesCompressed", - "documentation": "

The total storage size, in bytes, of the objects that are compressed (.gz, .gzip, .zip) files in the bucket.

If versioning is enabled for the bucket, Macie calculates this value based on the size of the latest version of each applicable object in the bucket. This value doesn't reflect the storage size of all versions of each applicable object in the bucket.

" + "documentation": "

The total storage size, in bytes, of the objects that are compressed (.gz, .gzip, .zip) files in the bucket.

If versioning is enabled for the bucket, Amazon Macie calculates this value based on the size of the latest version of each applicable object in the bucket. This value doesn't reflect the storage size of all versions of each applicable object in the bucket.

" }, "unclassifiableObjectCount": { "shape": "ObjectLevelStatistics", @@ -5899,7 +5926,7 @@ "documentation": "

The total storage size, in bytes, of the objects that Amazon Macie can't analyze in the bucket. These objects don't use a supported storage class or don't have a file name extension for a supported file or storage format.

" } }, - "documentation": "

Provides statistical data and other information about an S3 bucket that Amazon Macie monitors and analyzes.

" + "documentation": "

Provides statistical data and other information about an S3 bucket that Amazon Macie monitors and analyzes for your account. If an error occurs when Macie attempts to retrieve and process information about the bucket or the bucket's objects, the value for most of these properties is null. Exceptions are accountId and bucketName. To identify the cause of the error, refer to the errorCode and errorMessage values.

" }, "MatchingResource": { "type": "structure", @@ -5910,7 +5937,7 @@ "documentation": "

The details of an S3 bucket that Amazon Macie monitors and analyzes.

" } }, - "documentation": "

Provides statistical data and other information about an Amazon Web Services resource that Amazon Macie monitors and analyzes.

" + "documentation": "

Provides statistical data and other information about an Amazon Web Services resource that Amazon Macie monitors and analyzes for your account.

" }, "MaxResults": { "type": "integer", @@ -5985,12 +6012,12 @@ "customerManaged": { "shape": "__long", "locationName": "customerManaged", - "documentation": "

The total number of objects that are encrypted with a customer-managed key. The objects use customer-provided server-side encryption (SSE-C).

" + "documentation": "

The total number of objects that are encrypted with a customer-provided key. The objects use customer-provided server-side encryption (SSE-C).

" }, "kmsManaged": { "shape": "__long", "locationName": "kmsManaged", - "documentation": "

The total number of objects that are encrypted with an Key Management Service (KMS) customer master key (CMK). The objects use Amazon Web Services managed KMS encryption (AWS-KMS) or customer managed KMS encryption (SSE-KMS).

" + "documentation": "

The total number of objects that are encrypted with a KMS key, either an Amazon Web Services managed key or a customer managed key. The objects use KMS encryption (SSE-KMS).

" }, "s3Managed": { "shape": "__long", @@ -6029,7 +6056,7 @@ "documentation": "

The total storage size (in bytes) or number of objects that Amazon Macie can't analyze because the objects use an unsupported storage class or don't have a file name extension for a supported file or storage format.

" } }, - "documentation": "

Provides information about the total storage size (in bytes) or number of objects that Amazon Macie can't analyze in one or more S3 buckets. In a BucketMetadata or MatchingBucket object, this data is for a specific bucket. In a GetBucketStatisticsResponse object, this data is aggregated for all the buckets in the query results. If versioning is enabled for a bucket, total storage size values are based on the size of the latest version of each applicable object in the bucket.

" + "documentation": "

Provides information about the total storage size (in bytes) or number of objects that Amazon Macie can't analyze in one or more S3 buckets. In a BucketMetadata or MatchingBucket object, this data is for a specific bucket. In a GetBucketStatisticsResponse object, this data is aggregated for the buckets in the query results. If versioning is enabled for a bucket, total storage size values are based on the size of the latest version of each applicable object in the bucket.

" }, "Occurrences": { "type": "structure", @@ -6387,7 +6414,7 @@ "kmsKeyArn": { "shape": "__string", "locationName": "kmsKeyArn", - "documentation": "

The Amazon Resource Name (ARN) of the Key Management Service (KMS) customer master key (CMK) to use for encryption of the results. This must be the ARN of an existing CMK that's in the same Amazon Web Services Region as the bucket.

" + "documentation": "

The Amazon Resource Name (ARN) of the KMS key to use for encryption of the results. This must be the ARN of an existing, symmetric, customer managed KMS key that's in the same Amazon Web Services Region as the bucket.

" } }, "documentation": "

Specifies an S3 bucket to store data classification results in, and the encryption settings to use when storing results in that bucket.

", @@ -6758,7 +6785,7 @@ "kmsMasterKeyId": { "shape": "__string", "locationName": "kmsMasterKeyId", - "documentation": "

The Amazon Resource Name (ARN) or unique identifier (key ID) for the Key Management Service (KMS) customer master key (CMK) that's used to encrypt data in the bucket or the object. If an KMS CMK isn't used, this value is null.

" + "documentation": "

The Amazon Resource Name (ARN) or unique identifier (key ID) for the KMS key that's used to encrypt data in the bucket or the object. This value is null if a KMS key isn't used to encrypt the data.

" } }, "documentation": "

Provides information about the server-side encryption settings for an S3 bucket or S3 object.

" diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index 8fc0481e..b902f062 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -2091,6 +2091,14 @@ "JOB" ] }, + "BurnInSubtitleStylePassthrough": { + "type": "string", + "documentation": "Ignore this setting unless your output captions are burned in. Choose which set of style and position values the service applies to your output captions. When you choose ENABLED, the service uses the input style and position information from your input. When you choose DISABLED, the service uses any style values that you specify in your output settings. If you don't specify values, the service uses default style and position values. When you choose DISABLED, the service ignores all style and position values from your input.", + "enum": [ + "ENABLED", + "DISABLED" + ] + }, "BurninDestinationSettings": { "type": "structure", "members": { @@ -2099,6 +2107,11 @@ "locationName": "alignment", "documentation": "If no explicit x_position or y_position is provided, setting alignment to centered will place the captions at the bottom center of the output. Similarly, setting a left alignment will align captions to the bottom left of the output. If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match." }, + "ApplyFontColor": { + "shape": "BurninSubtitleApplyFontColor", + "locationName": "applyFontColor", + "documentation": "Ignore this setting unless your input captions are STL, any type of 608, teletext, or TTML, and your output captions are burned in. 
Specify how the service applies the color specified in the setting Font color (BurninSubtitleFontColor). By default, this color is white. When you choose WHITE_TEXT_ONLY, the service uses the specified font color only for text that is white in the input. When you choose ALL_TEXT, the service uses the specified font color for all output captions text. If you leave both settings at their default value, your output font color is the same as your input font color." + }, "BackgroundColor": { "shape": "BurninSubtitleBackgroundColor", "locationName": "backgroundColor", @@ -2109,6 +2122,11 @@ "locationName": "backgroundOpacity", "documentation": "Specifies the opacity of the background rectangle. 255 is opaque; 0 is transparent. Leaving this parameter blank is equivalent to setting it to 0 (transparent). All burn-in and DVB-Sub font settings must match." }, + "FallbackFont": { + "shape": "BurninSubtitleFallbackFont", + "locationName": "fallbackFont", + "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you keep the default value, Best match (BEST_MATCH), MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input." + }, "FontColor": { "shape": "BurninSubtitleFontColor", "locationName": "fontColor", @@ -2134,6 +2152,11 @@ "locationName": "fontSize", "documentation": "A positive integer indicates the exact font size in points. Set to 0 for automatic font size selection. All burn-in and DVB-Sub font settings must match." 
}, + "HexFontColor": { + "shape": "__stringMin6Max8Pattern09aFAF609aFAF2", + "locationName": "hexFontColor", + "documentation": "Ignore this setting unless your BurninSubtitleFontColor setting is HEX. Format is six or eight hexidecimal digits, representing the red, green, and blue components, with the two extra digits used for an optional alpha value. For example a value of 1122AABB is a red value of 0x11, a green value of 0x22, a blue value of 0xAA, and an alpha value of 0xBB." + }, "OutlineColor": { "shape": "BurninSubtitleOutlineColor", "locationName": "outlineColor", @@ -2164,6 +2187,11 @@ "locationName": "shadowYOffset", "documentation": "Specifies the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match." }, + "StylePassthrough": { + "shape": "BurnInSubtitleStylePassthrough", + "locationName": "stylePassthrough", + "documentation": "Ignore this setting unless your output captions are burned in. Choose which set of style and position values the service applies to your output captions. When you choose ENABLED, the service uses the input style and position information from your input. When you choose DISABLED, the service uses any style values that you specify in your output settings. If you don't specify values, the service uses default style and position values. When you choose DISABLED, the service ignores all style and position values from your input." + }, "TeletextSpacing": { "shape": "BurninSubtitleTeletextSpacing", "locationName": "teletextSpacing", @@ -2187,7 +2215,16 @@ "documentation": "If no explicit x_position or y_position is provided, setting alignment to centered will place the captions at the bottom center of the output. Similarly, setting a left alignment will align captions to the bottom left of the output. 
If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match.", "enum": [ "CENTERED", - "LEFT" + "LEFT", + "AUTO" + ] + }, + "BurninSubtitleApplyFontColor": { + "type": "string", + "documentation": "Ignore this setting unless your input captions are STL, any type of 608, teletext, or TTML, and your output captions are burned in. Specify how the service applies the color specified in the setting Font color (BurninSubtitleFontColor). By default, this color is white. When you choose WHITE_TEXT_ONLY, the service uses the specified font color only for text that is white in the input. When you choose ALL_TEXT, the service uses the specified font color for all output captions text. If you leave both settings at their default value, your output font color is the same as your input font color.", + "enum": [ + "WHITE_TEXT_ONLY", + "ALL_TEXT" ] }, "BurninSubtitleBackgroundColor": { @@ -2196,7 +2233,19 @@ "enum": [ "NONE", "BLACK", - "WHITE" + "WHITE", + "AUTO" + ] + }, + "BurninSubtitleFallbackFont": { + "type": "string", + "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you keep the default value, Best match (BEST_MATCH), MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. 
When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.", + "enum": [ + "BEST_MATCH", + "MONOSPACED_SANSSERIF", + "MONOSPACED_SERIF", + "PROPORTIONAL_SANSSERIF", + "PROPORTIONAL_SERIF" ] }, "BurninSubtitleFontColor": { @@ -2208,7 +2257,9 @@ "YELLOW", "RED", "GREEN", - "BLUE" + "BLUE", + "HEX", + "AUTO" ] }, "BurninSubtitleOutlineColor": { @@ -2220,7 +2271,8 @@ "YELLOW", "RED", "GREEN", - "BLUE" + "BLUE", + "AUTO" ] }, "BurninSubtitleShadowColor": { @@ -2229,7 +2281,8 @@ "enum": [ "NONE", "BLACK", - "WHITE" + "WHITE", + "AUTO" ] }, "BurninSubtitleTeletextSpacing": { @@ -2237,7 +2290,8 @@ "documentation": "Only applies to jobs with input captions in Teletext or STL formats. Specify whether the spacing between letters in your captions is set by the captions grid or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read if the captions are closed caption.", "enum": [ "FIXED_GRID", - "PROPORTIONAL" + "PROPORTIONAL", + "AUTO" ] }, "CancelJobRequest": { @@ -2628,6 +2682,11 @@ "locationName": "imageBasedTrickPlay", "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. When you enable Write HLS manifest (WriteHlsManifest), MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. When you enable Write DASH manifest (WriteDashManifest), MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. 
The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" }, + "ImageBasedTrickPlaySettings": { + "shape": "CmafImageBasedTrickPlaySettings", + "locationName": "imageBasedTrickPlaySettings", + "documentation": "Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED" + }, "ManifestCompression": { "shape": "CmafManifestCompression", "locationName": "manifestCompression", @@ -2707,9 +2766,46 @@ "enum": [ "NONE", "THUMBNAIL", - "THUMBNAIL_AND_FULLFRAME" + "THUMBNAIL_AND_FULLFRAME", + "ADVANCED" ] }, + "CmafImageBasedTrickPlaySettings": { + "type": "structure", + "members": { + "IntervalCadence": { + "shape": "CmafIntervalCadence", + "locationName": "intervalCadence", + "documentation": "The cadence MediaConvert follows for generating thumbnails. If set to FOLLOW_IFRAME, MediaConvert generates thumbnails for each IDR frame in the output (matching the GOP cadence). If set to FOLLOW_CUSTOM, MediaConvert generates thumbnails according to the interval you specify in thumbnailInterval." + }, + "ThumbnailHeight": { + "shape": "__integerMin2Max4096", + "locationName": "thumbnailHeight", + "documentation": "Height of each thumbnail within each tile image, in pixels. Leave blank to maintain aspect ratio with thumbnail width. If following the aspect ratio would lead to a total tile height greater than 4096, then the job will be rejected. Must be divisible by 2." + }, + "ThumbnailInterval": { + "shape": "__doubleMin0Max2147483647", + "locationName": "thumbnailInterval", + "documentation": "Enter the interval, in seconds, that MediaConvert uses to generate thumbnails. If the interval you enter doesn't align with the output frame rate, MediaConvert automatically rounds the interval to align with the output frame rate. 
For example, if the output frame rate is 29.97 frames per second and you enter 5, MediaConvert uses a 150 frame interval to generate thumbnails." + }, + "ThumbnailWidth": { + "shape": "__integerMin8Max4096", + "locationName": "thumbnailWidth", + "documentation": "Width of each thumbnail within each tile image, in pixels. Default is 312. Must be divisible by 8." + }, + "TileHeight": { + "shape": "__integerMin1Max2048", + "locationName": "tileHeight", + "documentation": "Number of thumbnails in each column of a tile image. Set a value between 2 and 2048. Must be divisible by 2." + }, + "TileWidth": { + "shape": "__integerMin1Max512", + "locationName": "tileWidth", + "documentation": "Number of thumbnails in each row of a tile image. Set a value between 1 and 512." + } + }, + "documentation": "Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED" + }, "CmafInitializationVectorInManifest": { "type": "string", "documentation": "When you use DRM with CMAF outputs, choose whether the service writes the 128-bit encryption initialization vector in the HLS and DASH manifests.", @@ -2718,6 +2814,14 @@ "EXCLUDE" ] }, + "CmafIntervalCadence": { + "type": "string", + "documentation": "The cadence MediaConvert follows for generating thumbnails. If set to FOLLOW_IFRAME, MediaConvert generates thumbnails for each IDR frame in the output (matching the GOP cadence). If set to FOLLOW_CUSTOM, MediaConvert generates thumbnails according to the interval you specify in thumbnailInterval.", + "enum": [ + "FOLLOW_IFRAME", + "FOLLOW_CUSTOM" + ] + }, "CmafKeyProviderType": { "type": "string", "documentation": "Specify whether your DRM encryption key is static or from a key provider that follows the SPEKE standard. For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.", @@ -3419,6 +3523,11 @@ "locationName": "imageBasedTrickPlay", "documentation": "Specify whether MediaConvert generates images for trick play. 
Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. MediaConvert adds an entry in the .mpd manifest for each set of images that you generate. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" }, + "ImageBasedTrickPlaySettings": { + "shape": "DashIsoImageBasedTrickPlaySettings", + "locationName": "imageBasedTrickPlaySettings", + "documentation": "Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED" + }, "MinBufferTime": { "shape": "__integerMin0Max2147483647", "locationName": "minBufferTime", @@ -3476,7 +3585,52 @@ "enum": [ "NONE", "THUMBNAIL", - "THUMBNAIL_AND_FULLFRAME" + "THUMBNAIL_AND_FULLFRAME", + "ADVANCED" + ] + }, + "DashIsoImageBasedTrickPlaySettings": { + "type": "structure", + "members": { + "IntervalCadence": { + "shape": "DashIsoIntervalCadence", + "locationName": "intervalCadence", + "documentation": "The cadence MediaConvert follows for generating thumbnails. If set to FOLLOW_IFRAME, MediaConvert generates thumbnails for each IDR frame in the output (matching the GOP cadence). If set to FOLLOW_CUSTOM, MediaConvert generates thumbnails according to the interval you specify in thumbnailInterval." + }, + "ThumbnailHeight": { + "shape": "__integerMin1Max4096", + "locationName": "thumbnailHeight", + "documentation": "Height of each thumbnail within each tile image, in pixels. Leave blank to maintain aspect ratio with thumbnail width. If following the aspect ratio would lead to a total tile height greater than 4096, then the job will be rejected. Must be divisible by 2." 
+ }, + "ThumbnailInterval": { + "shape": "__doubleMin0Max2147483647", + "locationName": "thumbnailInterval", + "documentation": "Enter the interval, in seconds, that MediaConvert uses to generate thumbnails. If the interval you enter doesn't align with the output frame rate, MediaConvert automatically rounds the interval to align with the output frame rate. For example, if the output frame rate is 29.97 frames per second and you enter 5, MediaConvert uses a 150 frame interval to generate thumbnails." + }, + "ThumbnailWidth": { + "shape": "__integerMin8Max4096", + "locationName": "thumbnailWidth", + "documentation": "Width of each thumbnail within each tile image, in pixels. Default is 312. Must be divisible by 8." + }, + "TileHeight": { + "shape": "__integerMin1Max2048", + "locationName": "tileHeight", + "documentation": "Number of thumbnails in each column of a tile image. Set a value between 2 and 2048. Must be divisible by 2." + }, + "TileWidth": { + "shape": "__integerMin1Max512", + "locationName": "tileWidth", + "documentation": "Number of thumbnails in each row of a tile image. Set a value between 1 and 512." + } + }, + "documentation": "Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED" + }, + "DashIsoIntervalCadence": { + "type": "string", + "documentation": "The cadence MediaConvert follows for generating thumbnails. If set to FOLLOW_IFRAME, MediaConvert generates thumbnails for each IDR frame in the output (matching the GOP cadence). If set to FOLLOW_CUSTOM, MediaConvert generates thumbnails according to the interval you specify in thumbnailInterval.", + "enum": [ + "FOLLOW_IFRAME", + "FOLLOW_CUSTOM" ] }, "DashIsoMpdProfile": { @@ -3831,6 +3985,11 @@ "locationName": "alignment", "documentation": "If no explicit x_position or y_position is provided, setting alignment to centered will place the captions at the bottom center of the output. Similarly, setting a left alignment will align captions to the bottom left of the output. 
If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match." }, + "ApplyFontColor": { + "shape": "DvbSubtitleApplyFontColor", + "locationName": "applyFontColor", + "documentation": "Ignore this setting unless your input captions are STL, any type of 608, teletext, or TTML, and your output captions are DVB-SUB. Specify how the service applies the color specified in the setting Font color (DvbSubtitleFontColor). By default, this color is white. When you choose WHITE_TEXT_ONLY, the service uses the specified font color only for text that is white in the input. When you choose ALL_TEXT, the service uses the specified font color for all output captions text. If you leave both settings at their default value, your output font color is the same as your input font color." + }, "BackgroundColor": { "shape": "DvbSubtitleBackgroundColor", "locationName": "backgroundColor", @@ -3856,6 +4015,11 @@ "locationName": "ddsYCoordinate", "documentation": "Use this setting, along with DDS x-coordinate (ddsXCoordinate), to specify the upper left corner of the display definition segment (DDS) display window. With this setting, specify the distance, in pixels, between the top of the frame and the top of the DDS display window. Keep the default value, 0, to have MediaConvert automatically choose this offset. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). MediaConvert uses these values to determine whether to write page position data to the DDS or to the page composition segment (PCS). All burn-in and DVB-Sub font settings must match." 
}, + "FallbackFont": { + "shape": "DvbSubSubtitleFallbackFont", + "locationName": "fallbackFont", + "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you keep the default value, Best match (BEST_MATCH), MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input." + }, "FontColor": { "shape": "DvbSubtitleFontColor", "locationName": "fontColor", @@ -3886,6 +4050,11 @@ "locationName": "height", "documentation": "Specify the height, in pixels, of this set of DVB-Sub captions. The default value is 576 pixels. Related setting: When you use this setting, you must set DDS handling (ddsHandling) to a value other than None (NONE). All burn-in and DVB-Sub font settings must match." }, + "HexFontColor": { + "shape": "__stringMin6Max8Pattern09aFAF609aFAF2", + "locationName": "hexFontColor", + "documentation": "Ignore this setting unless your DvbSubtitleFontColor setting is HEX. Format is six or eight hexidecimal digits, representing the red, green, and blue components, with the two extra digits used for an optional alpha value. For example a value of 1122AABB is a red value of 0x11, a green value of 0x22, a blue value of 0xAA, and an alpha value of 0xBB." + }, "OutlineColor": { "shape": "DvbSubtitleOutlineColor", "locationName": "outlineColor", @@ -3916,6 +4085,11 @@ "locationName": "shadowYOffset", "documentation": "Specifies the vertical offset of the shadow relative to the captions in pixels. A value of -2 would result in a shadow offset 2 pixels above the text. All burn-in and DVB-Sub font settings must match." 
}, + "StylePassthrough": { + "shape": "DvbSubtitleStylePassthrough", + "locationName": "stylePassthrough", + "documentation": "Choose which set of style and position values the service applies to your output captions. When you choose ENABLED, the service uses the input style and position information from your input. When you choose DISABLED, the service uses any style values that you specify in your output settings. If you don't specify values, the service uses default style and position values. When you choose DISABLED, the service ignores all style and position values from your input." + }, "SubtitlingType": { "shape": "DvbSubtitlingType", "locationName": "subtitlingType", @@ -3955,12 +4129,32 @@ }, "documentation": "DVB Sub Source Settings" }, + "DvbSubSubtitleFallbackFont": { + "type": "string", + "documentation": "Specify the font that you want the service to use for your burn in captions when your input captions specify a font that MediaConvert doesn't support. When you keep the default value, Best match (BEST_MATCH), MediaConvert uses a supported font that most closely matches the font that your input captions specify. When there are multiple unsupported fonts in your input captions, MediaConvert matches each font with the supported font that matches best. When you explicitly choose a replacement font, MediaConvert uses that font to replace all unsupported fonts from your input.", + "enum": [ + "BEST_MATCH", + "MONOSPACED_SANSSERIF", + "MONOSPACED_SERIF", + "PROPORTIONAL_SANSSERIF", + "PROPORTIONAL_SERIF" + ] + }, "DvbSubtitleAlignment": { "type": "string", "documentation": "If no explicit x_position or y_position is provided, setting alignment to centered will place the captions at the bottom center of the output. Similarly, setting a left alignment will align captions to the bottom left of the output. 
If x and y positions are given in conjunction with the alignment parameter, the font will be justified (either left or centered) relative to those coordinates. This option is not valid for source captions that are STL, 608/embedded or teletext. These source settings are already pre-defined by the caption stream. All burn-in and DVB-Sub font settings must match.", "enum": [ "CENTERED", - "LEFT" + "LEFT", + "AUTO" + ] + }, + "DvbSubtitleApplyFontColor": { + "type": "string", + "documentation": "Ignore this setting unless your input captions are STL, any type of 608, teletext, or TTML, and your output captions are DVB-SUB. Specify how the service applies the color specified in the setting Font color (DvbSubtitleFontColor). By default, this color is white. When you choose WHITE_TEXT_ONLY, the service uses the specified font color only for text that is white in the input. When you choose ALL_TEXT, the service uses the specified font color for all output captions text. If you leave both settings at their default value, your output font color is the same as your input font color.", + "enum": [ + "WHITE_TEXT_ONLY", + "ALL_TEXT" ] }, "DvbSubtitleBackgroundColor": { @@ -3969,7 +4163,8 @@ "enum": [ "NONE", "BLACK", - "WHITE" + "WHITE", + "AUTO" ] }, "DvbSubtitleFontColor": { @@ -3981,7 +4176,9 @@ "YELLOW", "RED", "GREEN", - "BLUE" + "BLUE", + "HEX", + "AUTO" ] }, "DvbSubtitleOutlineColor": { @@ -3993,7 +4190,8 @@ "YELLOW", "RED", "GREEN", - "BLUE" + "BLUE", + "AUTO" ] }, "DvbSubtitleShadowColor": { @@ -4002,7 +4200,16 @@ "enum": [ "NONE", "BLACK", - "WHITE" + "WHITE", + "AUTO" + ] + }, + "DvbSubtitleStylePassthrough": { + "type": "string", + "documentation": "Choose which set of style and position values the service applies to your output captions. When you choose ENABLED, the service uses the input style and position information from your input. When you choose DISABLED, the service uses any style values that you specify in your output settings. 
If you don't specify values, the service uses default style and position values. When you choose DISABLED, the service ignores all style and position values from your input.", + "enum": [ + "ENABLED", + "DISABLED" ] }, "DvbSubtitleTeletextSpacing": { @@ -4010,7 +4217,8 @@ "documentation": "Only applies to jobs with input captions in Teletext or STL formats. Specify whether the spacing between letters in your captions is set by the captions grid or varies depending on letter width. Choose fixed grid to conform to the spacing specified in the captions file more accurately. Choose proportional to make the text easier to read if the captions are closed caption.", "enum": [ "FIXED_GRID", - "PROPORTIONAL" + "PROPORTIONAL", + "AUTO" ] }, "DvbSubtitlingType": { @@ -4601,7 +4809,7 @@ "documentation": "The action to take on content advisory XDS packets. If you select PASSTHROUGH, packets will not be changed. If you select STRIP, any packets will be removed in output captions." } }, - "documentation": "Hexadecimal value as per EIA-608 Line 21 Data Services, section 9.5.1.5 05h Content Advisory." + "documentation": "If your source content has EIA-608 Line 21 Data Services, enable this feature to specify what MediaConvert does with the Extended Data Services (XDS) packets. You can choose to pass through XDS packets, or remove them from the output. For more information about XDS, see EIA-608 Line Data Services, section 9.5.1.5 05h Content Advisory." }, "F4vMoovPlacement": { "type": "string", @@ -6042,6 +6250,11 @@ "locationName": "imageBasedTrickPlay", "documentation": "Specify whether MediaConvert generates images for trick play. Keep the default value, None (NONE), to not generate any images. Choose Thumbnail (THUMBNAIL) to generate tiled thumbnails. Choose Thumbnail and full frame (THUMBNAIL_AND_FULLFRAME) to generate tiled thumbnails and full-resolution images of single frames. 
MediaConvert creates a child manifest for each set of images that you generate and adds corresponding entries to the parent manifest. A common application for these images is Roku trick mode. The thumbnails and full-frame images that MediaConvert creates with this feature are compatible with this Roku specification: https://developer.roku.com/docs/developer-program/media-playback/trick-mode/hls-and-dash.md" }, + "ImageBasedTrickPlaySettings": { + "shape": "HlsImageBasedTrickPlaySettings", + "locationName": "imageBasedTrickPlaySettings", + "documentation": "Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED" + }, "ManifestCompression": { "shape": "HlsManifestCompression", "locationName": "manifestCompression", @@ -6139,9 +6352,46 @@ "enum": [ "NONE", "THUMBNAIL", - "THUMBNAIL_AND_FULLFRAME" + "THUMBNAIL_AND_FULLFRAME", + "ADVANCED" ] }, + "HlsImageBasedTrickPlaySettings": { + "type": "structure", + "members": { + "IntervalCadence": { + "shape": "HlsIntervalCadence", + "locationName": "intervalCadence", + "documentation": "The cadence MediaConvert follows for generating thumbnails. If set to FOLLOW_IFRAME, MediaConvert generates thumbnails for each IDR frame in the output (matching the GOP cadence). If set to FOLLOW_CUSTOM, MediaConvert generates thumbnails according to the interval you specify in thumbnailInterval." + }, + "ThumbnailHeight": { + "shape": "__integerMin2Max4096", + "locationName": "thumbnailHeight", + "documentation": "Height of each thumbnail within each tile image, in pixels. Leave blank to maintain aspect ratio with thumbnail width. If following the aspect ratio would lead to a total tile height greater than 4096, then the job will be rejected. Must be divisible by 2." + }, + "ThumbnailInterval": { + "shape": "__doubleMin0Max2147483647", + "locationName": "thumbnailInterval", + "documentation": "Enter the interval, in seconds, that MediaConvert uses to generate thumbnails. 
If the interval you enter doesn't align with the output frame rate, MediaConvert automatically rounds the interval to align with the output frame rate. For example, if the output frame rate is 29.97 frames per second and you enter 5, MediaConvert uses a 150 frame interval to generate thumbnails." + }, + "ThumbnailWidth": { + "shape": "__integerMin8Max4096", + "locationName": "thumbnailWidth", + "documentation": "Width of each thumbnail within each tile image, in pixels. Default is 312. Must be divisible by 8." + }, + "TileHeight": { + "shape": "__integerMin1Max2048", + "locationName": "tileHeight", + "documentation": "Number of thumbnails in each column of a tile image. Set a value between 2 and 2048. Must be divisible by 2." + }, + "TileWidth": { + "shape": "__integerMin1Max512", + "locationName": "tileWidth", + "documentation": "Number of thumbnails in each row of a tile image. Set a value between 1 and 512." + } + }, + "documentation": "Tile and thumbnail settings applicable when imageBasedTrickPlay is ADVANCED" + }, "HlsInitializationVectorInManifest": { "type": "string", "documentation": "The Initialization Vector is a 128-bit number used in conjunction with the key for encrypting blocks. If set to INCLUDE, Initialization Vector is listed in the manifest. Otherwise Initialization Vector is not in the manifest.", @@ -6150,6 +6400,14 @@ "EXCLUDE" ] }, + "HlsIntervalCadence": { + "type": "string", + "documentation": "The cadence MediaConvert follows for generating thumbnails. If set to FOLLOW_IFRAME, MediaConvert generates thumbnails for each IDR frame in the output (matching the GOP cadence). If set to FOLLOW_CUSTOM, MediaConvert generates thumbnails according to the interval you specify in thumbnailInterval.", + "enum": [ + "FOLLOW_IFRAME", + "FOLLOW_CUSTOM" + ] + }, "HlsKeyProviderType": { "type": "string", "documentation": "Specify whether your DRM encryption key is static or from a key provider that follows the SPEKE standard. 
For more information about SPEKE, see https://docs.aws.amazon.com/speke/latest/documentation/what-is-speke.html.", @@ -6933,7 +7191,7 @@ "ExtendedDataServices": { "shape": "ExtendedDataServices", "locationName": "extendedDataServices", - "documentation": "Hexadecimal value as per EIA-608 Line 21 Data Services, section 9.5.1.5 05h Content Advisory." + "documentation": "If your source content has EIA-608 Line 21 Data Services, enable this feature to specify what MediaConvert does with the Extended Data Services (XDS) packets. You can choose to pass through XDS packets, or remove them from the output. For more information about XDS, see EIA-608 Line Data Services, section 9.5.1.5 05h Content Advisory." }, "Inputs": { "shape": "__listOfInput", @@ -7094,7 +7352,7 @@ "ExtendedDataServices": { "shape": "ExtendedDataServices", "locationName": "extendedDataServices", - "documentation": "Hexadecimal value as per EIA-608 Line 21 Data Services, section 9.5.1.5 05h Content Advisory." + "documentation": "If your source content has EIA-608 Line 21 Data Services, enable this feature to specify what MediaConvert does with the Extended Data Services (XDS) packets. You can choose to pass through XDS packets, or remove them from the output. For more information about XDS, see EIA-608 Line Data Services, section 9.5.1.5 05h Content Advisory." 
}, "Inputs": { "shape": "__listOfInputTemplate", @@ -11922,6 +12180,11 @@ "min": 1, "max": 20 }, + "__integerMin1Max2048": { + "type": "integer", + "min": 1, + "max": 2048 + }, "__integerMin1Max2147483640": { "type": "integer", "min": 1, @@ -11947,6 +12210,16 @@ "min": 1, "max": 4 }, + "__integerMin1Max4096": { + "type": "integer", + "min": 1, + "max": 4096 + }, + "__integerMin1Max512": { + "type": "integer", + "min": 1, + "max": 512 + }, "__integerMin1Max6": { "type": "integer", "min": 1, @@ -11987,6 +12260,11 @@ "min": 2, "max": 2147483647 }, + "__integerMin2Max4096": { + "type": "integer", + "min": 2, + "max": 4096 + }, "__integerMin32000Max192000": { "type": "integer", "min": 32000, @@ -12057,6 +12335,11 @@ "min": 8, "max": 12 }, + "__integerMin8Max4096": { + "type": "integer", + "min": 8, + "max": 4096 + }, "__integerMin96Max600": { "type": "integer", "min": 96, @@ -12469,6 +12752,12 @@ "max": 3, "pattern": "^[A-Za-z]{3}$" }, + "__stringMin6Max8Pattern09aFAF609aFAF2": { + "type": "string", + "min": 6, + "max": 8, + "pattern": "^[0-9a-fA-F]{6}([0-9a-fA-F]{2})?$" + }, "__stringMin9Max19PatternAZ26EastWestCentralNorthSouthEastWest1912": { "type": "string", "min": 9, diff --git a/botocore/data/network-firewall/2020-11-12/service-2.json b/botocore/data/network-firewall/2020-11-12/service-2.json index 03520551..ac8b05d0 100644 --- a/botocore/data/network-firewall/2020-11-12/service-2.json +++ b/botocore/data/network-firewall/2020-11-12/service-2.json @@ -151,7 +151,8 @@ {"shape":"InvalidRequestException"}, {"shape":"InternalServerError"}, {"shape":"ResourceNotFoundException"}, - {"shape":"ThrottlingException"} + {"shape":"ThrottlingException"}, + {"shape":"InvalidResourcePolicyException"} ], "documentation":"

Deletes a resource policy that you created in a PutResourcePolicy request.

" }, @@ -531,7 +532,7 @@ "type":"string", "max":255, "min":1, - "pattern":"^([a-fA-F\\d:\\.]+/\\d{1,3})$" + "pattern":"^([a-fA-F\\d:\\.]+($|/\\d{1,3}))$" }, "Addresses":{ "type":"list", @@ -1264,7 +1265,15 @@ }, "StatefulRuleGroupReferences":{ "shape":"StatefulRuleGroupReferences", - "documentation":"

References to the stateless rule groups that are used in the policy. These define the inspection criteria in stateful rules.

" + "documentation":"

References to the stateful rule groups that are used in the policy. These define the inspection criteria in stateful rules.

" + }, + "StatefulDefaultActions":{ + "shape":"StatefulActions", + "documentation":"

The default actions to take on a packet that doesn't match any stateful rules.

" + }, + "StatefulEngineOptions":{ + "shape":"StatefulEngineOptions", + "documentation":"

Additional options governing how Network Firewall handles stateful rules. The stateful rule groups that you use in your policy must have stateful rule options settings that are compatible with these settings.

" } }, "documentation":"

The firewall policy defines the behavior of a firewall using a collection of stateless and stateful rule groups and other settings. You can use one firewall policy for multiple firewalls.

This, along with FirewallPolicyResponse, define the policy. You can retrieve all objects for a firewall policy by calling DescribeFirewallPolicy.

" @@ -1314,6 +1323,18 @@ "Tags":{ "shape":"TagList", "documentation":"

The key:value pairs to associate with the resource.

" + }, + "ConsumedStatelessRuleCapacity":{ + "shape":"RuleCapacity", + "documentation":"

The number of capacity units currently consumed by the policy's stateless rules.

" + }, + "ConsumedStatefulRuleCapacity":{ + "shape":"RuleCapacity", + "documentation":"

The number of capacity units currently consumed by the policy's stateful rules.

" + }, + "NumberOfAssociations":{ + "shape":"NumberOfAssociations", + "documentation":"

The number of firewalls that are associated with this firewall policy.

" } }, "documentation":"

The high-level properties of a firewall policy. This, along with the FirewallPolicy, define the policy. You can retrieve all objects for a firewall policy by calling DescribeFirewallPolicy.

" @@ -1396,7 +1417,7 @@ }, "SourcePort":{ "shape":"Port", - "documentation":"

The source port to inspect for. You can specify an individual port, for example 1994 and you can specify a port range, for example 1990-1994. To match with any port, specify ANY.

" + "documentation":"

The source port to inspect for. You can specify an individual port, for example 1994 and you can specify a port range, for example 1990:1994. To match with any port, specify ANY.

" }, "Direction":{ "shape":"StatefulRuleDirection", @@ -1408,10 +1429,10 @@ }, "DestinationPort":{ "shape":"Port", - "documentation":"

The destination port to inspect for. You can specify an individual port, for example 1994 and you can specify a port range, for example 1990-1994. To match with any port, specify ANY.

" + "documentation":"

The destination port to inspect for. You can specify an individual port, for example 1994 and you can specify a port range, for example 1990:1994. To match with any port, specify ANY.

" } }, - "documentation":"

The 5-tuple criteria for AWS Network Firewall to use to inspect packet headers in stateful traffic flow inspection. Traffic flows that match the criteria are a match for the corresponding StatefulRule.

" + "documentation":"

The basic rule criteria for AWS Network Firewall to use to inspect packet headers in stateful traffic flow inspection. Traffic flows that match the criteria are a match for the corresponding StatefulRule.

" }, "IPSet":{ "type":"structure", @@ -1468,7 +1489,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

", + "documentation":"

The policy statement failed validation.

", "exception":true }, "InvalidTokenException":{ @@ -1688,11 +1709,11 @@ }, "SourcePorts":{ "shape":"PortRanges", - "documentation":"

The source ports to inspect for. If not specified, this matches with any source port. This setting is only used for protocols 6 (TCP) and 17 (UDP).

You can specify individual ports, for example 1994 and you can specify port ranges, for example 1990-1994.

" + "documentation":"

The source ports to inspect for. If not specified, this matches with any source port. This setting is only used for protocols 6 (TCP) and 17 (UDP).

You can specify individual ports, for example 1994 and you can specify port ranges, for example 1990:1994.

" }, "DestinationPorts":{ "shape":"PortRanges", - "documentation":"

The destination ports to inspect for. If not specified, this matches with any destination port. This setting is only used for protocols 6 (TCP) and 17 (UDP).

You can specify individual ports, for example 1994 and you can specify port ranges, for example 1990-1994.

" + "documentation":"

The destination ports to inspect for. If not specified, this matches with any destination port. This setting is only used for protocols 6 (TCP) and 17 (UDP).

You can specify individual ports, for example 1994 and you can specify port ranges, for example 1990:1994.

" }, "Protocols":{ "shape":"ProtocolNumbers", @@ -1705,6 +1726,7 @@ }, "documentation":"

Criteria for Network Firewall to use to inspect an individual packet in stateless rule inspection. Each match attributes set can include one or more items such as IP address, CIDR range, port number, protocol, and TCP flags.

" }, + "NumberOfAssociations":{"type":"integer"}, "PaginationMaxResults":{ "type":"integer", "max":100, @@ -1869,7 +1891,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

", + "documentation":"

Unable to change the resource because your account doesn't own it.

", "exception":true }, "ResourceStatus":{ @@ -1909,6 +1931,10 @@ "RulesSource":{ "shape":"RulesSource", "documentation":"

The stateful rules or stateless rules for the rule group.

" + }, + "StatefulRuleOptions":{ + "shape":"StatefulRuleOptions", + "documentation":"

Additional options governing how Network Firewall handles stateful rules. The policies where you use your stateful rule group must have stateful rule options settings that are compatible with these settings.

" } }, "documentation":"

The object that defines the rules in a rule group. This, along with RuleGroupResponse, define the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

AWS Network Firewall uses a rule group to inspect and control network traffic. You define stateless rule groups to inspect individual packets and you define stateful rule groups to inspect packets in the context of their traffic flow.

To use a rule group, you include it by reference in an Network Firewall firewall policy, then you use the policy in a firewall. You can reference a rule group from more than one firewall policy, and you can use a firewall policy in more than one firewall.

" @@ -1966,6 +1992,14 @@ "Tags":{ "shape":"TagList", "documentation":"

The key:value pairs to associate with the resource.

" + }, + "ConsumedCapacity":{ + "shape":"RuleCapacity", + "documentation":"

The number of capacity units currently consumed by the rule group rules.

" + }, + "NumberOfAssociations":{ + "shape":"NumberOfAssociations", + "documentation":"

The number of firewall policies that use this rule group.

" } }, "documentation":"

The high-level properties of a rule group. This, along with the RuleGroup, define the rule group. You can retrieve all objects for a rule group by calling DescribeRuleGroup.

" @@ -2000,6 +2034,13 @@ "type":"list", "member":{"shape":"RuleOption"} }, + "RuleOrder":{ + "type":"string", + "enum":[ + "DEFAULT_ACTION_ORDER", + "STRICT_ORDER" + ] + }, "RuleTargets":{ "type":"list", "member":{"shape":"CollectionMember_String"} @@ -2037,7 +2078,7 @@ }, "StatefulRules":{ "shape":"StatefulRules", - "documentation":"

The 5-tuple stateful inspection criteria. This contains an array of individual 5-tuple stateful rules to be used together in a stateful rule group.

" + "documentation":"

An array of individual stateful rules inspection criteria to be used together in a stateful rule group. Use this option to specify simple Suricata rules with protocol, source and destination, ports, direction, and rule options. For information about the Suricata Rules format, see Rules Format.

" }, "StatelessRulesAndCustomActions":{ "shape":"StatelessRulesAndCustomActions", @@ -2060,18 +2101,18 @@ }, "TargetTypes":{ "shape":"TargetTypes", - "documentation":"

The protocols you want to inspect. Specify TLS_SNI for HTTPS. Specity HTTP_HOST for HTTP. You can specify either or both.

" + "documentation":"

The protocols you want to inspect. Specify TLS_SNI for HTTPS. Specify HTTP_HOST for HTTP. You can specify either or both.

" }, "GeneratedRulesType":{ "shape":"GeneratedRulesType", "documentation":"

Whether you want to allow or deny access to the domains in your target list.

" } }, - "documentation":"

Stateful inspection criteria for a domain list rule group.

For HTTPS traffic, domain filtering is SNI-based. It uses the server name indicator extension of the TLS handshake.

By default, Network Firewall domain list inspection only includes traffic coming from the VPC where you deploy the firewall. To inspect traffic from IP addresses outside of the deployment VPC, you set the HOME_NET rule variable to include the CIDR range of the deployment VPC plus the other CIDR ranges. For more information, see RuleVariables in this guide and Stateful domain list rule groups in AWS Network Firewall in the Network Firewall Developer Guide

" + "documentation":"

Stateful inspection criteria for a domain list rule group.

For HTTPS traffic, domain filtering is SNI-based. It uses the server name indicator extension of the TLS handshake.

By default, Network Firewall domain list inspection only includes traffic coming from the VPC where you deploy the firewall. To inspect traffic from IP addresses outside of the deployment VPC, you set the HOME_NET rule variable to include the CIDR range of the deployment VPC plus the other CIDR ranges. For more information, see RuleVariables in this guide and Stateful domain list rule groups in AWS Network Firewall in the Network Firewall Developer Guide.

" }, "RulesString":{ "type":"string", - "max":1000000, + "max":2000000, "min":0 }, "Setting":{ @@ -2098,6 +2139,20 @@ "ALERT" ] }, + "StatefulActions":{ + "type":"list", + "member":{"shape":"CollectionMember_String"} + }, + "StatefulEngineOptions":{ + "type":"structure", + "members":{ + "RuleOrder":{ + "shape":"RuleOrder", + "documentation":"

Indicates how to manage the order of stateful rule evaluation for the policy. By default, Network Firewall leaves the rule evaluation order up to the Suricata rule processing engine. If you set this to STRICT_ORDER, your rules are evaluated in the exact order that you provide them in the policy. With strict ordering, the rule groups are evaluated by order of priority, starting from the lowest number, and the rules in each rule group are processed in the order that they're defined.

" + } + }, + "documentation":"

Configuration settings for the handling of the stateful rule groups in a firewall policy.

" + }, "StatefulRule":{ "type":"structure", "required":[ @@ -2112,14 +2167,14 @@ }, "Header":{ "shape":"Header", - "documentation":"

The stateful 5-tuple inspection criteria for this rule, used to inspect traffic flows.

" + "documentation":"

The stateful inspection criteria for this rule, used to inspect traffic flows.

" }, "RuleOptions":{ "shape":"RuleOptions", - "documentation":"

" + "documentation":"

Additional options for the rule. These are the Suricata RuleOptions settings.

" } }, - "documentation":"

A single 5-tuple stateful rule, for use in a stateful rule group.

" + "documentation":"

A single Suricata rules specification, for use in a stateful rule group. Use this option to specify a simple Suricata rule with protocol, source and destination, ports, direction, and rule options. For information about the Suricata Rules format, see Rules Format.

" }, "StatefulRuleDirection":{ "type":"string", @@ -2135,6 +2190,11 @@ "ResourceArn":{ "shape":"ResourceArn", "documentation":"

The Amazon Resource Name (ARN) of the stateful rule group.

" + }, + "Priority":{ + "shape":"Priority", + "documentation":"

An integer setting that indicates the order in which to run the stateful rule groups in a single FirewallPolicy. This setting only applies to firewall policies that specify the STRICT_ORDER rule order in the stateful engine options settings.

Network Firewall evaluates each stateful rule group against a packet starting with the group that has the lowest priority setting. You must ensure that the priority settings are unique within each policy.

You can change the priority settings of your rule groups at any time. To make it easier to insert rule groups later, number them so there's a wide range in between, for example use 100, 200, and so on.

", + "box":true } }, "documentation":"

Identifier for a single stateful rule group, used in a firewall policy to refer to a rule group.

" @@ -2143,6 +2203,16 @@ "type":"list", "member":{"shape":"StatefulRuleGroupReference"} }, + "StatefulRuleOptions":{ + "type":"structure", + "members":{ + "RuleOrder":{ + "shape":"RuleOrder", + "documentation":"

Indicates how to manage the order of the rule evaluation for the rule group. By default, Network Firewall leaves the rule evaluation order up to the Suricata rule processing engine. If you set this to STRICT_ORDER, your rules are evaluated in the exact order that they're listed in your Suricata rules string.

" + } + }, + "documentation":"

Additional options governing how Network Firewall handles the rule group. You can only use these for stateful rule groups.

" + }, "StatefulRuleProtocol":{ "type":"string", "enum":[ @@ -2188,7 +2258,7 @@ }, "Priority":{ "shape":"Priority", - "documentation":"

A setting that indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group. Network Firewall evaluates the rules in a rule group starting with the lowest priority setting. You must ensure that the priority settings are unique for the rule group.

Each stateless rule group uses exactly one StatelessRulesAndCustomActions object, and each StatelessRulesAndCustomActions contains exactly one StatelessRules object. To ensure unique priority settings for your rule groups, set unique priorities for the stateless rules that you define inside any single StatelessRules object.

You can change the priority settings of your rules at any time. To make it easier to insert rules later, number them so there's a wide range in between, for example use 100, 200, and so on.

" + "documentation":"

Indicates the order in which to run this rule relative to all of the rules that are defined for a stateless rule group. Network Firewall evaluates the rules in a rule group starting with the lowest priority setting. You must ensure that the priority settings are unique for the rule group.

Each stateless rule group uses exactly one StatelessRulesAndCustomActions object, and each StatelessRulesAndCustomActions contains exactly one StatelessRules object. To ensure unique priority settings for your rule groups, set unique priorities for the stateless rules that you define inside any single StatelessRules object.

You can change the priority settings of your rules at any time. To make it easier to insert rules later, number them so there's a wide range in between, for example use 100, 200, and so on.

" } }, "documentation":"

A single stateless rule. This is used in StatelessRulesAndCustomActions.

" @@ -2756,5 +2826,5 @@ "member":{"shape":"VpcId"} } }, - "documentation":"

This is the API Reference for AWS Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors.

  • The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the AWS REST APIs, see AWS APIs.

    To access Network Firewall using the REST API endpoint: https://network-firewall.<region>.amazonaws.com

  • Alternatively, you can use one of the AWS SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

  • For descriptions of Network Firewall features, including and step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide.

Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or AWS Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source intrusion detection system (IDS) engine. For information about Suricata, see the Suricata website.

You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples:

  • Allow domains or IP addresses for known AWS service endpoints, such as Amazon S3, and block all other forms of traffic.

  • Use custom lists of known bad domains to limit the types of domain names that your applications can access.

  • Perform deep packet inspection on traffic entering or leaving your VPC.

  • Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used.

To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide.

To start using Network Firewall, do the following:

  1. (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC.

  2. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall.

  3. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have.

  4. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior.

  5. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy.

  6. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints.

" + "documentation":"

This is the API Reference for AWS Network Firewall. This guide is for developers who need detailed information about the Network Firewall API actions, data types, and errors.

  • The REST API requires you to handle connection details, such as calculating signatures, handling request retries, and error handling. For general information about using the AWS REST APIs, see AWS APIs.

    To access Network Firewall using the REST API endpoint: https://network-firewall.<region>.amazonaws.com

  • Alternatively, you can use one of the AWS SDKs to access an API that's tailored to the programming language or platform that you're using. For more information, see AWS SDKs.

  • For descriptions of Network Firewall features, including step-by-step instructions on how to use them through the Network Firewall console, see the Network Firewall Developer Guide.

Network Firewall is a stateful, managed, network firewall and intrusion detection and prevention service for Amazon Virtual Private Cloud (Amazon VPC). With Network Firewall, you can filter traffic at the perimeter of your VPC. This includes filtering traffic going to and coming from an internet gateway, NAT gateway, or over VPN or AWS Direct Connect. Network Firewall uses rules that are compatible with Suricata, a free, open source intrusion detection system (IDS) engine. AWS Network Firewall supports Suricata version 5.0.2. For information about Suricata, see the Suricata website.

You can use Network Firewall to monitor and protect your VPC traffic in a number of ways. The following are just a few examples:

  • Allow domains or IP addresses for known AWS service endpoints, such as Amazon S3, and block all other forms of traffic.

  • Use custom lists of known bad domains to limit the types of domain names that your applications can access.

  • Perform deep packet inspection on traffic entering or leaving your VPC.

  • Use stateful protocol detection to filter protocols like HTTPS, regardless of the port used.

To enable Network Firewall for your VPCs, you perform steps in both Amazon VPC and in Network Firewall. For information about using Amazon VPC, see Amazon VPC User Guide.

To start using Network Firewall, do the following:

  1. (Optional) If you don't already have a VPC that you want to protect, create it in Amazon VPC.

  2. In Amazon VPC, in each Availability Zone where you want to have a firewall endpoint, create a subnet for the sole use of Network Firewall.

  3. In Network Firewall, create stateless and stateful rule groups, to define the components of the network traffic filtering behavior that you want your firewall to have.

  4. In Network Firewall, create a firewall policy that uses your rule groups and specifies additional default traffic filtering behavior.

  5. In Network Firewall, create a firewall and specify your new firewall policy and VPC subnets. Network Firewall creates a firewall endpoint in each subnet that you specify, with the behavior that's defined in the firewall policy.

  6. In Amazon VPC, use ingress routing enhancements to route traffic through the new firewall endpoints.

" } diff --git a/botocore/data/pinpoint/2016-12-01/service-2.json b/botocore/data/pinpoint/2016-12-01/service-2.json index bbd1f4b5..381e1b33 100644 --- a/botocore/data/pinpoint/2016-12-01/service-2.json +++ b/botocore/data/pinpoint/2016-12-01/service-2.json @@ -5957,6 +5957,10 @@ "Wait": { "shape": "WaitActivity", "documentation": "

The settings for a wait activity. This type of activity waits for a certain amount of time or until a specific date and time before moving participants to the next activity in a journey.

" + }, + "ContactCenter": { + "shape": "ContactCenterActivity", + "documentation": "

The settings for a connect activity. This type of activity initiates a contact center call to participants.

" } }, "documentation": "

Specifies the configuration and other settings for an activity in a journey.

" @@ -6892,6 +6896,15 @@ }, "documentation": "

Specifies the settings for a yes/no split activity in a journey. This type of activity sends participants down one of two paths in a journey, based on conditions that you specify.

To create yes/no split activities that send participants down different paths based on push notification events (such as Open or Received events), your mobile app has to specify the User ID and Endpoint ID values. For more information, see Integrating Amazon Pinpoint with your application in the Amazon Pinpoint Developer Guide.

" }, + "ContactCenterActivity": { + "type": "structure", + "members": { + "NextActivity": { + "shape": "__string", + "documentation": "

The unique identifier for the next activity to perform after this activity.

" + } + } + }, "ConflictException": { "type": "structure", "members": { @@ -11683,6 +11696,20 @@ }, "documentation": "

Specifies the message configuration for a push notification that's sent to participants in a journey.

" }, + "JourneyChannelSettings": { + "type": "structure", + "members": { + "ConnectCampaignArn": { + "shape": "__string", + "documentation": "

Amazon Resource Name (ARN) of the Connect Campaign.

" + }, + "ConnectCampaignExecutionRoleArn": { + "shape": "__string", + "documentation": "

IAM role ARN to be assumed when invoking Connect campaign execution APIs for dialing.

" + } + }, + "documentation": "

The channel-specific configurations for the journey.

" + }, "JourneyResponse": { "type": "structure", "members": { @@ -11748,12 +11775,16 @@ "documentation": "

This object is not used or supported.

" }, "WaitForQuietTime": { - "shape": "__boolean", - "documentation": "

Specifies whether endpoints in quiet hours should enter a wait till the end of their quiet hours.

" + "shape": "__boolean", + "documentation": "

Specifies whether endpoints in quiet hours should enter a wait till the end of their quiet hours.

" }, "RefreshOnSegmentUpdate": { - "shape": "__boolean", - "documentation": "

Specifies whether a journey should be refreshed on segment update.

" + "shape": "__boolean", + "documentation": "

Specifies whether a journey should be refreshed on segment update.

" + }, + "JourneyChannelSettings": { + "shape": "JourneyChannelSettings", + "documentation": "

The channel-specific configurations for the journey.

" } }, "documentation": "

Provides information about the status, configuration, and other settings for a journey.

", diff --git a/botocore/data/sesv2/2019-09-27/service-2.json b/botocore/data/sesv2/2019-09-27/service-2.json index 53919a41..01180f44 100644 --- a/botocore/data/sesv2/2019-09-27/service-2.json +++ b/botocore/data/sesv2/2019-09-27/service-2.json @@ -112,7 +112,7 @@ {"shape":"BadRequestException"}, {"shape":"ConcurrentModificationException"} ], - "documentation":"

Create a new pool of dedicated IP addresses. A pool can include one or more dedicated IP addresses that are associated with your AWS account. You can associate a pool with a configuration set. When you send an email that uses that configuration set, the message is sent from one of the addresses in the associated pool.

" + "documentation":"

Create a new pool of dedicated IP addresses. A pool can include one or more dedicated IP addresses that are associated with your Amazon Web Services account. You can associate a pool with a configuration set. When you send an email that uses that configuration set, the message is sent from one of the addresses in the associated pool.

" }, "CreateDeliverabilityTestReport":{ "name":"CreateDeliverabilityTestReport", @@ -151,7 +151,7 @@ {"shape":"ConcurrentModificationException"}, {"shape":"NotFoundException"} ], - "documentation":"

Starts the process of verifying an email identity. An identity is an email address or domain that you use when you send email. Before you can use an identity to send email, you first have to verify it. By verifying an identity, you demonstrate that you're the owner of the identity, and that you've given Amazon SES API v2 permission to send email from the identity.

When you verify an email address, Amazon SES sends an email to the address. Your email address is verified as soon as you follow the link in the verification email.

When you verify a domain without specifying the DkimSigningAttributes object, this operation provides a set of DKIM tokens. You can convert these tokens into CNAME records, which you then add to the DNS configuration for your domain. Your domain is verified when Amazon SES detects these records in the DNS configuration for your domain. This verification method is known as Easy DKIM.

Alternatively, you can perform the verification process by providing your own public-private key pair. This verification method is known as Bring Your Own DKIM (BYODKIM). To use BYODKIM, your call to the CreateEmailIdentity operation has to include the DkimSigningAttributes object. When you specify this object, you provide a selector (a component of the DNS record name that identifies the public key that you want to use for DKIM authentication) and a private key.

When you verify a domain, this operation provides a set of DKIM tokens, which you can convert into CNAME tokens. You add these CNAME tokens to the DNS configuration for your domain. Your domain is verified when Amazon SES detects these records in the DNS configuration for your domain. For some DNS providers, it can take 72 hours or more to complete the domain verification process.

Additionally, you can associate an existing configuration set with the email identity that you're verifying.

" + "documentation":"

Starts the process of verifying an email identity. An identity is an email address or domain that you use when you send email. Before you can use an identity to send email, you first have to verify it. By verifying an identity, you demonstrate that you're the owner of the identity, and that you've given Amazon SES API v2 permission to send email from the identity.

When you verify an email address, Amazon SES sends an email to the address. Your email address is verified as soon as you follow the link in the verification email.

When you verify a domain without specifying the DkimSigningAttributes object, this operation provides a set of DKIM tokens. You can convert these tokens into CNAME records, which you then add to the DNS configuration for your domain. Your domain is verified when Amazon SES detects these records in the DNS configuration for your domain. This verification method is known as Easy DKIM.

Alternatively, you can perform the verification process by providing your own public-private key pair. This verification method is known as Bring Your Own DKIM (BYODKIM). To use BYODKIM, your call to the CreateEmailIdentity operation has to include the DkimSigningAttributes object. When you specify this object, you provide a selector (a component of the DNS record name that identifies the public key to use for DKIM authentication) and a private key.

When you verify a domain, this operation provides a set of DKIM tokens, which you can convert into CNAME tokens. You add these CNAME tokens to the DNS configuration for your domain. Your domain is verified when Amazon SES detects these records in the DNS configuration for your domain. For some DNS providers, it can take 72 hours or more to complete the domain verification process.

Additionally, you can associate an existing configuration set with the email identity that you're verifying.

" }, "CreateEmailIdentityPolicy":{ "name":"CreateEmailIdentityPolicy", @@ -276,7 +276,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Deletes an existing custom verification email template.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" + "documentation":"

Deletes an existing custom verification email template.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" }, "DeleteDedicatedIpPool":{ "name":"DeleteDedicatedIpPool", @@ -367,7 +367,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Obtain information about the email-sending status and capabilities of your Amazon SES account in the current AWS Region.

" + "documentation":"

Obtain information about the email-sending status and capabilities of your Amazon SES account in the current Amazon Web Services Region.

" }, "GetBlacklistReports":{ "name":"GetBlacklistReports", @@ -487,7 +487,7 @@ {"shape":"NotFoundException"}, {"shape":"BadRequestException"} ], - "documentation":"

List the dedicated IP addresses that are associated with your AWS account.

" + "documentation":"

List the dedicated IP addresses that are associated with your Amazon Web Services account.

" }, "GetDeliverabilityDashboardOptions":{ "name":"GetDeliverabilityDashboardOptions", @@ -502,7 +502,7 @@ {"shape":"LimitExceededException"}, {"shape":"BadRequestException"} ], - "documentation":"

Retrieve information about the status of the Deliverability dashboard for your account. When the Deliverability dashboard is enabled, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email. You also gain the ability to perform predictive inbox placement tests.

When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon SES Pricing.

" + "documentation":"

Retrieve information about the status of the Deliverability dashboard for your account. When the Deliverability dashboard is enabled, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email. You also gain the ability to perform predictive inbox placement tests.

When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other Amazon Web Services services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon SES Pricing.

" }, "GetDeliverabilityTestReport":{ "name":"GetDeliverabilityTestReport", @@ -679,7 +679,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Lists the existing custom verification email templates for your account in the current AWS Region.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" + "documentation":"

Lists the existing custom verification email templates for your account in the current Amazon Web Services Region.

For more information about custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" }, "ListDedicatedIpPools":{ "name":"ListDedicatedIpPools", @@ -693,7 +693,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

List all of the dedicated IP pools that exist in your AWS account in the current Region.

" + "documentation":"

List all of the dedicated IP pools that exist in your Amazon Web Services account in the current Region.

" }, "ListDeliverabilityTestReports":{ "name":"ListDeliverabilityTestReports", @@ -737,7 +737,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Returns a list of all of the email identities that are associated with your AWS account. An identity can be either an email address or a domain. This operation returns identities that are verified as well as those that aren't. This operation returns identities that are associated with Amazon SES and Amazon Pinpoint.

" + "documentation":"

Returns a list of all of the email identities that are associated with your Amazon Web Services account. An identity can be either an email address or a domain. This operation returns identities that are verified as well as those that aren't. This operation returns identities that are associated with Amazon SES and Amazon Pinpoint.

" }, "ListEmailTemplates":{ "name":"ListEmailTemplates", @@ -751,7 +751,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Lists the email templates present in your Amazon SES account in the current AWS Region.

You can execute this operation no more than once per second.

" + "documentation":"

Lists the email templates present in your Amazon SES account in the current Amazon Web Services Region.

You can execute this operation no more than once per second.

" }, "ListImportJobs":{ "name":"ListImportJobs", @@ -882,7 +882,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Enable or disable collection of reputation metrics for emails that you send using a particular configuration set in a specific AWS Region.

" + "documentation":"

Enable or disable collection of reputation metrics for emails that you send using a particular configuration set in a specific Amazon Web Services Region.

" }, "PutConfigurationSetSendingOptions":{ "name":"PutConfigurationSetSendingOptions", @@ -897,7 +897,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Enable or disable email sending for messages that use a particular configuration set in a specific AWS Region.

" + "documentation":"

Enable or disable email sending for messages that use a particular configuration set in a specific Amazon Web Services Region.

" }, "PutConfigurationSetSuppressionOptions":{ "name":"PutConfigurationSetSuppressionOptions", @@ -942,7 +942,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Move a dedicated IP address to an existing dedicated IP pool.

The dedicated IP address that you specify must already exist, and must be associated with your AWS account.

The dedicated IP pool you specify must already exist. You can create a new pool by using the CreateDedicatedIpPool operation.

" + "documentation":"

Move a dedicated IP address to an existing dedicated IP pool.

The dedicated IP address that you specify must already exist, and must be associated with your Amazon Web Services account.

The dedicated IP pool you specify must already exist. You can create a new pool by using the CreateDedicatedIpPool operation.

" }, "PutDedicatedIpWarmupAttributes":{ "name":"PutDedicatedIpWarmupAttributes", @@ -974,7 +974,7 @@ {"shape":"LimitExceededException"}, {"shape":"BadRequestException"} ], - "documentation":"

Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email. You also gain the ability to perform predictive inbox placement tests.

When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon SES Pricing.

" + "documentation":"

Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email. You also gain the ability to perform predictive inbox placement tests.

When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other Amazon Web Services services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon SES Pricing.

" }, "PutEmailIdentityConfigurationSetAttributes":{ "name":"PutEmailIdentityConfigurationSetAttributes", @@ -1019,7 +1019,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"BadRequestException"} ], - "documentation":"

Used to configure or change the DKIM authentication settings for an email domain identity. You can use this operation to do any of the following:

  • Update the signing attributes for an identity that uses Bring Your Own DKIM (BYODKIM).

  • Change from using no DKIM authentication to using Easy DKIM.

  • Change from using no DKIM authentication to using BYODKIM.

  • Change from using Easy DKIM to using BYODKIM.

  • Change from using BYODKIM to using Easy DKIM.

" + "documentation":"

Used to configure or change the DKIM authentication settings for an email domain identity. You can use this operation to do any of the following:

  • Update the signing attributes for an identity that uses Bring Your Own DKIM (BYODKIM).

  • Update the key length that should be used for Easy DKIM.

  • Change from using no DKIM authentication to using Easy DKIM.

  • Change from using no DKIM authentication to using BYODKIM.

  • Change from using Easy DKIM to using BYODKIM.

  • Change from using BYODKIM to using Easy DKIM.

" }, "PutEmailIdentityFeedbackAttributes":{ "name":"PutEmailIdentityFeedbackAttributes", @@ -1102,7 +1102,7 @@ {"shape":"NotFoundException"}, {"shape":"BadRequestException"} ], - "documentation":"

Adds an email address to the list of identities for your Amazon SES account in the current AWS Region and attempts to verify it. As a result of executing this operation, a customized verification email is sent to the specified address.

To use this operation, you must first create a custom verification email template. For more information about creating and using custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" + "documentation":"

Adds an email address to the list of identities for your Amazon SES account in the current Amazon Web Services Region and attempts to verify it. As a result of executing this operation, a customized verification email is sent to the specified address.

To use this operation, you must first create a custom verification email template. For more information about creating and using custom verification email templates, see Using Custom Verification Email Templates in the Amazon SES Developer Guide.

You can execute this operation no more than once per second.

" }, "SendEmail":{ "name":"SendEmail", @@ -1122,7 +1122,7 @@ {"shape":"NotFoundException"}, {"shape":"BadRequestException"} ], - "documentation":"

Sends an email message. You can use the Amazon SES API v2 to send two types of messages:

  • Simple – A standard email message. When you create this type of message, you specify the sender, the recipient, and the message body, and Amazon SES assembles the message for you.

  • Raw – A raw, MIME-formatted email message. When you send this type of email, you have to specify all of the message headers, as well as the message body. You can use this message type to send messages that contain attachments. The message that you specify has to be a valid MIME message.

  • Templated – A message that contains personalization tags. When you send this type of email, Amazon SES API v2 automatically replaces the tags with values that you specify.

" + "documentation":"

Sends an email message. You can use the Amazon SES API v2 to send the following types of messages:

  • Simple – A standard email message. When you create this type of message, you specify the sender, the recipient, and the message body, and Amazon SES assembles the message for you.

  • Raw – A raw, MIME-formatted email message. When you send this type of email, you have to specify all of the message headers, as well as the message body. You can use this message type to send messages that contain attachments. The message that you specify has to be a valid MIME message.

  • Templated – A message that contains personalization tags. When you send this type of email, Amazon SES API v2 automatically replaces the tags with values that you specify.

" }, "TagResource":{ "name":"TagResource", @@ -1337,7 +1337,7 @@ }, "BehaviorOnMxFailure":{ "type":"string", - "documentation":"

The action that you want to take if the required MX record can't be found when you send an email. When you set this value to UseDefaultValue, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified error, and doesn't attempt to deliver the email.

These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending, Failed, and TemporaryFailure states.

", + "documentation":"

The action to take if the required MX record can't be found when you send an email. When you set this value to UseDefaultValue, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified error, and doesn't attempt to deliver the email.

These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending, Failed, and TemporaryFailure states.

", "enum":[ "USE_DEFAULT_VALUE", "REJECT_MESSAGE" @@ -1499,7 +1499,7 @@ }, "DimensionValueSource":{ "shape":"DimensionValueSource", - "documentation":"

The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. If you want to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail or SendRawEmail API, choose messageTag. If you want to use your own email headers, choose emailHeader. If you want to use link tags, choose linkTags.

" + "documentation":"

The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. To use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail or SendRawEmail API, choose messageTag. To use your own email headers, choose emailHeader. To use link tags, choose linkTags.

" }, "DefaultDimensionValue":{ "shape":"DefaultDimensionValue", @@ -1597,7 +1597,7 @@ }, "ContactListImportAction":{ "shape":"ContactListImportAction", - "documentation":"

>The type of action that you want to perform on the addresses. Acceptable values:

  • PUT: add the addresses to the contact list. If the record already exists, it will override it with the new value.

  • DELETE: remove the addresses from the contact list.

" + "documentation":"

The type of action to perform on the addresses. The following are the possible values:

  • PUT: add the addresses to the contact list. If the record already exists, it will override it with the new value.

  • DELETE: remove the addresses from the contact list.

" } }, "documentation":"

An object that contains details about the action of a contact list.

" @@ -1635,7 +1635,7 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The name of the configuration set that you want to add an event destination to.

", + "documentation":"

The name of the configuration set.

", "location":"uri", "locationName":"ConfigurationSetName" }, @@ -1662,7 +1662,7 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The name of the configuration set.

" + "documentation":"

The name of the configuration set. The name can contain up to 64 alphanumeric characters, including letters, numbers, hyphens (-) and underscores (_) only.

" }, "TrackingOptions":{ "shape":"TrackingOptions", @@ -1682,7 +1682,7 @@ }, "Tags":{ "shape":"TagList", - "documentation":"

An array of objects that define the tags (keys and values) that you want to associate with the configuration set.

" + "documentation":"

An array of objects that define the tags (keys and values) to associate with the configuration set.

" }, "SuppressionOptions":{"shape":"SuppressionOptions"} }, @@ -1876,7 +1876,7 @@ "members":{ "EmailIdentity":{ "shape":"Identity", - "documentation":"

The email identity for which you want to create a policy.

", + "documentation":"

The email identity.

", "location":"uri", "locationName":"EmailIdentity" }, @@ -1905,15 +1905,15 @@ "members":{ "EmailIdentity":{ "shape":"Identity", - "documentation":"

The email address or domain that you want to verify.

" + "documentation":"

The email address or domain to verify.

" }, "Tags":{ "shape":"TagList", - "documentation":"

An array of objects that define the tags (keys and values) that you want to associate with the email identity.

" + "documentation":"

An array of objects that define the tags (keys and values) to associate with the email identity.

" }, "DkimSigningAttributes":{ "shape":"DkimSigningAttributes", - "documentation":"

If your request includes this object, Amazon SES configures the identity to use Bring Your Own DKIM (BYODKIM) for DKIM authentication purposes, as opposed to the default method, Easy DKIM.

You can only specify this object if the email identity is a domain, as opposed to an address.

" + "documentation":"

If your request includes this object, Amazon SES configures the identity to use Bring Your Own DKIM (BYODKIM) for DKIM authentication purposes, or, configures the key length to be used for Easy DKIM.

You can only specify this object if the email identity is a domain, as opposed to an address.

" }, "ConfigurationSetName":{ "shape":"ConfigurationSetName", @@ -1927,7 +1927,7 @@ "members":{ "IdentityType":{ "shape":"IdentityType", - "documentation":"

The email identity type.

" + "documentation":"

The email identity type. Note: the MANAGED_DOMAIN identity type is not supported.

" }, "VerifiedForSendingStatus":{ "shape":"Enabled", @@ -1949,7 +1949,7 @@ "members":{ "TemplateName":{ "shape":"EmailTemplateName", - "documentation":"

The name of the template you want to create.

" + "documentation":"

The name of the template.

" }, "TemplateContent":{ "shape":"EmailTemplateContent", @@ -1994,7 +1994,7 @@ }, "CustomRedirectDomain":{ "type":"string", - "documentation":"

The domain that you want to use for tracking open and click events.

" + "documentation":"

The domain to use for tracking open and click events.

" }, "CustomVerificationEmailTemplateMetadata":{ "type":"structure", @@ -2087,7 +2087,7 @@ "DedicatedIpList":{ "type":"list", "member":{"shape":"DedicatedIp"}, - "documentation":"

A list of dedicated IP addresses that are associated with your AWS account.

" + "documentation":"

A list of dedicated IP addresses that are associated with your Amazon Web Services account.

" }, "DefaultDimensionValue":{ "type":"string", @@ -2102,13 +2102,13 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The name of the configuration set that contains the event destination that you want to delete.

", + "documentation":"

The name of the configuration set that contains the event destination to delete.

", "location":"uri", "locationName":"ConfigurationSetName" }, "EventDestinationName":{ "shape":"EventDestinationName", - "documentation":"

The name of the event destination that you want to delete.

", + "documentation":"

The name of the event destination to delete.

", "location":"uri", "locationName":"EventDestinationName" } @@ -2127,7 +2127,7 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The name of the configuration set that you want to delete.

", + "documentation":"

The name of the configuration set.

", "location":"uri", "locationName":"ConfigurationSetName" } @@ -2230,7 +2230,7 @@ "members":{ "EmailIdentity":{ "shape":"Identity", - "documentation":"

The email identity for which you want to delete a policy.

", + "documentation":"

The email identity.

", "location":"uri", "locationName":"EmailIdentity" }, @@ -2255,7 +2255,7 @@ "members":{ "EmailIdentity":{ "shape":"Identity", - "documentation":"

The identity (that is, the email address or domain) that you want to delete.

", + "documentation":"

The identity (that is, the email address or domain) to delete.

", "location":"uri", "locationName":"EmailIdentity" } @@ -2370,7 +2370,7 @@ }, "SendingPoolName":{ "shape":"PoolName", - "documentation":"

The name of the dedicated IP pool that you want to associate with the configuration set.

" + "documentation":"

The name of the dedicated IP pool to associate with the configuration set.

" } }, "documentation":"

Used to associate a configuration set with a dedicated IP pool.

" @@ -2392,7 +2392,7 @@ "documentation":"

An array that contains the email addresses of the \"BCC\" (blind carbon copy) recipients for the email.

" } }, - "documentation":"

An object that describes the recipients for an email.

" + "documentation":"

An object that describes the recipients for an email.

Amazon SES does not support the SMTPUTF8 extension, as described in RFC6531. For this reason, the local part of a destination email address (the part of the email address that precedes the @ sign) may only contain 7-bit ASCII characters. If the domain part of an address (the part after the @ sign) contains non-ASCII characters, they must be encoded using Punycode, as described in RFC3492.

" }, "DimensionName":{ "type":"string", @@ -2400,7 +2400,7 @@ }, "DimensionValueSource":{ "type":"string", - "documentation":"

The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. If you want to use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail or SendRawEmail API, choose messageTag. If you want to use your own email headers, choose emailHeader. If you want to use link tags, choose linkTags.

", + "documentation":"

The location where the Amazon SES API v2 finds the value of a dimension to publish to Amazon CloudWatch. To use the message tags that you specify using an X-SES-MESSAGE-TAGS header or a parameter to the SendEmail or SendRawEmail API, choose messageTag. To use your own email headers, choose emailHeader. To use link tags, choose linkTags.

", "enum":[ "MESSAGE_TAG", "EMAIL_HEADER", @@ -2425,28 +2425,40 @@ }, "SigningAttributesOrigin":{ "shape":"DkimSigningAttributesOrigin", - "documentation":"

A string that indicates how DKIM was configured for the identity. There are two possible values:

  • AWS_SES – Indicates that DKIM was configured for the identity by using Easy DKIM.

  • EXTERNAL – Indicates that DKIM was configured for the identity by using Bring Your Own DKIM (BYODKIM).

" + "documentation":"

A string that indicates how DKIM was configured for the identity. These are the possible values:

  • AWS_SES – Indicates that DKIM was configured for the identity by using Easy DKIM.

  • EXTERNAL – Indicates that DKIM was configured for the identity by using Bring Your Own DKIM (BYODKIM).

" + }, + "NextSigningKeyLength":{ + "shape":"DkimSigningKeyLength", + "documentation":"

[Easy DKIM] The key length of the future DKIM key pair to be generated. This can be changed at most once per day.

" + }, + "CurrentSigningKeyLength":{ + "shape":"DkimSigningKeyLength", + "documentation":"

[Easy DKIM] The key length of the DKIM key pair in use.

" + }, + "LastKeyGenerationTimestamp":{ + "shape":"Timestamp", + "documentation":"

[Easy DKIM] The last time a key pair was generated for this identity.

" } }, "documentation":"

An object that contains information about the DKIM authentication status for an email identity.

Amazon SES determines the authentication status by searching for specific records in the DNS configuration for the domain. If you used Easy DKIM to set up DKIM authentication, Amazon SES tries to find three unique CNAME records in the DNS configuration for your domain. If you provided a public key to perform DKIM authentication, Amazon SES tries to find a TXT record that uses the selector that you specified. The value of the TXT record must be a public key that's paired with the private key that you specified in the process of creating the identity

" }, "DkimSigningAttributes":{ "type":"structure", - "required":[ - "DomainSigningSelector", - "DomainSigningPrivateKey" - ], "members":{ "DomainSigningSelector":{ "shape":"Selector", - "documentation":"

A string that's used to identify a public key in the DNS configuration for a domain.

" + "documentation":"

[Bring Your Own DKIM] A string that's used to identify a public key in the DNS configuration for a domain.

" }, "DomainSigningPrivateKey":{ "shape":"PrivateKey", - "documentation":"

A private key that's used to generate a DKIM signature.

The private key must use 1024-bit RSA encryption, and must be encoded using base64 encoding.

" + "documentation":"

[Bring Your Own DKIM] A private key that's used to generate a DKIM signature.

The private key must use 1024 or 2048-bit RSA encryption, and must be encoded using base64 encoding.

" + }, + "NextSigningKeyLength":{ + "shape":"DkimSigningKeyLength", + "documentation":"

[Easy DKIM] The key length of the future DKIM key pair to be generated. This can be changed at most once per day.

" } }, - "documentation":"

An object that contains information about the tokens used for setting up Bring Your Own DKIM (BYODKIM).

" + "documentation":"

An object that contains configuration for Bring Your Own DKIM (BYODKIM), or for Easy DKIM.

" }, "DkimSigningAttributesOrigin":{ "type":"string", @@ -2455,6 +2467,13 @@ "EXTERNAL" ] }, + "DkimSigningKeyLength":{ + "type":"string", + "enum":[ + "RSA_1024_BIT", + "RSA_2048_BIT" + ] + }, "DkimStatus":{ "type":"string", "documentation":"

The DKIM authentication status of the identity. The status can be one of the following:

  • PENDING – The verification process was initiated, but Amazon SES hasn't yet detected the DKIM records in the DNS configuration for the domain.

  • SUCCESS – The verification process completed successfully.

  • FAILED – The verification process failed. This typically occurs when Amazon SES fails to find the DKIM records in the DNS configuration of the domain.

  • TEMPORARY_FAILURE – A temporary issue is preventing Amazon SES from determining the DKIM authentication status of the domain.

  • NOT_STARTED – The DKIM verification process hasn't been initiated for the domain.

", @@ -2544,7 +2563,7 @@ "members":{ "Domain":{ "shape":"Domain", - "documentation":"

A verified domain that’s associated with your AWS account and currently has an active Deliverability dashboard subscription.

" + "documentation":"

A verified domain that’s associated with your Amazon Web Services account and currently has an active Deliverability dashboard subscription.

" }, "SubscriptionStartDate":{ "shape":"Timestamp", @@ -2819,26 +2838,26 @@ }, "ProductionAccessEnabled":{ "shape":"Enabled", - "documentation":"

Indicates whether or not your account has production access in the current AWS Region.

If the value is false, then your account is in the sandbox. When your account is in the sandbox, you can only send email to verified identities. Additionally, the maximum number of emails you can send in a 24-hour period (your sending quota) is 200, and the maximum number of emails you can send per second (your maximum sending rate) is 1.

If the value is true, then your account has production access. When your account has production access, you can send email to any address. The sending quota and maximum sending rate for your account vary based on your specific use case.

" + "documentation":"

Indicates whether or not your account has production access in the current Amazon Web Services Region.

If the value is false, then your account is in the sandbox. When your account is in the sandbox, you can only send email to verified identities. Additionally, the maximum number of emails you can send in a 24-hour period (your sending quota) is 200, and the maximum number of emails you can send per second (your maximum sending rate) is 1.

If the value is true, then your account has production access. When your account has production access, you can send email to any address. The sending quota and maximum sending rate for your account vary based on your specific use case.

" }, "SendQuota":{ "shape":"SendQuota", - "documentation":"

An object that contains information about the per-day and per-second sending limits for your Amazon SES account in the current AWS Region.

" + "documentation":"

An object that contains information about the per-day and per-second sending limits for your Amazon SES account in the current Amazon Web Services Region.

" }, "SendingEnabled":{ "shape":"Enabled", - "documentation":"

Indicates whether or not email sending is enabled for your Amazon SES account in the current AWS Region.

" + "documentation":"

Indicates whether or not email sending is enabled for your Amazon SES account in the current Amazon Web Services Region.

" }, "SuppressionAttributes":{ "shape":"SuppressionAttributes", - "documentation":"

An object that contains information about the email address suppression preferences for your account in the current AWS Region.

" + "documentation":"

An object that contains information about the email address suppression preferences for your account in the current Amazon Web Services Region.

" }, "Details":{ "shape":"AccountDetails", "documentation":"

An object that defines your account details.

" } }, - "documentation":"

A list of details about the email-sending capabilities of your Amazon SES account in the current AWS Region.

" + "documentation":"

A list of details about the email-sending capabilities of your Amazon SES account in the current Amazon Web Services Region.

" }, "GetBlacklistReportsRequest":{ "type":"structure", @@ -2893,7 +2912,7 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The name of the configuration set that you want to obtain more information about.

", + "documentation":"

The name of the configuration set.

", "location":"uri", "locationName":"ConfigurationSetName" } @@ -3082,7 +3101,7 @@ "members":{ "Ip":{ "shape":"Ip", - "documentation":"

The IP address that you want to obtain more information about. The value you specify has to be a dedicated IP address that's assocaited with your AWS account.

", + "documentation":"

The IP address that you want to obtain more information about. The value you specify has to be a dedicated IP address that's associated with your Amazon Web Services account.

", "location":"uri", "locationName":"IP" } @@ -3128,20 +3147,20 @@ "members":{ "DedicatedIps":{ "shape":"DedicatedIpList", - "documentation":"

A list of dedicated IP addresses that are associated with your AWS account.

" + "documentation":"

A list of dedicated IP addresses that are associated with your Amazon Web Services account.

" }, "NextToken":{ "shape":"NextToken", "documentation":"

A token that indicates that there are additional dedicated IP addresses to list. To view additional addresses, issue another request to GetDedicatedIps, passing this token in the NextToken parameter.

" } }, - "documentation":"

Information about the dedicated IP addresses that are associated with your AWS account.

" + "documentation":"

Information about the dedicated IP addresses that are associated with your Amazon Web Services account.

" }, "GetDeliverabilityDashboardOptionsRequest":{ "type":"structure", "members":{ }, - "documentation":"

Retrieve information about the status of the Deliverability dashboard for your AWS account. When the Deliverability dashboard is enabled, you gain access to reputation, deliverability, and other metrics for your domains. You also gain the ability to perform predictive inbox placement tests.

When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.

" + "documentation":"

Retrieve information about the status of the Deliverability dashboard for your Amazon Web Services account. When the Deliverability dashboard is enabled, you gain access to reputation, deliverability, and other metrics for your domains. You also gain the ability to perform predictive inbox placement tests.

When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other Amazon Web Services services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.

" }, "GetDeliverabilityDashboardOptionsResponse":{ "type":"structure", @@ -3291,7 +3310,7 @@ "members":{ "EmailIdentity":{ "shape":"Identity", - "documentation":"

The email identity that you want to retrieve policies for.

", + "documentation":"

The email identity.

", "location":"uri", "locationName":"EmailIdentity" } @@ -3314,7 +3333,7 @@ "members":{ "EmailIdentity":{ "shape":"Identity", - "documentation":"

The email identity that you want to retrieve details for.

", + "documentation":"

The email identity.

", "location":"uri", "locationName":"EmailIdentity" } @@ -3326,7 +3345,7 @@ "members":{ "IdentityType":{ "shape":"IdentityType", - "documentation":"

The email identity type.

" + "documentation":"

The email identity type. Note: the MANAGED_DOMAIN identity type is not supported.

" }, "FeedbackForwardingStatus":{ "shape":"Enabled", @@ -3365,7 +3384,7 @@ "members":{ "TemplateName":{ "shape":"EmailTemplateName", - "documentation":"

The name of the template you want to retrieve.

", + "documentation":"

The name of the template.

", "location":"uri", "locationName":"TemplateName" } @@ -3381,7 +3400,7 @@ "members":{ "TemplateName":{ "shape":"EmailTemplateName", - "documentation":"

The name of the template you want to retrieve.

" + "documentation":"

The name of the template.

" }, "TemplateContent":{ "shape":"EmailTemplateContent", @@ -3478,7 +3497,7 @@ "members":{ "IdentityType":{ "shape":"IdentityType", - "documentation":"

The email identity type. The identity type can be one of the following:

  • EMAIL_ADDRESS – The identity is an email address.

  • DOMAIN – The identity is a domain.

  • MANAGED_DOMAIN – The identity is a domain that is managed by AWS.

" + "documentation":"

The email identity type. Note: the MANAGED_DOMAIN type is not supported for email identity types.

" }, "IdentityName":{ "shape":"Identity", @@ -3497,7 +3516,6 @@ }, "IdentityType":{ "type":"string", - "documentation":"

The email identity type. The identity type can be one of the following:

  • EMAIL_ADDRESS – The identity is an email address.

  • DOMAIN – The identity is a domain.

", "enum":[ "EMAIL_ADDRESS", "DOMAIN", @@ -3575,7 +3593,7 @@ "documentation":"

An array of strings, one for each major email provider that the inbox placement data applies to.

" } }, - "documentation":"

An object that contains information about the inbox placement data settings for a verified domain that’s associated with your AWS account. This data is available only if you enabled the Deliverability dashboard for the domain.

" + "documentation":"

An object that contains information about the inbox placement data settings for a verified domain that’s associated with your Amazon Web Services account. This data is available only if you enabled the Deliverability dashboard for the domain.

" }, "InvalidNextTokenException":{ "type":"structure", @@ -3680,21 +3698,21 @@ "locationName":"PageSize" } }, - "documentation":"

A request to obtain a list of configuration sets for your Amazon SES account in the current AWS Region.

" + "documentation":"

A request to obtain a list of configuration sets for your Amazon SES account in the current Amazon Web Services Region.

" }, "ListConfigurationSetsResponse":{ "type":"structure", "members":{ "ConfigurationSets":{ "shape":"ConfigurationSetNameList", - "documentation":"

An array that contains all of the configuration sets in your Amazon SES account in the current AWS Region.

" + "documentation":"

An array that contains all of the configuration sets in your Amazon SES account in the current Amazon Web Services Region.

" }, "NextToken":{ "shape":"NextToken", "documentation":"

A token that indicates that there are additional configuration sets to list. To view additional configuration sets, issue another request to ListConfigurationSets, and pass this token in the NextToken parameter.

" } }, - "documentation":"

A list of configuration sets in your Amazon SES account in the current AWS Region.

" + "documentation":"

A list of configuration sets in your Amazon SES account in the current Amazon Web Services Region.

" }, "ListContactListsRequest":{ "type":"structure", @@ -3836,7 +3854,7 @@ "members":{ "DedicatedIpPools":{ "shape":"ListOfDedicatedIpPools", - "documentation":"

A list of all of the dedicated IP pools that are associated with your AWS account in the current Region.

" + "documentation":"

A list of all of the dedicated IP pools that are associated with your Amazon Web Services account in the current Region.

" }, "NextToken":{ "shape":"NextToken", @@ -3950,14 +3968,14 @@ "locationName":"PageSize" } }, - "documentation":"

A request to list all of the email identities associated with your AWS account. This list includes identities that you've already verified, identities that are unverified, and identities that were verified in the past, but are no longer verified.

" + "documentation":"

A request to list all of the email identities associated with your Amazon Web Services account. This list includes identities that you've already verified, identities that are unverified, and identities that were verified in the past, but are no longer verified.

" }, "ListEmailIdentitiesResponse":{ "type":"structure", "members":{ "EmailIdentities":{ "shape":"IdentityInfoList", - "documentation":"

An array that includes all of the email identities associated with your AWS account.

" + "documentation":"

An array that includes all of the email identities associated with your Amazon Web Services account.

" }, "NextToken":{ "shape":"NextToken", @@ -3982,7 +4000,7 @@ "locationName":"PageSize" } }, - "documentation":"

Represents a request to list the email templates present in your Amazon SES account in the current AWS Region. For more information, see the Amazon SES Developer Guide.

" + "documentation":"

Represents a request to list the email templates present in your Amazon SES account in the current Amazon Web Services Region. For more information, see the Amazon SES Developer Guide.

" }, "ListEmailTemplatesResponse":{ "type":"structure", @@ -4060,7 +4078,7 @@ "ListOfDedicatedIpPools":{ "type":"list", "member":{"shape":"PoolName"}, - "documentation":"

A list of dedicated IP pools that are associated with your AWS account.

" + "documentation":"

A list of dedicated IP pools that are associated with your Amazon Web Services account.

" }, "ListSuppressedDestinationsRequest":{ "type":"structure", @@ -4152,14 +4170,14 @@ }, "BehaviorOnMxFailure":{ "shape":"BehaviorOnMxFailure", - "documentation":"

The action that you want to take if the required MX record can't be found when you send an email. When you set this value to UseDefaultValue, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified error, and doesn't attempt to deliver the email.

These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending, Failed, and TemporaryFailure states.

" + "documentation":"

The action to take if the required MX record can't be found when you send an email. When you set this value to UseDefaultValue, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified error, and doesn't attempt to deliver the email.

These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending, Failed, and TemporaryFailure states.

" } }, "documentation":"

A list of attributes that are associated with a MAIL FROM domain.

" }, "MailFromDomainName":{ "type":"string", - "documentation":"

The domain that you want to use as a MAIL FROM domain.

" + "documentation":"

The domain to use as a MAIL FROM domain.

" }, "MailFromDomainNotVerifiedException":{ "type":"structure", @@ -4289,7 +4307,7 @@ "members":{ "ApplicationArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon Pinpoint project that you want to send email events to.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon Pinpoint project to send email events to.

" } }, "documentation":"

An object that defines an Amazon Pinpoint project destination for email events. You can send email event data to a Amazon Pinpoint project to view metrics using the Transactional Messaging dashboards that are built in to Amazon Pinpoint. For more information, see Transactional Messaging Charts in the Amazon Pinpoint User Guide.

" @@ -4354,7 +4372,7 @@ "members":{ "AutoWarmupEnabled":{ "shape":"Enabled", - "documentation":"

Enables or disables the automatic warm-up feature for dedicated IP addresses that are associated with your Amazon SES account in the current AWS Region. Set to true to enable the automatic warm-up feature, or set to false to disable it.

" + "documentation":"

Enables or disables the automatic warm-up feature for dedicated IP addresses that are associated with your Amazon SES account in the current Amazon Web Services Region. Set to true to enable the automatic warm-up feature, or set to false to disable it.

" } }, "documentation":"

A request to enable or disable the automatic IP address warm-up feature.

" @@ -4395,7 +4413,7 @@ }, "ProductionAccessEnabled":{ "shape":"EnabledWrapper", - "documentation":"

Indicates whether or not your account should have production access in the current AWS Region.

If the value is false, then your account is in the sandbox. When your account is in the sandbox, you can only send email to verified identities. Additionally, the maximum number of emails you can send in a 24-hour period (your sending quota) is 200, and the maximum number of emails you can send per second (your maximum sending rate) is 1.

If the value is true, then your account has production access. When your account has production access, you can send email to any address. The sending quota and maximum sending rate for your account vary based on your specific use case.

" + "documentation":"

Indicates whether or not your account should have production access in the current Amazon Web Services Region.

If the value is false, then your account is in the sandbox. When your account is in the sandbox, you can only send email to verified identities. Additionally, the maximum number of emails you can send in a 24-hour period (your sending quota) is 200, and the maximum number of emails you can send per second (your maximum sending rate) is 1.

If the value is true, then your account has production access. When your account has production access, you can send email to any address. The sending quota and maximum sending rate for your account vary based on your specific use case.

" } }, "documentation":"

A request to submit new account details.

" @@ -4411,7 +4429,7 @@ "members":{ "SendingEnabled":{ "shape":"Enabled", - "documentation":"

Enables or disables your account's ability to send email. Set to true to enable email sending, or set to false to disable email sending.

If AWS paused your account's ability to send email, you can't use this operation to resume your account's ability to send email.

" + "documentation":"

Enables or disables your account's ability to send email. Set to true to enable email sending, or set to false to disable email sending.

If Amazon Web Services paused your account's ability to send email, you can't use this operation to resume your account's ability to send email.

" } }, "documentation":"

A request to change the ability of your account to send email.

" @@ -4444,7 +4462,7 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The name of the configuration set that you want to associate with a dedicated IP pool.

", + "documentation":"

The name of the configuration set to associate with a dedicated IP pool.

", "location":"uri", "locationName":"ConfigurationSetName" }, @@ -4454,7 +4472,7 @@ }, "SendingPoolName":{ "shape":"SendingPoolName", - "documentation":"

The name of the dedicated IP pool that you want to associate with the configuration set.

" + "documentation":"

The name of the dedicated IP pool to associate with the configuration set.

" } }, "documentation":"

A request to associate a configuration set with a dedicated IP pool.

" @@ -4471,7 +4489,7 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The name of the configuration set that you want to enable or disable reputation metric tracking for.

", + "documentation":"

The name of the configuration set.

", "location":"uri", "locationName":"ConfigurationSetName" }, @@ -4494,7 +4512,7 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The name of the configuration set that you want to enable or disable email sending for.

", + "documentation":"

The name of the configuration set to enable or disable email sending for.

", "location":"uri", "locationName":"ConfigurationSetName" }, @@ -4517,7 +4535,7 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The name of the configuration set that you want to change the suppression list preferences for.

", + "documentation":"

The name of the configuration set to change the suppression list preferences for.

", "location":"uri", "locationName":"ConfigurationSetName" }, @@ -4540,13 +4558,13 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The name of the configuration set that you want to add a custom tracking domain to.

", + "documentation":"

The name of the configuration set.

", "location":"uri", "locationName":"ConfigurationSetName" }, "CustomRedirectDomain":{ "shape":"CustomRedirectDomain", - "documentation":"

The domain that you want to use to track open and click events.

" + "documentation":"

The domain to use to track open and click events.

" } }, "documentation":"

A request to add a custom domain for tracking open and click events to a configuration set.

" @@ -4566,7 +4584,7 @@ "members":{ "Ip":{ "shape":"Ip", - "documentation":"

The IP address that you want to move to the dedicated IP pool. The value you specify has to be a dedicated IP address that's associated with your AWS account.

", + "documentation":"

The IP address that you want to move to the dedicated IP pool. The value you specify has to be a dedicated IP address that's associated with your Amazon Web Services account.

", "location":"uri", "locationName":"IP" }, @@ -4622,7 +4640,7 @@ "documentation":"

An array of objects, one for each verified domain that you use to send email and enabled the Deliverability dashboard for.

" } }, - "documentation":"

Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email using Amazon SES API v2. You also gain the ability to perform predictive inbox placement tests.

When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other AWS services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.

" + "documentation":"

Enable or disable the Deliverability dashboard. When you enable the Deliverability dashboard, you gain access to reputation, deliverability, and other metrics for the domains that you use to send email using Amazon SES API v2. You also gain the ability to perform predictive inbox placement tests.

When you use the Deliverability dashboard, you pay a monthly subscription charge, in addition to any other fees that you accrue by using Amazon SES and other Amazon Web Services services. For more information about the features and cost of a Deliverability dashboard subscription, see Amazon Pinpoint Pricing.

" }, "PutDeliverabilityDashboardOptionResponse":{ "type":"structure", @@ -4636,13 +4654,13 @@ "members":{ "EmailIdentity":{ "shape":"Identity", - "documentation":"

The email address or domain that you want to associate with a configuration set.

", + "documentation":"

The email address or domain to associate with a configuration set.

", "location":"uri", "locationName":"EmailIdentity" }, "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The configuration set that you want to associate with an email identity.

" + "documentation":"

The configuration set to associate with an email identity.

" } }, "documentation":"

A request to associate a configuration set with an email identity.

" @@ -4659,7 +4677,7 @@ "members":{ "EmailIdentity":{ "shape":"Identity", - "documentation":"

The email identity that you want to change the DKIM settings for.

", + "documentation":"

The email identity.

", "location":"uri", "locationName":"EmailIdentity" }, @@ -4685,17 +4703,17 @@ "members":{ "EmailIdentity":{ "shape":"Identity", - "documentation":"

The email identity that you want to configure DKIM for.

", + "documentation":"

The email identity.

", "location":"uri", "locationName":"EmailIdentity" }, "SigningAttributesOrigin":{ "shape":"DkimSigningAttributesOrigin", - "documentation":"

The method that you want to use to configure DKIM for the identity. There are two possible values:

  • AWS_SES – Configure DKIM for the identity by using Easy DKIM.

  • EXTERNAL – Configure DKIM for the identity by using Bring Your Own DKIM (BYODKIM).

" + "documentation":"

The method to use to configure DKIM for the identity. The following are the possible values:

  • AWS_SES – Configure DKIM for the identity by using Easy DKIM.

  • EXTERNAL – Configure DKIM for the identity by using Bring Your Own DKIM (BYODKIM).

" }, "SigningAttributes":{ "shape":"DkimSigningAttributes", - "documentation":"

An object that contains information about the private key and selector that you want to use to configure DKIM for the identity. This object is only required if you want to configure Bring Your Own DKIM (BYODKIM) for the identity.

" + "documentation":"

An object that contains information about the private key and selector that you want to use to configure DKIM for the identity when using Bring Your Own DKIM (BYODKIM), or that configures the key length to be used for Easy DKIM.

" } }, "documentation":"

A request to change the DKIM attributes for an email identity.

" @@ -4720,7 +4738,7 @@ "members":{ "EmailIdentity":{ "shape":"Identity", - "documentation":"

The email identity that you want to configure bounce and complaint feedback forwarding for.

", + "documentation":"

The email identity.

", "location":"uri", "locationName":"EmailIdentity" }, @@ -4743,7 +4761,7 @@ "members":{ "EmailIdentity":{ "shape":"Identity", - "documentation":"

The verified email identity that you want to set up the custom MAIL FROM domain for.

", + "documentation":"

The verified email identity.

", "location":"uri", "locationName":"EmailIdentity" }, @@ -4753,7 +4771,7 @@ }, "BehaviorOnMxFailure":{ "shape":"BehaviorOnMxFailure", - "documentation":"

The action that you want to take if the required MX record isn't found when you send an email. When you set this value to UseDefaultValue, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified error, and doesn't attempt to deliver the email.

These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending, Failed, and TemporaryFailure states.

" + "documentation":"

The action to take if the required MX record isn't found when you send an email. When you set this value to UseDefaultValue, the mail is sent using amazonses.com as the MAIL FROM domain. When you set this value to RejectMessage, the Amazon SES API v2 returns a MailFromDomainNotVerified error, and doesn't attempt to deliver the email.

These behaviors are taken when the custom MAIL FROM domain configuration is in the Pending, Failed, and TemporaryFailure states.

" } }, "documentation":"

A request to configure the custom MAIL FROM domain for a verified identity.

" @@ -4851,7 +4869,7 @@ "documentation":"

The date and time (in Unix time) when the reputation metrics were last given a fresh start. When your account is given a fresh start, your reputation metrics are calculated starting from the date of the fresh start.

" } }, - "documentation":"

Enable or disable collection of reputation metrics for emails that you send using this configuration set in the current AWS Region.

" + "documentation":"

Enable or disable collection of reputation metrics for emails that you send using this configuration set in the current Amazon Web Services Region.

" }, "ReviewDetails":{ "type":"structure", @@ -4896,7 +4914,7 @@ "members":{ "FromEmailAddress":{ "shape":"EmailAddress", - "documentation":"

The email address that you want to use as the \"From\" address for the email. The address that you specify has to be verified.

" + "documentation":"

The email address to use as the \"From\" address for the email. The address that you specify has to be verified.

" }, "FromEmailAddressIdentityArn":{ "shape":"AmazonResourceName", @@ -4928,7 +4946,7 @@ }, "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The name of the configuration set that you want to use when sending the email.

" + "documentation":"

The name of the configuration set to use when sending the email.

" } }, "documentation":"

Represents a request to send email messages to multiple destinations using Amazon SES. For more information, see the Amazon SES Developer Guide.

" @@ -4937,7 +4955,10 @@ "type":"structure", "required":["BulkEmailEntryResults"], "members":{ - "BulkEmailEntryResults":{"shape":"BulkEmailEntryResultList"} + "BulkEmailEntryResults":{ + "shape":"BulkEmailEntryResultList", + "documentation":"

One object per intended recipient. Check each response object and retry any messages with a failure status.

" + } }, "documentation":"

The following data is returned in JSON format by the service.

" }, @@ -4979,7 +5000,7 @@ "members":{ "FromEmailAddress":{ "shape":"EmailAddress", - "documentation":"

The email address that you want to use as the \"From\" address for the email. The address that you specify has to be verified.

" + "documentation":"

The email address to use as the \"From\" address for the email. The address that you specify has to be verified.

" }, "FromEmailAddressIdentityArn":{ "shape":"AmazonResourceName", @@ -5011,7 +5032,7 @@ }, "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The name of the configuration set that you want to use when sending the email.

" + "documentation":"

The name of the configuration set to use when sending the email.

" }, "ListManagementOptions":{ "shape":"ListManagementOptions", @@ -5035,18 +5056,18 @@ "members":{ "Max24HourSend":{ "shape":"Max24HourSend", - "documentation":"

The maximum number of emails that you can send in the current AWS Region over a 24-hour period. This value is also called your sending quota.

" + "documentation":"

The maximum number of emails that you can send in the current Amazon Web Services Region over a 24-hour period. This value is also called your sending quota.

" }, "MaxSendRate":{ "shape":"MaxSendRate", - "documentation":"

The maximum number of emails that you can send per second in the current AWS Region. This value is also called your maximum sending rate or your maximum TPS (transactions per second) rate.

" + "documentation":"

The maximum number of emails that you can send per second in the current Amazon Web Services Region. This value is also called your maximum sending rate or your maximum TPS (transactions per second) rate.

" }, "SentLast24Hours":{ "shape":"SentLast24Hours", - "documentation":"

The number of emails sent from your Amazon SES account in the current AWS Region over the past 24 hours.

" + "documentation":"

The number of emails sent from your Amazon SES account in the current Amazon Web Services Region over the past 24 hours.

" } }, - "documentation":"

An object that contains information about the per-day and per-second sending limits for your Amazon SES account in the current AWS Region.

" + "documentation":"

An object that contains information about the per-day and per-second sending limits for your Amazon SES account in the current Amazon Web Services Region.

" }, "SendingOptions":{ "type":"structure", @@ -5056,7 +5077,7 @@ "documentation":"

If true, email sending is enabled for the configuration set. If false, email sending is disabled for the configuration set.

" } }, - "documentation":"

Used to enable or disable email sending for messages that use this configuration set in the current AWS Region.

" + "documentation":"

Used to enable or disable email sending for messages that use this configuration set in the current Amazon Web Services Region.

" }, "SendingPausedException":{ "type":"structure", @@ -5068,7 +5089,7 @@ }, "SendingPoolName":{ "type":"string", - "documentation":"

The name of the dedicated IP pool that you want to associate with the configuration set.

" + "documentation":"

The name of the dedicated IP pool to associate with the configuration set.

" }, "SentLast24Hours":{"type":"double"}, "SnsDestination":{ @@ -5077,7 +5098,7 @@ "members":{ "TopicArn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the Amazon SNS topic that you want to publish email events to. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

" + "documentation":"

The Amazon Resource Name (ARN) of the Amazon SNS topic to publish email events to. For more information about Amazon SNS topics, see the Amazon SNS Developer Guide.

" } }, "documentation":"

An object that defines an Amazon SNS destination for email events. You can use Amazon SNS to send notification when certain email events occur.

" @@ -5170,7 +5191,7 @@ "documentation":"

A list that contains the reasons that email addresses will be automatically added to the suppression list for your account. This list can contain any or all of the following:

  • COMPLAINT – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a complaint.

  • BOUNCE – Amazon SES adds an email address to the suppression list for your account when a message sent to that address results in a hard bounce.

" } }, - "documentation":"

An object that contains information about the email address suppression preferences for your account in the current AWS Region.

" + "documentation":"

An object that contains information about the email address suppression preferences for your account in the current Amazon Web Services Region.

" }, "SuppressionListDestination":{ "type":"structure", @@ -5178,14 +5199,14 @@ "members":{ "SuppressionListImportAction":{ "shape":"SuppressionListImportAction", - "documentation":"

The type of action that you want to perform on the address. Acceptable values:

  • PUT: add the addresses to the suppression list. If the record already exists, it will override it with the new value.

  • DELETE: remove the addresses from the suppression list.

" + "documentation":"

The type of action to perform on the address. The following are possible values:

  • PUT: add the addresses to the suppression list. If the record already exists, it will override it with the new value.

  • DELETE: remove the addresses from the suppression list.

" } }, "documentation":"

An object that contains details about the action of suppression list.

" }, "SuppressionListImportAction":{ "type":"string", - "documentation":"

The type of action that you want to perform on the address. Acceptable values:

  • PUT: add the addresses to the suppression list.

  • DELETE: remove the address from the suppression list.

", + "documentation":"

The type of action to perform on the address. The following are possible values:

  • PUT: add the addresses to the suppression list.

  • DELETE: remove the address from the suppression list.

", "enum":[ "DELETE", "PUT" @@ -5229,7 +5250,7 @@ "documentation":"

The optional part of a key-value pair that defines a tag. The maximum length of a tag value is 256 characters. The minimum length is 0 characters. If you don't want a resource to have a specific tag value, don't specify a value for this parameter. If you don't specify a value, Amazon SES sets the value to an empty string.

" } }, - "documentation":"

An object that defines the tags that are associated with a resource. A tag is a label that you optionally define and associate with a resource. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.

Each tag consists of a required tag key and an associated tag value, both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:

  • Tag keys and values are case sensitive.

  • For each associated resource, each tag key must be unique and it can have only one value.

  • The aws: prefix is reserved for use by AWS; you can’t use it in any tag keys or values that you define. In addition, you can't edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.

  • You can associate tags with public or shared resources, but the tags are available only for your AWS account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified AWS Region for your AWS account.

" + "documentation":"

An object that defines the tags that are associated with a resource. A tag is a label that you optionally define and associate with a resource. Tags can help you categorize and manage resources in different ways, such as by purpose, owner, environment, or other criteria. A resource can have as many as 50 tags.

Each tag consists of a required tag key and an associated tag value, both of which you define. A tag key is a general label that acts as a category for a more specific tag value. A tag value acts as a descriptor within a tag key. A tag key can contain as many as 128 characters. A tag value can contain as many as 256 characters. The characters can be Unicode letters, digits, white space, or one of the following symbols: _ . : / = + -. The following additional restrictions apply to tags:

  • Tag keys and values are case sensitive.

  • For each associated resource, each tag key must be unique and it can have only one value.

  • The aws: prefix is reserved for use by Amazon Web Services; you can’t use it in any tag keys or values that you define. In addition, you can't edit or remove tag keys or values that use this prefix. Tags that use this prefix don’t count against the limit of 50 tags per resource.

  • You can associate tags with public or shared resources, but the tags are available only for your Amazon Web Services account, not any other accounts that share the resource. In addition, the tags are available only for resources that are located in the specified Amazon Web Services Region for your Amazon Web Services account.

" }, "TagKey":{"type":"string"}, "TagKeyList":{ @@ -5294,7 +5315,7 @@ "members":{ "TemplateName":{ "shape":"EmailTemplateName", - "documentation":"

The name of the template that you want to render.

", + "documentation":"

The name of the template.

", "location":"uri", "locationName":"TemplateName" }, @@ -5407,10 +5428,10 @@ "members":{ "CustomRedirectDomain":{ "shape":"CustomRedirectDomain", - "documentation":"

The domain that you want to use for tracking open and click events.

" + "documentation":"

The domain to use for tracking open and click events.

" } }, - "documentation":"

An object that defines the tracking options for a configuration set. When you use the Amazon SES API v2 to send an email, it contains an invisible image that's used to track when recipients open your email. If your email contains links, those links are changed slightly in order to track when recipients click them.

These images and links include references to a domain operated by AWS. You can optionally configure the Amazon SES to use a domain that you operate for these images and links.

" + "documentation":"

An object that defines the tracking options for a configuration set. When you use the Amazon SES API v2 to send an email, it contains an invisible image that's used to track when recipients open your email. If your email contains links, those links are changed slightly in order to track when recipients click them.

These images and links include references to a domain operated by Amazon Web Services. You can optionally configure the Amazon SES to use a domain that you operate for these images and links.

" }, "UnsubscribeAll":{"type":"boolean"}, "UntagResourceRequest":{ @@ -5449,13 +5470,13 @@ "members":{ "ConfigurationSetName":{ "shape":"ConfigurationSetName", - "documentation":"

The name of the configuration set that contains the event destination that you want to modify.

", + "documentation":"

The name of the configuration set that contains the event destination to modify.

", "location":"uri", "locationName":"ConfigurationSetName" }, "EventDestinationName":{ "shape":"EventDestinationName", - "documentation":"

The name of the event destination that you want to modify.

", + "documentation":"

The name of the event destination.

", "location":"uri", "locationName":"EventDestinationName" }, @@ -5591,7 +5612,7 @@ "members":{ "EmailIdentity":{ "shape":"Identity", - "documentation":"

The email identity for which you want to update policy.

", + "documentation":"

The email identity.

", "location":"uri", "locationName":"EmailIdentity" }, @@ -5623,7 +5644,7 @@ "members":{ "TemplateName":{ "shape":"EmailTemplateName", - "documentation":"

The name of the template you want to update.

", + "documentation":"

The name of the template.

", "location":"uri", "locationName":"TemplateName" }, @@ -5689,5 +5710,5 @@ "sensitive":true } }, - "documentation":"Amazon SES API v2

Welcome to the Amazon SES API v2 Reference. This guide provides information about the Amazon SES API v2, including supported operations, data types, parameters, and schemas.

Amazon SES is an AWS service that you can use to send email messages to your customers.

If you're new to Amazon SES API v2, you might find it helpful to also review the Amazon Simple Email Service Developer Guide. The Amazon SES Developer Guide provides information and code samples that demonstrate how to use Amazon SES API v2 features programmatically.

The Amazon SES API v2 is available in several AWS Regions and it provides an endpoint for each of these Regions. For a list of all the Regions and endpoints where the API is currently available, see AWS Service Endpoints in the Amazon Web Services General Reference. To learn more about AWS Regions, see Managing AWS Regions in the Amazon Web Services General Reference.

In each Region, AWS maintains multiple Availability Zones. These Availability Zones are physically isolated from each other, but are united by private, low-latency, high-throughput, and highly redundant network connections. These Availability Zones enable us to provide very high levels of availability and redundancy, while also minimizing latency. To learn more about the number of Availability Zones that are available in each Region, see AWS Global Infrastructure.

" + "documentation":"Amazon SES API v2

Amazon SES is an Amazon Web Services service that you can use to send email messages to your customers.

If you're new to Amazon SES API v2, you might find it helpful to review the Amazon Simple Email Service Developer Guide. The Amazon SES Developer Guide provides information and code samples that demonstrate how to use Amazon SES API v2 features programmatically.

" } diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index c352c1ec..be48c7bf 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -45,7 +45,7 @@ {"shape":"OpsItemInvalidParameterException"}, {"shape":"OpsItemRelatedItemAlreadyExistsException"} ], - "documentation":"

Associates a related resource to a Systems Manager OpsCenter OpsItem. For example, you can associate an Incident Manager incident or analysis with an OpsItem. Incident Manager is a capability of Amazon Web Services Systems Manager.

" + "documentation":"

Associates a related item to a Systems Manager OpsCenter OpsItem. For example, you can associate an Incident Manager incident or analysis with an OpsItem. Incident Manager and OpsCenter are capabilities of Amazon Web Services Systems Manager.

" }, "CancelCommand":{ "name":"CancelCommand", @@ -841,7 +841,7 @@ {"shape":"InvalidFilterValue"}, {"shape":"InvalidNextToken"} ], - "documentation":"

Get information about a parameter.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

" + "documentation":"

Get information about a parameter.

Request results are returned on a best-effort basis. If you specify MaxResults in the request, the response includes information up to the limit specified. The number of items returned, however, can be between zero and the value of MaxResults. If the service reaches an internal limit while processing the results, it stops the operation and returns the matching values up to that point and a NextToken. You can specify the NextToken in a subsequent call to get the next set of results.

If you change the KMS key alias for the KMS key used to encrypt a parameter, then you must also update the key alias the parameter uses to reference KMS. Otherwise, DescribeParameters retrieves whatever the original key alias was referencing.

" }, "DescribePatchBaselines":{ "name":"DescribePatchBaselines", @@ -925,7 +925,7 @@ {"shape":"OpsItemNotFoundException"}, {"shape":"OpsItemInvalidParameterException"} ], - "documentation":"

Deletes the association between an OpsItem and a related resource. For example, this API operation can delete an Incident Manager incident from an OpsItem. Incident Manager is a capability of Amazon Web Services Systems Manager.

" + "documentation":"

Deletes the association between an OpsItem and a related item. For example, this API operation can delete an Incident Manager incident from an OpsItem. Incident Manager is a capability of Amazon Web Services Systems Manager.

" }, "GetAutomationExecution":{ "name":"GetAutomationExecution", @@ -1211,7 +1211,7 @@ {"shape":"InvalidNextToken"}, {"shape":"InvalidKeyId"} ], - "documentation":"

Retrieves the history of all changes to a parameter.

" + "documentation":"

Retrieves the history of all changes to a parameter.

If you change the KMS key alias for the KMS key used to encrypt a parameter, then you must also update the key alias the parameter uses to reference KMS. Otherwise, GetParameterHistory retrieves whatever the original key alias was referencing.

" }, "GetParameters":{ "name":"GetParameters", @@ -1490,7 +1490,7 @@ {"shape":"InternalServerError"}, {"shape":"OpsItemInvalidParameterException"} ], - "documentation":"

Lists all related-item resources associated with an OpsItem.

" + "documentation":"

Lists all related-item resources associated with a Systems Manager OpsCenter OpsItem. OpsCenter is a capability of Amazon Web Services Systems Manager.

" }, "ListOpsMetadata":{ "name":"ListOpsMetadata", @@ -2301,7 +2301,7 @@ }, "ResourceType":{ "shape":"OpsItemRelatedItemAssociationResourceType", - "documentation":"

The type of resource that you want to associate with an OpsItem. OpsCenter supports the following types:

AWS::SSMIncidents::IncidentRecord: an Incident Manager incident. Incident Manager is a capability of Amazon Web Services Systems Manager.

AWS::SSM::Document: a Systems Manager (SSM) document.

" + "documentation":"

The type of resource that you want to associate with an OpsItem. OpsCenter supports the following types:

AWS::SSMIncidents::IncidentRecord: an Incident Manager incident.

AWS::SSM::Document: a Systems Manager (SSM) document.

" }, "ResourceUri":{ "shape":"OpsItemRelatedItemAssociationResourceUri", @@ -2350,7 +2350,7 @@ }, "Targets":{ "shape":"Targets", - "documentation":"

The instances targeted by the request to create an association.

" + "documentation":"

The instances targeted by the request to create an association. You can target all instances in an Amazon Web Services account by specifying the InstanceIds key with a value of *.

" }, "LastExecutionDate":{ "shape":"DateTime", @@ -3711,7 +3711,7 @@ }, "value":{ "shape":"CommandFilterValue", - "documentation":"

The filter value. Valid values for each filter key are as follows:

  • InvokedAfter: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z to see a list of command executions occurring July 7, 2021, and later.

  • InvokedBefore: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z to see a list of command executions from before July 7, 2021.

  • Status: Specify a valid command status to see a list of all command executions with that status. Status values you can specify include:

    • Pending

    • InProgress

    • Success

    • Cancelled

    • Failed

    • TimedOut

    • Cancelling

  • DocumentName: Specify name of the Amazon Web Services Systems Manager document (SSM document) for which you want to see command execution results. For example, specify AWS-RunPatchBaseline to see command executions that used this SSM document to perform security patching operations on instances.

  • ExecutionStage: Specify one of the following values:

    • Executing: Returns a list of command executions that are currently still running.

    • Complete: Returns a list of command executions that have already completed.

" + "documentation":"

The filter value. Valid values for each filter key are as follows:

  • InvokedAfter: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z to see a list of command executions occurring July 7, 2021, and later.

  • InvokedBefore: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z to see a list of command executions from before July 7, 2021.

  • Status: Specify a valid command status to see a list of all command executions with that status. The status choices depend on the API you call.

    The status values you can specify for ListCommands are:

    • Pending

    • InProgress

    • Success

    • Cancelled

    • Failed

    • TimedOut (this includes both Delivery and Execution time outs)

    • AccessDenied

    • DeliveryTimedOut

    • ExecutionTimedOut

    • Incomplete

    • NoInstancesInTag

    • LimitExceeded

    The status values you can specify for ListCommandInvocations are:

    • Pending

    • InProgress

    • Delayed

    • Success

    • Cancelled

    • Failed

    • TimedOut (this includes both Delivery and Execution time outs)

    • AccessDenied

    • DeliveryTimedOut

    • ExecutionTimedOut

    • Undeliverable

    • InvalidPlatform

    • Terminated

  • DocumentName: Specify name of the Amazon Web Services Systems Manager document (SSM document) for which you want to see command execution results. For example, specify AWS-RunPatchBaseline to see command executions that used this SSM document to perform security patching operations on instances.

  • ExecutionStage: Specify one of the following values:

    • Executing: Returns a list of command executions that are currently still running.

    • Complete: Returns a list of command executions that have already completed.

" } }, "documentation":"

Describes a command filter.

An instance ID can't be specified when a command status is Pending because the command hasn't run on the instance yet.

" @@ -3755,7 +3755,7 @@ }, "InstanceName":{ "shape":"InstanceTagName", - "documentation":"

The name of the invocation target. For EC2 instances this is the value for the aws:Name tag. For on-premises instances, this is the name of the instance.

" + "documentation":"

The fully qualified host name of the managed instance.

" }, "Comment":{ "shape":"Comment", @@ -4373,7 +4373,7 @@ }, "Targets":{ "shape":"Targets", - "documentation":"

The targets for the association. You can target instances by using tags, Amazon Web Services resource groups, all instances in an Amazon Web Services account, or individual instance IDs. For more information about choosing targets for an association, see Using targets and rate controls with State Manager associations in the Amazon Web Services Systems Manager User Guide.

" + "documentation":"

The targets for the association. You can target instances by using tags, Amazon Web Services resource groups, all instances in an Amazon Web Services account, or individual instance IDs. You can target all instances in an Amazon Web Services account by specifying the InstanceIds key with a value of *. For more information about choosing targets for an association, see Using targets and rate controls with State Manager associations in the Amazon Web Services Systems Manager User Guide.

" }, "ScheduleExpression":{ "shape":"ScheduleExpression", @@ -6293,11 +6293,11 @@ "members":{ "OpsItemId":{ "shape":"OpsItemId", - "documentation":"

The ID of the OpsItem for which you want to delete an association between the OpsItem and a related resource.

" + "documentation":"

The ID of the OpsItem for which you want to delete an association between the OpsItem and a related item.

" }, "AssociationId":{ "shape":"OpsItemRelatedItemAssociationId", - "documentation":"

The ID of the association for which you want to delete an association between the OpsItem and a related resource.

" + "documentation":"

The ID of the association for which you want to delete an association between the OpsItem and a related item.

" } } }, @@ -7771,6 +7771,11 @@ "Description":{ "shape":"MaintenanceWindowDescription", "documentation":"

The retrieved task description.

" + }, + "CutoffBehavior":{ + "shape":"MaintenanceWindowTaskCutoffBehavior", + "documentation":"

The action to take on tasks when the maintenance window cutoff time is reached. CONTINUE_TASK means that tasks continue to run. For Automation, Lambda, Step Functions tasks, CANCEL_TASK means that currently running task invocations continue, but no new task invocations are started. For Run Command tasks, CANCEL_TASK means the system attempts to stop the task by sending a CancelCommand operation.

", + "box":true } } }, @@ -7958,7 +7963,7 @@ }, "ParameterFilters":{ "shape":"ParameterStringFilterList", - "documentation":"

Filters to limit the request results.

For GetParametersByPath, the following filter Key names are supported: Type, KeyId, Label, and DataType.

The following Key values are not supported for GetParametersByPath: tag, Name, Path, and Tier.

" + "documentation":"

Filters to limit the request results.

The following Key values are supported for GetParametersByPath: Type, KeyId, and Label.

The following Key values aren't supported for GetParametersByPath: tag, DataType, Name, Path, and Tier.

" }, "WithDecryption":{ "shape":"Boolean", @@ -10725,6 +10730,11 @@ "Description":{ "shape":"MaintenanceWindowDescription", "documentation":"

A description of the task.

" + }, + "CutoffBehavior":{ + "shape":"MaintenanceWindowTaskCutoffBehavior", + "documentation":"

The specification for whether tasks should continue to run after the cutoff time specified in the maintenance windows is reached.

", + "box":true } }, "documentation":"

Information about a task defined for a maintenance window.

" @@ -10734,6 +10744,13 @@ "max":1600, "min":1 }, + "MaintenanceWindowTaskCutoffBehavior":{ + "type":"string", + "enum":[ + "CONTINUE_TASK", + "CANCEL_TASK" + ] + }, "MaintenanceWindowTaskId":{ "type":"string", "max":36, @@ -12261,7 +12278,7 @@ "members":{ "Key":{ "shape":"ParameterStringFilterKey", - "documentation":"

The name of the filter.

The ParameterStringFilter object is used by the DescribeParameters and GetParametersByPath API operations. However, not all of the pattern values listed for Key can be used with both operations.

For DescribeActions, all of the listed patterns are valid, with the exception of Label.

For GetParametersByPath, the following patterns listed for Key aren't valid: tag, Name, Path, and Tier.

For examples of Amazon Web Services CLI commands demonstrating valid parameter filter constructions, see Searching for Systems Manager parameters in the Amazon Web Services Systems Manager User Guide.

" + "documentation":"

The name of the filter.

The ParameterStringFilter object is used by the DescribeParameters and GetParametersByPath API operations. However, not all of the pattern values listed for Key can be used with both operations.

For DescribeActions, all of the listed patterns are valid except Label.

For GetParametersByPath, the following patterns listed for Key aren't valid: tag, DataType, Name, Path, and Tier.

For examples of Amazon Web Services CLI commands demonstrating valid parameter filter constructions, see Searching for Systems Manager parameters in the Amazon Web Services Systems Manager User Guide.

" }, "Option":{ "shape":"ParameterStringQueryOption", @@ -13325,6 +13342,11 @@ "shape":"ClientToken", "documentation":"

User-provided idempotency token.

", "idempotencyToken":true + }, + "CutoffBehavior":{ + "shape":"MaintenanceWindowTaskCutoffBehavior", + "documentation":"

Indicates whether tasks should continue to run after the cutoff time specified in the maintenance windows is reached.

  • CONTINUE_TASK: When the cutoff time is reached, any tasks that are running continue. The default value.

  • CANCEL_TASK:

    • For Automation, Lambda, Step Functions tasks: When the cutoff time is reached, any task invocations that are already running continue, but no new task invocations are started.

    • For Run Command tasks: When the cutoff time is reached, the system sends a CancelCommand operation that attempts to cancel the command associated with the task. However, there is no guarantee that the command will be terminated and the underlying process stopped.

    The status for tasks that are not completed is TIMED_OUT.

", + "box":true } } }, @@ -13951,7 +13973,7 @@ "members":{ "OutputS3Region":{ "shape":"S3Region", - "documentation":"

(Deprecated) You can no longer specify this parameter. The system ignores it. Instead, Amazon Web Services Systems Manager automatically determines the Region of the S3 bucket.

" + "documentation":"

The Amazon Web Services Region of the S3 bucket.

" }, "OutputS3BucketName":{ "shape":"S3BucketName", @@ -14046,7 +14068,7 @@ }, "DocumentName":{ "shape":"DocumentARN", - "documentation":"

The name of the Amazon Web Services Systems Manager document (SSM document) to run. This can be a public document or a custom document. To run a shared document belonging to another account, specify the document ARN. For more information about how to use shared documents, see Using shared SSM documents in the Amazon Web Services Systems Manager User Guide.

" + "documentation":"

The name of the Amazon Web Services Systems Manager document (SSM document) to run. This can be a public document or a custom document. To run a shared document belonging to another account, specify the document Amazon Resource Name (ARN). For more information about how to use shared documents, see Using shared SSM documents in the Amazon Web Services Systems Manager User Guide.

If you specify a document name or ARN that hasn't been shared with your account, you receive an InvalidDocument error.

" }, "DocumentVersion":{ "shape":"DocumentVersion", @@ -14510,6 +14532,10 @@ "shape":"IdempotencyToken", "documentation":"

The user-provided idempotency token. The token must be unique, is case insensitive, enforces the UUID format, and can't be reused.

" }, + "AutoApprove":{ + "shape":"Boolean", + "documentation":"

Indicates whether the change request can be approved automatically without the need for manual approvals.

If AutoApprovable is enabled in a change template, then setting AutoApprove to true in StartChangeRequestExecution creates a change request that bypasses approver review.

Change Calendar restrictions are not bypassed in this scenario. If the state of an associated calendar is CLOSED, change freeze approvers must still grant permission for this change request to run. If they don't, the change won't be processed until the calendar state is again OPEN.

" + }, "Runbooks":{ "shape":"Runbooks", "documentation":"

Information about the Automation runbooks that are run during the runbook workflow.

The Automation runbooks specified for the runbook workflow can't run until all required approvals for the change request have been received.

" @@ -15562,6 +15588,11 @@ "shape":"Boolean", "documentation":"

If True, then all fields that are required by the RegisterTaskWithMaintenanceWindow operation are also required for this API request. Optional fields that aren't specified are set to null.

", "box":true + }, + "CutoffBehavior":{ + "shape":"MaintenanceWindowTaskCutoffBehavior", + "documentation":"

Indicates whether tasks should continue to run after the cutoff time specified in the maintenance windows is reached.

  • CONTINUE_TASK: When the cutoff time is reached, any tasks that are running continue. The default value.

  • CANCEL_TASK:

    • For Automation, Lambda, Step Functions tasks: When the cutoff time is reached, any task invocations that are already running continue, but no new task invocations are started.

    • For Run Command tasks: When the cutoff time is reached, the system sends a CancelCommand operation that attempts to cancel the command associated with the task. However, there is no guarantee that the command will be terminated and the underlying process stopped.

    The status for tasks that are not completed is TIMED_OUT.

", + "box":true } } }, @@ -15619,6 +15650,11 @@ "Description":{ "shape":"MaintenanceWindowDescription", "documentation":"

The updated task description.

" + }, + "CutoffBehavior":{ + "shape":"MaintenanceWindowTaskCutoffBehavior", + "documentation":"

The specification for whether tasks should continue to run after the cutoff time specified in the maintenance windows is reached.

", + "box":true } } }, @@ -15900,7 +15936,7 @@ }, "SettingValue":{ "shape":"ServiceSettingValue", - "documentation":"

The new value to specify for the service setting. For the /ssm/parameter-store/default-parameter-tier setting ID, the setting value can be one of the following.

  • Standard

  • Advanced

  • Intelligent-Tiering

For the /ssm/parameter-store/high-throughput-enabled, and /ssm/managed-instance/activation-tier setting IDs, the setting value can be true or false.

For the /ssm/automation/customer-script-log-destination setting ID, the setting value can be CloudWatch.

For the /ssm/automation/customer-script-log-group-name setting ID, the setting value can be the name of an Amazon CloudWatch Logs log group.

For the /ssm/documents/console/public-sharing-permission setting ID, the setting value can be Enable or Disable.

" + "documentation":"

The new value to specify for the service setting. The following list specifies the available values for each setting.

  • /ssm/parameter-store/default-parameter-tier: Standard, Advanced, Intelligent-Tiering

  • /ssm/parameter-store/high-throughput-enabled: true or false

  • /ssm/automation/customer-script-log-destination: CloudWatch

  • /ssm/automation/customer-script-log-group-name: the name of an Amazon CloudWatch Logs log group

  • /ssm/documents/console/public-sharing-permission: Enable or Disable

  • /ssm/managed-instance/activation-tier: standard or advanced

" } }, "documentation":"

The request body of the UpdateServiceSetting API operation.

" diff --git a/botocore/data/ssm/2014-11-06/waiters-2.json b/botocore/data/ssm/2014-11-06/waiters-2.json index 30b333a8..43f5237f 100644 --- a/botocore/data/ssm/2014-11-06/waiters-2.json +++ b/botocore/data/ssm/2014-11-06/waiters-2.json @@ -53,6 +53,11 @@ "matcher": "path", "state": "failure", "argument": "Status" + }, + { + "state": "retry", + "matcher": "error", + "expected": "InvocationDoesNotExist" } ] } diff --git a/botocore/data/synthetics/2017-10-11/service-2.json b/botocore/data/synthetics/2017-10-11/service-2.json index 71603c73..fcf0b35e 100644 --- a/botocore/data/synthetics/2017-10-11/service-2.json +++ b/botocore/data/synthetics/2017-10-11/service-2.json @@ -209,6 +209,26 @@ } }, "shapes":{ + "ArtifactConfigInput":{ + "type":"structure", + "members":{ + "S3Encryption":{ + "shape":"S3EncryptionConfig", + "documentation":"

A structure that contains the configuration of the encryption-at-rest settings for artifacts that the canary uploads to Amazon S3. Artifact encryption functionality is available only for canaries that use Synthetics runtime version syn-nodejs-puppeteer-3.3 or later. For more information, see Encrypting canary artifacts

" + } + }, + "documentation":"

A structure that contains the configuration for canary artifacts, including the encryption-at-rest settings for artifacts that the canary uploads to Amazon S3.

" + }, + "ArtifactConfigOutput":{ + "type":"structure", + "members":{ + "S3Encryption":{ + "shape":"S3EncryptionConfig", + "documentation":"

A structure that contains the configuration of encryption settings for canary artifacts that are stored in Amazon S3.

" + } + }, + "documentation":"

A structure that contains the configuration for canary artifacts, including the encryption-at-rest settings for artifacts that the canary uploads to Amazon S3.

" + }, "BaseScreenshot":{ "type":"structure", "required":["ScreenshotName"], @@ -308,6 +328,10 @@ "Tags":{ "shape":"TagMap", "documentation":"

The list of key-value pairs that are associated with the canary.

" + }, + "ArtifactConfig":{ + "shape":"ArtifactConfigOutput", + "documentation":"

A structure that contains the configuration for canary artifacts, including the encryption-at-rest settings for artifacts that the canary uploads to Amazon S3.

" } }, "documentation":"

This structure contains all information about one canary in your account.

" @@ -646,6 +670,10 @@ "Tags":{ "shape":"TagMap", "documentation":"

A list of key-value pairs to associate with the canary. You can associate as many as 50 tags with a canary.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions, by granting a user permission to access or change only the resources that have certain tag values.

" + }, + "ArtifactConfig":{ + "shape":"ArtifactConfigInput", + "documentation":"

A structure that contains the configuration for canary artifacts, including the encryption-at-rest settings for artifacts that the canary uploads to Amazon S3.

" } } }, @@ -753,6 +781,13 @@ } } }, + "EncryptionMode":{ + "type":"string", + "enum":[ + "SSE_S3", + "SSE_KMS" + ] + }, "EnvironmentVariableName":{ "type":"string", "pattern":"[a-zA-Z]([a-zA-Z0-9_])+" @@ -833,6 +868,12 @@ "error":{"httpStatusCode":500}, "exception":true }, + "KmsKeyArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"arn:(aws[a-zA-Z-]*)?:kms:[a-z]{2}((-gov)|(-iso(b?)))?-[a-z]+-\\d{1}:\\d{12}:key/[\\w\\-\\/]+" + }, "ListTagsForResourceRequest":{ "type":"structure", "required":["ResourceArn"], @@ -926,6 +967,20 @@ "type":"list", "member":{"shape":"RuntimeVersion"} }, + "S3EncryptionConfig":{ + "type":"structure", + "members":{ + "EncryptionMode":{ + "shape":"EncryptionMode", + "documentation":"

The encryption method to use for artifacts created by this canary. Specify SSE_S3 to use server-side encryption (SSE) with an Amazon S3-managed key. Specify SSE_KMS to use server-side encryption with a customer-managed KMS key.

If you omit this parameter, an Amazon Web Services-managed KMS key is used.

" + }, + "KmsKeyArn":{ + "shape":"KmsKeyArn", + "documentation":"

The ARN of the customer-managed KMS key to use, if you specify SSE_KMS for EncryptionMode.

" + } + }, + "documentation":"

A structure that contains the configuration of encryption-at-rest settings for canary artifacts that the canary uploads to Amazon S3.

For more information, see Encrypting canary artifacts

" + }, "SecurityGroupId":{"type":"string"}, "SecurityGroupIds":{ "type":"list", @@ -1107,6 +1162,14 @@ "VisualReference":{ "shape":"VisualReferenceInput", "documentation":"

Defines the screenshots to use as the baseline for comparisons during visual monitoring comparisons during future runs of this canary. If you omit this parameter, no changes are made to any baseline screenshots that the canary might be using already.

Visual monitoring is supported only on canaries running the syn-puppeteer-node-3.2 runtime or later. For more information, see Visual monitoring and Visual monitoring blueprint

" + }, + "ArtifactS3Location":{ + "shape":"String", + "documentation":"

The location in Amazon S3 where Synthetics stores artifacts from the test runs of this canary. Artifacts include the log file, screenshots, and HAR files. The name of the S3 bucket can't include a period (.).

" + }, + "ArtifactConfig":{ + "shape":"ArtifactConfigInput", + "documentation":"

A structure that contains the configuration for canary artifacts, including the encryption-at-rest settings for artifacts that the canary uploads to Amazon S3.

" } } }, diff --git a/botocore/data/transfer/2018-11-05/service-2.json b/botocore/data/transfer/2018-11-05/service-2.json index e4b95838..bd19807f 100644 --- a/botocore/data/transfer/2018-11-05/service-2.json +++ b/botocore/data/transfer/2018-11-05/service-2.json @@ -616,7 +616,7 @@ }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", - "documentation":"

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]

In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" + "documentation":"

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" }, "Policy":{ "shape":"Policy", @@ -735,7 +735,7 @@ }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", - "documentation":"

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]

In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" + "documentation":"

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

In most cases, you can use this value instead of the session policy to lock your user down to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" }, "Policy":{ "shape":"Policy", @@ -794,11 +794,11 @@ }, "Steps":{ "shape":"WorkflowSteps", - "documentation":"

Specifies the details for the steps that are in the specified workflow.

The TYPE specifies which of the following actions is being taken for this step.

  • Copy: copy the file to another location

  • Custom: custom step with a lambda target

  • Delete: delete the file

  • Tag: add a tag to the file

For file location, you specify either the S3 bucket and key, or the EFS filesystem ID and path.

" + "documentation":"

Specifies the details for the steps that are in the specified workflow.

The TYPE specifies which of the following actions is being taken for this step.

  • Copy: copy the file to another location

  • Custom: custom step with a lambda target

  • Delete: delete the file

  • Tag: add a tag to the file

Currently, copying and tagging are supported only on S3.

For file location, you specify either the S3 bucket and key, or the EFS filesystem ID and path.

" }, "OnExceptionSteps":{ "shape":"WorkflowSteps", - "documentation":"

Specifies the steps (actions) to take if any errors are encountered during execution of the workflow.

" + "documentation":"

Specifies the steps (actions) to take if errors are encountered during execution of the workflow.

For custom steps, the lambda function needs to send FAILURE to the call back API to kick off the exception steps. Additionally, if the lambda does not send SUCCESS before it times out, the exception steps are executed.

" }, "Tags":{ "shape":"Tags", @@ -909,7 +909,7 @@ "documentation":"

The name of the step, used as an identifier.

" } }, - "documentation":"

The name of the step, used to identify the step that is being deleted.

" + "documentation":"

The name of the step, used to identify the delete step.

" }, "DeleteUserRequest":{ "type":"structure", @@ -1337,7 +1337,7 @@ }, "OnExceptionSteps":{ "shape":"WorkflowSteps", - "documentation":"

Specifies the steps (actions) to take if any errors are encountered during execution of the workflow.

" + "documentation":"

Specifies the steps (actions) to take if errors are encountered during execution of the workflow.

" }, "WorkflowId":{ "shape":"WorkflowId", @@ -1375,7 +1375,7 @@ "documentation":"

The pathname for the folder being used by a workflow.

" } }, - "documentation":"

Specifies the details for the file location for the file being used in the workflow. Only applicable if you are using Amazon EFS for storage.

You need to provide the file system ID and the pathname. The pathname can represent either a path or a file. This is determined by whether or not you end the path value with the forward slash (/) character. If the final character is \"/\", then your file is copied to the folder, and its name does not change. If, rather, the final character is alphanumeric, your uploaded file is renamed to the path value. In this case, if a file with that name already exists, it is overwritten.

For example, if your path is shared-files/bob/, your uploaded files are copied to the shared-files/bob/, folder. If your path is shared-files/today, each uploaded file is copied to the shared-files folder and named today: each upload overwrites the previous version of the bob file.

" + "documentation":"

Reserved for future use.

" }, "EfsFileSystemId":{ "type":"string", @@ -1460,7 +1460,7 @@ }, "OnExceptionSteps":{ "shape":"ExecutionStepResults", - "documentation":"

Specifies the steps (actions) to take if any errors are encountered during execution of the workflow.

" + "documentation":"

Specifies the steps (actions) to take if errors are encountered during execution of the workflow.

" } }, "documentation":"

Specifies the steps in the workflow, as well as the steps to execute in case of any errors during workflow execution.

" @@ -1642,7 +1642,7 @@ }, "EfsFileLocation":{ "shape":"EfsFileLocation", - "documentation":"

Specifies the details for the Amazon EFS file being copied.

" + "documentation":"

Reserved for future use.

" } }, "documentation":"

Specifies the location for the file being copied. Only applicable for the Copy type of workflow steps.

" @@ -2249,21 +2249,21 @@ "documentation":"

The entity tag is a hash of the object. The ETag reflects changes only to the contents of an object, not its metadata.

" } }, - "documentation":"

Specifies the details for the file location for the file being used in the workflow. Only applicable if you are using S3 storage.

You need to provide the bucket and key. The key can represent either a path or a file. This is determined by whether or not you end the key value with the forward slash (/) character. If the final character is \"/\", then your file is copied to the folder, and its name does not change. If, rather, the final character is alphanumeric, your uploaded file is renamed to the path value. In this case, if a file with that name already exists, it is overwritten.

For example, if your path is shared-files/bob/, your uploaded files are copied to the shared-files/bob/, folder. If your path is shared-files/today, each uploaded file is copied to the shared-files folder and named today: each upload overwrites the previous version of the bob file.

" + "documentation":"

Specifies the details for the file location for the file being used in the workflow. Only applicable if you are using S3 storage.

" }, "S3InputFileLocation":{ "type":"structure", "members":{ "Bucket":{ "shape":"S3Bucket", - "documentation":"

Specifies the S3 bucket that contains the file being copied.

" + "documentation":"

Specifies the S3 bucket for the customer input file.

" }, "Key":{ "shape":"S3Key", "documentation":"

The name assigned to the file when it was created in S3. You use the object key to retrieve the object.

" } }, - "documentation":"

Specifies the details for the S3 file being copied.

" + "documentation":"

Specifies the customer input S3 file location. If it is used inside copyStepDetails.DestinationFileLocation, it should be the S3 copy destination.

You need to provide the bucket and key. The key can represent either a path or a file. This is determined by whether or not you end the key value with the forward slash (/) character. If the final character is \"/\", then your file is copied to the folder, and its name does not change. If, rather, the final character is alphanumeric, your uploaded file is renamed to the path value. In this case, if a file with that name already exists, it is overwritten.

For example, if your path is shared-files/bob/, your uploaded files are copied to the shared-files/bob/, folder. If your path is shared-files/today, each uploaded file is copied to the shared-files folder and named today: each upload overwrites the previous version of the bob file.

" }, "S3Key":{ "type":"string", @@ -2662,7 +2662,7 @@ }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", - "documentation":"

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]

In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" + "documentation":"

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to / and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" }, "Policy":{ "shape":"Policy", @@ -2777,7 +2777,7 @@ }, "HomeDirectoryMappings":{ "shape":"HomeDirectoryMappings", - "documentation":"

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"your-personal-report.pdf\", \"Target\": \"/bucket3/customized-reports/${transfer:UserName}.pdf\" } ]

In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry:\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" + "documentation":"

Logical directory mappings that specify what Amazon S3 or Amazon EFS paths and keys should be visible to your user and how you want to make them visible. You must specify the Entry and Target pair, where Entry shows how the path is made visible and Target is the actual Amazon S3 or Amazon EFS path. If you only specify a target, it is displayed as is. You also must ensure that your Amazon Web Services Identity and Access Management (IAM) role provides access to paths in Target. This value can only be set when HomeDirectoryType is set to LOGICAL.

The following is an Entry and Target pair example.

[ { \"Entry\": \"/directory1\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

In most cases, you can use this value instead of the session policy to lock down your user to the designated home directory (\"chroot\"). To do this, you can set Entry to '/' and set Target to the HomeDirectory parameter value.

The following is an Entry and Target pair example for chroot.

[ { \"Entry\": \"/\", \"Target\": \"/bucket_name/home/mydirectory\" } ]

If the target of a logical directory entry does not exist in Amazon S3 or EFS, the entry is ignored. As a workaround, you can use the Amazon S3 API or EFS API to create 0 byte objects as place holders for your directory. If using the CLI, use the s3api or efsapi call instead of s3 or efs so you can use the put-object operation. For example, you use the following: aws s3api put-object --bucket bucketname --key path/to/folder/. Make sure that the end of the key name ends in a / for it to be considered a folder.

" }, "Policy":{ "shape":"Policy", @@ -2913,7 +2913,7 @@ }, "CopyStepDetails":{ "shape":"CopyStepDetails", - "documentation":"

Details for a step that performs a file copy.

Consists of the following values:

  • A description

  • An S3 or EFS location for the destination of the file copy.

  • A flag that indicates whether or not to overwrite an existing file of the same name. The default is FALSE.

" + "documentation":"

Details for a step that performs a file copy.

Consists of the following values:

  • A description

  • An S3 location for the destination of the file copy.

  • A flag that indicates whether or not to overwrite an existing file of the same name. The default is FALSE.

" }, "CustomStepDetails":{ "shape":"CustomStepDetails", @@ -2921,7 +2921,7 @@ }, "DeleteStepDetails":{ "shape":"DeleteStepDetails", - "documentation":"

You need to specify the name of the file to be deleted.

" + "documentation":"

Details for a step that deletes the file.

" }, "TagStepDetails":{ "shape":"TagStepDetails", diff --git a/botocore/data/voice-id/2021-09-27/paginators-1.json b/botocore/data/voice-id/2021-09-27/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/voice-id/2021-09-27/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/voice-id/2021-09-27/service-2.json b/botocore/data/voice-id/2021-09-27/service-2.json new file mode 100644 index 00000000..5d3c677b --- /dev/null +++ b/botocore/data/voice-id/2021-09-27/service-2.json @@ -0,0 +1,1949 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2021-09-27", + "endpointPrefix":"voiceid", + "jsonVersion":"1.0", + "protocol":"json", + "serviceFullName":"Amazon Voice ID", + "serviceId":"Voice ID", + "signatureVersion":"v4", + "signingName":"voiceid", + "targetPrefix":"VoiceID", + "uid":"voice-id-2021-09-27" + }, + "operations":{ + "CreateDomain":{ + "name":"CreateDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateDomainRequest"}, + "output":{"shape":"CreateDomainResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates a domain that contains all Amazon Connect Voice ID data, such as speakers, fraudsters, customer audio, and voiceprints.

", + "idempotent":true + }, + "DeleteDomain":{ + "name":"DeleteDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDomainRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes the specified domain from the Amazon Connect Voice ID system.

" + }, + "DeleteFraudster":{ + "name":"DeleteFraudster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteFraudsterRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes the specified fraudster from the Amazon Connect Voice ID system.

" + }, + "DeleteSpeaker":{ + "name":"DeleteSpeaker", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteSpeakerRequest"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Deletes the specified speaker from the Amazon Connect Voice ID system.

" + }, + "DescribeDomain":{ + "name":"DescribeDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDomainRequest"}, + "output":{"shape":"DescribeDomainResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Describes the specified domain.

" + }, + "DescribeFraudster":{ + "name":"DescribeFraudster", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFraudsterRequest"}, + "output":{"shape":"DescribeFraudsterResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Describes the specified fraudster.

" + }, + "DescribeFraudsterRegistrationJob":{ + "name":"DescribeFraudsterRegistrationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeFraudsterRegistrationJobRequest"}, + "output":{"shape":"DescribeFraudsterRegistrationJobResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Describes the specified fraudster registration job.

" + }, + "DescribeSpeaker":{ + "name":"DescribeSpeaker", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpeakerRequest"}, + "output":{"shape":"DescribeSpeakerResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Describes the specified speaker.

" + }, + "DescribeSpeakerEnrollmentJob":{ + "name":"DescribeSpeakerEnrollmentJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeSpeakerEnrollmentJobRequest"}, + "output":{"shape":"DescribeSpeakerEnrollmentJobResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Describes the specified speaker enrollment job.

" + }, + "EvaluateSession":{ + "name":"EvaluateSession", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"EvaluateSessionRequest"}, + "output":{"shape":"EvaluateSessionResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Evaluates a specified session based on audio data accumulated during a streaming Amazon Connect Voice ID call.

" + }, + "ListDomains":{ + "name":"ListDomains", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDomainsRequest"}, + "output":{"shape":"ListDomainsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Lists all the domains in the Amazon Web Services account.

" + }, + "ListFraudsterRegistrationJobs":{ + "name":"ListFraudsterRegistrationJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListFraudsterRegistrationJobsRequest"}, + "output":{"shape":"ListFraudsterRegistrationJobsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Lists all the fraudster registration jobs in the domain with the given JobStatus. If JobStatus is not provided, this lists all fraudster registration jobs in the given domain.

" + }, + "ListSpeakerEnrollmentJobs":{ + "name":"ListSpeakerEnrollmentJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSpeakerEnrollmentJobsRequest"}, + "output":{"shape":"ListSpeakerEnrollmentJobsResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Lists all the speaker enrollment jobs in the domain with the specified JobStatus. If JobStatus is not provided, this lists all jobs with all possible speaker enrollment job statuses.

" + }, + "ListSpeakers":{ + "name":"ListSpeakers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListSpeakersRequest"}, + "output":{"shape":"ListSpeakersResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Lists all speakers in a specified domain.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Lists all tags associated with a specified Voice ID resource.

" + }, + "OptOutSpeaker":{ + "name":"OptOutSpeaker", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"OptOutSpeakerRequest"}, + "output":{"shape":"OptOutSpeakerResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Opts out a speaker from Voice ID system. A speaker can be opted out regardless of whether or not they already exist in the system. If they don't yet exist, a new speaker is created in an opted out state. If they already exist, their existing status is overridden and they are opted out. Enrollment and evaluation authentication requests are rejected for opted out speakers, and opted out speakers have no voice embeddings stored in the system.

" + }, + "StartFraudsterRegistrationJob":{ + "name":"StartFraudsterRegistrationJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartFraudsterRegistrationJobRequest"}, + "output":{"shape":"StartFraudsterRegistrationJobResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Starts a new batch fraudster registration job using provided details.

", + "idempotent":true + }, + "StartSpeakerEnrollmentJob":{ + "name":"StartSpeakerEnrollmentJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartSpeakerEnrollmentJobRequest"}, + "output":{"shape":"StartSpeakerEnrollmentJobResponse"}, + "errors":[ + {"shape":"ServiceQuotaExceededException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Starts a new batch speaker enrollment job using specified details.

", + "idempotent":true + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Tags an Amazon Connect Voice ID resource with the provided list of tags.

" + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Removes specified tags from a specified Amazon Connect Voice ID resource.

" + }, + "UpdateDomain":{ + "name":"UpdateDomain", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateDomainRequest"}, + "output":{"shape":"UpdateDomainResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"}, + {"shape":"ValidationException"}, + {"shape":"ConflictException"}, + {"shape":"InternalServerException"}, + {"shape":"ThrottlingException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Updates the specified domain. This API has clobber behavior, and clears and replaces all attributes. If an optional field, such as 'Description' is not provided, it is removed from the domain.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

You do not have sufficient permissions to perform this action. Check the error message and try again.

", + "exception":true + }, + "AmazonResourceName":{ + "type":"string", + "max":1011, + "min":1, + "pattern":"^arn:aws(-[^:]+)?:voiceid.+:[0-9]{12}:domain/[a-zA-Z0-9]{22}$" + }, + "Arn":{ + "type":"string", + "pattern":"^arn:aws(-[^:]+)?:voiceid.+:[0-9]{12}:domain/[a-zA-Z0-9]{22}$" + }, + "AuthenticationConfiguration":{ + "type":"structure", + "required":["AcceptanceThreshold"], + "members":{ + "AcceptanceThreshold":{ + "shape":"Score", + "documentation":"

The minimum threshold needed to successfully authenticate a speaker.

" + } + }, + "documentation":"

The configuration used to authenticate a speaker during a session.

" + }, + "AuthenticationDecision":{ + "type":"string", + "enum":[ + "ACCEPT", + "REJECT", + "NOT_ENOUGH_SPEECH", + "SPEAKER_NOT_ENROLLED", + "SPEAKER_OPTED_OUT", + "SPEAKER_ID_NOT_PROVIDED" + ] + }, + "AuthenticationResult":{ + "type":"structure", + "members":{ + "AudioAggregationEndedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp indicating when audio aggregation ended for this authentication result.

" + }, + "AudioAggregationStartedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp indicating when audio aggregation started for this authentication result.

" + }, + "AuthenticationResultId":{ + "shape":"UniqueIdLarge", + "documentation":"

The unique identifier for this authentication result. Because there can be multiple authentications for a given session, this field helps to identify if the returned result is from a previous streaming activity or a new result. Note that in absence of any new streaming activity, AcceptanceThreshold changes, or SpeakerId changes, Voice ID always returns cached Authentication Result for this API.

" + }, + "Configuration":{ + "shape":"AuthenticationConfiguration", + "documentation":"

The AuthenticationConfiguration used to generate this authentication result.

" + }, + "CustomerSpeakerId":{ + "shape":"CustomerSpeakerId", + "documentation":"

The client-provided identifier for the speaker whose authentication result is produced. Only present if a SpeakerId is provided for the session.

" + }, + "Decision":{ + "shape":"AuthenticationDecision", + "documentation":"

The authentication decision produced by Voice ID, processed against the current session state and streamed audio of the speaker.

" + }, + "GeneratedSpeakerId":{ + "shape":"GeneratedSpeakerId", + "documentation":"

The service-generated identifier for the speaker whose authentication result is produced.

" + }, + "Score":{ + "shape":"Score", + "documentation":"

The authentication score for the speaker whose authentication result is produced. This value is only present if the authentication decision is either ACCEPT or REJECT.

" + } + }, + "documentation":"

The authentication result produced by Voice ID, processed against the current session state and streamed audio of the speaker.

" + }, + "ClientTokenString":{ + "type":"string", + "max":64, + "min":1, + "pattern":"^[a-zA-Z0-9-_]+$" + }, + "ConflictException":{ + "type":"structure", + "members":{ + "ConflictType":{ + "shape":"ConflictType", + "documentation":"

The type of conflict which caused a ConflictException. Possible types and the corresponding error messages are as follows:

  • DOMAIN_NOT_ACTIVE: The domain is not active.

  • CANNOT_CHANGE_SPEAKER_AFTER_ENROLLMENT: You cannot change the speaker ID after an enrollment has been requested.

  • ENROLLMENT_ALREADY_EXISTS: There is already an enrollment for this session.

  • SPEAKER_NOT_SET: You must set the speaker ID before requesting an enrollment.

  • SPEAKER_OPTED_OUT: You cannot request an enrollment for an opted out speaker.

  • CONCURRENT_CHANGES: The request could not be processed as the resource was modified by another request during execution.

" + }, + "Message":{"shape":"String"} + }, + "documentation":"

The request failed due to a conflict. Check the ConflictType and error message for more details.

", + "exception":true + }, + "ConflictType":{ + "type":"string", + "enum":[ + "ANOTHER_ACTIVE_STREAM", + "DOMAIN_NOT_ACTIVE", + "CANNOT_CHANGE_SPEAKER_AFTER_ENROLLMENT", + "ENROLLMENT_ALREADY_EXISTS", + "SPEAKER_NOT_SET", + "SPEAKER_OPTED_OUT", + "CONCURRENT_CHANGES" + ] + }, + "CreateDomainRequest":{ + "type":"structure", + "required":[ + "Name", + "ServerSideEncryptionConfiguration" + ], + "members":{ + "ClientToken":{ + "shape":"ClientTokenString", + "documentation":"

The idempotency token for creating a new domain. If not provided, Amazon Web Services SDK populates this field.

", + "idempotencyToken":true + }, + "Description":{ + "shape":"Description", + "documentation":"

A brief description of this domain.

" + }, + "Name":{ + "shape":"DomainName", + "documentation":"

The name of the domain.

" + }, + "ServerSideEncryptionConfiguration":{ + "shape":"ServerSideEncryptionConfiguration", + "documentation":"

The configuration, containing the KMS Key Identifier, to be used by Voice ID for the server-side encryption of your data. Refer to Amazon Connect VoiceID encryption at rest for more details on how the KMS Key is used.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

A list of tags you want added to the domain.

" + } + } + }, + "CreateDomainResponse":{ + "type":"structure", + "members":{ + "Domain":{ + "shape":"Domain", + "documentation":"

Information about the newly created domain.

" + } + } + }, + "CustomerSpeakerId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z0-9][a-zA-Z0-9_-]*$", + "sensitive":true + }, + "DeleteDomainRequest":{ + "type":"structure", + "required":["DomainId"], + "members":{ + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain you want to delete.

" + } + } + }, + "DeleteFraudsterRequest":{ + "type":"structure", + "required":[ + "DomainId", + "FraudsterId" + ], + "members":{ + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain containing the fraudster.

" + }, + "FraudsterId":{ + "shape":"FraudsterId", + "documentation":"

The identifier of the fraudster you want to delete.

" + } + } + }, + "DeleteSpeakerRequest":{ + "type":"structure", + "required":[ + "DomainId", + "SpeakerId" + ], + "members":{ + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain containing the speaker.

" + }, + "SpeakerId":{ + "shape":"SpeakerId", + "documentation":"

The identifier of the speaker you want to delete.

" + } + } + }, + "DescribeDomainRequest":{ + "type":"structure", + "required":["DomainId"], + "members":{ + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain you are describing.

" + } + } + }, + "DescribeDomainResponse":{ + "type":"structure", + "members":{ + "Domain":{ + "shape":"Domain", + "documentation":"

Information about the specified domain.

" + } + } + }, + "DescribeFraudsterRegistrationJobRequest":{ + "type":"structure", + "required":[ + "DomainId", + "JobId" + ], + "members":{ + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier for the domain containing the fraudster registration job.

" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier for the fraudster registration job you are describing.

" + } + } + }, + "DescribeFraudsterRegistrationJobResponse":{ + "type":"structure", + "members":{ + "Job":{ + "shape":"FraudsterRegistrationJob", + "documentation":"

Contains details about the specified fraudster registration job.

" + } + } + }, + "DescribeFraudsterRequest":{ + "type":"structure", + "required":[ + "DomainId", + "FraudsterId" + ], + "members":{ + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain containing the fraudster.

" + }, + "FraudsterId":{ + "shape":"FraudsterId", + "documentation":"

The identifier of the fraudster you are describing.

" + } + } + }, + "DescribeFraudsterResponse":{ + "type":"structure", + "members":{ + "Fraudster":{ + "shape":"Fraudster", + "documentation":"

Information about the specified fraudster.

" + } + } + }, + "DescribeSpeakerEnrollmentJobRequest":{ + "type":"structure", + "required":[ + "DomainId", + "JobId" + ], + "members":{ + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain containing the speaker enrollment job.

" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

The identifier of the speaker enrollment job you are describing.

" + } + } + }, + "DescribeSpeakerEnrollmentJobResponse":{ + "type":"structure", + "members":{ + "Job":{ + "shape":"SpeakerEnrollmentJob", + "documentation":"

Contains details about the specified speaker enrollment job.

" + } + } + }, + "DescribeSpeakerRequest":{ + "type":"structure", + "required":[ + "DomainId", + "SpeakerId" + ], + "members":{ + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain that contains the speaker.

" + }, + "SpeakerId":{ + "shape":"SpeakerId", + "documentation":"

The identifier of the speaker you are describing.

" + } + } + }, + "DescribeSpeakerResponse":{ + "type":"structure", + "members":{ + "Speaker":{ + "shape":"Speaker", + "documentation":"

Information about the specified speaker.

" + } + } + }, + "Description":{ + "type":"string", + "max":1024, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-%@]*)$", + "sensitive":true + }, + "Domain":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the domain.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp at which the domain is created.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

The client-provided description of the domain.

" + }, + "DomainId":{ + "shape":"DomainId", + "documentation":"

The service-generated identifier for the domain.

" + }, + "DomainStatus":{ + "shape":"DomainStatus", + "documentation":"

The current status of the domain.

" + }, + "Name":{ + "shape":"DomainName", + "documentation":"

The client-provided name for the domain.

" + }, + "ServerSideEncryptionConfiguration":{ + "shape":"ServerSideEncryptionConfiguration", + "documentation":"

The server-side encryption configuration containing the KMS Key Identifier you want Voice ID to use to encrypt your data.

" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp showing the domain's last update.

" + } + }, + "documentation":"

Contains all the information about a domain.

" + }, + "DomainId":{ + "type":"string", + "max":22, + "min":22, + "pattern":"^[a-zA-Z0-9]{22}$" + }, + "DomainName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z0-9][a-zA-Z0-9_-]*$", + "sensitive":true + }, + "DomainStatus":{ + "type":"string", + "enum":[ + "ACTIVE", + "PENDING", + "SUSPENDED" + ] + }, + "DomainSummaries":{ + "type":"list", + "member":{"shape":"DomainSummary"} + }, + "DomainSummary":{ + "type":"structure", + "members":{ + "Arn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) for the domain.

" + }, + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp showing when the domain is created.

" + }, + "Description":{ + "shape":"Description", + "documentation":"

The client-provided description of the domain.

" + }, + "DomainId":{ + "shape":"DomainId", + "documentation":"

The service-generated identifier for the domain.

" + }, + "DomainStatus":{ + "shape":"DomainStatus", + "documentation":"

The current status of the domain.

" + }, + "Name":{ + "shape":"DomainName", + "documentation":"

The client-provided name for the domain.

" + }, + "ServerSideEncryptionConfiguration":{ + "shape":"ServerSideEncryptionConfiguration", + "documentation":"

The server-side encryption configuration containing the KMS Key Identifier you want Voice ID to use to encrypt your data.

" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp showing the domain's last update.

" + } + }, + "documentation":"

Contains a summary of information about a domain.

" + }, + "DuplicateRegistrationAction":{ + "type":"string", + "enum":[ + "SKIP", + "REGISTER_AS_NEW" + ] + }, + "EnrollmentConfig":{ + "type":"structure", + "members":{ + "ExistingEnrollmentAction":{ + "shape":"ExistingEnrollmentAction", + "documentation":"

The action to take when the specified speaker is already enrolled in the specified domain. The default value is SKIP, which skips the enrollment for the existing speaker. Setting the value to OVERWRITE replaces the existing voice prints and enrollment audio stored for that speaker with new data generated from the latest audio.

" + }, + "FraudDetectionConfig":{ + "shape":"EnrollmentJobFraudDetectionConfig", + "documentation":"

The fraud detection configuration to use for the speaker enrollment job.

" + } + }, + "documentation":"

Contains configurations defining enrollment behavior for the batch job.

" + }, + "EnrollmentJobFraudDetectionConfig":{ + "type":"structure", + "members":{ + "FraudDetectionAction":{ + "shape":"FraudDetectionAction", + "documentation":"

The action to take when the given speaker is flagged by the fraud detection system. The default value is FAIL, which fails the speaker enrollment. Changing this value to IGNORE results in the speaker being enrolled even if they are flagged by the fraud detection system.

" + }, + "RiskThreshold":{ + "shape":"Score", + "documentation":"

Threshold value for determining whether the speaker is a high risk to be fraudulent. If the detected risk score calculated by Voice ID is greater than or equal to the threshold, the speaker is considered a fraudster.

" + } + }, + "documentation":"

The configuration defining the action to take when a speaker is flagged by the fraud detection system during a batch speaker enrollment job, and the risk threshold to use for identification.

" + }, + "EvaluateSessionRequest":{ + "type":"structure", + "required":[ + "DomainId", + "SessionNameOrId" + ], + "members":{ + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain where the session started.

" + }, + "SessionNameOrId":{ + "shape":"SessionNameOrId", + "documentation":"

The session identifier, or name of the session, that you want to evaluate. In Voice ID integration, this is the Contact-Id.

" + } + } + }, + "EvaluateSessionResponse":{ + "type":"structure", + "members":{ + "AuthenticationResult":{ + "shape":"AuthenticationResult", + "documentation":"

Details resulting from the authentication process, such as authentication decision and authentication score.

" + }, + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain containing the session.

" + }, + "FraudDetectionResult":{ + "shape":"FraudDetectionResult", + "documentation":"

Details resulting from the fraud detection process, such as fraud detection decision and risk score.

" + }, + "SessionId":{ + "shape":"SessionId", + "documentation":"

The service-generated identifier of the session.

" + }, + "SessionName":{ + "shape":"SessionName", + "documentation":"

The client-provided name of the session.

" + }, + "StreamingStatus":{ + "shape":"StreamingStatus", + "documentation":"

The current status of audio streaming for this session. This field is useful to infer next steps when the Authentication or Fraud Detection results are empty or the decision is NOT_ENOUGH_SPEECH. In this situation, if the StreamingStatus is ONGOING/PENDING_CONFIGURATION, it can mean that the client should call the API again later, once Voice ID has enough audio to produce a result. If the decision remains NOT_ENOUGH_SPEECH even after StreamingStatus is ENDED, it means that the previously streamed session did not have enough speech to perform evaluation, and a new streaming session is needed to try again.

" + } + } + }, + "ExistingEnrollmentAction":{ + "type":"string", + "enum":[ + "SKIP", + "OVERWRITE" + ] + }, + "FailureDetails":{ + "type":"structure", + "members":{ + "Message":{ + "shape":"String", + "documentation":"

A description of the error that caused the batch job failure.

" + }, + "StatusCode":{ + "shape":"Integer", + "documentation":"

An HTTP status code representing the nature of the error.

" + } + }, + "documentation":"

Contains error details for a failed batch job.

" + }, + "FraudDetectionAction":{ + "type":"string", + "enum":[ + "IGNORE", + "FAIL" + ] + }, + "FraudDetectionConfiguration":{ + "type":"structure", + "required":["RiskThreshold"], + "members":{ + "RiskThreshold":{ + "shape":"Score", + "documentation":"

Threshold value for determining whether the speaker is a fraudster. If the detected risk score calculated by Voice ID is higher than the threshold, the speaker is considered a fraudster.

" + } + }, + "documentation":"

The configuration used for performing fraud detection over a speaker during a session.

" + }, + "FraudDetectionDecision":{ + "type":"string", + "enum":[ + "HIGH_RISK", + "LOW_RISK", + "NOT_ENOUGH_SPEECH" + ] + }, + "FraudDetectionReason":{ + "type":"string", + "enum":["KNOWN_FRAUDSTER"] + }, + "FraudDetectionReasons":{ + "type":"list", + "member":{"shape":"FraudDetectionReason"}, + "max":3, + "min":0 + }, + "FraudDetectionResult":{ + "type":"structure", + "members":{ + "AudioAggregationEndedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp indicating when audio aggregation ended for this fraud detection result.

" + }, + "AudioAggregationStartedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp indicating when audio aggregation started for this fraud detection result.

" + }, + "Configuration":{ + "shape":"FraudDetectionConfiguration", + "documentation":"

The FraudDetectionConfiguration used to generate this fraud detection result.

" + }, + "Decision":{ + "shape":"FraudDetectionDecision", + "documentation":"

The fraud detection decision produced by Voice ID, processed against the current session state and streamed audio of the speaker.

" + }, + "FraudDetectionResultId":{ + "shape":"UniqueIdLarge", + "documentation":"

The unique identifier for this fraud detection result. Given there can be multiple fraud detections for a given session, this field helps in identifying if the returned result is from previous streaming activity or a new result. Note that in the absence of any new streaming activity or risk threshold changes, Voice ID always returns cached Fraud Detection result for this API.

" + }, + "Reasons":{ + "shape":"FraudDetectionReasons", + "documentation":"

The reason the speaker was flagged by the fraud detection system. This is only populated if the fraud detection Decision is HIGH_RISK, and only has one possible value: KNOWN_FRAUDSTER.

" + }, + "RiskDetails":{ + "shape":"FraudRiskDetails", + "documentation":"

Details about each risk analyzed for this speaker.

" + } + }, + "documentation":"

The fraud detection result produced by Voice ID, processed against the current session state and streamed audio of the speaker.

" + }, + "FraudRiskDetails":{ + "type":"structure", + "required":["KnownFraudsterRisk"], + "members":{ + "KnownFraudsterRisk":{ + "shape":"KnownFraudsterRisk", + "documentation":"

The details resulting from 'Known Fraudster Risk' analysis of the speaker.

" + } + }, + "documentation":"

Details regarding various fraud risk analyses performed against the current session state and streamed audio of the speaker.

" + }, + "Fraudster":{ + "type":"structure", + "members":{ + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

The timestamp when Voice ID identified the fraudster.

" + }, + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier for the domain containing the fraudster.

" + }, + "GeneratedFraudsterId":{ + "shape":"GeneratedFraudsterId", + "documentation":"

The service-generated identifier for the fraudster.

" + } + }, + "documentation":"

Contains all the information about a fraudster.

" + }, + "FraudsterId":{ + "type":"string", + "max":25, + "min":25, + "pattern":"^id#[a-zA-Z0-9]{22}$", + "sensitive":true + }, + "FraudsterRegistrationJob":{ + "type":"structure", + "members":{ + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp showing the creation time of the fraudster registration job.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The IAM role Amazon Resource Name (ARN) that grants Voice ID permissions to access customer's buckets to read the input manifest file and write the job output file.

" + }, + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain containing the fraudster registration job.

" + }, + "EndedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp showing when the fraudster registration job ended.

" + }, + "FailureDetails":{ + "shape":"FailureDetails", + "documentation":"

Contains details that are populated when an entire batch job fails. In cases of individual registration job failures, the batch job as a whole doesn't fail; it is completed with a JobStatus of COMPLETED_WITH_ERRORS. You can use the job output file to identify the individual registration requests that failed.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

The input data config containing an S3 URI for the input manifest file that contains the list of fraudster registration job requests.

" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

The service-generated identifier for the fraudster registration job.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The client-provided name for the fraudster registration job.

" + }, + "JobProgress":{ + "shape":"JobProgress", + "documentation":"

Shows the completed percentage of registration requests listed in the input file.

" + }, + "JobStatus":{ + "shape":"FraudsterRegistrationJobStatus", + "documentation":"

The current status of the fraudster registration job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

The output data config containing the S3 location where you want Voice ID to write your job output file; you must also include a KMS Key ID in order to encrypt the file.

" + }, + "RegistrationConfig":{ + "shape":"RegistrationConfig", + "documentation":"

The registration config containing details such as the action to take when a duplicate fraudster is detected, and the similarity threshold to use for detecting a duplicate fraudster.

" + } + }, + "documentation":"

Contains all the information about a fraudster registration job.

" + }, + "FraudsterRegistrationJobStatus":{ + "type":"string", + "enum":[ + "SUBMITTED", + "IN_PROGRESS", + "COMPLETED", + "COMPLETED_WITH_ERRORS", + "FAILED" + ] + }, + "FraudsterRegistrationJobSummaries":{ + "type":"list", + "member":{"shape":"FraudsterRegistrationJobSummary"} + }, + "FraudsterRegistrationJobSummary":{ + "type":"structure", + "members":{ + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp showing when the fraudster registration job is created.

" + }, + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain containing the fraudster registration job.

" + }, + "EndedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp showing when the fraudster registration job ended.

" + }, + "FailureDetails":{ + "shape":"FailureDetails", + "documentation":"

Contains details that are populated when an entire batch job fails. In cases of individual registration job failures, the batch job as a whole doesn't fail; it is completed with a JobStatus of COMPLETED_WITH_ERRORS. You can use the job output file to identify the individual registration requests that failed.

" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

The service-generated identifier for the fraudster registration job.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The client-provided name for the fraudster registration job.

" + }, + "JobProgress":{ + "shape":"JobProgress", + "documentation":"

Shows the completed percentage of registration requests listed in the input file.

" + }, + "JobStatus":{ + "shape":"FraudsterRegistrationJobStatus", + "documentation":"

The current status of the fraudster registration job.

" + } + }, + "documentation":"

Contains a summary of information about a fraudster registration job.

" + }, + "GeneratedFraudsterId":{ + "type":"string", + "max":25, + "min":25, + "pattern":"^id#[a-zA-Z0-9]{22}$" + }, + "GeneratedSpeakerId":{ + "type":"string", + "max":25, + "min":25, + "pattern":"^id#[a-zA-Z0-9]{22}$" + }, + "IamRoleArn":{ + "type":"string", + "max":2048, + "min":20, + "pattern":"^arn:aws(-[^:]+)?:iam::[0-9]{12}:role/.+$" + }, + "InputDataConfig":{ + "type":"structure", + "required":["S3Uri"], + "members":{ + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

The S3 location for the input manifest file that contains the list of individual enrollment or registration job requests.

" + } + }, + "documentation":"

The configuration containing input file information for a batch job.

" + }, + "Integer":{ + "type":"integer", + "box":true + }, + "InternalServerException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The request failed due to an unknown error on the server side.

", + "exception":true, + "fault":true + }, + "JobId":{ + "type":"string", + "max":22, + "min":22, + "pattern":"^[a-zA-Z0-9]{22}$" + }, + "JobName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^[a-zA-Z0-9][a-zA-Z0-9_-]*$", + "sensitive":true + }, + "JobProgress":{ + "type":"structure", + "members":{ + "PercentComplete":{ + "shape":"Score", + "documentation":"

Shows the completed percentage of enrollment or registration requests listed in the input file.

" + } + }, + "documentation":"

Indicates the completion progress for a batch job.

" + }, + "KmsKeyId":{ + "type":"string", + "max":2048, + "min":1 + }, + "KnownFraudsterRisk":{ + "type":"structure", + "required":["RiskScore"], + "members":{ + "GeneratedFraudsterId":{ + "shape":"GeneratedFraudsterId", + "documentation":"

The identifier of the fraudster that is the closest match to the speaker. If there are no fraudsters registered in a given domain, or if there are no fraudsters with a non-zero RiskScore, this value is null.

" + }, + "RiskScore":{ + "shape":"Score", + "documentation":"

The score indicating the likelihood the speaker is a known fraudster.

" + } + }, + "documentation":"

Contains details produced as a result of performing known fraudster risk analysis on a speaker.

" + }, + "ListDomainsRequest":{ + "type":"structure", + "members":{ + "MaxResults":{ + "shape":"MaxResultsForListDomainFe", + "documentation":"

The maximum number of results that are returned per call. You can use NextToken to obtain further pages of results. The default is 100; the maximum allowed page size is also 100.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours.

" + } + } + }, + "ListDomainsResponse":{ + "type":"structure", + "members":{ + "DomainSummaries":{ + "shape":"DomainSummaries", + "documentation":"

A list containing details about each domain in the Amazon Web Services account.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours.

" + } + } + }, + "ListFraudsterRegistrationJobsRequest":{ + "type":"structure", + "required":["DomainId"], + "members":{ + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain containing the fraudster registration Jobs.

" + }, + "JobStatus":{ + "shape":"FraudsterRegistrationJobStatus", + "documentation":"

Provides the status of your fraudster registration job.

" + }, + "MaxResults":{ + "shape":"MaxResultsForList", + "documentation":"

The maximum number of results that are returned per call. You can use NextToken to obtain further pages of results. The default is 100; the maximum allowed page size is also 100.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours.

" + } + } + }, + "ListFraudsterRegistrationJobsResponse":{ + "type":"structure", + "members":{ + "JobSummaries":{ + "shape":"FraudsterRegistrationJobSummaries", + "documentation":"

A list containing details about each specified fraudster registration job.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours.

" + } + } + }, + "ListSpeakerEnrollmentJobsRequest":{ + "type":"structure", + "required":["DomainId"], + "members":{ + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain containing the speaker enrollment jobs.

" + }, + "JobStatus":{ + "shape":"SpeakerEnrollmentJobStatus", + "documentation":"

Provides the status of your speaker enrollment Job.

" + }, + "MaxResults":{ + "shape":"MaxResultsForList", + "documentation":"

The maximum number of results that are returned per call. You can use NextToken to obtain further pages of results. The default is 100; the maximum allowed page size is also 100.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours.

" + } + } + }, + "ListSpeakerEnrollmentJobsResponse":{ + "type":"structure", + "members":{ + "JobSummaries":{ + "shape":"SpeakerEnrollmentJobSummaries", + "documentation":"

A list containing details about each specified speaker enrollment job.

" + }, + "NextToken":{ + "shape":"String", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours.

" + } + } + }, + "ListSpeakersRequest":{ + "type":"structure", + "required":["DomainId"], + "members":{ + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain.

" + }, + "MaxResults":{ + "shape":"MaxResultsForList", + "documentation":"

The maximum number of results that are returned per call. You can use NextToken to obtain further pages of results. The default is 100; the maximum allowed page size is also 100.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours.

" + } + } + }, + "ListSpeakersResponse":{ + "type":"structure", + "members":{ + "NextToken":{ + "shape":"String", + "documentation":"

If NextToken is returned, there are more results available. The value of NextToken is a unique pagination token for each page. Make the call again using the returned token to retrieve the next page. Keep all other arguments unchanged. Each pagination token expires after 24 hours.

" + }, + "SpeakerSummaries":{ + "shape":"SpeakerSummaries", + "documentation":"

A list containing details about each speaker in the Amazon Web Services account.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the Voice ID resource for which you want to list the tags.

" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "Tags":{ + "shape":"TagList", + "documentation":"

The list of tags associated with the specified resource.

" + } + } + }, + "MaxResultsForList":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "MaxResultsForListDomainFe":{ + "type":"integer", + "box":true, + "max":10, + "min":1 + }, + "NextToken":{ + "type":"string", + "max":8192, + "min":0, + "pattern":"^\\p{ASCII}{0,8192}$" + }, + "OptOutSpeakerRequest":{ + "type":"structure", + "required":[ + "DomainId", + "SpeakerId" + ], + "members":{ + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain containing the speaker.

" + }, + "SpeakerId":{ + "shape":"SpeakerId", + "documentation":"

The identifier of the speaker you want opted-out.

" + } + } + }, + "OptOutSpeakerResponse":{ + "type":"structure", + "members":{ + "Speaker":{ + "shape":"Speaker", + "documentation":"

Details about the opted-out speaker.

" + } + } + }, + "OutputDataConfig":{ + "type":"structure", + "required":["S3Uri"], + "members":{ + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The identifier of the KMS key you want Voice ID to use to encrypt the output file of the fraudster registration job.

" + }, + "S3Uri":{ + "shape":"S3Uri", + "documentation":"

The S3 path of the folder to which Voice ID writes the job output file, which has a *.out extension. For example, if the input file name is input-file.json and the output folder path is s3://output-bucket/output-folder, the full output file path is s3://output-bucket/output-folder/job-Id/input-file.json.out.

" + } + }, + "documentation":"

The configuration containing output file information for a batch job.

" + }, + "RegistrationConfig":{ + "type":"structure", + "members":{ + "DuplicateRegistrationAction":{ + "shape":"DuplicateRegistrationAction", + "documentation":"

The action to take when a fraudster is identified as a duplicate. The default action is SKIP, which skips registering the duplicate fraudster. Setting the value to REGISTER_AS_NEW always registers a new fraudster into the specified domain.

" + }, + "FraudsterSimilarityThreshold":{ + "shape":"Score", + "documentation":"

The minimum similarity score between the new and old fraudsters in order to consider the new fraudster a duplicate.

" + } + }, + "documentation":"

The configuration defining the action to take when a duplicate fraudster is detected, and the similarity threshold to use for detecting a duplicate fraudster during a batch fraudster registration job.

" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"}, + "ResourceType":{ + "shape":"ResourceType", + "documentation":"

The type of resource which cannot be found. Possible types are BATCH_JOB, COMPLIANCE_CONSENT, DOMAIN, FRAUDSTER, SESSION and SPEAKER.

" + } + }, + "documentation":"

The specified resource cannot be found. Check the ResourceType and error message for more details.

", + "exception":true + }, + "ResourceType":{ + "type":"string", + "enum":[ + "BATCH_JOB", + "COMPLIANCE_CONSENT", + "DOMAIN", + "FRAUDSTER", + "SESSION", + "SPEAKER" + ] + }, + "S3Uri":{ + "type":"string", + "max":1024, + "min":0, + "pattern":"^s3://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9](/.*)?$" + }, + "Score":{ + "type":"integer", + "box":true, + "max":100, + "min":0 + }, + "ServerSideEncryptionConfiguration":{ + "type":"structure", + "required":["KmsKeyId"], + "members":{ + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The identifier of the KMS Key you want Voice ID to use to encrypt your data.

" + } + }, + "documentation":"

The configuration containing information about the customer-managed KMS Key used for encrypting customer data.

" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The request exceeded the service quota. Refer to Voice ID Service Quotas and try your request again.

", + "exception":true + }, + "SessionId":{ + "type":"string", + "max":25, + "min":25, + "pattern":"^id#[a-zA-Z0-9]{22}$" + }, + "SessionName":{ + "type":"string", + "max":36, + "min":1, + "pattern":"^[a-zA-Z0-9][a-zA-Z0-9_-]*$" + }, + "SessionNameOrId":{ + "type":"string", + "max":36, + "min":1, + "pattern":"^(id#[a-zA-Z0-9]{22}|[a-zA-Z0-9][a-zA-Z0-9_-]*)$" + }, + "Speaker":{ + "type":"structure", + "members":{ + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp showing when the speaker is created.

" + }, + "CustomerSpeakerId":{ + "shape":"CustomerSpeakerId", + "documentation":"

The client-provided identifier for the speaker.

" + }, + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain that contains the speaker.

" + }, + "GeneratedSpeakerId":{ + "shape":"GeneratedSpeakerId", + "documentation":"

The service-generated identifier for the speaker.

" + }, + "Status":{ + "shape":"SpeakerStatus", + "documentation":"

The current status of the speaker.

" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp showing the speaker's last update.

" + } + }, + "documentation":"

Contains all the information about a speaker.

" + }, + "SpeakerEnrollmentJob":{ + "type":"structure", + "members":{ + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp showing the creation of the speaker enrollment job.

" + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The IAM role Amazon Resource Name (ARN) that grants Voice ID permissions to access customer's buckets to read the input manifest file and write the job output file.

" + }, + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain that contains the speaker enrollment job.

" + }, + "EndedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp showing when the speaker enrollment job ended.

" + }, + "EnrollmentConfig":{ + "shape":"EnrollmentConfig", + "documentation":"

The configuration that defines the action to take when the speaker is already enrolled in Voice ID, and the FraudDetectionConfig to use.

" + }, + "FailureDetails":{ + "shape":"FailureDetails", + "documentation":"

Contains details that are populated when an entire batch job fails. In cases of individual registration job failures, the batch job as a whole doesn't fail; it is completed with a JobStatus of COMPLETED_WITH_ERRORS. You can use the job output file to identify the individual registration requests that failed.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

The input data config containing an S3 URI for the input manifest file that contains the list of speaker enrollment job requests.

" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

The service-generated identifier for the speaker enrollment job.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The client-provided name for the speaker enrollment job.

" + }, + "JobProgress":{ + "shape":"JobProgress", + "documentation":"

Provides details on job progress. This field shows the completed percentage of registration requests listed in the input file.

" + }, + "JobStatus":{ + "shape":"SpeakerEnrollmentJobStatus", + "documentation":"

The current status of the speaker enrollment job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

The output data config containing the S3 location where Voice ID writes the job output file; you must also include a KMS Key ID to encrypt the file.

" + } + }, + "documentation":"

Contains all the information about a speaker enrollment job.

" + }, + "SpeakerEnrollmentJobStatus":{ + "type":"string", + "enum":[ + "SUBMITTED", + "IN_PROGRESS", + "COMPLETED", + "COMPLETED_WITH_ERRORS", + "FAILED" + ] + }, + "SpeakerEnrollmentJobSummaries":{ + "type":"list", + "member":{"shape":"SpeakerEnrollmentJobSummary"} + }, + "SpeakerEnrollmentJobSummary":{ + "type":"structure", + "members":{ + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp showing the creation time of the speaker enrollment job.

" + }, + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain that contains the speaker enrollment job.

" + }, + "EndedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp showing when the speaker enrollment job ended.

" + }, + "FailureDetails":{ + "shape":"FailureDetails", + "documentation":"

Contains details that are populated when an entire batch job fails. In cases of individual registration job failures, the batch job as a whole doesn't fail; it is completed with a JobStatus of COMPLETED_WITH_ERRORS. You can use the job output file to identify the individual registration requests that failed.

" + }, + "JobId":{ + "shape":"JobId", + "documentation":"

The service-generated identifier for the speaker enrollment job.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The client-provided name for the speaker enrollment job.

" + }, + "JobProgress":{ + "shape":"JobProgress", + "documentation":"

Provides details regarding job progress. This field shows the completed percentage of enrollment requests listed in the input file.

" + }, + "JobStatus":{ + "shape":"SpeakerEnrollmentJobStatus", + "documentation":"

The current status of the speaker enrollment job.

" + } + }, + "documentation":"

Contains a summary of information about a speaker enrollment job.

" + }, + "SpeakerId":{ + "type":"string", + "max":256, + "min":1, + "pattern":"^(id#[a-zA-Z0-9]{22}|[a-zA-Z0-9][a-zA-Z0-9_-]*)$", + "sensitive":true + }, + "SpeakerStatus":{ + "type":"string", + "enum":[ + "ENROLLED", + "EXPIRED", + "OPTED_OUT", + "PENDING" + ] + }, + "SpeakerSummaries":{ + "type":"list", + "member":{"shape":"SpeakerSummary"} + }, + "SpeakerSummary":{ + "type":"structure", + "members":{ + "CreatedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp showing the speaker's creation time.

" + }, + "CustomerSpeakerId":{ + "shape":"CustomerSpeakerId", + "documentation":"

The client-provided identifier for the speaker.

" + }, + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain that contains the speaker.

" + }, + "GeneratedSpeakerId":{ + "shape":"GeneratedSpeakerId", + "documentation":"

The service-generated identifier for the speaker.

" + }, + "Status":{ + "shape":"SpeakerStatus", + "documentation":"

The current status of the speaker.

" + }, + "UpdatedAt":{ + "shape":"Timestamp", + "documentation":"

A timestamp showing the speaker's last update.

" + } + }, + "documentation":"

Contains a summary of information about a speaker.

" + }, + "StartFraudsterRegistrationJobRequest":{ + "type":"structure", + "required":[ + "DataAccessRoleArn", + "DomainId", + "InputDataConfig", + "OutputDataConfig" + ], + "members":{ + "ClientToken":{ + "shape":"ClientTokenString", + "documentation":"

The idempotency token for starting a new fraudster registration job. If not provided, Amazon Web Services SDK populates this field.

", + "idempotencyToken":true + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The IAM role Amazon Resource Name (ARN) that grants Voice ID permissions to access customer's buckets to read the input manifest file and write the Job output file. Refer to the Create and edit a fraudster watchlist documentation for the permissions needed in this role.

" + }, + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain containing the fraudster registration job and in which the fraudsters are registered.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

The input data config containing an S3 URI for the input manifest file that contains the list of fraudster registration requests.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

The name of the new fraudster registration job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

The output data config containing the S3 location where Voice ID writes the job output file; you must also include a KMS Key ID to encrypt the file.

" + }, + "RegistrationConfig":{ + "shape":"RegistrationConfig", + "documentation":"

The registration config containing details such as the action to take when a duplicate fraudster is detected, and the similarity threshold to use for detecting a duplicate fraudster.

" + } + } + }, + "StartFraudsterRegistrationJobResponse":{ + "type":"structure", + "members":{ + "Job":{ + "shape":"FraudsterRegistrationJob", + "documentation":"

Details about the started fraudster registration job.

" + } + } + }, + "StartSpeakerEnrollmentJobRequest":{ + "type":"structure", + "required":[ + "DataAccessRoleArn", + "DomainId", + "InputDataConfig", + "OutputDataConfig" + ], + "members":{ + "ClientToken":{ + "shape":"ClientTokenString", + "documentation":"

The idempotency token for starting a new speaker enrollment Job. If not provided, Amazon Web Services SDK populates this field.

", + "idempotencyToken":true + }, + "DataAccessRoleArn":{ + "shape":"IamRoleArn", + "documentation":"

The IAM role Amazon Resource Name (ARN) that grants Voice ID permissions to access customer's buckets to read the input manifest file and write the job output file. Refer to Batch enrollment using audio data from prior calls documentation for the permissions needed in this role.

" + }, + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain that contains the speaker enrollment job and in which the speakers are enrolled.

" + }, + "EnrollmentConfig":{ + "shape":"EnrollmentConfig", + "documentation":"

The enrollment config that contains details such as the action to take when a speaker is already enrolled in the Voice ID system or when a speaker is identified as a fraudster.

" + }, + "InputDataConfig":{ + "shape":"InputDataConfig", + "documentation":"

The input data config containing the S3 location for the input manifest file that contains the list of speaker enrollment requests.

" + }, + "JobName":{ + "shape":"JobName", + "documentation":"

A name for your speaker enrollment job.

" + }, + "OutputDataConfig":{ + "shape":"OutputDataConfig", + "documentation":"

The output data config containing the S3 location where Voice ID writes the job output file; you must also include a KMS Key ID to encrypt the file.

" + } + } + }, + "StartSpeakerEnrollmentJobResponse":{ + "type":"structure", + "members":{ + "Job":{ + "shape":"SpeakerEnrollmentJob", + "documentation":"

Details about the started speaker enrollment job.

" + } + } + }, + "StreamingStatus":{ + "type":"string", + "enum":[ + "PENDING_CONFIGURATION", + "ONGOING", + "ENDED" + ] + }, + "String":{ + "type":"string", + "min":1 + }, + "Tag":{ + "type":"structure", + "required":[ + "Key", + "Value" + ], + "members":{ + "Key":{ + "shape":"TagKey", + "documentation":"

The first part of a key:value pair that forms a tag associated with a given resource. For example, in the tag ‘Department’:’Sales’, the key is 'Department'.

" + }, + "Value":{ + "shape":"TagValue", + "documentation":"

The second part of a key:value pair that forms a tag associated with a given resource. For example, in the tag ‘Department’:’Sales’, the value is 'Sales'.

" + } + }, + "documentation":"

A tag that can be assigned to a Voice ID resource.

" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$", + "sensitive":true + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":200, + "min":0 + }, + "TagList":{ + "type":"list", + "member":{"shape":"Tag"}, + "max":200, + "min":0 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Tags" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the Voice ID resource you want to tag.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The list of tags to assign to the specified resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":0, + "pattern":"^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$", + "sensitive":true + }, + "ThrottlingException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The request was denied due to request throttling. Please slow down your request rate. Refer to Amazon Connect Voice ID Service API throttling quotas and try your request again.

", + "exception":true + }, + "Timestamp":{"type":"timestamp"}, + "UniqueIdLarge":{ + "type":"string", + "max":22, + "min":22, + "pattern":"^[a-zA-Z0-9]{22}$" + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "TagKeys" + ], + "members":{ + "ResourceArn":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) of the Voice ID resource you want to remove tags from.

" + }, + "TagKeys":{ + "shape":"TagKeyList", + "documentation":"

The list of tag keys you want to remove from the specified resource.

" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateDomainRequest":{ + "type":"structure", + "required":[ + "DomainId", + "Name", + "ServerSideEncryptionConfiguration" + ], + "members":{ + "Description":{ + "shape":"Description", + "documentation":"

A brief description about this domain.

" + }, + "DomainId":{ + "shape":"DomainId", + "documentation":"

The identifier of the domain to be updated.

" + }, + "Name":{ + "shape":"DomainName", + "documentation":"

The name of the domain.

" + }, + "ServerSideEncryptionConfiguration":{ + "shape":"ServerSideEncryptionConfiguration", + "documentation":"

The configuration, containing the KMS Key Identifier, to be used by Voice ID for the server-side encryption of your data. Note that all the existing data in the domain are still encrypted using the existing key, only the data added to domain after updating the key is encrypted using the new key.

" + } + } + }, + "UpdateDomainResponse":{ + "type":"structure", + "members":{ + "Domain":{ + "shape":"Domain", + "documentation":"

Details about the updated domain

" + } + } + }, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The request failed one or more validations; check the error message for more details.

", + "exception":true + } + }, + "documentation":"

Amazon Connect Voice ID provides real-time caller authentication and fraud screening. This guide describes the APIs used for this service.

" +} diff --git a/botocore/data/wisdom/2020-10-19/paginators-1.json b/botocore/data/wisdom/2020-10-19/paginators-1.json new file mode 100644 index 00000000..00d363ef --- /dev/null +++ b/botocore/data/wisdom/2020-10-19/paginators-1.json @@ -0,0 +1,46 @@ +{ + "pagination": { + "ListAssistantAssociations": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assistantAssociationSummaries" + }, + "ListAssistants": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "assistantSummaries" + }, + "ListContents": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "contentSummaries" + }, + "ListKnowledgeBases": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "knowledgeBaseSummaries" + }, + "QueryAssistant": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "results" + }, + "SearchContent": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "contentSummaries" + }, + "SearchSessions": { + "input_token": "nextToken", + "output_token": "nextToken", + "limit_key": "maxResults", + "result_key": "sessionSummaries" + } + } +} diff --git a/botocore/data/wisdom/2020-10-19/service-2.json b/botocore/data/wisdom/2020-10-19/service-2.json new file mode 100644 index 00000000..1fdf6098 --- /dev/null +++ b/botocore/data/wisdom/2020-10-19/service-2.json @@ -0,0 +1,2648 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2020-10-19", + "endpointPrefix":"wisdom", + "jsonVersion":"1.1", + "protocol":"rest-json", + "serviceFullName":"Amazon Connect Wisdom Service", + "serviceId":"Wisdom", + "signatureVersion":"v4", + "signingName":"wisdom", + "uid":"wisdom-2020-10-19" + }, + "operations":{ + "CreateAssistant":{ + "name":"CreateAssistant", + "http":{ + 
"method":"POST", + "requestUri":"/assistants", + "responseCode":200 + }, + "input":{"shape":"CreateAssistantRequest"}, + "output":{"shape":"CreateAssistantResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates an Amazon Connect Wisdom assistant.

", + "idempotent":true + }, + "CreateAssistantAssociation":{ + "name":"CreateAssistantAssociation", + "http":{ + "method":"POST", + "requestUri":"/assistants/{assistantId}/associations", + "responseCode":200 + }, + "input":{"shape":"CreateAssistantAssociationRequest"}, + "output":{"shape":"CreateAssistantAssociationResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Creates an association between an Amazon Connect Wisdom assistant and another resource. Currently, the only supported association is with a knowledge base. An assistant can have only a single association.

", + "idempotent":true + }, + "CreateContent":{ + "name":"CreateContent", + "http":{ + "method":"POST", + "requestUri":"/knowledgeBases/{knowledgeBaseId}/contents", + "responseCode":200 + }, + "input":{"shape":"CreateContentRequest"}, + "output":{"shape":"CreateContentResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Creates Wisdom content. Before calling this API, use StartContentUpload to upload an asset.

", + "idempotent":true + }, + "CreateKnowledgeBase":{ + "name":"CreateKnowledgeBase", + "http":{ + "method":"POST", + "requestUri":"/knowledgeBases", + "responseCode":200 + }, + "input":{"shape":"CreateKnowledgeBaseRequest"}, + "output":{"shape":"CreateKnowledgeBaseResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ServiceQuotaExceededException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Creates a knowledge base.

When using this API, you cannot reuse Amazon AppIntegrations DataIntegrations with external knowledge bases such as Salesforce and ServiceNow. If you do, you'll get an InvalidRequestException error.

 <p>For example, you're programmatically managing your external knowledge base, and you want to add or remove one of the fields that is being ingested from Salesforce. Do the following:</p> <ol> <li> <p>Call <a href="https://docs.aws.amazon.com/wisdom/latest/APIReference/API_DeleteKnowledgeBase.html">DeleteKnowledgeBase</a>.</p> </li> <li> <p>Call <a href="https://docs.aws.amazon.com/appintegrations/latest/APIReference/API_DeleteDataIntegration.html">DeleteDataIntegration</a>.</p> </li> <li> <p>Call <a href="https://docs.aws.amazon.com/appintegrations/latest/APIReference/API_CreateDataIntegration.html">CreateDataIntegration</a> to recreate the DataIntegration or a create different one.</p> </li> <li> <p>Call CreateKnowledgeBase.</p> </li> </ol> </note> 
", + "idempotent":true + }, + "CreateSession":{ + "name":"CreateSession", + "http":{ + "method":"POST", + "requestUri":"/assistants/{assistantId}/sessions", + "responseCode":200 + }, + "input":{"shape":"CreateSessionRequest"}, + "output":{"shape":"CreateSessionResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"ValidationException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Creates a session. A session is a contextual container used for generating recommendations. Amazon Connect creates a new Wisdom session for each contact on which Wisdom is enabled.

", + "idempotent":true + }, + "DeleteAssistant":{ + "name":"DeleteAssistant", + "http":{ + "method":"DELETE", + "requestUri":"/assistants/{assistantId}", + "responseCode":204 + }, + "input":{"shape":"DeleteAssistantRequest"}, + "output":{"shape":"DeleteAssistantResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes an assistant.

", + "idempotent":true + }, + "DeleteAssistantAssociation":{ + "name":"DeleteAssistantAssociation", + "http":{ + "method":"DELETE", + "requestUri":"/assistants/{assistantId}/associations/{assistantAssociationId}", + "responseCode":204 + }, + "input":{"shape":"DeleteAssistantAssociationRequest"}, + "output":{"shape":"DeleteAssistantAssociationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes an assistant association.

", + "idempotent":true + }, + "DeleteContent":{ + "name":"DeleteContent", + "http":{ + "method":"DELETE", + "requestUri":"/knowledgeBases/{knowledgeBaseId}/contents/{contentId}", + "responseCode":204 + }, + "input":{"shape":"DeleteContentRequest"}, + "output":{"shape":"DeleteContentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes the content.

", + "idempotent":true + }, + "DeleteKnowledgeBase":{ + "name":"DeleteKnowledgeBase", + "http":{ + "method":"DELETE", + "requestUri":"/knowledgeBases/{knowledgeBaseId}", + "responseCode":204 + }, + "input":{"shape":"DeleteKnowledgeBaseRequest"}, + "output":{"shape":"DeleteKnowledgeBaseResponse"}, + "errors":[ + {"shape":"ConflictException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Deletes the knowledge base.

When you use this API to delete an external knowledge base such as Salesforce or ServiceNow, you must also delete the Amazon AppIntegrations DataIntegration. This is because you can't reuse the DataIntegration after it's been associated with an external knowledge base. However, you can delete and recreate it. See DeleteDataIntegration and CreateDataIntegration in the Amazon AppIntegrations API Reference.

", + "idempotent":true + }, + "GetAssistant":{ + "name":"GetAssistant", + "http":{ + "method":"GET", + "requestUri":"/assistants/{assistantId}", + "responseCode":200 + }, + "input":{"shape":"GetAssistantRequest"}, + "output":{"shape":"GetAssistantResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves information about an assistant.

" + }, + "GetAssistantAssociation":{ + "name":"GetAssistantAssociation", + "http":{ + "method":"GET", + "requestUri":"/assistants/{assistantId}/associations/{assistantAssociationId}", + "responseCode":200 + }, + "input":{"shape":"GetAssistantAssociationRequest"}, + "output":{"shape":"GetAssistantAssociationResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves information about an assistant association.

" + }, + "GetContent":{ + "name":"GetContent", + "http":{ + "method":"GET", + "requestUri":"/knowledgeBases/{knowledgeBaseId}/contents/{contentId}", + "responseCode":200 + }, + "input":{"shape":"GetContentRequest"}, + "output":{"shape":"GetContentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves content, including a pre-signed URL to download the content.

" + }, + "GetContentSummary":{ + "name":"GetContentSummary", + "http":{ + "method":"GET", + "requestUri":"/knowledgeBases/{knowledgeBaseId}/contents/{contentId}/summary", + "responseCode":200 + }, + "input":{"shape":"GetContentSummaryRequest"}, + "output":{"shape":"GetContentSummaryResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves summary information about the content.

" + }, + "GetKnowledgeBase":{ + "name":"GetKnowledgeBase", + "http":{ + "method":"GET", + "requestUri":"/knowledgeBases/{knowledgeBaseId}", + "responseCode":200 + }, + "input":{"shape":"GetKnowledgeBaseRequest"}, + "output":{"shape":"GetKnowledgeBaseResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves information about the knowledge base.

" + }, + "GetRecommendations":{ + "name":"GetRecommendations", + "http":{ + "method":"GET", + "requestUri":"/assistants/{assistantId}/sessions/{sessionId}/recommendations", + "responseCode":200 + }, + "input":{"shape":"GetRecommendationsRequest"}, + "output":{"shape":"GetRecommendationsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves recommendations for the specified session. To avoid retrieving the same recommendations in subsequent calls, use NotifyRecommendationsReceived. This API supports long-polling behavior with the waitTimeSeconds parameter. Short poll is the default behavior and only returns recommendations already available. To perform a manual query against an assistant, use QueryAssistant.

" + }, + "GetSession":{ + "name":"GetSession", + "http":{ + "method":"GET", + "requestUri":"/assistants/{assistantId}/sessions/{sessionId}", + "responseCode":200 + }, + "input":{"shape":"GetSessionRequest"}, + "output":{"shape":"GetSessionResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Retrieves information for a specified session.

" + }, + "ListAssistantAssociations":{ + "name":"ListAssistantAssociations", + "http":{ + "method":"GET", + "requestUri":"/assistants/{assistantId}/associations", + "responseCode":200 + }, + "input":{"shape":"ListAssistantAssociationsRequest"}, + "output":{"shape":"ListAssistantAssociationsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists information about assistant associations.

" + }, + "ListAssistants":{ + "name":"ListAssistants", + "http":{ + "method":"GET", + "requestUri":"/assistants", + "responseCode":200 + }, + "input":{"shape":"ListAssistantsRequest"}, + "output":{"shape":"ListAssistantsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Lists information about assistants.

" + }, + "ListContents":{ + "name":"ListContents", + "http":{ + "method":"GET", + "requestUri":"/knowledgeBases/{knowledgeBaseId}/contents", + "responseCode":200 + }, + "input":{"shape":"ListContentsRequest"}, + "output":{"shape":"ListContentsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists the content.

" + }, + "ListKnowledgeBases":{ + "name":"ListKnowledgeBases", + "http":{ + "method":"GET", + "requestUri":"/knowledgeBases", + "responseCode":200 + }, + "input":{"shape":"ListKnowledgeBasesRequest"}, + "output":{"shape":"ListKnowledgeBasesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"} + ], + "documentation":"

Lists the knowledge bases.

" + }, + "ListTagsForResource":{ + "name":"ListTagsForResource", + "http":{ + "method":"GET", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"ListTagsForResourceRequest"}, + "output":{"shape":"ListTagsForResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Lists the tags for the specified resource.

" + }, + "NotifyRecommendationsReceived":{ + "name":"NotifyRecommendationsReceived", + "http":{ + "method":"POST", + "requestUri":"/assistants/{assistantId}/sessions/{sessionId}/recommendations/notify", + "responseCode":200 + }, + "input":{"shape":"NotifyRecommendationsReceivedRequest"}, + "output":{"shape":"NotifyRecommendationsReceivedResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes the specified recommendations from the specified assistant's queue of newly available recommendations. You can use this API in conjunction with GetRecommendations and a waitTimeSeconds input for long-polling behavior and avoiding duplicate recommendations.

", + "idempotent":true + }, + "QueryAssistant":{ + "name":"QueryAssistant", + "http":{ + "method":"POST", + "requestUri":"/assistants/{assistantId}/query", + "responseCode":200 + }, + "input":{"shape":"QueryAssistantRequest"}, + "output":{"shape":"QueryAssistantResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Performs a manual search against the specified assistant. To retrieve recommendations for an assistant, use GetRecommendations.

" + }, + "RemoveKnowledgeBaseTemplateUri":{ + "name":"RemoveKnowledgeBaseTemplateUri", + "http":{ + "method":"DELETE", + "requestUri":"/knowledgeBases/{knowledgeBaseId}/templateUri", + "responseCode":204 + }, + "input":{"shape":"RemoveKnowledgeBaseTemplateUriRequest"}, + "output":{"shape":"RemoveKnowledgeBaseTemplateUriResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes a URI template from a knowledge base.

" + }, + "SearchContent":{ + "name":"SearchContent", + "http":{ + "method":"POST", + "requestUri":"/knowledgeBases/{knowledgeBaseId}/search", + "responseCode":200 + }, + "input":{"shape":"SearchContentRequest"}, + "output":{"shape":"SearchContentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Searches for content in a specified knowledge base. Can be used to get a specific content resource by its name.

" + }, + "SearchSessions":{ + "name":"SearchSessions", + "http":{ + "method":"POST", + "requestUri":"/assistants/{assistantId}/searchSessions", + "responseCode":200 + }, + "input":{"shape":"SearchSessionsRequest"}, + "output":{"shape":"SearchSessionsResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Searches for sessions.

" + }, + "StartContentUpload":{ + "name":"StartContentUpload", + "http":{ + "method":"POST", + "requestUri":"/knowledgeBases/{knowledgeBaseId}/upload", + "responseCode":200 + }, + "input":{"shape":"StartContentUploadRequest"}, + "output":{"shape":"StartContentUploadResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Get a URL to upload content to a knowledge base. To upload content, first make a PUT request to the returned URL with your file, making sure to include the required headers. Then use CreateContent to finalize the content creation process or UpdateContent to modify an existing resource. You can only upload content to a knowledge base of type CUSTOM.

" + }, + "TagResource":{ + "name":"TagResource", + "http":{ + "method":"POST", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"TagResourceRequest"}, + "output":{"shape":"TagResourceResponse"}, + "errors":[ + {"shape":"TooManyTagsException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Adds the specified tags to the specified resource.

", + "idempotent":true + }, + "UntagResource":{ + "name":"UntagResource", + "http":{ + "method":"DELETE", + "requestUri":"/tags/{resourceArn}", + "responseCode":200 + }, + "input":{"shape":"UntagResourceRequest"}, + "output":{"shape":"UntagResourceResponse"}, + "errors":[ + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Removes the specified tags from the specified resource.

", + "idempotent":true + }, + "UpdateContent":{ + "name":"UpdateContent", + "http":{ + "method":"POST", + "requestUri":"/knowledgeBases/{knowledgeBaseId}/contents/{contentId}", + "responseCode":200 + }, + "input":{"shape":"UpdateContentRequest"}, + "output":{"shape":"UpdateContentResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"PreconditionFailedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Updates information about the content.

" + }, + "UpdateKnowledgeBaseTemplateUri":{ + "name":"UpdateKnowledgeBaseTemplateUri", + "http":{ + "method":"POST", + "requestUri":"/knowledgeBases/{knowledgeBaseId}/templateUri", + "responseCode":200 + }, + "input":{"shape":"UpdateKnowledgeBaseTemplateUriRequest"}, + "output":{"shape":"UpdateKnowledgeBaseTemplateUriResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"AccessDeniedException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Updates the template URI of a knowledge base. This is only supported for knowledge bases of type EXTERNAL. Include a single variable in ${variable} format; this is interpolated by Wisdom using ingested content. For example, if you ingest a Salesforce article, it has an Id value, and you can set the template URI to https://myInstanceName.lightning.force.com/lightning/r/Knowledge__kav/*${Id}*/view.

" + } + }, + "shapes":{ + "AccessDeniedException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

You do not have sufficient access to perform this action.

", + "error":{ + "httpStatusCode":403, + "senderFault":true + }, + "exception":true + }, + "AppIntegrationsConfiguration":{ + "type":"structure", + "required":[ + "appIntegrationArn", + "objectFields" + ], + "members":{ + "appIntegrationArn":{ + "shape":"GenericArn", + "documentation":"

The Amazon Resource Name (ARN) of the AppIntegrations DataIntegration to use for ingesting content.

" + }, + "objectFields":{ + "shape":"ObjectFieldsList", + "documentation":"

The fields from the source that are made available to your agents in Wisdom.

  • For Salesforce, you must include at least Id, ArticleNumber, VersionNumber, Title, PublishStatus, and IsDeleted.

  • For ServiceNow, you must include at least number, short_description, sys_mod_count, workflow_state, and active.

Make sure to include additional field(s); these are indexed and used to source recommendations.

" + } + }, + "documentation":"

Configuration information for Amazon AppIntegrations to automatically ingest content.

" + }, + "Arn":{ + "type":"string", + "pattern":"^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})?$" + }, + "AssistantAssociationData":{ + "type":"structure", + "required":[ + "assistantArn", + "assistantAssociationArn", + "assistantAssociationId", + "assistantId", + "associationData", + "associationType" + ], + "members":{ + "assistantArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the Wisdom assistant

" + }, + "assistantAssociationArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the assistant association.

" + }, + "assistantAssociationId":{ + "shape":"Uuid", + "documentation":"

The identifier of the assistant association.

" + }, + "assistantId":{ + "shape":"Uuid", + "documentation":"

The identifier of the Wisdom assistant.

" + }, + "associationData":{ + "shape":"AssistantAssociationOutputData", + "documentation":"

A union type that currently has a single argument, the knowledge base ID.

" + }, + "associationType":{ + "shape":"AssociationType", + "documentation":"

The type of association.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + }, + "documentation":"

Information about the assistant association.

" + }, + "AssistantAssociationInputData":{ + "type":"structure", + "members":{ + "knowledgeBaseId":{ + "shape":"Uuid", + "documentation":"

The identifier of the knowledge base.

" + } + }, + "documentation":"

The data that is input into Wisdom as a result of the assistant association.

", + "union":true + }, + "AssistantAssociationOutputData":{ + "type":"structure", + "members":{ + "knowledgeBaseAssociation":{ + "shape":"KnowledgeBaseAssociationData", + "documentation":"

The knowledge base where output data is sent.

" + } + }, + "documentation":"

The data that is output as a result of the assistant association.

", + "union":true + }, + "AssistantAssociationSummary":{ + "type":"structure", + "required":[ + "assistantArn", + "assistantAssociationArn", + "assistantAssociationId", + "assistantId", + "associationData", + "associationType" + ], + "members":{ + "assistantArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the Wisdom assistant

" + }, + "assistantAssociationArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the assistant association.

" + }, + "assistantAssociationId":{ + "shape":"Uuid", + "documentation":"

The identifier of the assistant association.

" + }, + "assistantId":{ + "shape":"Uuid", + "documentation":"

The identifier of the Wisdom assistant.

" + }, + "associationData":{ + "shape":"AssistantAssociationOutputData", + "documentation":"

The association data.

" + }, + "associationType":{ + "shape":"AssociationType", + "documentation":"

The type of association.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + }, + "documentation":"

Summary information about the assistant association.

" + }, + "AssistantAssociationSummaryList":{ + "type":"list", + "member":{"shape":"AssistantAssociationSummary"} + }, + "AssistantData":{ + "type":"structure", + "required":[ + "assistantArn", + "assistantId", + "name", + "status", + "type" + ], + "members":{ + "assistantArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the Wisdom assistant

" + }, + "assistantId":{ + "shape":"Uuid", + "documentation":"

The identifier of the Wisdom assistant.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name.

" + }, + "serverSideEncryptionConfiguration":{ + "shape":"ServerSideEncryptionConfiguration", + "documentation":"

The KMS key used for encryption.

" + }, + "status":{ + "shape":"AssistantStatus", + "documentation":"

The status of the assistant.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + }, + "type":{ + "shape":"AssistantType", + "documentation":"

The type of assistant.

" + } + }, + "documentation":"

The assistant data.

" + }, + "AssistantList":{ + "type":"list", + "member":{"shape":"AssistantSummary"} + }, + "AssistantStatus":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "CREATE_FAILED", + "ACTIVE", + "DELETE_IN_PROGRESS", + "DELETE_FAILED", + "DELETED" + ] + }, + "AssistantSummary":{ + "type":"structure", + "required":[ + "assistantArn", + "assistantId", + "name", + "status", + "type" + ], + "members":{ + "assistantArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the Wisdom assistant

" + }, + "assistantId":{ + "shape":"Uuid", + "documentation":"

The identifier of the Wisdom assistant.

" + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the assistant.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the assistant.

" + }, + "serverSideEncryptionConfiguration":{ + "shape":"ServerSideEncryptionConfiguration", + "documentation":"

The KMS key used for encryption.

" + }, + "status":{ + "shape":"AssistantStatus", + "documentation":"

The status of the assistant.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + }, + "type":{ + "shape":"AssistantType", + "documentation":"

The type of the assistant.

" + } + }, + "documentation":"

Summary information about the assistant.

" + }, + "AssistantType":{ + "type":"string", + "enum":["AGENT"] + }, + "AssociationType":{ + "type":"string", + "enum":["KNOWLEDGE_BASE"] + }, + "Boolean":{ + "type":"boolean", + "box":true + }, + "ClientToken":{ + "type":"string", + "max":4096, + "min":1 + }, + "ConflictException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The request could not be processed because of conflict in the current state of the resource. For example, if you're using a Create API (such as CreateAssistant) that accepts name, a conflicting resource (usually with the same name) is being created or mutated.

", + "error":{ + "httpStatusCode":409, + "senderFault":true + }, + "exception":true + }, + "ContentData":{ + "type":"structure", + "required":[ + "contentArn", + "contentId", + "contentType", + "knowledgeBaseArn", + "knowledgeBaseId", + "metadata", + "name", + "revisionId", + "status", + "title", + "url", + "urlExpiry" + ], + "members":{ + "contentArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the content.

" + }, + "contentId":{ + "shape":"Uuid", + "documentation":"

The identifier of the content.

" + }, + "contentType":{ + "shape":"ContentType", + "documentation":"

The media type of the content.

" + }, + "knowledgeBaseArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the knowledge base.

" + }, + "knowledgeBaseId":{ + "shape":"Uuid", + "documentation":"

The identifier of the knowledge base.

" + }, + "linkOutUri":{ + "shape":"Uri", + "documentation":"

The URI of the content.

" + }, + "metadata":{ + "shape":"ContentMetadata", + "documentation":"

A key/value map to store attributes without affecting tagging or recommendations. For example, when synchronizing data between an external system and Wisdom, you can store an external version identifier as metadata to utilize for determining drift.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the content.

" + }, + "revisionId":{ + "shape":"NonEmptyString", + "documentation":"

The identifier of the content revision.

" + }, + "status":{ + "shape":"ContentStatus", + "documentation":"

The status of the content.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + }, + "title":{ + "shape":"ContentTitle", + "documentation":"

The title of the content.

" + }, + "url":{ + "shape":"SyntheticContentDataUrl", + "documentation":"

The URL of the content.

" + }, + "urlExpiry":{ + "shape":"SyntheticTimestamp_epoch_seconds", + "documentation":"

The expiration time of the URL as an epoch timestamp.

" + } + }, + "documentation":"

Information about the content.

" + }, + "ContentMetadata":{ + "type":"map", + "key":{"shape":"NonEmptyString"}, + "value":{"shape":"NonEmptyString"} + }, + "ContentReference":{ + "type":"structure", + "members":{ + "contentArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the content.

" + }, + "contentId":{ + "shape":"Uuid", + "documentation":"

The identifier of the content.

" + }, + "knowledgeBaseArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the knowledge base.

" + }, + "knowledgeBaseId":{ + "shape":"Uuid", + "documentation":"

The identifier of the knowledge base.

" + } + }, + "documentation":"

Reference information about the content.

" + }, + "ContentStatus":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "CREATE_FAILED", + "ACTIVE", + "DELETE_IN_PROGRESS", + "DELETE_FAILED", + "DELETED", + "UPDATE_FAILED" + ] + }, + "ContentSummary":{ + "type":"structure", + "required":[ + "contentArn", + "contentId", + "contentType", + "knowledgeBaseArn", + "knowledgeBaseId", + "metadata", + "name", + "revisionId", + "status", + "title" + ], + "members":{ + "contentArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the content.

" + }, + "contentId":{ + "shape":"Uuid", + "documentation":"

The identifier of the content.

" + }, + "contentType":{ + "shape":"ContentType", + "documentation":"

The media type of the content.

" + }, + "knowledgeBaseArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the knowledge base.

" + }, + "knowledgeBaseId":{ + "shape":"Uuid", + "documentation":"

The identifier of the knowledge base.

" + }, + "metadata":{ + "shape":"ContentMetadata", + "documentation":"

A key/value map to store attributes without affecting tagging or recommendations. For example, when synchronizing data between an external system and Wisdom, you can store an external version identifier as metadata to utilize for determining drift.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the content.

" + }, + "revisionId":{ + "shape":"NonEmptyString", + "documentation":"

The identifier of the revision of the content.

" + }, + "status":{ + "shape":"ContentStatus", + "documentation":"

The status of the content.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + }, + "title":{ + "shape":"ContentTitle", + "documentation":"

The title of the content.

" + } + }, + "documentation":"

Summary information about the content.

" + }, + "ContentSummaryList":{ + "type":"list", + "member":{"shape":"ContentSummary"} + }, + "ContentTitle":{ + "type":"string", + "max":255, + "min":1 + }, + "ContentType":{ + "type":"string", + "pattern":"^(text/(plain|html))|(application/x\\.wisdom-json;source=(salesforce|servicenow))$" + }, + "CreateAssistantAssociationRequest":{ + "type":"structure", + "required":[ + "assistantId", + "association", + "associationType" + ], + "members":{ + "assistantId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the Wisdom assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"assistantId" + }, + "association":{ + "shape":"AssistantAssociationInputData", + "documentation":"

The identifier of the associated resource.

" + }, + "associationType":{ + "shape":"AssociationType", + "documentation":"

The type of association.

" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "CreateAssistantAssociationResponse":{ + "type":"structure", + "members":{ + "assistantAssociation":{ + "shape":"AssistantAssociationData", + "documentation":"

The assistant association.

" + } + } + }, + "CreateAssistantRequest":{ + "type":"structure", + "required":[ + "name", + "type" + ], + "members":{ + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "description":{ + "shape":"Description", + "documentation":"

The description of the assistant.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the assistant.

" + }, + "serverSideEncryptionConfiguration":{ + "shape":"ServerSideEncryptionConfiguration", + "documentation":"

The KMS key used for encryption.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + }, + "type":{ + "shape":"AssistantType", + "documentation":"

The type of assistant.

" + } + } + }, + "CreateAssistantResponse":{ + "type":"structure", + "members":{ + "assistant":{ + "shape":"AssistantData", + "documentation":"

Information about the assistant.

" + } + } + }, + "CreateContentRequest":{ + "type":"structure", + "required":[ + "knowledgeBaseId", + "name", + "uploadId" + ], + "members":{ + "clientToken":{ + "shape":"NonEmptyString", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"knowledgeBaseId" + }, + "metadata":{ + "shape":"ContentMetadata", + "documentation":"

A key/value map to store attributes without affecting tagging or recommendations. For example, when synchronizing data between an external system and Wisdom, you can store an external version identifier as metadata to utilize for determining drift.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the content. Each piece of content in a knowledge base must have a unique name. You can retrieve a piece of content using only its knowledge base and its name with the SearchContent API.

" + }, + "overrideLinkOutUri":{ + "shape":"Uri", + "documentation":"

The URI you want to use for the article. If the knowledge base has a templateUri, setting this argument overrides it for this piece of content.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + }, + "title":{ + "shape":"ContentTitle", + "documentation":"

The title of the content. If not set, the title is equal to the name.

" + }, + "uploadId":{ + "shape":"NonEmptyString", + "documentation":"

A pointer to the uploaded asset. This value is returned by StartContentUpload.

" + } + } + }, + "CreateContentResponse":{ + "type":"structure", + "members":{ + "content":{ + "shape":"ContentData", + "documentation":"

The content.

" + } + } + }, + "CreateKnowledgeBaseRequest":{ + "type":"structure", + "required":[ + "knowledgeBaseType", + "name" + ], + "members":{ + "clientToken":{ + "shape":"NonEmptyString", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "description":{ + "shape":"Description", + "documentation":"

The description.

" + }, + "knowledgeBaseType":{ + "shape":"KnowledgeBaseType", + "documentation":"

The type of knowledge base. Only CUSTOM knowledge bases allow you to upload your own content. EXTERNAL knowledge bases support integrations with third-party systems whose content is synchronized automatically.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the knowledge base.

" + }, + "renderingConfiguration":{ + "shape":"RenderingConfiguration", + "documentation":"

Information about how to render the content.

" + }, + "serverSideEncryptionConfiguration":{ + "shape":"ServerSideEncryptionConfiguration", + "documentation":"

The KMS key used for encryption.

" + }, + "sourceConfiguration":{ + "shape":"SourceConfiguration", + "documentation":"

The source of the knowledge base content. Only set this argument for EXTERNAL knowledge bases.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "CreateKnowledgeBaseResponse":{ + "type":"structure", + "members":{ + "knowledgeBase":{ + "shape":"KnowledgeBaseData", + "documentation":"

The knowledge base.

" + } + } + }, + "CreateSessionRequest":{ + "type":"structure", + "required":[ + "assistantId", + "name" + ], + "members":{ + "assistantId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the Wisdom assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"assistantId" + }, + "clientToken":{ + "shape":"ClientToken", + "documentation":"

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request.

", + "idempotencyToken":true + }, + "description":{ + "shape":"Description", + "documentation":"

The description.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the session.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "CreateSessionResponse":{ + "type":"structure", + "members":{ + "session":{ + "shape":"SessionData", + "documentation":"

The session.

" + } + } + }, + "DeleteAssistantAssociationRequest":{ + "type":"structure", + "required":[ + "assistantAssociationId", + "assistantId" + ], + "members":{ + "assistantAssociationId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the assistant association. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"assistantAssociationId" + }, + "assistantId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the Wisdom assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"assistantId" + } + } + }, + "DeleteAssistantAssociationResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteAssistantRequest":{ + "type":"structure", + "required":["assistantId"], + "members":{ + "assistantId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the Wisdom assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"assistantId" + } + } + }, + "DeleteAssistantResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteContentRequest":{ + "type":"structure", + "required":[ + "contentId", + "knowledgeBaseId" + ], + "members":{ + "contentId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the content. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"contentId" + }, + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"knowledgeBaseId" + } + } + }, + "DeleteContentResponse":{ + "type":"structure", + "members":{ + } + }, + "DeleteKnowledgeBaseRequest":{ + "type":"structure", + "required":["knowledgeBaseId"], + "members":{ + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The knowledge base to delete content from. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"knowledgeBaseId" + } + } + }, + "DeleteKnowledgeBaseResponse":{ + "type":"structure", + "members":{ + } + }, + "Description":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9\\s_.,-]+" + }, + "Document":{ + "type":"structure", + "required":["contentReference"], + "members":{ + "contentReference":{ + "shape":"ContentReference", + "documentation":"

A reference to the content resource.

" + }, + "excerpt":{ + "shape":"DocumentText", + "documentation":"

The excerpt from the document.

" + }, + "title":{ + "shape":"DocumentText", + "documentation":"

The title of the document.

" + } + }, + "documentation":"

The document.

" + }, + "DocumentText":{ + "type":"structure", + "members":{ + "highlights":{ + "shape":"Highlights", + "documentation":"

Highlights in the document text.

" + }, + "text":{ + "shape":"SyntheticDocumentTextString", + "documentation":"

Text in the document.

" + } + }, + "documentation":"

The text of the document.

" + }, + "Filter":{ + "type":"structure", + "required":[ + "field", + "operator", + "value" + ], + "members":{ + "field":{ + "shape":"FilterField", + "documentation":"

The field on which to filter.

" + }, + "operator":{ + "shape":"FilterOperator", + "documentation":"

The operator to use for comparing the field’s value with the provided value.

" + }, + "value":{ + "shape":"NonEmptyString", + "documentation":"

The desired field value on which to filter.

" + } + }, + "documentation":"

A search filter.

" + }, + "FilterField":{ + "type":"string", + "enum":["NAME"] + }, + "FilterList":{ + "type":"list", + "member":{"shape":"Filter"} + }, + "FilterOperator":{ + "type":"string", + "enum":["EQUALS"] + }, + "GenericArn":{ + "type":"string", + "max":2048, + "min":1, + "pattern":"^arn:[a-z-]+?:[a-z-]+?:[a-z0-9-]*?:([0-9]{12})?:[a-zA-Z0-9-:/]+$" + }, + "GetAssistantAssociationRequest":{ + "type":"structure", + "required":[ + "assistantAssociationId", + "assistantId" + ], + "members":{ + "assistantAssociationId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the assistant association. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"assistantAssociationId" + }, + "assistantId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the Wisdom assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"assistantId" + } + } + }, + "GetAssistantAssociationResponse":{ + "type":"structure", + "members":{ + "assistantAssociation":{ + "shape":"AssistantAssociationData", + "documentation":"

The assistant association.

" + } + } + }, + "GetAssistantRequest":{ + "type":"structure", + "required":["assistantId"], + "members":{ + "assistantId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the Wisdom assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"assistantId" + } + } + }, + "GetAssistantResponse":{ + "type":"structure", + "members":{ + "assistant":{ + "shape":"AssistantData", + "documentation":"

Information about the assistant.

" + } + } + }, + "GetContentRequest":{ + "type":"structure", + "required":[ + "contentId", + "knowledgeBaseId" + ], + "members":{ + "contentId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the content. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"contentId" + }, + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"knowledgeBaseId" + } + } + }, + "GetContentResponse":{ + "type":"structure", + "members":{ + "content":{ + "shape":"ContentData", + "documentation":"

The content.

" + } + } + }, + "GetContentSummaryRequest":{ + "type":"structure", + "required":[ + "contentId", + "knowledgeBaseId" + ], + "members":{ + "contentId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the content. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"contentId" + }, + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"knowledgeBaseId" + } + } + }, + "GetContentSummaryResponse":{ + "type":"structure", + "members":{ + "contentSummary":{ + "shape":"ContentSummary", + "documentation":"

The content summary.

" + } + } + }, + "GetKnowledgeBaseRequest":{ + "type":"structure", + "required":["knowledgeBaseId"], + "members":{ + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"knowledgeBaseId" + } + } + }, + "GetKnowledgeBaseResponse":{ + "type":"structure", + "members":{ + "knowledgeBase":{ + "shape":"KnowledgeBaseData", + "documentation":"

The knowledge base.

" + } + } + }, + "GetRecommendationsRequest":{ + "type":"structure", + "required":[ + "assistantId", + "sessionId" + ], + "members":{ + "assistantId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the Wisdom assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"assistantId" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"maxResults" + }, + "sessionId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the session. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"sessionId" + }, + "waitTimeSeconds":{ + "shape":"WaitTimeSeconds", + "documentation":"

The duration (in seconds) for which the call waits for a recommendation to be made available before returning. If a recommendation is available, the call returns sooner than WaitTimeSeconds. If no messages are available and the wait time expires, the call returns successfully with an empty list.

", + "location":"querystring", + "locationName":"waitTimeSeconds" + } + } + }, + "GetRecommendationsResponse":{ + "type":"structure", + "required":["recommendations"], + "members":{ + "recommendations":{ + "shape":"RecommendationList", + "documentation":"

The recommendations.

" + } + } + }, + "GetSessionRequest":{ + "type":"structure", + "required":[ + "assistantId", + "sessionId" + ], + "members":{ + "assistantId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the Wisdom assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"assistantId" + }, + "sessionId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the session. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"sessionId" + } + } + }, + "GetSessionResponse":{ + "type":"structure", + "members":{ + "session":{ + "shape":"SessionData", + "documentation":"

The session.

" + } + } + }, + "Headers":{ + "type":"map", + "key":{"shape":"NonEmptyString"}, + "value":{"shape":"NonEmptyString"} + }, + "Highlight":{ + "type":"structure", + "members":{ + "beginOffsetInclusive":{ + "shape":"HighlightOffset", + "documentation":"

The offset for the start of the highlight.

" + }, + "endOffsetExclusive":{ + "shape":"HighlightOffset", + "documentation":"

The offset for the end of the highlight.

" + } + }, + "documentation":"

Offset specification to describe highlighting of document excerpts for rendering search results and recommendations.

" + }, + "HighlightOffset":{"type":"integer"}, + "Highlights":{ + "type":"list", + "member":{"shape":"Highlight"} + }, + "KnowledgeBaseAssociationData":{ + "type":"structure", + "members":{ + "knowledgeBaseArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the knowledge base.

" + }, + "knowledgeBaseId":{ + "shape":"Uuid", + "documentation":"

The identifier of the knowledge base.

" + } + }, + "documentation":"

Association information about the knowledge base.

" + }, + "KnowledgeBaseData":{ + "type":"structure", + "required":[ + "knowledgeBaseArn", + "knowledgeBaseId", + "knowledgeBaseType", + "name", + "status" + ], + "members":{ + "description":{ + "shape":"Description", + "documentation":"

The description.

" + }, + "knowledgeBaseArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the knowledge base.

" + }, + "knowledgeBaseId":{ + "shape":"Uuid", + "documentation":"

The identifier of the knowledge base.

" + }, + "knowledgeBaseType":{ + "shape":"KnowledgeBaseType", + "documentation":"

The type of knowledge base.

" + }, + "lastContentModificationTime":{ + "shape":"SyntheticTimestamp_epoch_seconds", + "documentation":"

An epoch timestamp indicating the most recent content modification inside the knowledge base. If no content exists in a knowledge base, this value is unset.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the knowledge base.

" + }, + "renderingConfiguration":{ + "shape":"RenderingConfiguration", + "documentation":"

Information about how to render the content.

" + }, + "serverSideEncryptionConfiguration":{ + "shape":"ServerSideEncryptionConfiguration", + "documentation":"

The KMS key used for encryption.

" + }, + "sourceConfiguration":{ + "shape":"SourceConfiguration", + "documentation":"

Source configuration information about the knowledge base.

" + }, + "status":{ + "shape":"KnowledgeBaseStatus", + "documentation":"

The status of the knowledge base.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + }, + "documentation":"

Information about the knowledge base.

" + }, + "KnowledgeBaseList":{ + "type":"list", + "member":{"shape":"KnowledgeBaseSummary"} + }, + "KnowledgeBaseStatus":{ + "type":"string", + "enum":[ + "CREATE_IN_PROGRESS", + "CREATE_FAILED", + "ACTIVE", + "DELETE_IN_PROGRESS", + "DELETE_FAILED", + "DELETED" + ] + }, + "KnowledgeBaseSummary":{ + "type":"structure", + "required":[ + "knowledgeBaseArn", + "knowledgeBaseId", + "knowledgeBaseType", + "name", + "status" + ], + "members":{ + "description":{ + "shape":"Description", + "documentation":"

The description of the knowledge base.

" + }, + "knowledgeBaseArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the knowledge base.

" + }, + "knowledgeBaseId":{ + "shape":"Uuid", + "documentation":"

The identifier of the knowledge base.

" + }, + "knowledgeBaseType":{ + "shape":"KnowledgeBaseType", + "documentation":"

The type of knowledge base.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the knowledge base.

" + }, + "renderingConfiguration":{ + "shape":"RenderingConfiguration", + "documentation":"

Information about how to render the content.

" + }, + "serverSideEncryptionConfiguration":{ + "shape":"ServerSideEncryptionConfiguration", + "documentation":"

The KMS key used for encryption.

" + }, + "sourceConfiguration":{ + "shape":"SourceConfiguration", + "documentation":"

Source configuration information about the knowledge base.

" + }, + "status":{ + "shape":"KnowledgeBaseStatus", + "documentation":"

The status of the knowledge base summary.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + }, + "documentation":"

Summary information about the knowledge base.

" + }, + "KnowledgeBaseType":{ + "type":"string", + "enum":[ + "EXTERNAL", + "CUSTOM" + ] + }, + "ListAssistantAssociationsRequest":{ + "type":"structure", + "required":["assistantId"], + "members":{ + "assistantId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the Wisdom assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"assistantId" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListAssistantAssociationsResponse":{ + "type":"structure", + "required":["assistantAssociationSummaries"], + "members":{ + "assistantAssociationSummaries":{ + "shape":"AssistantAssociationSummaryList", + "documentation":"

Summary information about assistant associations.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListAssistantsRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListAssistantsResponse":{ + "type":"structure", + "required":["assistantSummaries"], + "members":{ + "assistantSummaries":{ + "shape":"AssistantList", + "documentation":"

Information about the assistants.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListContentsRequest":{ + "type":"structure", + "required":["knowledgeBaseId"], + "members":{ + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"knowledgeBaseId" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListContentsResponse":{ + "type":"structure", + "required":["contentSummaries"], + "members":{ + "contentSummaries":{ + "shape":"ContentSummaryList", + "documentation":"

Information about the content.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListKnowledgeBasesRequest":{ + "type":"structure", + "members":{ + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NonEmptyString", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + } + } + }, + "ListKnowledgeBasesResponse":{ + "type":"structure", + "required":["knowledgeBaseSummaries"], + "members":{ + "knowledgeBaseSummaries":{ + "shape":"KnowledgeBaseList", + "documentation":"

Information about the knowledge bases.

" + }, + "nextToken":{ + "shape":"NonEmptyString", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "ListTagsForResourceRequest":{ + "type":"structure", + "required":["resourceArn"], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + } + } + }, + "ListTagsForResourceResponse":{ + "type":"structure", + "members":{ + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "box":true, + "max":100, + "min":1 + }, + "Name":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9\\s_.,-]+" + }, + "NextToken":{ + "type":"string", + "max":2048, + "min":1 + }, + "NonEmptyString":{ + "type":"string", + "max":4096, + "min":1 + }, + "NotifyRecommendationsReceivedError":{ + "type":"structure", + "members":{ + "message":{ + "shape":"NotifyRecommendationsReceivedErrorMessage", + "documentation":"

A recommendation is causing an error.

" + }, + "recommendationId":{ + "shape":"String", + "documentation":"

The identifier of the recommendation that is in error.

" + } + }, + "documentation":"

An error occurred when creating a recommendation.

" + }, + "NotifyRecommendationsReceivedErrorList":{ + "type":"list", + "member":{"shape":"NotifyRecommendationsReceivedError"} + }, + "NotifyRecommendationsReceivedErrorMessage":{"type":"string"}, + "NotifyRecommendationsReceivedRequest":{ + "type":"structure", + "required":[ + "assistantId", + "recommendationIds", + "sessionId" + ], + "members":{ + "assistantId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the Wisdom assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"assistantId" + }, + "recommendationIds":{ + "shape":"RecommendationIdList", + "documentation":"

The identifiers of the recommendations.

" + }, + "sessionId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the session. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"sessionId" + } + } + }, + "NotifyRecommendationsReceivedResponse":{ + "type":"structure", + "members":{ + "errors":{ + "shape":"NotifyRecommendationsReceivedErrorList", + "documentation":"

The identifiers of recommendations that are causing errors.

" + }, + "recommendationIds":{ + "shape":"RecommendationIdList", + "documentation":"

The identifiers of the recommendations.

" + } + } + }, + "ObjectFieldsList":{ + "type":"list", + "member":{"shape":"NonEmptyString"}, + "max":100, + "min":1 + }, + "PreconditionFailedException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The provided revisionId does not match, indicating the content has been modified since it was last read.

", + "error":{ + "httpStatusCode":412, + "senderFault":true + }, + "exception":true + }, + "QueryAssistantRequest":{ + "type":"structure", + "required":[ + "assistantId", + "queryText" + ], + "members":{ + "assistantId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the Wisdom assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"assistantId" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per page.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

" + }, + "queryText":{ + "shape":"QueryText", + "documentation":"

The text to search for.

" + } + } + }, + "QueryAssistantResponse":{ + "type":"structure", + "required":["results"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + }, + "results":{ + "shape":"QueryResultsList", + "documentation":"

The results of the query.

" + } + } + }, + "QueryResultsList":{ + "type":"list", + "member":{"shape":"ResultData"} + }, + "QueryText":{ + "type":"string", + "sensitive":true + }, + "RecommendationData":{ + "type":"structure", + "required":[ + "document", + "recommendationId" + ], + "members":{ + "document":{ + "shape":"Document", + "documentation":"

The recommended document.

" + }, + "recommendationId":{ + "shape":"String", + "documentation":"

The identifier of the recommendation.

" + }, + "relevanceLevel":{ + "shape":"RelevanceLevel", + "documentation":"

The relevance level of the recommendation.

" + }, + "relevanceScore":{ + "shape":"RelevanceScore", + "documentation":"

The relevance score of the recommendation.

" + } + }, + "documentation":"

Information about the recommendation.

" + }, + "RecommendationIdList":{ + "type":"list", + "member":{"shape":"String"} + }, + "RecommendationList":{ + "type":"list", + "member":{"shape":"RecommendationData"} + }, + "RelevanceLevel":{ + "type":"string", + "enum":[ + "HIGH", + "MEDIUM", + "LOW" + ] + }, + "RelevanceScore":{ + "type":"double", + "min":0.0 + }, + "RemoveKnowledgeBaseTemplateUriRequest":{ + "type":"structure", + "required":["knowledgeBaseId"], + "members":{ + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"knowledgeBaseId" + } + } + }, + "RemoveKnowledgeBaseTemplateUriResponse":{ + "type":"structure", + "members":{ + } + }, + "RenderingConfiguration":{ + "type":"structure", + "members":{ + "templateUri":{ + "shape":"Uri", + "documentation":"

A URI template containing exactly one variable in ${variableName} format. This can only be set for EXTERNAL knowledge bases. For Salesforce and ServiceNow, the variable must be one of the following:

  • Salesforce: Id, ArticleNumber, VersionNumber, Title, PublishStatus, or IsDeleted

  • ServiceNow: number, short_description, sys_mod_count, workflow_state, or active

 <p>The variable is replaced with the actual value for a piece of content when calling <a href="https://docs.aws.amazon.com/wisdom/latest/APIReference/API_GetContent.html">GetContent</a>. </p> 
" + } + }, + "documentation":"

Information about how to render the content.

" + }, + "ResourceNotFoundException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"}, + "resourceName":{ + "shape":"String", + "documentation":"

The specified resource name.

" + } + }, + "documentation":"

The specified resource does not exist.

", + "error":{ + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "ResultData":{ + "type":"structure", + "required":[ + "document", + "resultId" + ], + "members":{ + "document":{ + "shape":"Document", + "documentation":"

The document.

" + }, + "relevanceScore":{ + "shape":"RelevanceScore", + "documentation":"

The relevance score of the results.

" + }, + "resultId":{ + "shape":"Uuid", + "documentation":"

The identifier of the result data.

" + } + }, + "documentation":"

Information about the result.

" + }, + "SearchContentRequest":{ + "type":"structure", + "required":[ + "knowledgeBaseId", + "searchExpression" + ], + "members":{ + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"knowledgeBaseId" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "searchExpression":{ + "shape":"SearchExpression", + "documentation":"

The search expression to filter results.

" + } + } + }, + "SearchContentResponse":{ + "type":"structure", + "required":["contentSummaries"], + "members":{ + "contentSummaries":{ + "shape":"ContentSummaryList", + "documentation":"

Summary information about the content.

" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + } + } + }, + "SearchExpression":{ + "type":"structure", + "required":["filters"], + "members":{ + "filters":{ + "shape":"FilterList", + "documentation":"

The search expression filters.

" + } + }, + "documentation":"

The search expression.

" + }, + "SearchSessionsRequest":{ + "type":"structure", + "required":[ + "assistantId", + "searchExpression" + ], + "members":{ + "assistantId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the Wisdom assistant. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"assistantId" + }, + "maxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return per page.

", + "location":"querystring", + "locationName":"maxResults" + }, + "nextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results. Use the value returned in the previous response in the next request to retrieve the next set of results.

", + "location":"querystring", + "locationName":"nextToken" + }, + "searchExpression":{ + "shape":"SearchExpression", + "documentation":"

The search expression to filter results.

" + } + } + }, + "SearchSessionsResponse":{ + "type":"structure", + "required":["sessionSummaries"], + "members":{ + "nextToken":{ + "shape":"NextToken", + "documentation":"

If there are additional results, this is the token for the next set of results.

" + }, + "sessionSummaries":{ + "shape":"SessionSummaries", + "documentation":"

Summary information about the sessions.

" + } + } + }, + "ServerSideEncryptionConfiguration":{ + "type":"structure", + "members":{ + "kmsKeyId":{ + "shape":"NonEmptyString", + "documentation":"

The KMS key. For information about valid ID values, see Key identifiers (KeyId) in the AWS Key Management Service Developer Guide.

" + } + }, + "documentation":"

The KMS key used for encryption.

" + }, + "ServiceQuotaExceededException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

You've exceeded your service quota. To perform the requested action, remove some of the relevant resources, or use service quotas to request a service quota increase.

", + "error":{ + "httpStatusCode":402, + "senderFault":true + }, + "exception":true + }, + "SessionData":{ + "type":"structure", + "required":[ + "name", + "sessionArn", + "sessionId" + ], + "members":{ + "description":{ + "shape":"Description", + "documentation":"

The description of the session.

" + }, + "name":{ + "shape":"Name", + "documentation":"

The name of the session.

" + }, + "sessionArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the session.

" + }, + "sessionId":{ + "shape":"Uuid", + "documentation":"

The identifier of the session.

" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + }, + "documentation":"

Information about the session.

" + }, + "SessionSummaries":{ + "type":"list", + "member":{"shape":"SessionSummary"} + }, + "SessionSummary":{ + "type":"structure", + "required":[ + "assistantArn", + "assistantId", + "sessionArn", + "sessionId" + ], + "members":{ + "assistantArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the Wisdom assistant.

" + }, + "assistantId":{ + "shape":"Uuid", + "documentation":"

The identifier of the Wisdom assistant.

" + }, + "sessionArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the session.

" + }, + "sessionId":{ + "shape":"Uuid", + "documentation":"

The identifier of the session.

" + } + }, + "documentation":"

Summary information about the session.

" + }, + "SourceConfiguration":{ + "type":"structure", + "members":{ + "appIntegrations":{ + "shape":"AppIntegrationsConfiguration", + "documentation":"

Configuration information for Amazon AppIntegrations to automatically ingest content.

" + } + }, + "documentation":"

Configuration information about the external data source.

", + "union":true + }, + "StartContentUploadRequest":{ + "type":"structure", + "required":[ + "contentType", + "knowledgeBaseId" + ], + "members":{ + "contentType":{ + "shape":"ContentType", + "documentation":"

The type of content to upload.

" + }, + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"knowledgeBaseId" + } + } + }, + "StartContentUploadResponse":{ + "type":"structure", + "required":[ + "headersToInclude", + "uploadId", + "url", + "urlExpiry" + ], + "members":{ + "headersToInclude":{ + "shape":"Headers", + "documentation":"

The headers to include in the upload.

" + }, + "uploadId":{ + "shape":"NonEmptyString", + "documentation":"

The identifier of the upload.

" + }, + "url":{ + "shape":"SyntheticStartContentUploadResponseUrl", + "documentation":"

The URL of the upload.

" + }, + "urlExpiry":{ + "shape":"SyntheticTimestamp_epoch_seconds", + "documentation":"

The expiration time of the URL as an epoch timestamp.

" + } + } + }, + "String":{"type":"string"}, + "SyntheticContentDataUrl":{ + "type":"string", + "max":4096, + "min":1, + "sensitive":true + }, + "SyntheticDocumentTextString":{ + "type":"string", + "sensitive":true + }, + "SyntheticStartContentUploadResponseUrl":{ + "type":"string", + "max":4096, + "min":1, + "sensitive":true + }, + "SyntheticTimestamp_epoch_seconds":{ + "type":"timestamp", + "timestampFormat":"unixTimestamp" + }, + "TagKey":{ + "type":"string", + "max":128, + "min":1, + "pattern":"^(?!aws:)[a-zA-Z+-=._:/]+$" + }, + "TagKeyList":{ + "type":"list", + "member":{"shape":"TagKey"}, + "max":50, + "min":1 + }, + "TagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tags" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tags":{ + "shape":"Tags", + "documentation":"

The tags used to organize, track, or control access for this resource.

" + } + } + }, + "TagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "TagValue":{ + "type":"string", + "max":256, + "min":1 + }, + "Tags":{ + "type":"map", + "key":{"shape":"TagKey"}, + "value":{"shape":"TagValue"} + }, + "TooManyTagsException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"}, + "resourceName":{ + "shape":"String", + "documentation":"

The specified resource name.

" + } + }, + "documentation":"

Amazon Connect Wisdom throws this exception if you have too many tags in your tag set.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "UntagResourceRequest":{ + "type":"structure", + "required":[ + "resourceArn", + "tagKeys" + ], + "members":{ + "resourceArn":{ + "shape":"Arn", + "documentation":"

The Amazon Resource Name (ARN) of the resource.

", + "location":"uri", + "locationName":"resourceArn" + }, + "tagKeys":{ + "shape":"TagKeyList", + "documentation":"

The tag keys.

", + "location":"querystring", + "locationName":"tagKeys" + } + } + }, + "UntagResourceResponse":{ + "type":"structure", + "members":{ + } + }, + "UpdateContentRequest":{ + "type":"structure", + "required":[ + "contentId", + "knowledgeBaseId" + ], + "members":{ + "contentId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the content. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"contentId" + }, + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base. Can be either the ID or the ARN.

", + "location":"uri", + "locationName":"knowledgeBaseId" + }, + "metadata":{ + "shape":"ContentMetadata", + "documentation":"

A key/value map to store attributes without affecting tagging or recommendations. For example, when synchronizing data between an external system and Wisdom, you can store an external version identifier as metadata to utilize for determining drift.

" + }, + "overrideLinkOutUri":{ + "shape":"Uri", + "documentation":"

The URI for the article. If the knowledge base has a templateUri, setting this argument overrides it for this piece of content. To remove an existing overrideLinkOutUri, exclude this argument and set removeOverrideLinkOutUri to true.

" + }, + "removeOverrideLinkOutUri":{ + "shape":"Boolean", + "documentation":"

Unset the existing overrideLinkOutUri if it exists.

" + }, + "revisionId":{ + "shape":"NonEmptyString", + "documentation":"

The revisionId of the content resource to update, taken from an earlier call to GetContent, GetContentSummary, SearchContent, or ListContents. If included, this argument acts as an optimistic lock to ensure content was not modified since it was last read. If it has been modified, this API throws a PreconditionFailedException.

" + }, + "title":{ + "shape":"ContentTitle", + "documentation":"

The title of the content.

" + }, + "uploadId":{ + "shape":"NonEmptyString", + "documentation":"

A pointer to the uploaded asset. This value is returned by StartContentUpload.

" + } + } + }, + "UpdateContentResponse":{ + "type":"structure", + "members":{ + "content":{ + "shape":"ContentData", + "documentation":"

The content.

" + } + } + }, + "UpdateKnowledgeBaseTemplateUriRequest":{ + "type":"structure", + "required":[ + "knowledgeBaseId", + "templateUri" + ], + "members":{ + "knowledgeBaseId":{ + "shape":"UuidOrArn", + "documentation":"

The identifier of the knowledge base. Can be either the ID or the ARN. URLs cannot contain the ARN.

", + "location":"uri", + "locationName":"knowledgeBaseId" + }, + "templateUri":{ + "shape":"Uri", + "documentation":"

The template URI to update.

" + } + } + }, + "UpdateKnowledgeBaseTemplateUriResponse":{ + "type":"structure", + "members":{ + "knowledgeBase":{ + "shape":"KnowledgeBaseData", + "documentation":"

The knowledge base to update.

" + } + } + }, + "Uri":{ + "type":"string", + "max":4096, + "min":1 + }, + "Uuid":{ + "type":"string", + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$" + }, + "UuidOrArn":{ + "type":"string", + "pattern":"^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$|^arn:[a-z-]*?:wisdom:[a-z0-9-]*?:[0-9]{12}:[a-z-]*?/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}(?:/[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12})?$" + }, + "ValidationException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"

The input fails to satisfy the constraints specified by an AWS service.

", + "error":{ + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "WaitTimeSeconds":{ + "type":"integer", + "max":20, + "min":0 + } + }, + "documentation":"

All Amazon Connect Wisdom functionality is accessible using the API. For example, you can create an assistant and a knowledge base.

 <p>Some more advanced features are only accessible using the Wisdom API. For example, you can manually manage content by uploading custom files and control their lifecycle. </p> 
" +} diff --git a/botocore/data/workmail/2017-10-01/service-2.json b/botocore/data/workmail/2017-10-01/service-2.json index a31989aa..f67426ef 100644 --- a/botocore/data/workmail/2017-10-01/service-2.json +++ b/botocore/data/workmail/2017-10-01/service-2.json @@ -257,6 +257,22 @@ "documentation":"

Deletes permissions granted to a member (user or group).

", "idempotent":true }, + "DeleteMobileDeviceAccessOverride":{ + "name":"DeleteMobileDeviceAccessOverride", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteMobileDeviceAccessOverrideRequest"}, + "output":{"shape":"DeleteMobileDeviceAccessOverrideResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OrganizationNotFoundException"}, + {"shape":"OrganizationStateException"}, + {"shape":"EntityNotFoundException"} + ], + "documentation":"

Deletes the mobile device access override for the given WorkMail organization, user, and device.

" + }, "DeleteMobileDeviceAccessRule":{ "name":"DeleteMobileDeviceAccessRule", "http":{ @@ -545,6 +561,23 @@ ], "documentation":"

Simulates the effect of the mobile device access rules for the given attributes of a sample access event. Use this method to test the effects of the current set of mobile device access rules for the Amazon WorkMail organization for a particular user's attributes.

" }, + "GetMobileDeviceAccessOverride":{ + "name":"GetMobileDeviceAccessOverride", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetMobileDeviceAccessOverrideRequest"}, + "output":{"shape":"GetMobileDeviceAccessOverrideResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OrganizationNotFoundException"}, + {"shape":"OrganizationStateException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Gets the mobile device access override for the given WorkMail organization, user, and device.

" + }, "ListAccessControlRules":{ "name":"ListAccessControlRules", "http":{ @@ -645,6 +678,22 @@ "documentation":"

Lists the mailbox permissions associated with a user, group, or resource mailbox.

", "idempotent":true }, + "ListMobileDeviceAccessOverrides":{ + "name":"ListMobileDeviceAccessOverrides", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListMobileDeviceAccessOverridesRequest"}, + "output":{"shape":"ListMobileDeviceAccessOverridesResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OrganizationNotFoundException"}, + {"shape":"OrganizationStateException"}, + {"shape":"EntityNotFoundException"} + ], + "documentation":"

Lists all the mobile device access overrides for any given combination of WorkMail organization, user, or device.

" + }, "ListMobileDeviceAccessRules":{ "name":"ListMobileDeviceAccessRules", "http":{ @@ -772,6 +821,23 @@ "documentation":"

Sets permissions for a user, group, or resource. This replaces any pre-existing permissions.

", "idempotent":true }, + "PutMobileDeviceAccessOverride":{ + "name":"PutMobileDeviceAccessOverride", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutMobileDeviceAccessOverrideRequest"}, + "output":{"shape":"PutMobileDeviceAccessOverrideResponse"}, + "errors":[ + {"shape":"InvalidParameterException"}, + {"shape":"OrganizationNotFoundException"}, + {"shape":"OrganizationStateException"}, + {"shape":"EntityNotFoundException"}, + {"shape":"EntityStateException"} + ], + "documentation":"

Creates or updates a mobile device access override for the given WorkMail organization, user, and device.

" + }, "PutRetentionPolicy":{ "name":"PutRetentionPolicy", "http":{ @@ -1516,6 +1582,33 @@ "members":{ } }, + "DeleteMobileDeviceAccessOverrideRequest":{ + "type":"structure", + "required":[ + "OrganizationId", + "UserId", + "DeviceId" + ], + "members":{ + "OrganizationId":{ + "shape":"OrganizationId", + "documentation":"

The Amazon WorkMail organization for which the access override will be deleted.

" + }, + "UserId":{ + "shape":"EntityIdentifier", + "documentation":"

The WorkMail user for which you want to delete the override. Accepts the following types of user identities:

  • User ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234

  • Email address: user@domain.tld

  • User name: user

" + }, + "DeviceId":{ + "shape":"DeviceId", + "documentation":"

The mobile device for which you delete the override. DeviceId is case insensitive.

" + } + } + }, + "DeleteMobileDeviceAccessOverrideResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteMobileDeviceAccessRuleRequest":{ "type":"structure", "required":[ @@ -1942,6 +2035,12 @@ "min":0, "pattern":"[\\S\\s]*" }, + "DeviceId":{ + "type":"string", + "max":32, + "min":1, + "pattern":"[A-Za-z0-9]+" + }, "DeviceModel":{ "type":"string", "max":256, @@ -2122,6 +2221,12 @@ "documentation":"

The user, group, or resource that you're trying to register is already registered.

", "exception":true }, + "EntityIdentifier":{ + "type":"string", + "max":256, + "min":1, + "pattern":"[a-zA-Z0-9._%+@-]+" + }, "EntityNotFoundException":{ "type":"structure", "members":{ @@ -2322,6 +2427,57 @@ } } }, + "GetMobileDeviceAccessOverrideRequest":{ + "type":"structure", + "required":[ + "OrganizationId", + "UserId", + "DeviceId" + ], + "members":{ + "OrganizationId":{ + "shape":"OrganizationId", + "documentation":"

The Amazon WorkMail organization to which you want to apply the override.

" + }, + "UserId":{ + "shape":"EntityIdentifier", + "documentation":"

Identifies the WorkMail user for the override. Accepts the following types of user identities:

  • User ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234

  • Email address: user@domain.tld

  • User name: user

" + }, + "DeviceId":{ + "shape":"DeviceId", + "documentation":"

The mobile device to which the override applies. DeviceId is case insensitive.

" + } + } + }, + "GetMobileDeviceAccessOverrideResponse":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"WorkMailIdentifier", + "documentation":"

The WorkMail user to which the access override applies.

" + }, + "DeviceId":{ + "shape":"DeviceId", + "documentation":"

The device to which the access override applies.

" + }, + "Effect":{ + "shape":"MobileDeviceAccessRuleEffect", + "documentation":"

The effect of the override, ALLOW or DENY.

" + }, + "Description":{ + "shape":"MobileDeviceAccessRuleDescription", + "documentation":"

A description of the override.

" + }, + "DateCreated":{ + "shape":"Timestamp", + "documentation":"

The date the override was first created.

" + }, + "DateModified":{ + "shape":"Timestamp", + "documentation":"

The date the override was last modified.

" + } + } + }, "Group":{ "type":"structure", "members":{ @@ -2629,6 +2785,45 @@ } } }, + "ListMobileDeviceAccessOverridesRequest":{ + "type":"structure", + "required":["OrganizationId"], + "members":{ + "OrganizationId":{ + "shape":"OrganizationId", + "documentation":"

The Amazon WorkMail organization under which to list mobile device access overrides.

" + }, + "UserId":{ + "shape":"EntityIdentifier", + "documentation":"

The WorkMail user under which you list the mobile device access overrides. Accepts the following types of user identities:

  • User ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234

  • Email address: user@domain.tld

  • User name: user

" + }, + "DeviceId":{ + "shape":"DeviceId", + "documentation":"

The mobile device to which the access override applies.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. The first call does not require a token.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of results to return in a single call.

" + } + } + }, + "ListMobileDeviceAccessOverridesResponse":{ + "type":"structure", + "members":{ + "Overrides":{ + "shape":"MobileDeviceAccessOverridesList", + "documentation":"

The list of mobile device access overrides that exist for the specified Amazon WorkMail organization and user.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token to use to retrieve the next page of results. The value is “null” when there are no more results to return.

" + } + } + }, "ListMobileDeviceAccessRulesRequest":{ "type":"structure", "required":["OrganizationId"], @@ -2948,6 +3143,40 @@ "max":10, "min":0 }, + "MobileDeviceAccessOverride":{ + "type":"structure", + "members":{ + "UserId":{ + "shape":"WorkMailIdentifier", + "documentation":"

The WorkMail user to which the access override applies.

" + }, + "DeviceId":{ + "shape":"DeviceId", + "documentation":"

The device to which the override applies.

" + }, + "Effect":{ + "shape":"MobileDeviceAccessRuleEffect", + "documentation":"

The effect of the override, ALLOW or DENY.

" + }, + "Description":{ + "shape":"MobileDeviceAccessRuleDescription", + "documentation":"

A description of the override.

" + }, + "DateCreated":{ + "shape":"Timestamp", + "documentation":"

The date the override was first created.

" + }, + "DateModified":{ + "shape":"Timestamp", + "documentation":"

The date the override was last modified.

" + } + }, + "documentation":"

The override object.

" + }, + "MobileDeviceAccessOverridesList":{ + "type":"list", + "member":{"shape":"MobileDeviceAccessOverride"} + }, "MobileDeviceAccessRule":{ "type":"structure", "members":{ @@ -3257,6 +3486,42 @@ "members":{ } }, + "PutMobileDeviceAccessOverrideRequest":{ + "type":"structure", + "required":[ + "OrganizationId", + "UserId", + "DeviceId", + "Effect" + ], + "members":{ + "OrganizationId":{ + "shape":"OrganizationId", + "documentation":"

Identifies the Amazon WorkMail organization for which you create the override.

" + }, + "UserId":{ + "shape":"EntityIdentifier", + "documentation":"

The WorkMail user for which you create the override. Accepts the following types of user identities:

  • User ID: 12345678-1234-1234-1234-123456789012 or S-1-1-12-1234567890-123456789-123456789-1234

  • Email address: user@domain.tld

  • User name: user

" + }, + "DeviceId":{ + "shape":"DeviceId", + "documentation":"

The mobile device for which you create the override. DeviceId is case insensitive.

" + }, + "Effect":{ + "shape":"MobileDeviceAccessRuleEffect", + "documentation":"

The effect of the override, ALLOW or DENY.

" + }, + "Description":{ + "shape":"MobileDeviceAccessRuleDescription", + "documentation":"

A description of the override.

" + } + } + }, + "PutMobileDeviceAccessOverrideResponse":{ + "type":"structure", + "members":{ + } + }, "PutRetentionPolicyRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/workspaces/2015-04-08/service-2.json b/botocore/data/workspaces/2015-04-08/service-2.json index cba70876..9df67935 100644 --- a/botocore/data/workspaces/2015-04-08/service-2.json +++ b/botocore/data/workspaces/2015-04-08/service-2.json @@ -82,7 +82,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValuesException"} ], - "documentation":"

Copies the specified image from the specified Region to the current Region. For more information about copying images, see Copy a Custom WorkSpaces Image.

In the China (Ningxia) Region, you can copy images only within the same Region.

In the AWS GovCloud (US-West) Region, to copy images to and from other AWS Regions, contact AWS Support.

Before copying a shared image, be sure to verify that it has been shared from the correct AWS account. To determine if an image has been shared and to see the AWS account ID that owns an image, use the DescribeWorkSpaceImages and DescribeWorkspaceImagePermissions API operations.

" + "documentation":"

Copies the specified image from the specified Region to the current Region. For more information about copying images, see Copy a Custom WorkSpaces Image.

In the China (Ningxia) Region, you can copy images only within the same Region.

In Amazon Web Services GovCloud (US), to copy images to and from other Regions, contact Amazon Web Services Support.

Before copying a shared image, be sure to verify that it has been shared from the correct Amazon Web Services account. To determine if an image has been shared and to see the ID of the Amazon Web Services account that owns an image, use the DescribeWorkSpaceImages and DescribeWorkspaceImagePermissions API operations.

" }, "CreateConnectionAlias":{ "name":"CreateConnectionAlias", @@ -134,6 +134,25 @@ ], "documentation":"

Creates the specified tags for the specified WorkSpaces resource.

" }, + "CreateUpdatedWorkspaceImage":{ + "name":"CreateUpdatedWorkspaceImage", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateUpdatedWorkspaceImageRequest"}, + "output":{"shape":"CreateUpdatedWorkspaceImageResult"}, + "errors":[ + {"shape":"ResourceLimitExceededException"}, + {"shape":"ResourceAlreadyExistsException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"OperationNotSupportedException"}, + {"shape":"InvalidResourceStateException"}, + {"shape":"AccessDeniedException"}, + {"shape":"InvalidParameterValuesException"} + ], + "documentation":"

Creates a new updated WorkSpace image based on the specified source image. The new updated WorkSpace image has the latest drivers and other updates required by the Amazon WorkSpaces components.

To determine which WorkSpace images need to be updated with the latest Amazon WorkSpaces requirements, use DescribeWorkspaceImages.

  • Only Windows 10 WorkSpace images can be programmatically updated at this time.

  • Microsoft Windows updates and other application updates are not included in the update process.

  • The source WorkSpace image is not deleted. You can delete the source image after you've verified your new updated image and created a new bundle.

" + }, "CreateWorkspaceBundle":{ "name":"CreateWorkspaceBundle", "http":{ @@ -260,7 +279,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidResourceStateException"} ], - "documentation":"

Deregisters the specified directory. This operation is asynchronous and returns before the WorkSpace directory is deregistered. If any WorkSpaces are registered to this directory, you must remove them before you can deregister the directory.

Simple AD and AD Connector are made available to you free of charge to use with WorkSpaces. If there are no WorkSpaces being used with your Simple AD or AD Connector directory for 30 consecutive days, this directory will be automatically deregistered for use with Amazon WorkSpaces, and you will be charged for this directory as per the AWS Directory Services pricing terms.

To delete empty directories, see Delete the Directory for Your WorkSpaces. If you delete your Simple AD or AD Connector directory, you can always create a new one when you want to start using WorkSpaces again.

" + "documentation":"

Deregisters the specified directory. This operation is asynchronous and returns before the WorkSpace directory is deregistered. If any WorkSpaces are registered to this directory, you must remove them before you can deregister the directory.

Simple AD and AD Connector are made available to you free of charge to use with WorkSpaces. If there are no WorkSpaces being used with your Simple AD or AD Connector directory for 30 consecutive days, this directory will be automatically deregistered for use with Amazon WorkSpaces, and you will be charged for this directory as per the Directory Service pricing terms.

To delete empty directories, see Delete the Directory for Your WorkSpaces. If you delete your Simple AD or AD Connector directory, you can always create a new one when you want to start using WorkSpaces again.

" }, "DescribeAccount":{ "name":"DescribeAccount", @@ -317,7 +336,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Describes the permissions that the owner of a connection alias has granted to another AWS account for the specified connection alias. For more information, see Cross-Region Redirection for Amazon WorkSpaces.

" + "documentation":"

Describes the permissions that the owner of a connection alias has granted to another Amazon Web Services account for the specified connection alias. For more information, see Cross-Region Redirection for Amazon WorkSpaces.

" }, "DescribeConnectionAliases":{ "name":"DescribeConnectionAliases", @@ -400,7 +419,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValuesException"} ], - "documentation":"

Describes the permissions that the owner of an image has granted to other AWS accounts for an image.

" + "documentation":"

Describes the permissions that the owner of an image has granted to other Amazon Web Services accounts for an image.

" }, "DescribeWorkspaceImages":{ "name":"DescribeWorkspaceImages", @@ -472,7 +491,7 @@ {"shape":"InvalidResourceStateException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Disassociates a connection alias from a directory. Disassociating a connection alias disables cross-Region redirection between two directories in different AWS Regions. For more information, see Cross-Region Redirection for Amazon WorkSpaces.

Before performing this operation, call DescribeConnectionAliases to make sure that the current state of the connection alias is CREATED.

" + "documentation":"

Disassociates a connection alias from a directory. Disassociating a connection alias disables cross-Region redirection between two directories in different Regions. For more information, see Cross-Region Redirection for Amazon WorkSpaces.

Before performing this operation, call DescribeConnectionAliases to make sure that the current state of the connection alias is CREATED.

" }, "DisassociateIpGroups":{ "name":"DisassociateIpGroups", @@ -506,7 +525,7 @@ {"shape":"AccessDeniedException"}, {"shape":"InvalidParameterValuesException"} ], - "documentation":"

Imports the specified Windows 10 Bring Your Own License (BYOL) image into Amazon WorkSpaces. The image must be an already licensed Amazon EC2 image that is in your AWS account, and you must own the image. For more information about creating BYOL images, see Bring Your Own Windows Desktop Licenses.

" + "documentation":"

Imports the specified Windows 10 Bring Your Own License (BYOL) image into Amazon WorkSpaces. The image must be an already licensed Amazon EC2 image that is in your Amazon Web Services account, and you must own the image. For more information about creating BYOL images, see Bring Your Own Windows Desktop Licenses.

" }, "ListAvailableManagementCidrRanges":{ "name":"ListAvailableManagementCidrRanges", @@ -520,7 +539,7 @@ {"shape":"InvalidParameterValuesException"}, {"shape":"AccessDeniedException"} ], - "documentation":"

Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that you can use for the network management interface when you enable Bring Your Own License (BYOL).

This operation can be run only by AWS accounts that are enabled for BYOL. If your account isn't enabled for BYOL, you'll receive an AccessDeniedException error.

The management network interface is connected to a secure Amazon WorkSpaces management network. It is used for interactive streaming of the WorkSpace desktop to Amazon WorkSpaces clients, and to allow Amazon WorkSpaces to manage the WorkSpace.

" + "documentation":"

Retrieves a list of IP address ranges, specified as IPv4 CIDR blocks, that you can use for the network management interface when you enable Bring Your Own License (BYOL).

This operation can be run only by Amazon Web Services accounts that are enabled for BYOL. If your account isn't enabled for BYOL, you'll receive an AccessDeniedException error.

The management network interface is connected to a secure Amazon WorkSpaces management network. It is used for interactive streaming of the WorkSpace desktop to Amazon WorkSpaces clients, and to allow Amazon WorkSpaces to manage the WorkSpace.

" }, "MigrateWorkspace":{ "name":"MigrateWorkspace", @@ -750,7 +769,7 @@ }, "input":{"shape":"TerminateWorkspacesRequest"}, "output":{"shape":"TerminateWorkspacesResult"}, - "documentation":"

Terminates the specified WorkSpaces.

Terminating a WorkSpace is a permanent action and cannot be undone. The user's data is destroyed. If you need to archive any user data, contact AWS Support before terminating the WorkSpace.

You can terminate a WorkSpace that is in any state except SUSPENDED.

This operation is asynchronous and returns before the WorkSpaces have been completely terminated. After a WorkSpace is terminated, the TERMINATED state is returned only briefly before the WorkSpace directory metadata is cleaned up, so this state is rarely returned. To confirm that a WorkSpace is terminated, check for the WorkSpace ID by using DescribeWorkSpaces. If the WorkSpace ID isn't returned, then the WorkSpace has been successfully terminated.

Simple AD and AD Connector are made available to you free of charge to use with WorkSpaces. If there are no WorkSpaces being used with your Simple AD or AD Connector directory for 30 consecutive days, this directory will be automatically deregistered for use with Amazon WorkSpaces, and you will be charged for this directory as per the AWS Directory Services pricing terms.

To delete empty directories, see Delete the Directory for Your WorkSpaces. If you delete your Simple AD or AD Connector directory, you can always create a new one when you want to start using WorkSpaces again.

" + "documentation":"

Terminates the specified WorkSpaces.

Terminating a WorkSpace is a permanent action and cannot be undone. The user's data is destroyed. If you need to archive any user data, contact Amazon Web Services Support before terminating the WorkSpace.

You can terminate a WorkSpace that is in any state except SUSPENDED.

This operation is asynchronous and returns before the WorkSpaces have been completely terminated. After a WorkSpace is terminated, the TERMINATED state is returned only briefly before the WorkSpace directory metadata is cleaned up, so this state is rarely returned. To confirm that a WorkSpace is terminated, check for the WorkSpace ID by using DescribeWorkSpaces. If the WorkSpace ID isn't returned, then the WorkSpace has been successfully terminated.

Simple AD and AD Connector are made available to you free of charge to use with WorkSpaces. If there are no WorkSpaces being used with your Simple AD or AD Connector directory for 30 consecutive days, this directory will be automatically deregistered for use with Amazon WorkSpaces, and you will be charged for this directory as per the Directory Service pricing terms.

To delete empty directories, see Delete the Directory for Your WorkSpaces. If you delete your Simple AD or AD Connector directory, you can always create a new one when you want to start using WorkSpaces again.

" }, "UpdateConnectionAliasPermission":{ "name":"UpdateConnectionAliasPermission", @@ -819,7 +838,7 @@ {"shape":"InvalidParameterValuesException"}, {"shape":"OperationNotSupportedException"} ], - "documentation":"

Shares or unshares an image with one account in the same AWS Region by specifying whether that account has permission to copy the image. If the copy image permission is granted, the image is shared with that account. If the copy image permission is revoked, the image is unshared with the account.

After an image has been shared, the recipient account can copy the image to other AWS Regions as needed.

In the China (Ningxia) Region, you can copy images only within the same Region.

In the AWS GovCloud (US-West) Region, to copy images to and from other AWS Regions, contact AWS Support.

For more information about sharing images, see Share or Unshare a Custom WorkSpaces Image.

  • To delete an image that has been shared, you must unshare the image before you delete it.

  • Sharing Bring Your Own License (BYOL) images across AWS accounts isn't supported at this time in the AWS GovCloud (US-West) Region. To share BYOL images across accounts in the AWS GovCloud (US-West) Region, contact AWS Support.

" + "documentation":"

Shares or unshares an image with one account in the same Amazon Web Services Region by specifying whether that account has permission to copy the image. If the copy image permission is granted, the image is shared with that account. If the copy image permission is revoked, the image is unshared with the account.

After an image has been shared, the recipient account can copy the image to other Regions as needed.

In the China (Ningxia) Region, you can copy images only within the same Region.

In Amazon Web Services GovCloud (US), to copy images to and from other Regions, contact Amazon Web Services Support.

For more information about sharing images, see Share or Unshare a Custom WorkSpaces Image.

  • To delete an image that has been shared, you must unshare the image before you delete it.

  • Sharing Bring Your Own License (BYOL) images across Amazon Web Services accounts isn't supported at this time in Amazon Web Services GovCloud (US). To share BYOL images across accounts in Amazon Web Services GovCloud (US), contact Amazon Web Services Support.

" } }, "shapes":{ @@ -1058,7 +1077,7 @@ }, "OwnerAccountId":{ "shape":"AwsAccount", - "documentation":"

The identifier of the AWS account that owns the connection alias.

" + "documentation":"

The identifier of the Amazon Web Services account that owns the connection alias.

" }, "Associations":{ "shape":"ConnectionAliasAssociationList", @@ -1076,7 +1095,7 @@ }, "AssociatedAccountId":{ "shape":"AwsAccount", - "documentation":"

The identifier of the AWS account that associated the connection alias with a directory.

" + "documentation":"

The identifier of the Amazon Web Services account that associated the connection alias with a directory.

" }, "ResourceId":{ "shape":"NonEmptyString", @@ -1122,11 +1141,11 @@ "members":{ "SharedAccountId":{ "shape":"AwsAccount", - "documentation":"

The identifier of the AWS account that the connection alias is shared with.

" + "documentation":"

The identifier of the Amazon Web Services account that the connection alias is shared with.

" }, "AllowAssociation":{ "shape":"BooleanObject", - "documentation":"

Indicates whether the specified AWS account is allowed to associate the connection alias with a directory.

" + "documentation":"

Indicates whether the specified Amazon Web Services account is allowed to associate the connection alias with a directory.

" } }, "documentation":"

Describes the permissions for a connection alias. Connection aliases are used for cross-Region redirection. For more information, see Cross-Region Redirection for Amazon WorkSpaces.

" @@ -1210,7 +1229,7 @@ "members":{ "ConnectionString":{ "shape":"ConnectionString", - "documentation":"

A connection string in the form of a fully qualified domain name (FQDN), such as www.example.com.

After you create a connection string, it is always associated to your AWS account. You cannot recreate the same connection string with a different account, even if you delete all instances of it from the original account. The connection string is globally reserved for your account.

" + "documentation":"

A connection string in the form of a fully qualified domain name (FQDN), such as www.example.com.

After you create a connection string, it is always associated to your Amazon Web Services account. You cannot recreate the same connection string with a different account, even if you delete all instances of it from the original account. The connection string is globally reserved for your account.

" }, "Tags":{ "shape":"TagList", @@ -1280,6 +1299,41 @@ "members":{ } }, + "CreateUpdatedWorkspaceImageRequest":{ + "type":"structure", + "required":[ + "Name", + "Description", + "SourceImageId" + ], + "members":{ + "Name":{ + "shape":"WorkspaceImageName", + "documentation":"

The name of the new updated WorkSpace image.

" + }, + "Description":{ + "shape":"WorkspaceImageDescription", + "documentation":"

A description of whether updates for the WorkSpace image are available.

" + }, + "SourceImageId":{ + "shape":"WorkspaceImageId", + "documentation":"

The identifier of the source WorkSpace image.

" + }, + "Tags":{ + "shape":"TagList", + "documentation":"

The tags that you want to add to the new updated WorkSpace image.

To add tags at the same time when you're creating the updated image, you must create an IAM policy that grants your IAM user permissions to use workspaces:CreateTags.

" + } + } + }, + "CreateUpdatedWorkspaceImageResult":{ + "type":"structure", + "members":{ + "ImageId":{ + "shape":"WorkspaceImageId", + "documentation":"

The identifier of the new updated WorkSpace image.

" + } + } + }, "CreateWorkspaceBundleRequest":{ "type":"structure", "required":[ @@ -1680,7 +1734,7 @@ }, "Owner":{ "shape":"BundleOwner", - "documentation":"

The owner of the bundles. You cannot combine this parameter with any other filter.

To describe the bundles provided by AWS, specify AMAZON. To describe the bundles that belong to your account, don't specify a value.

" + "documentation":"

The owner of the bundles. You cannot combine this parameter with any other filter.

To describe the bundles provided by Amazon Web Services, specify AMAZON. To describe the bundles that belong to your account, don't specify a value.

" }, "NextToken":{ "shape":"PaginationToken", @@ -1758,7 +1812,7 @@ }, "ImagePermissions":{ "shape":"ImagePermissions", - "documentation":"

The identifiers of the AWS accounts that the image has been shared with.

" + "documentation":"

The identifiers of the Amazon Web Services accounts that the image has been shared with.

" }, "NextToken":{ "shape":"PaginationToken", @@ -2021,10 +2075,10 @@ "members":{ "SharedAccountId":{ "shape":"AwsAccount", - "documentation":"

The identifier of the AWS account that an image has been shared with.

" + "documentation":"

The identifier of the Amazon Web Services account that an image has been shared with.

" } }, - "documentation":"

Describes the AWS accounts that have been granted permission to use a shared image. For more information about sharing images, see Share or Unshare a Custom WorkSpaces Image.

" + "documentation":"

Describes the Amazon Web Services accounts that have been granted permission to use a shared image. For more information about sharing images, see Share or Unshare a Custom WorkSpaces Image.

" }, "ImagePermissions":{ "type":"list", @@ -2545,7 +2599,7 @@ }, "Tenancy":{ "shape":"Tenancy", - "documentation":"

Indicates whether your WorkSpace directory is dedicated or shared. To use Bring Your Own License (BYOL) images, this value must be set to DEDICATED and your AWS account must be enabled for BYOL. If your account has not been enabled for BYOL, you will receive an InvalidParameterValuesException error. For more information about BYOL images, see Bring Your Own Windows Desktop Images.

" + "documentation":"

Indicates whether your WorkSpace directory is dedicated or shared. To use Bring Your Own License (BYOL) images, this value must be set to DEDICATED and your Amazon Web Services account must be enabled for BYOL. If your account has not been enabled for BYOL, you will receive an InvalidParameterValuesException error. For more information about BYOL images, see Bring Your Own Windows Desktop Images.

" }, "Tags":{ "shape":"TagList", @@ -2929,7 +2983,7 @@ }, "ConnectionAliasPermission":{ "shape":"ConnectionAliasPermission", - "documentation":"

Indicates whether to share or unshare the connection alias with the specified AWS account.

" + "documentation":"

Indicates whether to share or unshare the connection alias with the specified Amazon Web Services account.

" } } }, @@ -2938,6 +2992,26 @@ "members":{ } }, + "UpdateDescription":{ + "type":"string", + "max":255, + "min":1, + "pattern":"^[a-zA-Z0-9_./() -]+$" + }, + "UpdateResult":{ + "type":"structure", + "members":{ + "UpdateAvailable":{ + "shape":"BooleanObject", + "documentation":"

Indicates whether updated drivers or other components are available for the specified WorkSpace image.

" + }, + "Description":{ + "shape":"UpdateDescription", + "documentation":"

A description of whether updates for the WorkSpace image are pending or available.

" + } + }, + "documentation":"

Describes whether a WorkSpace image needs to be updated with the latest drivers and other components required by Amazon WorkSpaces.

Only Windows 10 WorkSpace images can be programmatically updated at this time.

" + }, "UpdateRulesOfIpGroupRequest":{ "type":"structure", "required":[ @@ -2996,7 +3070,7 @@ }, "SharedAccountId":{ "shape":"AwsAccount", - "documentation":"

The identifier of the AWS account to share or unshare the image with.

Before sharing the image, confirm that you are sharing to the correct AWS account ID.

" + "documentation":"

The identifier of the Amazon Web Services account to share or unshare the image with.

Before sharing the image, confirm that you are sharing to the correct Amazon Web Services account ID.

" } } }, @@ -3031,7 +3105,7 @@ }, "DirectoryId":{ "shape":"DirectoryId", - "documentation":"

The identifier of the AWS Directory Service directory for the WorkSpace.

" + "documentation":"

The identifier of the Directory Service directory for the WorkSpace.

" }, "UserName":{ "shape":"UserName", @@ -3067,7 +3141,7 @@ }, "VolumeEncryptionKey":{ "shape":"VolumeEncryptionKey", - "documentation":"

The symmetric AWS KMS customer master key (CMK) used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric CMKs.

" + "documentation":"

The symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.

" }, "UserVolumeEncryptionEnabled":{ "shape":"BooleanObject", @@ -3093,11 +3167,11 @@ "members":{ "DeviceTypeWindows":{ "shape":"AccessPropertyValue", - "documentation":"

Indicates whether users can use Windows clients to access their WorkSpaces. To restrict WorkSpaces access to trusted devices (also known as managed devices) with valid certificates, specify a value of TRUST. For more information, see Restrict WorkSpaces Access to Trusted Devices.

" + "documentation":"

Indicates whether users can use Windows clients to access their WorkSpaces.

" }, "DeviceTypeOsx":{ "shape":"AccessPropertyValue", - "documentation":"

Indicates whether users can use macOS clients to access their WorkSpaces. To restrict WorkSpaces access to trusted devices (also known as managed devices) with valid certificates, specify a value of TRUST. For more information, see Restrict WorkSpaces Access to Trusted Devices.

" + "documentation":"

Indicates whether users can use macOS clients to access their WorkSpaces.

" }, "DeviceTypeWeb":{ "shape":"AccessPropertyValue", @@ -3109,7 +3183,7 @@ }, "DeviceTypeAndroid":{ "shape":"AccessPropertyValue", - "documentation":"

Indicates whether users can use Android devices to access their WorkSpaces.

" + "documentation":"

Indicates whether users can use Android and Android-compatible Chrome OS devices to access their WorkSpaces.

" }, "DeviceTypeChromeOs":{ "shape":"AccessPropertyValue", @@ -3139,7 +3213,7 @@ }, "Owner":{ "shape":"BundleOwner", - "documentation":"

The owner of the bundle. This is the account identifier of the owner, or AMAZON if the bundle is provided by AWS.

" + "documentation":"

The owner of the bundle. This is the account identifier of the owner, or AMAZON if the bundle is provided by Amazon Web Services.

" }, "Description":{ "shape":"Description", @@ -3375,11 +3449,15 @@ }, "Created":{ "shape":"Timestamp", - "documentation":"

The date when the image was created. If the image has been shared, the AWS account that the image has been shared with sees the original creation date of the image.

" + "documentation":"

The date when the image was created. If the image has been shared, the Amazon Web Services account that the image has been shared with sees the original creation date of the image.

" }, "OwnerAccountId":{ "shape":"AwsAccount", - "documentation":"

The identifier of the AWS account that owns the image.

" + "documentation":"

The identifier of the Amazon Web Services account that owns the image.

" + }, + "Updates":{ + "shape":"UpdateResult", + "documentation":"

The updates (if any) that are available for the specified image.

" } }, "documentation":"

Describes a WorkSpace image.

" @@ -3475,11 +3553,11 @@ "members":{ "DirectoryId":{ "shape":"DirectoryId", - "documentation":"

The identifier of the AWS Directory Service directory for the WorkSpace. You can use DescribeWorkspaceDirectories to list the available directories.

" + "documentation":"

The identifier of the Directory Service directory for the WorkSpace. You can use DescribeWorkspaceDirectories to list the available directories.

" }, "UserName":{ "shape":"UserName", - "documentation":"

The user name of the user for the WorkSpace. This user name must exist in the AWS Directory Service directory for the WorkSpace.

" + "documentation":"

The user name of the user for the WorkSpace. This user name must exist in the Directory Service directory for the WorkSpace.

" }, "BundleId":{ "shape":"BundleId", @@ -3487,7 +3565,7 @@ }, "VolumeEncryptionKey":{ "shape":"VolumeEncryptionKey", - "documentation":"

The symmetric AWS KMS customer master key (CMK) used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric CMKs.

" + "documentation":"

The symmetric KMS key used to encrypt data stored on your WorkSpace. Amazon WorkSpaces does not support asymmetric KMS keys.

" }, "UserVolumeEncryptionEnabled":{ "shape":"BooleanObject", diff --git a/botocore/docs/method.py b/botocore/docs/method.py index cbb4c74a..dbadd943 100644 --- a/botocore/docs/method.py +++ b/botocore/docs/method.py @@ -96,10 +96,13 @@ def document_custom_signature(section, name, method, :param exclude: The names of the parameters to exclude from documentation. """ - args, varargs, keywords, defaults = inspect.getargspec(method) - args = args[1:] + argspec = inspect.getfullargspec(method) signature_params = inspect.formatargspec( - args, varargs, keywords, defaults) + args=argspec.args[1:], + varargs=argspec.varargs, + varkw=argspec.varkw, + defaults=argspec.defaults + ) signature_params = signature_params.lstrip('(') signature_params = signature_params.rstrip(')') section.style.start_sphinx_py_method(name, signature_params) diff --git a/botocore/utils.py b/botocore/utils.py index 8c0bd73e..4b0250a5 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -380,7 +380,9 @@ class IMDSFetcher(object): custom_metadata_endpoint = config.get('ec2_metadata_service_endpoint') if requires_ipv6 and custom_metadata_endpoint: - logger.warn("Custom endpoint and IMDS_USE_IPV6 are both set. Using custom endpoint.") + logger.warning( + "Custom endpoint and IMDS_USE_IPV6 are both set. Using custom endpoint." + ) chosen_base_url = None diff --git a/docs/source/conf.py b/docs/source/conf.py index b856d87c..ebc2340f 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ copyright = u'2013, Mitch Garnaat' # The short X.Y version. version = '1.21.' # The full version, including alpha/beta/rc tags. -release = '1.21.46' +release = '1.21.53' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..699b31d7 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,4 @@ +[tool.pytest.ini_options] +markers = [ + "slow: marks tests as slow", +] diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 00000000..1f8346f5 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1,11 @@ +nose==1.3.7 +wheel==0.37.0 +behave==1.2.5 +jsonschema==2.5.1 +coverage==5.5 + +# Pytest specific deps +pytest==6.2.5 +pytest-cov==2.12.1 +atomicwrites>=1.0 # Windows requirement +colorama>0.3.0 # Windows requirement diff --git a/setup.py b/setup.py index 5c00cfc6..7b8033a8 100644 --- a/setup.py +++ b/setup.py @@ -56,5 +56,7 @@ setup( 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', + 'Programming Language :: Python :: 3.10', ], ) diff --git a/tests/__init__.py b/tests/__init__.py index 755a58f7..2f9961dd 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -28,7 +28,6 @@ from io import BytesIO from subprocess import Popen, PIPE from dateutil.tz import tzlocal -from nose.tools import assert_equal import botocore.loaders import botocore.session @@ -354,16 +353,16 @@ def assert_url_equal(url1, url2): # Because the query string ordering isn't relevant, we have to parse # every single part manually and then handle the query string. 
- assert_equal(parts1.scheme, parts2.scheme) - assert_equal(parts1.netloc, parts2.netloc) - assert_equal(parts1.path, parts2.path) - assert_equal(parts1.params, parts2.params) - assert_equal(parts1.fragment, parts2.fragment) - assert_equal(parts1.username, parts2.username) - assert_equal(parts1.password, parts2.password) - assert_equal(parts1.hostname, parts2.hostname) - assert_equal(parts1.port, parts2.port) - assert_equal(parse_qs(parts1.query), parse_qs(parts2.query)) + assert parts1.scheme == parts2.scheme + assert parts1.netloc == parts2.netloc + assert parts1.path == parts2.path + assert parts1.params == parts2.params + assert parts1.fragment == parts2.fragment + assert parts1.username == parts2.username + assert parts1.password == parts2.password + assert parts1.hostname == parts2.hostname + assert parts1.port == parts2.port + assert parse_qs(parts1.query) == parse_qs(parts2.query) class HTTPStubberException(Exception): diff --git a/tests/acceptance/features/steps/base.py b/tests/acceptance/features/steps/base.py index 918d1fa7..35a9c8c7 100644 --- a/tests/acceptance/features/steps/base.py +++ b/tests/acceptance/features/steps/base.py @@ -4,7 +4,6 @@ from botocore import xform_name from botocore.exceptions import ClientError from behave import when, then -from nose.tools import assert_equal def _params_from_table(table): @@ -72,7 +71,7 @@ def api_call_with_json_and_error(context, operation): @then(u'I expect the response error code to be "{}"') def then_expected_error(context, code): - assert_equal(context.error_response.response['Error']['Code'], code) + assert context.error_response.response['Error']['Code'] == code @then(u'the value at "{}" should be a list') diff --git a/tests/functional/csm/test_monitoring.py b/tests/functional/csm/test_monitoring.py index b373d189..ee479a4f 100644 --- a/tests/functional/csm/test_monitoring.py +++ b/tests/functional/csm/test_monitoring.py @@ -18,7 +18,7 @@ import os import socket import threading -from nose.tools import 
assert_equal +import pytest from tests import mock from tests import temporary_file @@ -47,12 +47,6 @@ EXPECTED_EXCEPTIONS_THROWN = ( botocore.exceptions.ClientError, NonRetryableException, RetryableException) -def test_client_monitoring(): - test_cases = _load_test_cases() - for case in test_cases: - yield _run_test_case, case - - def _load_test_cases(): with open(CASES_FILE) as f: loaded_tests = json.loads(f.read()) @@ -79,6 +73,11 @@ def _replace_expected_anys(test_cases): expected_event[entry] = mock.ANY +@pytest.mark.parametrize("test_case", _load_test_cases()) +def test_client_monitoring(test_case): + _run_test_case(test_case) + + @contextlib.contextmanager def _configured_session(case_configuration, listener_port): environ = { @@ -121,8 +120,7 @@ def _run_test_case(case): case['configuration'], listener.port) as session: for api_call in case['apiCalls']: _make_api_call(session, api_call) - assert_equal( - listener.received_events, case['expectedMonitoringEvents']) + assert listener.received_events == case['expectedMonitoringEvents'] def _make_api_call(session, api_call): diff --git a/tests/functional/docs/test_shared_example_config.py b/tests/functional/docs/test_shared_example_config.py index fdd21cba..f03386cb 100644 --- a/tests/functional/docs/test_shared_example_config.py +++ b/tests/functional/docs/test_shared_example_config.py @@ -10,12 +10,14 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
+import pytest + import botocore.session from botocore.model import OperationNotFoundError from botocore.utils import parse_timestamp -def test_lint_shared_example_configs(): +def _shared_example_configs(): session = botocore.session.Session() loader = session.get_component('data_loader') services = loader.list_available_services('examples-1') @@ -27,10 +29,14 @@ def test_lint_shared_example_configs(): examples = example_config.get("examples", {}) for operation, operation_examples in examples.items(): for example in operation_examples: - yield _lint_single_example, operation, example, service_model + yield operation, example, service_model -def _lint_single_example(operation_name, example_config, service_model): +@pytest.mark.parametrize( + "operation_name, example_config, service_model", + _shared_example_configs() +) +def test_lint_shared_example_configs(operation_name, example_config, service_model): # The operation should actually exist assert_operation_exists(service_model, operation_name) operation_model = service_model.operation_model(operation_name) diff --git a/tests/functional/test_alias.py b/tests/functional/test_alias.py index f9bc5403..bd29c747 100644 --- a/tests/functional/test_alias.py +++ b/tests/functional/test_alias.py @@ -10,6 +10,8 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
+import pytest + import botocore.session from botocore.stub import Stubber from botocore.exceptions import ParamValidationError @@ -46,16 +48,16 @@ ALIAS_CASES = [ ] -def test_can_use_alias(): +@pytest.mark.parametrize("case", ALIAS_CASES) +def test_can_use_alias(case): session = botocore.session.get_session() - for case in ALIAS_CASES: - yield _can_use_parameter_in_client_call, session, case + _can_use_parameter_in_client_call(session, case) -def test_can_use_original_name(): +@pytest.mark.parametrize("case", ALIAS_CASES) +def test_can_use_original_name(case): session = botocore.session.get_session() - for case in ALIAS_CASES: - yield _can_use_parameter_in_client_call, session, case, False + _can_use_parameter_in_client_call(session, case, False) def _can_use_parameter_in_client_call(session, case, use_alias=True): diff --git a/tests/functional/test_client_class_names.py b/tests/functional/test_client_class_names.py index 6f6a806e..bb9ae0c1 100644 --- a/tests/functional/test_client_class_names.py +++ b/tests/functional/test_client_class_names.py @@ -10,7 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-from nose.tools import assert_equal +import pytest import botocore.session @@ -68,14 +68,8 @@ SERVICE_TO_CLASS_NAME = { 'workspaces': 'WorkSpaces' } - -def test_client_has_correct_class_name(): +@pytest.mark.parametrize("service_name", SERVICE_TO_CLASS_NAME) +def test_client_has_correct_class_name(service_name): session = botocore.session.get_session() - for service_name in SERVICE_TO_CLASS_NAME: - client = session.create_client(service_name, REGION) - yield (_assert_class_name_matches_ref_class_name, client, - SERVICE_TO_CLASS_NAME[service_name]) - - -def _assert_class_name_matches_ref_class_name(client, ref_class_name): - assert_equal(client.__class__.__name__, ref_class_name) + client = session.create_client(service_name, REGION) + assert client.__class__.__name__ == SERVICE_TO_CLASS_NAME[service_name] diff --git a/tests/functional/test_cognito_idp.py b/tests/functional/test_cognito_idp.py index 1bd1800e..648253f6 100644 --- a/tests/functional/test_cognito_idp.py +++ b/tests/functional/test_cognito_idp.py @@ -10,77 +10,78 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-from nose.tools import assert_false +import pytest from tests import create_session, mock, ClientHTTPStubber -def test_unsigned_operations(): - operation_params = { - 'change_password': { - 'PreviousPassword': 'myoldbadpassword', - 'ProposedPassword': 'mynewgoodpassword', - 'AccessToken': 'foobar' - }, - 'confirm_forgot_password': { - 'ClientId': 'foo', - 'Username': 'myusername', - 'ConfirmationCode': 'thisismeforreal', - 'Password': 'whydowesendpasswordsviaemail' - }, - 'confirm_sign_up': { - 'ClientId': 'foo', - 'Username': 'myusername', - 'ConfirmationCode': 'ireallydowanttosignup' - }, - 'delete_user': { - 'AccessToken': 'foobar' - }, - 'delete_user_attributes': { - 'UserAttributeNames': ['myattribute'], - 'AccessToken': 'foobar' - }, - 'forgot_password': { - 'ClientId': 'foo', - 'Username': 'myusername' - }, - 'get_user': { - 'AccessToken': 'foobar' - }, - 'get_user_attribute_verification_code': { - 'AttributeName': 'myattribute', - 'AccessToken': 'foobar' - }, - 'resend_confirmation_code': { - 'ClientId': 'foo', - 'Username': 'myusername' - }, - 'set_user_settings': { - 'AccessToken': 'randomtoken', - 'MFAOptions': [{ - 'DeliveryMedium': 'SMS', - 'AttributeName': 'someattributename' - }] - }, - 'sign_up': { - 'ClientId': 'foo', - 'Username': 'bar', - 'Password': 'mysupersecurepassword', - }, - 'update_user_attributes': { - 'UserAttributes': [{ - 'Name': 'someattributename', - 'Value': 'newvalue' - }], - 'AccessToken': 'foobar' - }, - 'verify_user_attribute': { - 'AttributeName': 'someattributename', - 'Code': 'someverificationcode', - 'AccessToken': 'foobar' - }, - } +OPERATION_PARAMS = { + 'change_password': { + 'PreviousPassword': 'myoldbadpassword', + 'ProposedPassword': 'mynewgoodpassword', + 'AccessToken': 'foobar' + }, + 'confirm_forgot_password': { + 'ClientId': 'foo', + 'Username': 'myusername', + 'ConfirmationCode': 'thisismeforreal', + 'Password': 'whydowesendpasswordsviaemail' + }, + 'confirm_sign_up': { + 'ClientId': 'foo', + 'Username': 
'myusername', + 'ConfirmationCode': 'ireallydowanttosignup' + }, + 'delete_user': { + 'AccessToken': 'foobar' + }, + 'delete_user_attributes': { + 'UserAttributeNames': ['myattribute'], + 'AccessToken': 'foobar' + }, + 'forgot_password': { + 'ClientId': 'foo', + 'Username': 'myusername' + }, + 'get_user': { + 'AccessToken': 'foobar' + }, + 'get_user_attribute_verification_code': { + 'AttributeName': 'myattribute', + 'AccessToken': 'foobar' + }, + 'resend_confirmation_code': { + 'ClientId': 'foo', + 'Username': 'myusername' + }, + 'set_user_settings': { + 'AccessToken': 'randomtoken', + 'MFAOptions': [{ + 'DeliveryMedium': 'SMS', + 'AttributeName': 'someattributename' + }] + }, + 'sign_up': { + 'ClientId': 'foo', + 'Username': 'bar', + 'Password': 'mysupersecurepassword', + }, + 'update_user_attributes': { + 'UserAttributes': [{ + 'Name': 'someattributename', + 'Value': 'newvalue' + }], + 'AccessToken': 'foobar' + }, + 'verify_user_attribute': { + 'AttributeName': 'someattributename', + 'Code': 'someverificationcode', + 'AccessToken': 'foobar' + }, +} +@pytest.mark.parametrize("operation_name, parameters", OPERATION_PARAMS.items()) +def test_unsigned_operations(operation_name, parameters): environ = { 'AWS_ACCESS_KEY_ID': 'access_key', 'AWS_SECRET_ACCESS_KEY': 'secret_key', @@ -91,28 +92,15 @@ def test_unsigned_operations(): session = create_session() session.config_filename = 'no-exist-foo' client = session.create_client('cognito-idp', 'us-west-2') + http_stubber = ClientHTTPStubber(client) - for operation, params in operation_params.items(): - test_case = UnsignedOperationTestCase(client, operation, params) - yield test_case.run + operation = getattr(client, operation_name) + http_stubber.add_response(body=b'{}') + with http_stubber: + operation(**parameters) + request = http_stubber.requests[0] -class UnsignedOperationTestCase(object): - def __init__(self, client, operation_name, parameters): - self._client = client - self._operation_name = operation_name - 
self._parameters = parameters - self._http_stubber = ClientHTTPStubber(self._client) - - def run(self): - operation = getattr(self._client, self._operation_name) - - self._http_stubber.add_response(body=b'{}') - with self._http_stubber: - operation(**self._parameters) - request = self._http_stubber.requests[0] - - assert_false( - 'authorization' in request.headers, + assert 'authorization' not in request.headers, ( 'authorization header found in unsigned operation' ) diff --git a/tests/functional/test_endpoints.py b/tests/functional/test_endpoints.py index 75592c8c..07d2091a 100644 --- a/tests/functional/test_endpoints.py +++ b/tests/functional/test_endpoints.py @@ -10,7 +10,8 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from nose.tools import assert_equal +import pytest + from botocore.session import get_session @@ -99,7 +100,22 @@ NOT_SUPPORTED_IN_SDK = [ ] -def test_endpoint_matches_service(): +@pytest.fixture(scope="module") +def known_endpoint_prefixes(): + # The entries in endpoints.json are keyed off of the endpoint + # prefix. We don't directly have that data, so we have to load + # every service model and look up its endpoint prefix in its + # ``metadata`` section. + session = get_session() + loader = session.get_component('data_loader') + known_services = loader.list_available_services('service-2') + return [ + session.get_service_model(service_name).endpoint_prefix + for service_name in known_services + ] + + +def _computed_endpoint_prefixes(): # This verifies client names match up with data from the endpoints.json # file. 
We want to verify that every entry in the endpoints.json # file corresponds to a client we can construct via @@ -120,17 +136,6 @@ def test_endpoint_matches_service(): if service not in NOT_SUPPORTED_IN_SDK: services_in_endpoints_file.add(service) - # Now we need to cross check them against services we know about. - # The entries in endpoints.json are keyed off of the endpoint - # prefix. We don't directly have that data, so we have to load - # every service model and look up its endpoint prefix in its - # ``metadata`` section. - known_services = loader.list_available_services('service-2') - known_endpoint_prefixes = [ - session.get_service_model(service_name).endpoint_prefix - for service_name in known_services - ] - # Now we go through every known endpoint prefix in the endpoints.json # file and ensure it maps to an endpoint prefix we've seen # in a service model. @@ -140,43 +145,45 @@ def test_endpoint_matches_service(): # prefix. endpoint_prefix = ENDPOINT_PREFIX_OVERRIDE.get(endpoint_prefix, endpoint_prefix) - yield (_assert_known_endpoint_prefix, - endpoint_prefix, - known_endpoint_prefixes) + yield endpoint_prefix -def _assert_known_endpoint_prefix(endpoint_prefix, known_endpoint_prefixes): +@pytest.mark.parametrize("endpoint_prefix", _computed_endpoint_prefixes()) +def test_endpoint_matches_service(known_endpoint_prefixes, endpoint_prefix): + # We need to cross check all computed endpoints against our + # known values in endpoints.json, to ensure everything lines + # up correctly. assert endpoint_prefix in known_endpoint_prefixes -def test_service_name_matches_endpoint_prefix(): - # Generates tests for each service to verify that the computed service - # named based on the service id matches the service name used to - # create a client (i.e the directory name in botocore/data) - # unless there is an explicit exception. 
- session = get_session() - loader = session.get_component('data_loader') - +def _available_services(): # Load the list of available services. The names here represent what # will become the client names. - services = loader.list_available_services('service-2') - - for service in services: - yield _assert_service_name_matches_endpoint_prefix, session, service + session = get_session() + loader = session.get_component('data_loader') + return loader.list_available_services('service-2') -def _assert_service_name_matches_endpoint_prefix(session, service_name): +@pytest.mark.parametrize("service_name", _available_services()) +def test_service_name_matches_endpoint_prefix(service_name): + """Generates tests for each service to verify that the computed service + named based on the service id matches the service name used to + create a client (i.e the directory name in botocore/data) + unless there is an explicit exception. + """ + session = get_session() service_model = session.get_service_model(service_name) computed_name = service_model.service_id.replace(' ', '-').lower() # Handle known exceptions where we have renamed the service directory # for one reason or another. actual_service_name = SERVICE_RENAMES.get(service_name, service_name) - assert_equal( - computed_name, actual_service_name, - "Actual service name `%s` does not match expected service name " - "we computed: `%s`" % ( - actual_service_name, computed_name)) + + err_msg = ( + f"Actual service name `{actual_service_name}` does not match " + f"expected service name we computed: `{computed_name}`" + ) + assert computed_name == actual_service_name, err_msg _S3_ALLOWED_PSEUDO_FIPS_REGIONS = [ @@ -191,24 +198,7 @@ _S3_ALLOWED_PSEUDO_FIPS_REGIONS = [ ] -def _assert_is_not_psuedo_fips_region(region_name): - if region_name in _S3_ALLOWED_PSEUDO_FIPS_REGIONS: - return - - msg = ( - 'New S3 FIPS pseudo-region added: "%s". 
' - 'FIPS has compliancy requirements that may not be met in all cases ' - 'for S3 clients due to the custom endpoint resolution and ' - 'construction logic.' - ) - - if 'fips' in region_name: - raise RuntimeError(msg % region_name) - - -def test_no_s3_fips_regions(): - # Fail if additional FIPS pseudo-regions are added to S3. - # This may be removed once proper support is implemented for FIPS in S3. +def _s3_region_names(): session = get_session() loader = session.get_component('data_loader') endpoints = loader.load_data('endpoints') @@ -216,5 +206,21 @@ for partition in endpoints['partitions']: s3_service = partition['services'].get('s3', {}) for region_name in s3_service['endpoints']: - region_name = region_name.lower() - yield _assert_is_not_psuedo_fips_region, region_name + yield region_name.lower() + + +@pytest.mark.parametrize("region_name", _s3_region_names()) +def test_no_s3_fips_regions(region_name): + # Fail if additional FIPS pseudo-regions are added to S3. + # This may be removed once proper support is implemented for FIPS in S3. + if region_name in _S3_ALLOWED_PSEUDO_FIPS_REGIONS: + return + + err_msg = ( + f'New S3 FIPS pseudo-region added: "{region_name}". ' + 'FIPS has compliancy requirements that may not be met in all cases ' + 'for S3 clients due to the custom endpoint resolution and ' + 'construction logic.' 
+ ) + + assert 'fips' not in region_name, err_msg diff --git a/tests/functional/test_event_alias.py b/tests/functional/test_event_alias.py index 604f0128..e29c0963 100644 --- a/tests/functional/test_event_alias.py +++ b/tests/functional/test_event_alias.py @@ -1,3 +1,5 @@ +import pytest + from botocore.session import Session @@ -578,14 +580,43 @@ SERVICES = { } -def test_event_alias(): +def _event_aliases(): + for client_name in SERVICES.keys(): + endpoint_prefix = SERVICES[client_name].get('endpoint_prefix') + service_id = SERVICES[client_name]['service_id'] + yield client_name, service_id + + +def _event_aliases_with_endpoint_prefix(): for client_name in SERVICES.keys(): endpoint_prefix = SERVICES[client_name].get('endpoint_prefix') service_id = SERVICES[client_name]['service_id'] if endpoint_prefix is not None: - yield _assert_handler_called, client_name, endpoint_prefix - yield _assert_handler_called, client_name, service_id - yield _assert_handler_called, client_name, client_name + yield client_name, endpoint_prefix + + +@pytest.mark.parametrize( + "client_name, endpoint_prefix", + _event_aliases_with_endpoint_prefix() +) +def test_event_alias_by_endpoint_prefix(client_name, endpoint_prefix): + _assert_handler_called(client_name, endpoint_prefix) + + +@pytest.mark.parametrize( + "client_name, service_id", + _event_aliases() +) +def test_event_alias_by_service_id(client_name, service_id): + _assert_handler_called(client_name, service_id) + + +@pytest.mark.parametrize( + "client_name, service_id", + _event_aliases() +) +def test_event_alias_by_client_name(client_name, service_id): + _assert_handler_called(client_name, client_name) def _assert_handler_called(client_name, event_part): diff --git a/tests/functional/test_h2_required.py b/tests/functional/test_h2_required.py index e86204d9..a93a560d 100644 --- a/tests/functional/test_h2_required.py +++ b/tests/functional/test_h2_required.py @@ -10,6 +10,8 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR 
CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import pytest + from botocore.session import get_session _H2_REQUIRED = object() @@ -19,33 +21,41 @@ _KNOWN_SERVICES = { 'lexv2-runtime': ['StartConversation'], } - -def test_all_uses_of_h2_are_known(): +def _all_test_cases(): session = get_session() loader = session.get_component('data_loader') services = loader.list_available_services('service-2') + h2_services = [] + h2_operations = [] for service in services: service_model = session.get_service_model(service) h2_config = service_model.metadata.get('protocolSettings', {}).get('h2') if h2_config == 'required': - yield _assert_h2_service_is_known, service + h2_services.append(service) elif h2_config == 'eventstream': for operation in service_model.operation_names: operation_model = service_model.operation_model(operation) if operation_model.has_event_stream_output: - yield _assert_h2_operation_is_known, service, operation + h2_operations.append([service, operation]) + + return h2_services, h2_operations -def _assert_h2_service_is_known(service): +H2_SERVICES, H2_OPERATIONS = _all_test_cases() + + +@pytest.mark.parametrize("h2_service", H2_SERVICES) +def test_all_uses_of_h2_are_known(h2_service): # Validates that a service that requires HTTP 2 for all operations is known - message = 'Found unknown HTTP 2 service: %s' % service - assert _KNOWN_SERVICES.get(service) is _H2_REQUIRED, message + message = 'Found unknown HTTP 2 service: %s' % h2_service + assert _KNOWN_SERVICES.get(h2_service) is _H2_REQUIRED, message -def _assert_h2_operation_is_known(service, operation): +@pytest.mark.parametrize("h2_service, operation", H2_OPERATIONS) +def test_all_h2_operations_are_known(h2_service, operation): # Validates that an operation that requires HTTP 2 is known - known_operations = _KNOWN_SERVICES.get(service, []) - message = 'Found unknown HTTP 2 operation: %s.%s' % 
(service, operation) + known_operations = _KNOWN_SERVICES.get(h2_service, []) + message = 'Found unknown HTTP 2 operation: %s.%s' % (h2_service, operation) assert operation in known_operations, message diff --git a/tests/functional/test_model_backcompat.py b/tests/functional/test_model_backcompat.py index 9586b5f1..f4a3ab9e 100644 --- a/tests/functional/test_model_backcompat.py +++ b/tests/functional/test_model_backcompat.py @@ -12,7 +12,6 @@ # language governing permissions and limitations under the License. import os -from nose.tools import assert_equal from botocore.session import Session from tests import ClientHTTPStubber from tests.functional import TEST_MODELS_DIR @@ -56,21 +55,19 @@ def test_old_model_continues_to_work(): 'Content-Type': 'application/x-amz-json-1.1'}, body=b'{"CertificateSummaryList":[]}') response = client.list_certificates() - assert_equal( - response, - {'CertificateSummaryList': [], - 'ResponseMetadata': { - 'HTTPHeaders': { - 'content-length': '29', - 'content-type': 'application/x-amz-json-1.1', - 'date': 'Fri, 26 Oct 2018 01:46:30 GMT', - 'x-amzn-requestid': 'abcd'}, - 'HTTPStatusCode': 200, - 'RequestId': 'abcd', - 'RetryAttempts': 0} - } - ) + assert response == { + 'CertificateSummaryList': [], + 'ResponseMetadata': { + 'HTTPHeaders': { + 'content-length': '29', + 'content-type': 'application/x-amz-json-1.1', + 'date': 'Fri, 26 Oct 2018 01:46:30 GMT', + 'x-amzn-requestid': 'abcd'}, + 'HTTPStatusCode': 200, + 'RequestId': 'abcd', + 'RetryAttempts': 0} + } # Also verify we can use the paginators as well. 
- assert_equal(client.can_paginate('list_certificates'), True) - assert_equal(client.waiter_names, ['certificate_validated']) + assert client.can_paginate('list_certificates') is True + assert client.waiter_names == ['certificate_validated'] diff --git a/tests/functional/test_model_completeness.py b/tests/functional/test_model_completeness.py index 78dd1529..20c824f1 100644 --- a/tests/functional/test_model_completeness.py +++ b/tests/functional/test_model_completeness.py @@ -10,33 +10,43 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import pytest + from botocore.session import Session from botocore.loaders import Loader from botocore.exceptions import DataNotFoundError -def _test_model_is_not_lost(service_name, type_name, - previous_version, latest_version): +def _paginators_and_waiters_test_cases(): + for service_name in Session().get_available_services(): + versions = Loader().list_api_versions(service_name, 'service-2') + if len(versions) > 1: + for type_name in ['paginators-1', 'waiters-2']: + yield service_name, type_name, versions[-2], versions[-1] + + +@pytest.mark.parametrize( + "service_name, type_name, previous_version, latest_version", + _paginators_and_waiters_test_cases() +) +def test_paginators_and_waiters_are_not_lost_in_new_version( + service_name, type_name, previous_version, latest_version +): # Make sure if a paginator and/or waiter exists in previous version, # there will be a successor existing in latest version. 
loader = Loader() try: previous = loader.load_service_model( - service_name, type_name, previous_version) + service_name, type_name, previous_version + ) except DataNotFoundError: pass else: try: latest = loader.load_service_model( - service_name, type_name, latest_version) + service_name, type_name, latest_version + ) except DataNotFoundError as e: raise AssertionError( - "%s must exist for %s: %s" % (type_name, service_name, e)) - -def test_paginators_and_waiters_are_not_lost_in_new_version(): - for service_name in Session().get_available_services(): - versions = Loader().list_api_versions(service_name, 'service-2') - if len(versions) > 1: - for type_name in ['paginators-1', 'waiters-2']: - yield (_test_model_is_not_lost, service_name, - type_name, versions[-2], versions[-1]) + f"{type_name} must exist for {service_name}: {e}" + ) diff --git a/tests/functional/test_paginate.py b/tests/functional/test_paginate.py index afbf3816..8203e97a 100644 --- a/tests/functional/test_paginate.py +++ b/tests/functional/test_paginate.py @@ -10,11 +10,10 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
-from __future__ import division from math import ceil from datetime import datetime -from nose.tools import assert_equal +import pytest from tests import random_chars from tests import BaseSessionTest @@ -217,22 +216,19 @@ class TestCloudwatchLogsPagination(BaseSessionTest): self.assertEqual(len(result['events']), 1) -def test_token_encoding(): - cases = [ +@pytest.mark.parametrize( + "token_dict", + ( {'foo': 'bar'}, {'foo': b'bar'}, {'foo': {'bar': b'baz'}}, {'foo': ['bar', b'baz']}, {'foo': b'\xff'}, {'foo': {'bar': b'baz', 'bin': [b'bam']}}, - ] - - for token_dict in cases: - yield assert_token_encodes_and_decodes, token_dict - - -def assert_token_encodes_and_decodes(token_dict): + ) +) +def test_token_encoding(token_dict): encoded = TokenEncoder().encode(token_dict) assert isinstance(encoded, six.string_types) decoded = TokenDecoder().decode(encoded) - assert_equal(decoded, token_dict) + assert decoded == token_dict diff --git a/tests/functional/test_paginator_config.py b/tests/functional/test_paginator_config.py index 1c6ef44d..ad74298a 100644 --- a/tests/functional/test_paginator_config.py +++ b/tests/functional/test_paginator_config.py @@ -12,6 +12,7 @@ # language governing permissions and limitations under the License. 
import string +import pytest import jmespath from jmespath.exceptions import JMESPathError @@ -130,7 +131,7 @@ KNOWN_EXTRA_OUTPUT_KEYS = [ ] -def test_lint_pagination_configs(): +def _pagination_configs(): session = botocore.session.get_session() loader = session.get_component('data_loader') services = loader.list_available_services('paginators-1') @@ -141,15 +142,16 @@ def test_lint_pagination_configs(): service_model.api_version) for op_name, single_config in page_config['pagination'].items(): yield ( - _lint_single_paginator, op_name, single_config, service_model ) - -def _lint_single_paginator(operation_name, page_config, - service_model): +@pytest.mark.parametrize( + "operation_name, page_config, service_model", + _pagination_configs() +) +def test_lint_pagination_configs(operation_name, page_config, service_model): _validate_known_pagination_keys(page_config) _valiate_result_key_exists(page_config) _validate_referenced_operation_exists(operation_name, service_model) diff --git a/tests/functional/test_public_apis.py b/tests/functional/test_public_apis.py index ca95e36e..96325184 100644 --- a/tests/functional/test_public_apis.py +++ b/tests/functional/test_public_apis.py @@ -12,6 +12,7 @@ # language governing permissions and limitations under the License. 
from collections import defaultdict +import pytest from tests import mock from tests import ClientHTTPStubber @@ -46,23 +47,7 @@ class EarlyExit(Exception): pass -def _test_public_apis_will_not_be_signed(client, operation, kwargs): - with ClientHTTPStubber(client) as http_stubber: - http_stubber.responses.append(EarlyExit()) - try: - operation(**kwargs) - except EarlyExit: - pass - request = http_stubber.requests[0] - sig_v2_disabled = 'SignatureVersion=2' not in request.url - assert sig_v2_disabled, "SigV2 is incorrectly enabled" - sig_v3_disabled = 'X-Amzn-Authorization' not in request.headers - assert sig_v3_disabled, "SigV3 is incorrectly enabled" - sig_v4_disabled = 'Authorization' not in request.headers - assert sig_v4_disabled, "SigV4 is incorrectly enabled" - - -def test_public_apis_will_not_be_signed(): +def _public_apis(): session = Session() # Mimic the scenario that user does not have aws credentials setup @@ -73,4 +58,22 @@ def test_public_apis_will_not_be_signed(): for operation_name in PUBLIC_API_TESTS[service_name]: kwargs = PUBLIC_API_TESTS[service_name][operation_name] method = getattr(client, xform_name(operation_name)) - yield _test_public_apis_will_not_be_signed, client, method, kwargs + yield client, method, kwargs + + +@pytest.mark.parametrize("client, operation, kwargs", _public_apis()) +def test_public_apis_will_not_be_signed(client, operation, kwargs): + with ClientHTTPStubber(client) as http_stubber: + http_stubber.responses.append(EarlyExit()) + try: + operation(**kwargs) + except EarlyExit: + pass + request = http_stubber.requests[0] + + sig_v2_disabled = 'SignatureVersion=2' not in request.url + assert sig_v2_disabled, "SigV2 is incorrectly enabled" + sig_v3_disabled = 'X-Amzn-Authorization' not in request.headers + assert sig_v3_disabled, "SigV3 is incorrectly enabled" + sig_v4_disabled = 'Authorization' not in request.headers + assert sig_v4_disabled, "SigV4 is incorrectly enabled" diff --git a/tests/functional/test_regions.py 
b/tests/functional/test_regions.py index a95a9265..4c1550fb 100644 --- a/tests/functional/test_regions.py +++ b/tests/functional/test_regions.py @@ -10,7 +10,7 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from nose.tools import assert_equal, assert_raises +import pytest from botocore.client import ClientEndpointBridge from botocore.exceptions import NoRegionError @@ -55,13 +55,13 @@ KNOWN_REGIONS = { 'monitoring': 'monitoring.ap-northeast-1.amazonaws.com', 'rds': 'rds.ap-northeast-1.amazonaws.com', 'redshift': 'redshift.ap-northeast-1.amazonaws.com', - 's3': 's3-ap-northeast-1.amazonaws.com', + 's3': 's3.ap-northeast-1.amazonaws.com', 'sdb': 'sdb.ap-northeast-1.amazonaws.com', 'sns': 'sns.ap-northeast-1.amazonaws.com', 'sqs': 'ap-northeast-1.queue.amazonaws.com', 'storagegateway': 'storagegateway.ap-northeast-1.amazonaws.com', 'streams.dynamodb': 'streams.dynamodb.ap-northeast-1.amazonaws.com', - 'sts': 'sts.amazonaws.com', + 'sts': 'sts.ap-northeast-1.amazonaws.com', 'swf': 'swf.ap-northeast-1.amazonaws.com', 'workspaces': 'workspaces.ap-northeast-1.amazonaws.com' }, @@ -87,13 +87,13 @@ KNOWN_REGIONS = { 'monitoring': 'monitoring.ap-southeast-1.amazonaws.com', 'rds': 'rds.ap-southeast-1.amazonaws.com', 'redshift': 'redshift.ap-southeast-1.amazonaws.com', - 's3': 's3-ap-southeast-1.amazonaws.com', + 's3': 's3.ap-southeast-1.amazonaws.com', 'sdb': 'sdb.ap-southeast-1.amazonaws.com', 'sns': 'sns.ap-southeast-1.amazonaws.com', 'sqs': 'ap-southeast-1.queue.amazonaws.com', 'storagegateway': 'storagegateway.ap-southeast-1.amazonaws.com', 'streams.dynamodb': 'streams.dynamodb.ap-southeast-1.amazonaws.com', - 'sts': 'sts.amazonaws.com', + 'sts': 'sts.ap-southeast-1.amazonaws.com', 'swf': 'swf.ap-southeast-1.amazonaws.com', 'workspaces': 'workspaces.ap-southeast-1.amazonaws.com' }, @@ -122,13 +122,13 @@ 
KNOWN_REGIONS = { 'monitoring': 'monitoring.ap-southeast-2.amazonaws.com', 'rds': 'rds.ap-southeast-2.amazonaws.com', 'redshift': 'redshift.ap-southeast-2.amazonaws.com', - 's3': 's3-ap-southeast-2.amazonaws.com', + 's3': 's3.ap-southeast-2.amazonaws.com', 'sdb': 'sdb.ap-southeast-2.amazonaws.com', 'sns': 'sns.ap-southeast-2.amazonaws.com', 'sqs': 'ap-southeast-2.queue.amazonaws.com', 'storagegateway': 'storagegateway.ap-southeast-2.amazonaws.com', 'streams.dynamodb': 'streams.dynamodb.ap-southeast-2.amazonaws.com', - 'sts': 'sts.amazonaws.com', + 'sts': 'sts.ap-southeast-2.amazonaws.com', 'swf': 'swf.ap-southeast-2.amazonaws.com', 'workspaces': 'workspaces.ap-southeast-2.amazonaws.com' }, @@ -186,7 +186,7 @@ KNOWN_REGIONS = { 'sqs': 'eu-central-1.queue.amazonaws.com', 'storagegateway': 'storagegateway.eu-central-1.amazonaws.com', 'streams.dynamodb': 'streams.dynamodb.eu-central-1.amazonaws.com', - 'sts': 'sts.amazonaws.com', + 'sts': 'sts.eu-central-1.amazonaws.com', 'swf': 'swf.eu-central-1.amazonaws.com' }, 'eu-west-1': { @@ -222,22 +222,19 @@ KNOWN_REGIONS = { 'monitoring': 'monitoring.eu-west-1.amazonaws.com', 'rds': 'rds.eu-west-1.amazonaws.com', 'redshift': 'redshift.eu-west-1.amazonaws.com', - 's3': 's3-eu-west-1.amazonaws.com', + 's3': 's3.eu-west-1.amazonaws.com', 'sdb': 'sdb.eu-west-1.amazonaws.com', 'sns': 'sns.eu-west-1.amazonaws.com', 'sqs': 'eu-west-1.queue.amazonaws.com', 'ssm': 'ssm.eu-west-1.amazonaws.com', 'storagegateway': 'storagegateway.eu-west-1.amazonaws.com', 'streams.dynamodb': 'streams.dynamodb.eu-west-1.amazonaws.com', - 'sts': 'sts.amazonaws.com', + 'sts': 'sts.eu-west-1.amazonaws.com', 'swf': 'swf.eu-west-1.amazonaws.com', 'workspaces': 'workspaces.eu-west-1.amazonaws.com' }, 'fips-us-gov-west-1': { - 's3': 's3-fips-us-gov-west-1.amazonaws.com' - }, - 'local': { - 'dynamodb': 'localhost:8000' + 's3': 's3-fips.us-gov-west-1.amazonaws.com' }, 's3-external-1': { 's3': 's3-external-1.amazonaws.com' @@ -258,13 +255,13 @@ KNOWN_REGIONS = { 
'kms': 'kms.sa-east-1.amazonaws.com', 'monitoring': 'monitoring.sa-east-1.amazonaws.com', 'rds': 'rds.sa-east-1.amazonaws.com', - 's3': 's3-sa-east-1.amazonaws.com', + 's3': 's3.sa-east-1.amazonaws.com', 'sdb': 'sdb.sa-east-1.amazonaws.com', 'sns': 'sns.sa-east-1.amazonaws.com', 'sqs': 'sa-east-1.queue.amazonaws.com', 'storagegateway': 'storagegateway.sa-east-1.amazonaws.com', 'streams.dynamodb': 'streams.dynamodb.sa-east-1.amazonaws.com', - 'sts': 'sts.amazonaws.com', + 'sts': 'sts.sa-east-1.amazonaws.com', 'swf': 'swf.sa-east-1.amazonaws.com' }, 'us-east-1': { @@ -310,14 +307,14 @@ KNOWN_REGIONS = { 'redshift': 'redshift.us-east-1.amazonaws.com', 'route53': 'route53.amazonaws.com', 'route53domains': 'route53domains.us-east-1.amazonaws.com', - 's3': 's3.amazonaws.com', + 's3': 's3.us-east-1.amazonaws.com', 'sdb': 'sdb.amazonaws.com', 'sns': 'sns.us-east-1.amazonaws.com', 'sqs': 'queue.amazonaws.com', 'ssm': 'ssm.us-east-1.amazonaws.com', 'storagegateway': 'storagegateway.us-east-1.amazonaws.com', 'streams.dynamodb': 'streams.dynamodb.us-east-1.amazonaws.com', - 'sts': 'sts.amazonaws.com', + 'sts': 'sts.us-east-1.amazonaws.com', 'support': 'support.us-east-1.amazonaws.com', 'swf': 'swf.us-east-1.amazonaws.com', 'workspaces': 'workspaces.us-east-1.amazonaws.com', @@ -339,7 +336,7 @@ KNOWN_REGIONS = { 'monitoring': 'monitoring.us-gov-west-1.amazonaws.com', 'rds': 'rds.us-gov-west-1.amazonaws.com', 'redshift': 'redshift.us-gov-west-1.amazonaws.com', - 's3': 's3-us-gov-west-1.amazonaws.com', + 's3': 's3.us-gov-west-1.amazonaws.com', 'sns': 'sns.us-gov-west-1.amazonaws.com', 'sqs': 'us-gov-west-1.queue.amazonaws.com', 'sts': 'sts.us-gov-west-1.amazonaws.com', @@ -366,13 +363,13 @@ KNOWN_REGIONS = { 'logs': 'logs.us-west-1.amazonaws.com', 'monitoring': 'monitoring.us-west-1.amazonaws.com', 'rds': 'rds.us-west-1.amazonaws.com', - 's3': 's3-us-west-1.amazonaws.com', + 's3': 's3.us-west-1.amazonaws.com', 'sdb': 'sdb.us-west-1.amazonaws.com', 'sns': 
'sns.us-west-1.amazonaws.com', 'sqs': 'us-west-1.queue.amazonaws.com', 'storagegateway': 'storagegateway.us-west-1.amazonaws.com', 'streams.dynamodb': 'streams.dynamodb.us-west-1.amazonaws.com', - 'sts': 'sts.amazonaws.com', + 'sts': 'sts.us-west-1.amazonaws.com', 'swf': 'swf.us-west-1.amazonaws.com' }, 'us-west-2': { @@ -408,14 +405,14 @@ KNOWN_REGIONS = { 'monitoring': 'monitoring.us-west-2.amazonaws.com', 'rds': 'rds.us-west-2.amazonaws.com', 'redshift': 'redshift.us-west-2.amazonaws.com', - 's3': 's3-us-west-2.amazonaws.com', + 's3': 's3.us-west-2.amazonaws.com', 'sdb': 'sdb.us-west-2.amazonaws.com', 'sns': 'sns.us-west-2.amazonaws.com', 'sqs': 'us-west-2.queue.amazonaws.com', 'ssm': 'ssm.us-west-2.amazonaws.com', 'storagegateway': 'storagegateway.us-west-2.amazonaws.com', 'streams.dynamodb': 'streams.dynamodb.us-west-2.amazonaws.com', - 'sts': 'sts.amazonaws.com', + 'sts': 'sts.us-west-2.amazonaws.com', 'swf': 'swf.us-west-2.amazonaws.com', 'workspaces': 'workspaces.us-west-2.amazonaws.com' } @@ -445,7 +442,17 @@ def _get_patched_session(): return session -def test_known_endpoints(): +def _known_endpoints_by_region(): + for region_name, service_dict in KNOWN_REGIONS.items(): + for service_name, endpoint in service_dict.items(): + yield service_name, region_name, endpoint + + +@pytest.mark.parametrize( + "service_name, region_name, expected_endpoint", + _known_endpoints_by_region() +) +def test_single_service_region_endpoint(service_name, region_name, expected_endpoint): # Verify the actual values from the partition files. While # TestEndpointHeuristics verified the generic functionality given any # endpoints file, this test actually verifies the partition data against a @@ -454,18 +461,10 @@ def test_known_endpoints(): # logic evolves. 
resolver = _get_patched_session()._get_internal_component( 'endpoint_resolver') - for region_name, service_dict in KNOWN_REGIONS.items(): - for service_name, endpoint in service_dict.items(): - yield (_test_single_service_region, service_name, - region_name, endpoint, resolver) - - -def _test_single_service_region(service_name, region_name, - expected_endpoint, resolver): bridge = ClientEndpointBridge(resolver, None, None) result = bridge.resolve(service_name, region_name) expected = 'https://%s' % expected_endpoint - assert_equal(result['endpoint_url'], expected) + assert result['endpoint_url'] == expected # Ensure that all S3 regions use s3v4 instead of v4 @@ -480,25 +479,23 @@ def test_all_s3_endpoints_have_s3v4(): assert 'v4' not in resolved['signatureVersions'] -def test_known_endpoints(): +@pytest.mark.parametrize( + "service_name, expected_endpoint", + KNOWN_AWS_PARTITION_WIDE.items() +) +def test_single_service_partition_endpoint(service_name, expected_endpoint): resolver = _get_patched_session()._get_internal_component( 'endpoint_resolver') - for service_name, endpoint in KNOWN_AWS_PARTITION_WIDE.items(): - yield (_test_single_service_partition_endpoint, service_name, - endpoint, resolver) - - -def _test_single_service_partition_endpoint(service_name, expected_endpoint, - resolver): bridge = ClientEndpointBridge(resolver) result = bridge.resolve(service_name) - assert_equal(result['endpoint_url'], expected_endpoint) + assert result['endpoint_url'] == expected_endpoint def test_non_partition_endpoint_requires_region(): resolver = _get_patched_session()._get_internal_component( 'endpoint_resolver') - assert_raises(NoRegionError, resolver.construct_endpoint, 'ec2') + with pytest.raises(NoRegionError): + resolver.construct_endpoint('ec2') class TestEndpointResolution(BaseSessionTest): diff --git a/tests/functional/test_response_shadowing.py b/tests/functional/test_response_shadowing.py index b18b4a21..924f8740 100644 --- 
a/tests/functional/test_response_shadowing.py +++ b/tests/functional/test_response_shadowing.py @@ -10,40 +10,52 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import pytest + from botocore.session import Session -from nose.tools import assert_false def _all_services(): session = Session() service_names = session.get_available_services() - for service_name in service_names: - yield session.get_service_model(service_name) + return [session.get_service_model(name) for name in service_names] + + +# Only compute our service models once +ALL_SERVICES = _all_services() + + +def _all_service_error_shapes(): + for service_model in ALL_SERVICES: + for shape in service_model.error_shapes: + yield shape def _all_operations(): - for service_model in _all_services(): + for service_model in ALL_SERVICES: for operation_name in service_model.operation_names: - yield service_model.operation_model(operation_name) + yield service_model.operation_model(operation_name).output_shape def _assert_not_shadowed(key, shape): if not shape: return - msg = ( - 'Found shape "%s" that shadows the botocore response key "%s"' + + assert key not in shape.members, ( + f'Found shape "{shape.name}" that shadows the botocore response key "{key}"' ) - assert_false(key in shape.members, msg % (shape.name, key)) -def test_response_metadata_is_not_shadowed(): - for operation_model in _all_operations(): - shape = operation_model.output_shape - yield _assert_not_shadowed, 'ResponseMetadata', shape +@pytest.mark.parametrize("operation_output_shape", _all_operations()) +def test_response_metadata_is_not_shadowed(operation_output_shape): + _assert_not_shadowed('ResponseMetadata', operation_output_shape) -def test_exceptions_do_not_shadow(): - for service_model in _all_services(): - for shape in service_model.error_shapes: - yield 
_assert_not_shadowed, 'ResponseMetadata', shape - yield _assert_not_shadowed, 'Error', shape +@pytest.mark.parametrize("error_shape", _all_service_error_shapes()) +def test_exceptions_do_not_shadow_response_metadata(error_shape): + _assert_not_shadowed('ResponseMetadata', error_shape) + + +@pytest.mark.parametrize("error_shape", _all_service_error_shapes()) +def test_exceptions_do_not_shadow_error(error_shape): + _assert_not_shadowed('Error', error_shape) diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index 843f0c43..c68a3ef0 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -13,9 +13,10 @@ import base64 import re +import pytest + from tests import temporary_file, requires_crt from tests import unittest, mock, BaseSessionTest, create_session, ClientHTTPStubber -from nose.tools import assert_equal import botocore.session from botocore.config import Config @@ -1223,7 +1224,7 @@ class TestS3GetBucketLifecycle(BaseS3OperationTest): class TestS3PutObject(BaseS3OperationTest): def test_500_error_with_non_xml_body(self): - # Note: This exact test case may not be applicable from + # Note: This exact tesdict may not be applicable from # an integration standpoint if the issue is fixed in the future. 
# # The issue is that: @@ -1701,51 +1702,51 @@ class TestGeneratePresigned(BaseS3OperationTest): 'get_object', {'Bucket': 'mybucket', 'Key': 'mykey'}) self.assert_is_v2_presigned_url(url) -def test_checksums_included_in_expected_operations(): - """Validate expected calls include Content-MD5 header""" - t = S3ChecksumCases(_verify_checksum_in_headers) - yield t.case('put_bucket_tagging', +def _checksum_test_cases(): + yield ('put_bucket_tagging', {"Bucket": "foo", "Tagging":{"TagSet":[]}}) - yield t.case('put_bucket_lifecycle', + yield ('put_bucket_lifecycle', {"Bucket": "foo", "LifecycleConfiguration":{"Rules":[]}}) - yield t.case('put_bucket_lifecycle_configuration', + yield ('put_bucket_lifecycle_configuration', {"Bucket": "foo", "LifecycleConfiguration":{"Rules":[]}}) - yield t.case('put_bucket_cors', + yield ('put_bucket_cors', {"Bucket": "foo", "CORSConfiguration":{"CORSRules": []}}) - yield t.case('delete_objects', + yield ('delete_objects', {"Bucket": "foo", "Delete": {"Objects": [{"Key": "bar"}]}}) - yield t.case('put_bucket_replication', + yield ('put_bucket_replication', {"Bucket": "foo", "ReplicationConfiguration": {"Role":"", "Rules": []}}) - yield t.case('put_bucket_acl', + yield ('put_bucket_acl', {"Bucket": "foo", "AccessControlPolicy":{}}) - yield t.case('put_bucket_logging', + yield ('put_bucket_logging', {"Bucket": "foo", "BucketLoggingStatus":{}}) - yield t.case('put_bucket_notification', + yield ('put_bucket_notification', {"Bucket": "foo", "NotificationConfiguration":{}}) - yield t.case('put_bucket_policy', + yield ('put_bucket_policy', {"Bucket": "foo", "Policy": ""}) - yield t.case('put_bucket_request_payment', + yield ('put_bucket_request_payment', {"Bucket": "foo", "RequestPaymentConfiguration":{"Payer": ""}}) - yield t.case('put_bucket_versioning', + yield ('put_bucket_versioning', {"Bucket": "foo", "VersioningConfiguration":{}}) - yield t.case('put_bucket_website', + yield ('put_bucket_website', {"Bucket": "foo", 
"WebsiteConfiguration":{}}) - yield t.case('put_object_acl', + yield ('put_object_acl', {"Bucket": "foo", "Key": "bar", "AccessControlPolicy":{}}) - yield t.case('put_object_legal_hold', + yield ('put_object_legal_hold', {"Bucket": "foo", "Key": "bar", "LegalHold":{"Status": "ON"}}) - yield t.case('put_object_retention', + yield ('put_object_retention', {"Bucket": "foo", "Key": "bar", "Retention":{"RetainUntilDate":"2020-11-05"}}) - yield t.case('put_object_lock_configuration', + yield ('put_object_lock_configuration', {"Bucket": "foo", "ObjectLockConfiguration":{}}) -def _verify_checksum_in_headers(operation, operation_kwargs): +@pytest.mark.parametrize("operation, operation_kwargs", _checksum_test_cases()) +def test_checksums_included_in_expected_operations(operation, operation_kwargs): + """Validate expected calls include Content-MD5 header""" environ = {} with mock.patch('os.environ', environ): environ['AWS_ACCESS_KEY_ID'] = 'access_key' @@ -1761,42 +1762,38 @@ def _verify_checksum_in_headers(operation, operation_kwargs): assert 'Content-MD5' in stub.requests[-1].headers -def test_correct_url_used_for_s3(): - # Test that given various sets of config options and bucket names, - # we construct the expect endpoint url. - t = S3AddressingCases(_verify_expected_endpoint_url) - +def _s3_addressing_test_cases(): # The default behavior for sigv2. 
DNS compatible buckets - yield t.case(region='us-west-2', bucket='bucket', key='key', + yield dict(region='us-west-2', bucket='bucket', key='key', signature_version='s3', expected_url='https://bucket.s3.us-west-2.amazonaws.com/key') - yield t.case(region='us-east-1', bucket='bucket', key='key', + yield dict(region='us-east-1', bucket='bucket', key='key', signature_version='s3', expected_url='https://bucket.s3.amazonaws.com/key') - yield t.case(region='us-west-1', bucket='bucket', key='key', + yield dict(region='us-west-1', bucket='bucket', key='key', signature_version='s3', expected_url='https://bucket.s3.us-west-1.amazonaws.com/key') - yield t.case(region='us-west-1', bucket='bucket', key='key', + yield dict(region='us-west-1', bucket='bucket', key='key', signature_version='s3', is_secure=False, expected_url='http://bucket.s3.us-west-1.amazonaws.com/key') # Virtual host addressing is independent of signature version. - yield t.case(region='us-west-2', bucket='bucket', key='key', + yield dict(region='us-west-2', bucket='bucket', key='key', signature_version='s3v4', expected_url=( 'https://bucket.s3.us-west-2.amazonaws.com/key')) - yield t.case(region='us-east-1', bucket='bucket', key='key', + yield dict(region='us-east-1', bucket='bucket', key='key', signature_version='s3v4', expected_url='https://bucket.s3.amazonaws.com/key') - yield t.case(region='us-west-1', bucket='bucket', key='key', + yield dict(region='us-west-1', bucket='bucket', key='key', signature_version='s3v4', expected_url=( 'https://bucket.s3.us-west-1.amazonaws.com/key')) - yield t.case(region='us-west-1', bucket='bucket', key='key', + yield dict(region='us-west-1', bucket='bucket', key='key', signature_version='s3v4', is_secure=False, expected_url=( 'http://bucket.s3.us-west-1.amazonaws.com/key')) - yield t.case( + yield dict( region='us-west-1', bucket='bucket-with-num-1', key='key', signature_version='s3v4', is_secure=False, expected_url='http://bucket-with-num-1.s3.us-west-1.amazonaws.com/key') 
@@ -1804,121 +1801,121 @@ def test_correct_url_used_for_s3(): # Regions outside of the 'aws' partition. # These should still default to virtual hosted addressing # unless explicitly configured otherwise. - yield t.case(region='cn-north-1', bucket='bucket', key='key', + yield dict(region='cn-north-1', bucket='bucket', key='key', signature_version='s3v4', expected_url=( 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key')) # This isn't actually supported because cn-north-1 is sigv4 only, # but we'll still double check that our internal logic is correct # when building the expected url. - yield t.case(region='cn-north-1', bucket='bucket', key='key', + yield dict(region='cn-north-1', bucket='bucket', key='key', signature_version='s3', expected_url=( 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key')) # If the request is unsigned, we should have the default # fix_s3_host behavior which is to use virtual hosting where # possible but fall back to path style when needed. - yield t.case(region='cn-north-1', bucket='bucket', key='key', + yield dict(region='cn-north-1', bucket='bucket', key='key', signature_version=UNSIGNED, expected_url=( 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key')) - yield t.case(region='cn-north-1', bucket='bucket.dot', key='key', + yield dict(region='cn-north-1', bucket='bucket.dot', key='key', signature_version=UNSIGNED, expected_url=( 'https://s3.cn-north-1.amazonaws.com.cn/bucket.dot/key')) # And of course you can explicitly specify which style to use. 
virtual_hosting = {'addressing_style': 'virtual'} - yield t.case(region='cn-north-1', bucket='bucket', key='key', + yield dict(region='cn-north-1', bucket='bucket', key='key', signature_version=UNSIGNED, s3_config=virtual_hosting, expected_url=( 'https://bucket.s3.cn-north-1.amazonaws.com.cn/key')) path_style = {'addressing_style': 'path'} - yield t.case(region='cn-north-1', bucket='bucket', key='key', + yield dict(region='cn-north-1', bucket='bucket', key='key', signature_version=UNSIGNED, s3_config=path_style, expected_url=( 'https://s3.cn-north-1.amazonaws.com.cn/bucket/key')) # If you don't have a DNS compatible bucket, we use path style. - yield t.case( + yield dict( region='us-west-2', bucket='bucket.dot', key='key', expected_url='https://s3.us-west-2.amazonaws.com/bucket.dot/key') - yield t.case( + yield dict( region='us-east-1', bucket='bucket.dot', key='key', expected_url='https://s3.amazonaws.com/bucket.dot/key') - yield t.case( + yield dict( region='us-east-1', bucket='BucketName', key='key', expected_url='https://s3.amazonaws.com/BucketName/key') - yield t.case( + yield dict( region='us-west-1', bucket='bucket_name', key='key', expected_url='https://s3.us-west-1.amazonaws.com/bucket_name/key') - yield t.case( + yield dict( region='us-west-1', bucket='-bucket-name', key='key', expected_url='https://s3.us-west-1.amazonaws.com/-bucket-name/key') - yield t.case( + yield dict( region='us-west-1', bucket='bucket-name-', key='key', expected_url='https://s3.us-west-1.amazonaws.com/bucket-name-/key') - yield t.case( + yield dict( region='us-west-1', bucket='aa', key='key', expected_url='https://s3.us-west-1.amazonaws.com/aa/key') - yield t.case( + yield dict( region='us-west-1', bucket='a'*64, key='key', expected_url=('https://s3.us-west-1.amazonaws.com/%s/key' % ('a' * 64)) ) # Custom endpoint url should always be used. 
- yield t.case( + yield dict( customer_provided_endpoint='https://my-custom-s3/', bucket='foo', key='bar', expected_url='https://my-custom-s3/foo/bar') - yield t.case( + yield dict( customer_provided_endpoint='https://my-custom-s3/', bucket='bucket.dots', key='bar', expected_url='https://my-custom-s3/bucket.dots/bar') # Doesn't matter what region you specify, a custom endpoint url always # wins. - yield t.case( + yield dict( customer_provided_endpoint='https://my-custom-s3/', region='us-west-2', bucket='foo', key='bar', expected_url='https://my-custom-s3/foo/bar') # Explicitly configuring "virtual" addressing_style. virtual_hosting = {'addressing_style': 'virtual'} - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=virtual_hosting, expected_url='https://bucket.s3.amazonaws.com/key') - yield t.case( + yield dict( region='us-west-2', bucket='bucket', key='key', s3_config=virtual_hosting, expected_url='https://bucket.s3.us-west-2.amazonaws.com/key') - yield t.case( + yield dict( region='eu-central-1', bucket='bucket', key='key', s3_config=virtual_hosting, expected_url='https://bucket.s3.eu-central-1.amazonaws.com/key') - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=virtual_hosting, customer_provided_endpoint='https://foo.amazonaws.com', expected_url='https://bucket.foo.amazonaws.com/key') - yield t.case( + yield dict( region='unknown', bucket='bucket', key='key', s3_config=virtual_hosting, expected_url='https://bucket.s3.unknown.amazonaws.com/key') # Test us-gov with virtual addressing. 
- yield t.case( + yield dict( region='us-gov-west-1', bucket='bucket', key='key', s3_config=virtual_hosting, expected_url='https://bucket.s3.us-gov-west-1.amazonaws.com/key') - yield t.case( + yield dict( region='us-gov-west-1', bucket='bucket', key='key', signature_version='s3', expected_url='https://bucket.s3.us-gov-west-1.amazonaws.com/key') - yield t.case( + yield dict( region='fips-us-gov-west-1', bucket='bucket', key='key', signature_version='s3', expected_url='https://bucket.s3-fips.us-gov-west-1.amazonaws.com/key') @@ -1926,67 +1923,67 @@ def test_correct_url_used_for_s3(): # Test path style addressing. path_style = {'addressing_style': 'path'} - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=path_style, expected_url='https://s3.amazonaws.com/bucket/key') - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=path_style, customer_provided_endpoint='https://foo.amazonaws.com/', expected_url='https://foo.amazonaws.com/bucket/key') - yield t.case( + yield dict( region='unknown', bucket='bucket', key='key', s3_config=path_style, expected_url='https://s3.unknown.amazonaws.com/bucket/key') # S3 accelerate use_accelerate = {'use_accelerate_endpoint': True} - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=use_accelerate, expected_url='https://bucket.s3-accelerate.amazonaws.com/key') - yield t.case( + yield dict( # region is ignored with S3 accelerate. region='us-west-2', bucket='bucket', key='key', s3_config=use_accelerate, expected_url='https://bucket.s3-accelerate.amazonaws.com/key') # Provided endpoints still get recognized as accelerate endpoints. 
- yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', customer_provided_endpoint='https://s3-accelerate.amazonaws.com', expected_url='https://bucket.s3-accelerate.amazonaws.com/key') - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', customer_provided_endpoint='http://s3-accelerate.amazonaws.com', expected_url='http://bucket.s3-accelerate.amazonaws.com/key') - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=use_accelerate, is_secure=False, # Note we're using http:// because is_secure=False. expected_url='http://bucket.s3-accelerate.amazonaws.com/key') - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', # s3-accelerate must be the first part of the url. customer_provided_endpoint='https://foo.s3-accelerate.amazonaws.com', expected_url='https://foo.s3-accelerate.amazonaws.com/bucket/key') - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', # The endpoint must be an Amazon endpoint. customer_provided_endpoint='https://s3-accelerate.notamazon.com', expected_url='https://s3-accelerate.notamazon.com/bucket/key') - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', # Extra components must be whitelisted. customer_provided_endpoint='https://s3-accelerate.foo.amazonaws.com', expected_url='https://s3-accelerate.foo.amazonaws.com/bucket/key') - yield t.case( + yield dict( region='unknown', bucket='bucket', key='key', s3_config=use_accelerate, expected_url='https://bucket.s3-accelerate.amazonaws.com/key') # Use virtual even if path is specified for s3 accelerate because # path style will not work with S3 accelerate. - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config={'use_accelerate_endpoint': True, 'addressing_style': 'path'}, @@ -1994,17 +1991,17 @@ def test_correct_url_used_for_s3(): # S3 dual stack endpoints. 
use_dualstack = {'use_dualstack_endpoint': True} - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=use_dualstack, signature_version='s3', # Still default to virtual hosted when possible on sigv2. expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key') - yield t.case( + yield dict( region=None, bucket='bucket', key='key', s3_config=use_dualstack, # Uses us-east-1 for no region set. expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key') - yield t.case( + yield dict( region='aws-global', bucket='bucket', key='key', s3_config=use_dualstack, # Pseudo-regions should not have any special resolving logic even when @@ -2013,32 +2010,32 @@ def test_correct_url_used_for_s3(): # region name. expected_url=( 'https://bucket.s3.dualstack.aws-global.amazonaws.com/key')) - yield t.case( + yield dict( region='us-west-2', bucket='bucket', key='key', s3_config=use_dualstack, signature_version='s3', # Still default to virtual hosted when possible on sigv2. expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key') - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=use_dualstack, signature_version='s3v4', expected_url='https://bucket.s3.dualstack.us-east-1.amazonaws.com/key') - yield t.case( + yield dict( region='us-west-2', bucket='bucket', key='key', s3_config=use_dualstack, signature_version='s3v4', expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key') - yield t.case( + yield dict( region='unknown', bucket='bucket', key='key', s3_config=use_dualstack, signature_version='s3v4', expected_url='https://bucket.s3.dualstack.unknown.amazonaws.com/key') # Non DNS compatible buckets use path style for dual stack. - yield t.case( + yield dict( region='us-west-2', bucket='bucket.dot', key='key', s3_config=use_dualstack, # Still default to virtual hosted when possible. 
expected_url=( 'https://s3.dualstack.us-west-2.amazonaws.com/bucket.dot/key')) # Supports is_secure (use_ssl=False in create_client()). - yield t.case( + yield dict( region='us-west-2', bucket='bucket.dot', key='key', is_secure=False, s3_config=use_dualstack, # Still default to virtual hosted when possible. @@ -2051,7 +2048,7 @@ def test_correct_url_used_for_s3(): 'use_dualstack_endpoint': True, 'addressing_style': 'path', } - yield t.case( + yield dict( region='us-west-2', bucket='bucket', key='key', s3_config=force_path_style, # Still default to virtual hosted when possible. @@ -2062,32 +2059,32 @@ def test_correct_url_used_for_s3(): 'use_accelerate_endpoint': True, 'use_dualstack_endpoint': True, } - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=use_accelerate_dualstack, expected_url=( 'https://bucket.s3-accelerate.dualstack.amazonaws.com/key')) - yield t.case( + yield dict( # Region is ignored with S3 accelerate. region='us-west-2', bucket='bucket', key='key', s3_config=use_accelerate_dualstack, expected_url=( 'https://bucket.s3-accelerate.dualstack.amazonaws.com/key')) # Only s3-accelerate overrides a customer endpoint. - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=use_dualstack, customer_provided_endpoint='https://s3-accelerate.amazonaws.com', expected_url=( 'https://bucket.s3-accelerate.amazonaws.com/key')) - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', # Dualstack is whitelisted. customer_provided_endpoint=( 'https://s3-accelerate.dualstack.amazonaws.com'), expected_url=( 'https://bucket.s3-accelerate.dualstack.amazonaws.com/key')) - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', # Even whitelisted parts cannot be duplicated. 
customer_provided_endpoint=( @@ -2095,7 +2092,7 @@ def test_correct_url_used_for_s3(): expected_url=( 'https://s3-accelerate.dualstack.dualstack' '.amazonaws.com/bucket/key')) - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', # More than two extra parts is not allowed. customer_provided_endpoint=( @@ -2104,12 +2101,12 @@ def test_correct_url_used_for_s3(): expected_url=( 'https://s3-accelerate.dualstack.dualstack.dualstack.amazonaws.com' '/bucket/key')) - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', # Extra components must be whitelisted. customer_provided_endpoint='https://s3-accelerate.foo.amazonaws.com', expected_url='https://s3-accelerate.foo.amazonaws.com/bucket/key') - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=use_accelerate_dualstack, is_secure=False, # Note we're using http:// because is_secure=False. @@ -2118,7 +2115,7 @@ def test_correct_url_used_for_s3(): # Use virtual even if path is specified for s3 accelerate because # path style will not work with S3 accelerate. use_accelerate_dualstack['addressing_style'] = 'path' - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=use_accelerate_dualstack, expected_url=( @@ -2128,14 +2125,14 @@ def test_correct_url_used_for_s3(): accesspoint_arn = ( 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' ) - yield t.case( + yield dict( region='us-west-2', bucket=accesspoint_arn, key='key', expected_url=( 'https://myendpoint-123456789012.s3-accesspoint.' 'us-west-2.amazonaws.com/key' ) ) - yield t.case( + yield dict( region='us-west-2', bucket=accesspoint_arn, key='key', s3_config={'use_arn_region': True}, expected_url=( @@ -2143,30 +2140,30 @@ def test_correct_url_used_for_s3(): 'us-west-2.amazonaws.com/key' ) ) - yield t.case( + yield dict( region='us-west-2', bucket=accesspoint_arn, key='myendpoint/key', expected_url=( 'https://myendpoint-123456789012.s3-accesspoint.' 
'us-west-2.amazonaws.com/myendpoint/key' ) ) - yield t.case( + yield dict( region='us-west-2', bucket=accesspoint_arn, key='foo/myendpoint/key', expected_url=( 'https://myendpoint-123456789012.s3-accesspoint.' 'us-west-2.amazonaws.com/foo/myendpoint/key' ) ) - yield t.case( + yield dict( # Note: The access-point arn has us-west-2 and the client's region is - # us-east-1, for the default case the access-point arn region is used. + # us-east-1, for the default case the access-point arn region is used. region='us-east-1', bucket=accesspoint_arn, key='key', expected_url=( 'https://myendpoint-123456789012.s3-accesspoint.' 'us-west-2.amazonaws.com/key' ) ) - yield t.case( + yield dict( region='us-east-1', bucket=accesspoint_arn, key='key', s3_config={'use_arn_region': False}, expected_url=( @@ -2174,7 +2171,7 @@ 'us-east-1.amazonaws.com/key' ) ) - yield t.case( + yield dict( region='s3-external-1', bucket=accesspoint_arn, key='key', s3_config={'use_arn_region': True}, expected_url=( @@ -2183,7 +2180,7 @@ ) ) - yield t.case( + yield dict( region='aws-global', bucket=accesspoint_arn, key='key', s3_config={'use_arn_region': True}, expected_url=( @@ -2191,7 +2188,7 @@ 'us-west-2.amazonaws.com/key' ) ) - yield t.case( + yield dict( region='unknown', bucket=accesspoint_arn, key='key', s3_config={'use_arn_region': False}, expected_url=( @@ -2199,7 +2196,7 @@ 'unknown.amazonaws.com/key' ) ) - yield t.case( + yield dict( region='unknown', bucket=accesspoint_arn, key='key', s3_config={'use_arn_region': True}, expected_url=( @@ -2210,21 +2207,21 @@ accesspoint_arn_cn = ( 'arn:aws-cn:s3:cn-north-1:123456789012:accesspoint:myendpoint' ) - yield t.case( + yield dict( region='cn-north-1', bucket=accesspoint_arn_cn, key='key', expected_url=( 'https://myendpoint-123456789012.s3-accesspoint.'
'cn-north-1.amazonaws.com.cn/key' ) ) - yield t.case( + yield dict( region='cn-northwest-1', bucket=accesspoint_arn_cn, key='key', expected_url=( 'https://myendpoint-123456789012.s3-accesspoint.' 'cn-north-1.amazonaws.com.cn/key' ) ) - yield t.case( + yield dict( region='cn-northwest-1', bucket=accesspoint_arn_cn, key='key', s3_config={'use_arn_region': False}, expected_url=( @@ -2235,21 +2232,21 @@ accesspoint_arn_gov = ( 'arn:aws-us-gov:s3:us-gov-west-1:123456789012:accesspoint:myendpoint' ) - yield t.case( + yield dict( region='us-gov-west-1', bucket=accesspoint_arn_gov, key='key', expected_url=( 'https://myendpoint-123456789012.s3-accesspoint.' 'us-gov-west-1.amazonaws.com/key' ) ) - yield t.case( + yield dict( region='fips-us-gov-west-1', bucket=accesspoint_arn_gov, key='key', expected_url=( 'https://myendpoint-123456789012.s3-accesspoint-fips.' 'us-gov-west-1.amazonaws.com/key' ) ) - yield t.case( + yield dict( region='fips-us-gov-west-1', bucket=accesspoint_arn_gov, key='key', s3_config={'use_arn_region': False}, expected_url=( @@ -2258,7 +2255,7 @@ ) ) - yield t.case( + yield dict( region='us-west-2', bucket=accesspoint_arn, key='key', is_secure=False, expected_url=( 'http://myendpoint-123456789012.s3-accesspoint.' @@ -2266,9 +2263,9 @@ ) ) # Dual-stack with access-point arn - yield t.case( + yield dict( # Note: The access-point arn has us-west-2 and the client's region is - # us-east-1, for the default case the access-point arn region is used. + # us-east-1, for the default case the access-point arn region is used.
region='us-east-1', bucket=accesspoint_arn, key='key', s3_config={ 'use_dualstack_endpoint': True, @@ -2278,7 +2275,7 @@ 'us-west-2.amazonaws.com/key' ) ) - yield t.case( + yield dict( region='us-east-1', bucket=accesspoint_arn, key='key', s3_config={ 'use_dualstack_endpoint': True, @@ -2289,7 +2286,7 @@ 'us-east-1.amazonaws.com/key' ) ) - yield t.case( + yield dict( region='us-gov-west-1', bucket=accesspoint_arn_gov, key='key', s3_config={ 'use_dualstack_endpoint': True, @@ -2299,7 +2296,7 @@ 'us-gov-west-1.amazonaws.com/key' ) ) - yield t.case( + yield dict( region='fips-us-gov-west-1', bucket=accesspoint_arn_gov, key='key', s3_config={ 'use_arn_region': True, @@ -2312,7 +2309,7 @@ ) ) # None of the various s3 settings related to paths should affect what # endpoint to use when an access-point is provided. - yield t.case( + yield dict( region='us-west-2', bucket=accesspoint_arn, key='key', s3_config={'adressing_style': 'auto'}, expected_url=( @@ -2320,7 +2317,7 @@ 'us-west-2.amazonaws.com/key' ) ) - yield t.case( + yield dict( region='us-west-2', bucket=accesspoint_arn, key='key', s3_config={'adressing_style': 'virtual'}, expected_url=( @@ -2328,7 +2325,7 @@ 'us-west-2.amazonaws.com/key' ) ) - yield t.case( + yield dict( region='us-west-2', bucket=accesspoint_arn, key='key', s3_config={'adressing_style': 'path'}, expected_url=( @@ -2337,31 +2334,31 @@ ) ) - # Use us-east-1 regional endpoint cases: regional + # Use us-east-1 regional endpoint cases: regional us_east_1_regional_endpoint = { 'us_east_1_regional_endpoint': 'regional' } - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=us_east_1_regional_endpoint, expected_url=( 'https://bucket.s3.us-east-1.amazonaws.com/key')) - yield
t.case( + yield dict( region='us-west-2', bucket='bucket', key='key', s3_config=us_east_1_regional_endpoint, expected_url=( 'https://bucket.s3.us-west-2.amazonaws.com/key')) - yield t.case( + yield dict( region=None, bucket='bucket', key='key', s3_config=us_east_1_regional_endpoint, expected_url=( 'https://bucket.s3.amazonaws.com/key')) - yield t.case( + yield dict( region='unknown', bucket='bucket', key='key', s3_config=us_east_1_regional_endpoint, expected_url=( 'https://bucket.s3.unknown.amazonaws.com/key')) - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config={ 'us_east_1_regional_endpoint': 'regional', @@ -2369,7 +2366,7 @@ }, expected_url=( 'https://bucket.s3.dualstack.us-east-1.amazonaws.com/key')) - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config={ 'us_east_1_regional_endpoint': 'regional', @@ -2377,7 +2374,7 @@ }, expected_url=( 'https://bucket.s3-accelerate.amazonaws.com/key')) - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config={ 'us_east_1_regional_endpoint': 'regional', @@ -2387,23 +2384,23 @@ expected_url=( 'https://bucket.s3-accelerate.dualstack.amazonaws.com/key')) - # Use us-east-1 regional endpoint cases: legacy + # Use us-east-1 regional endpoint cases: legacy us_east_1_regional_endpoint_legacy = { 'us_east_1_regional_endpoint': 'legacy' } - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=us_east_1_regional_endpoint_legacy, expected_url=( 'https://bucket.s3.amazonaws.com/key')) - yield t.case( + yield dict( region=None, bucket='bucket', key='key', s3_config=us_east_1_regional_endpoint_legacy, expected_url=( 'https://bucket.s3.amazonaws.com/key')) - yield t.case( + yield dict( region='unknown', bucket='bucket', key='key', s3_config=us_east_1_regional_endpoint_legacy, expected_url=( @@ -2413,7
+2410,7 @@ def test_correct_url_used_for_s3(): 'arn:aws-us-gov:s3-object-lambda:us-gov-west-1:' '123456789012:accesspoint:mybanner' ) - yield t.case( + yield dict( region='fips-us-gov-west-1', bucket=s3_object_lambda_arn_gov, key='key', expected_url=( 'https://mybanner-123456789012.s3-object-lambda-fips.' @@ -2424,7 +2421,7 @@ 'arn:aws:s3-object-lambda:us-east-1:' '123456789012:accesspoint:mybanner' ) - yield t.case( + yield dict( region='aws-global', bucket=s3_object_lambda_arn, key='key', s3_config={'use_arn_region': True}, expected_url=( @@ -2434,33 +2431,17 @@ ) -class BaseTestCase: - def __init__(self, verify_function): - self._verify = verify_function - - def case(self, **kwargs): - return self._verify, kwargs +@pytest.mark.parametrize("test_case", _s3_addressing_test_cases()) +def test_correct_url_used_for_s3(test_case): + # Test that given various sets of config options and bucket names, + # we construct the expected endpoint url.
+ _verify_expected_endpoint_url(**test_case) -class S3AddressingCases(BaseTestCase): - def case(self, region=None, bucket='bucket', key='key', - s3_config=None, is_secure=True, customer_provided_endpoint=None, - expected_url=None, signature_version=None): - return ( - self._verify, region, bucket, key, s3_config, is_secure, - customer_provided_endpoint, expected_url, signature_version - ) - - -class S3ChecksumCases(BaseTestCase): - def case(self, operation, operation_args): - return self._verify, operation, operation_args - - -def _verify_expected_endpoint_url(region, bucket, key, s3_config, - is_secure=True, - customer_provided_endpoint=None, - expected_url=None, signature_version=None): +def _verify_expected_endpoint_url( + region=None, bucket='bucket', key='key', s3_config=None, is_secure=True, + customer_provided_endpoint=None, expected_url=None, signature_version=None +): environ = {} with mock.patch('os.environ', environ): environ['AWS_ACCESS_KEY_ID'] = 'access_key' @@ -2479,7 +2460,7 @@ def _verify_expected_endpoint_url(region, bucket, key, s3_config, with ClientHTTPStubber(s3) as http_stubber: http_stubber.add_response() s3.put_object(Bucket=bucket, Key=key, Body=b'bar') - assert_equal(http_stubber.requests[0].url, expected_url) + assert http_stubber.requests[0].url == expected_url def _create_s3_client(region, is_secure, endpoint_url, s3_config, @@ -2502,90 +2483,85 @@ def _create_s3_client(region, is_secure, endpoint_url, s3_config, return s3 -def test_addressing_for_presigned_urls(): - # See TestGeneratePresigned for more detailed test cases - # on presigned URLs. Here's we're just focusing on the - # adddressing mode used for presigned URLs. - # We special case presigned URLs due to backwards - # compatibility. - t = S3AddressingCases(_verify_presigned_url_addressing) + +def _addressing_for_presigned_url_test_cases(): # us-east-1, or the "global" endpoint. A signature version of # None means the user doesn't have signature version configured. 
- yield t.case(region='us-east-1', bucket='bucket', key='key', + yield dict(region='us-east-1', bucket='bucket', key='key', signature_version=None, expected_url='https://bucket.s3.amazonaws.com/key') - yield t.case(region='us-east-1', bucket='bucket', key='key', + yield dict(region='us-east-1', bucket='bucket', key='key', signature_version='s3', expected_url='https://bucket.s3.amazonaws.com/key') - yield t.case(region='us-east-1', bucket='bucket', key='key', + yield dict(region='us-east-1', bucket='bucket', key='key', signature_version='s3v4', expected_url='https://bucket.s3.amazonaws.com/key') - yield t.case(region='us-east-1', bucket='bucket', key='key', + yield dict(region='us-east-1', bucket='bucket', key='key', signature_version='s3v4', s3_config={'addressing_style': 'path'}, expected_url='https://s3.amazonaws.com/bucket/key') # A region that supports both 's3' and 's3v4'. - yield t.case(region='us-west-2', bucket='bucket', key='key', + yield dict(region='us-west-2', bucket='bucket', key='key', signature_version=None, expected_url='https://bucket.s3.amazonaws.com/key') - yield t.case(region='us-west-2', bucket='bucket', key='key', + yield dict(region='us-west-2', bucket='bucket', key='key', signature_version='s3', expected_url='https://bucket.s3.amazonaws.com/key') - yield t.case(region='us-west-2', bucket='bucket', key='key', + yield dict(region='us-west-2', bucket='bucket', key='key', signature_version='s3v4', expected_url='https://bucket.s3.amazonaws.com/key') - yield t.case(region='us-west-2', bucket='bucket', key='key', + yield dict(region='us-west-2', bucket='bucket', key='key', signature_version='s3v4', s3_config={'addressing_style': 'path'}, expected_url='https://s3.us-west-2.amazonaws.com/bucket/key') # An 's3v4' only region. 
- yield t.case(region='us-east-2', bucket='bucket', key='key', + yield dict(region='us-east-2', bucket='bucket', key='key', signature_version=None, expected_url='https://bucket.s3.amazonaws.com/key') - yield t.case(region='us-east-2', bucket='bucket', key='key', + yield dict(region='us-east-2', bucket='bucket', key='key', signature_version='s3', expected_url='https://bucket.s3.amazonaws.com/key') - yield t.case(region='us-east-2', bucket='bucket', key='key', + yield dict(region='us-east-2', bucket='bucket', key='key', signature_version='s3v4', expected_url='https://bucket.s3.amazonaws.com/key') - yield t.case(region='us-east-2', bucket='bucket', key='key', + yield dict(region='us-east-2', bucket='bucket', key='key', signature_version='s3v4', s3_config={'addressing_style': 'path'}, expected_url='https://s3.us-east-2.amazonaws.com/bucket/key') # Dualstack endpoints - yield t.case( + yield dict( region='us-west-2', bucket='bucket', key='key', signature_version=None, s3_config={'use_dualstack_endpoint': True}, expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key') - yield t.case( + yield dict( region='us-west-2', bucket='bucket', key='key', signature_version='s3', s3_config={'use_dualstack_endpoint': True}, expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key') - yield t.case( + yield dict( region='us-west-2', bucket='bucket', key='key', signature_version='s3v4', s3_config={'use_dualstack_endpoint': True}, expected_url='https://bucket.s3.dualstack.us-west-2.amazonaws.com/key') # Accelerate - yield t.case(region='us-west-2', bucket='bucket', key='key', + yield dict(region='us-west-2', bucket='bucket', key='key', signature_version=None, s3_config={'use_accelerate_endpoint': True}, expected_url='https://bucket.s3-accelerate.amazonaws.com/key') # A region that we don't know about. 
- yield t.case(region='us-west-50', bucket='bucket', key='key', + yield dict(region='us-west-50', bucket='bucket', key='key', signature_version=None, expected_url='https://bucket.s3.amazonaws.com/key') # Customer provided URL results in us leaving the host untouched. - yield t.case(region='us-west-2', bucket='bucket', key='key', + yield dict(region='us-west-2', bucket='bucket', key='key', signature_version=None, customer_provided_endpoint='https://foo.com/', expected_url='https://foo.com/bucket/key') @@ -2594,14 +2570,14 @@ accesspoint_arn = ( 'arn:aws:s3:us-west-2:123456789012:accesspoint:myendpoint' ) - yield t.case( + yield dict( region='us-west-2', bucket=accesspoint_arn, key='key', expected_url=( 'https://myendpoint-123456789012.s3-accesspoint.' 'us-west-2.amazonaws.com/key' ) ) - yield t.case( + yield dict( region='us-east-1', bucket=accesspoint_arn, key='key', s3_config={'use_arn_region': False}, expected_url=( @@ -2614,23 +2590,29 @@ us_east_1_regional_endpoint = { 'us_east_1_regional_endpoint': 'regional' } - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=us_east_1_regional_endpoint, signature_version='s3', expected_url=( 'https://bucket.s3.us-east-1.amazonaws.com/key')) - yield t.case( + yield dict( region='us-east-1', bucket='bucket', key='key', s3_config=us_east_1_regional_endpoint, signature_version='s3v4', expected_url=( 'https://bucket.s3.us-east-1.amazonaws.com/key')) -def _verify_presigned_url_addressing(region, bucket, key, s3_config, - is_secure=True, - customer_provided_endpoint=None, - expected_url=None, - signature_version=None): +@pytest.mark.parametrize("test_case", _addressing_for_presigned_url_test_cases()) +def test_addressing_for_presigned_urls(test_case): + # Here we're just focusing on the addressing mode used for presigned URLs. + # We special case presigned URLs due to backward compatibility.
+ _verify_presigned_url_addressing(**test_case) + + +def _verify_presigned_url_addressing( + region=None, bucket='bucket', key='key', s3_config=None, is_secure=True, + customer_provided_endpoint=None, expected_url=None, signature_version=None +): s3 = _create_s3_client(region=region, is_secure=is_secure, endpoint_url=customer_provided_endpoint, s3_config=s3_config, @@ -2641,7 +2623,7 @@ def _verify_presigned_url_addressing(region, bucket, key, s3_config, # those are tested elsewhere. We just care about the hostname/path. parts = urlsplit(url) actual = '%s://%s%s' % parts[:3] - assert_equal(actual, expected_url) + assert actual == expected_url class TestS3XMLPayloadEscape(BaseS3OperationTest): diff --git a/tests/functional/test_s3_control_redirects.py b/tests/functional/test_s3_control_redirects.py index b6e53204..13e3175f 100644 --- a/tests/functional/test_s3_control_redirects.py +++ b/tests/functional/test_s3_control_redirects.py @@ -12,7 +12,9 @@ # language governing permissions and limitations under the License. 
import re from contextlib import contextmanager -from nose.tools import assert_equal, assert_true, assert_raises + +import pytest + from tests import unittest, mock, BaseSessionTest, ClientHTTPStubber from botocore import exceptions @@ -283,22 +285,22 @@ def _assert_signing_name(stubber, expected_name): request = stubber.requests[0] auth_header = request.headers['Authorization'].decode('utf-8') actual_name = V4_AUTH_REGEX.match(auth_header).group('name') - assert_equal(expected_name, actual_name) + assert expected_name == actual_name def _assert_netloc(stubber, expected_netloc): request = stubber.requests[0] url_parts = urlsplit(request.url) - assert_equal(expected_netloc, url_parts.netloc) + assert expected_netloc == url_parts.netloc def _assert_header(stubber, key, value): request = stubber.requests[0] - assert_true(key in request.headers) + assert key in request.headers actual_value = request.headers[key] if isinstance(actual_value, bytes): actual_value = actual_value.decode('utf-8') - assert_equal(value, actual_value) + assert value == actual_value def _assert_headers(stubber, headers): @@ -325,26 +327,18 @@ def _bootstrap_test_case_client(session, test_case): return _bootstrap_client(session, region, config=config) -def test_accesspoint_arn_redirection(): +@pytest.mark.parametrize("test_case", ACCESSPOINT_ARN_TEST_CASES) +def test_accesspoint_arn_redirection(test_case): session = _bootstrap_session() - for test_case in ACCESSPOINT_ARN_TEST_CASES: - client, stubber = _bootstrap_test_case_client(session, test_case) - yield _test_accesspoint_arn, test_case, client, stubber - - -def _test_accesspoint_arn(test_case, client, stubber): + client, stubber = _bootstrap_test_case_client(session, test_case) with _assert_test_case(test_case, client, stubber): client.get_access_point_policy(Name=test_case['arn']) -def test_bucket_arn_redirection(): +@pytest.mark.parametrize("test_case", BUCKET_ARN_TEST_CASES) +def test_bucket_arn_redirection(test_case): session = 
_bootstrap_session() - for test_case in BUCKET_ARN_TEST_CASES: - client, stubber = _bootstrap_test_case_client(session, test_case) - yield _test_bucket_arn, test_case, client, stubber - - -def _test_bucket_arn(test_case, client, stubber): + client, stubber = _bootstrap_test_case_client(session, test_case) with _assert_test_case(test_case, client, stubber): client.get_bucket(Bucket=test_case['arn']) @@ -372,7 +366,7 @@ def _assert_test_case(test_case, client, stubber): ) % (exception_cls, type(exception_raised)) assert isinstance(exception_raised, exception_cls), error_msg else: - assert_equal(len(stubber.requests), 1) + assert len(stubber.requests) == 1 if 'signing_name' in assertions: _assert_signing_name(stubber, assertions['signing_name']) if 'headers' in assertions: diff --git a/tests/functional/test_service_alias.py b/tests/functional/test_service_alias.py index d82cfbcf..9f0d3dfa 100644 --- a/tests/functional/test_service_alias.py +++ b/tests/functional/test_service_alias.py @@ -10,24 +10,31 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. 
+import pytest + import botocore.session from botocore.handlers import SERVICE_NAME_ALIASES -def test_can_use_service_alias(): +CLIENT_KWARGS = { + "region_name": "us-east-1", + "aws_access_key_id": "foo", + "aws_secret_access_key": "bar", +} + + +def _service_alias_test_cases(): session = botocore.session.get_session() for (alias, name) in SERVICE_NAME_ALIASES.items(): - yield _instantiates_the_same_client, session, name, alias + yield session, name, alias -def _instantiates_the_same_client(session, service_name, service_alias): - client_kwargs = { - 'region_name': 'us-east-1', - 'aws_access_key_id': 'foo', - 'aws_secret_access_key': 'bar', - } - original_client = session.create_client(service_name, **client_kwargs) - aliased_client = session.create_client(service_alias, **client_kwargs) +@pytest.mark.parametrize( + "session, service_name, service_alias", _service_alias_test_cases() +) +def test_can_use_service_alias(session, service_name, service_alias): + original_client = session.create_client(service_name, **CLIENT_KWARGS) + aliased_client = session.create_client(service_alias, **CLIENT_KWARGS) original_model_name = original_client.meta.service_model.service_name aliased_model_name = aliased_client.meta.service_model.service_name assert original_model_name == aliased_model_name diff --git a/tests/functional/test_service_names.py b/tests/functional/test_service_names.py index dd831d32..40a5aa3b 100644 --- a/tests/functional/test_service_names.py +++ b/tests/functional/test_service_names.py @@ -12,12 +12,11 @@ # language governing permissions and limitations under the License. import re -from nose.tools import assert_true +import pytest + from botocore.session import get_session -BLACKLIST = [ -] - +BLOCKLIST = [] # Service names are limited here to 50 characters here as that seems like a # reasonable limit in the general case. Services can be added to the @@ -34,31 +33,35 @@ VALID_NAME_EXPLANATION = ( 'characters and dashes. 
The name must start with a letter and may not end ' 'with a dash' ) +MIN_NAME_LENGTH_EXPLANATION = ( + 'Service name must be greater than or equal to 2 characters in length.' +) +MAX_NAME_LENGTH_EXPLANATION = ( + 'Service name must be less than or equal to 50 characters in length.' +) MIN_SERVICE_NAME_LENGTH = 2 MAX_SERVICE_NAME_LENGTH = 50 -def _assert_name_length(service_name): - if service_name not in BLACKLIST: - service_name_length = len(service_name) - assert_true(service_name_length >= MIN_SERVICE_NAME_LENGTH, - 'Service name must be greater than or equal to 2 ' - 'characters in length.') - assert_true(service_name_length <= MAX_SERVICE_NAME_LENGTH, - 'Service name must be less than or equal to 50 ' - 'characters in length.') - - -def _assert_name_pattern(service_name): - if service_name not in BLACKLIST: - valid = VALID_NAME_REGEX.match(service_name) is not None - assert_true(valid, VALID_NAME_EXPLANATION) - - -def test_service_names_are_valid(): +def _service_names(): session = get_session() loader = session.get_component('data_loader') - service_names = loader.list_available_services('service-2') - for service_name in service_names: - yield _assert_name_length, service_name - yield _assert_name_pattern, service_name + return loader.list_available_services('service-2') + + +@pytest.mark.parametrize("service_name", _service_names()) +def test_service_names_are_valid_length(service_name): + if service_name not in BLOCKLIST: + service_name_length = len(service_name) + is_not_too_short = service_name_length >= MIN_SERVICE_NAME_LENGTH + is_not_too_long = service_name_length <= MAX_SERVICE_NAME_LENGTH + + assert is_not_too_short, MIN_NAME_LENGTH_EXPLANATION + assert is_not_too_long, MAX_NAME_LENGTH_EXPLANATION + + +@pytest.mark.parametrize("service_name", _service_names()) +def test_service_names_are_valid_pattern(service_name): + if service_name not in BLOCKLIST: + valid = VALID_NAME_REGEX.match(service_name) is not None + assert valid, VALID_NAME_EXPLANATION 
diff --git a/tests/functional/test_six_imports.py b/tests/functional/test_six_imports.py index d3243a12..1151f085 100644 --- a/tests/functional/test_six_imports.py +++ b/tests/functional/test_six_imports.py @@ -2,11 +2,13 @@ import os import botocore import ast +import pytest + ROOTDIR = os.path.dirname(botocore.__file__) -def test_no_bare_six_imports(): +def _all_files(): for rootdir, dirnames, filenames in os.walk(ROOTDIR): if 'vendored' in dirnames: # We don't need to lint our vendored packages. @@ -14,11 +16,11 @@ def test_no_bare_six_imports(): for filename in filenames: if not filename.endswith('.py'): continue - fullname = os.path.join(rootdir, filename) - yield _assert_no_bare_six_imports, fullname + yield os.path.join(rootdir, filename) -def _assert_no_bare_six_imports(filename): +@pytest.mark.parametrize("filename", _all_files()) +def test_no_bare_six_imports(filename): with open(filename) as f: contents = f.read() parsed = ast.parse(contents, filename) diff --git a/tests/functional/test_waiter_config.py b/tests/functional/test_waiter_config.py index 4dfa2f7e..d2b65d9a 100644 --- a/tests/functional/test_waiter_config.py +++ b/tests/functional/test_waiter_config.py @@ -13,6 +13,8 @@ import jmespath from jsonschema import Draft4Validator +import pytest + import botocore.session from botocore.exceptions import UnknownServiceError from botocore.utils import ArgumentGenerator @@ -82,7 +84,7 @@ WAITER_SCHEMA = { } -def test_lint_waiter_configs(): +def _waiter_configs(): session = botocore.session.get_session() validator = Draft4Validator(WAITER_SCHEMA) for service_name in session.get_available_services(): @@ -98,9 +100,14 @@ def test_lint_waiter_configs(): except UnknownServiceError: # The service doesn't have waiters continue - yield _validate_schema, validator, waiter_model - for waiter_name in client.waiter_names: - yield _lint_single_waiter, client, waiter_name, service_model + yield validator, waiter_model, client + + +@pytest.mark.parametrize("validator, 
waiter_model, client", _waiter_configs()) +def test_lint_waiter_configs(validator, waiter_model, client): + _validate_schema(validator, waiter_model) + for waiter_name in client.waiter_names: + _lint_single_waiter(client, waiter_name, client.meta.service_model) def _lint_single_waiter(client, waiter_name, service_model): diff --git a/tests/integration/test_ec2.py b/tests/integration/test_ec2.py index cc8dcf09..b6fe5826 100644 --- a/tests/integration/test_ec2.py +++ b/tests/integration/test_ec2.py @@ -13,8 +13,6 @@ from tests import unittest import itertools -from nose.plugins.attrib import attr - import botocore.session from botocore.exceptions import ClientError diff --git a/tests/integration/test_emr.py b/tests/integration/test_emr.py index a06b4e35..b9338a76 100644 --- a/tests/integration/test_emr.py +++ b/tests/integration/test_emr.py @@ -10,31 +10,41 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from tests import unittest +import pytest -from nose.tools import assert_true +from tests import unittest import botocore.session from botocore.paginate import PageIterator from botocore.exceptions import OperationNotPageableError -def test_emr_endpoints_work_with_py26(): +@pytest.fixture() +def botocore_session(): + return botocore.session.get_session() + +@pytest.mark.parametrize( + "region", + [ + 'us-east-1', + 'us-west-2', + 'us-west-2', + 'ap-northeast-1', + 'ap-southeast-1', + 'ap-southeast-2', + 'sa-east-1', + 'eu-west-1', + 'eu-central-1' + ] +) +def test_emr_endpoints_work_with_py26(botocore_session, region): # Verify that we can talk to all currently supported EMR endpoints. # Python2.6 has an SSL cert bug where it can't read the SAN of # certain SSL certs. We therefore need to always use the CN # as the hostname. 
- session = botocore.session.get_session() - for region in ['us-east-1', 'us-west-2', 'us-west-2', 'ap-northeast-1', - 'ap-southeast-1', 'ap-southeast-2', 'sa-east-1', 'eu-west-1', - 'eu-central-1']: - yield _test_can_list_clusters_in_region, session, region - - -def _test_can_list_clusters_in_region(session, region): - client = session.create_client('emr', region_name=region) + client = botocore_session.create_client('emr', region_name=region) response = client.list_clusters() - assert_true('Clusters' in response) + assert 'Clusters' in response # I consider these integration tests because they're diff --git a/tests/integration/test_s3.py b/tests/integration/test_s3.py index 2df03c43..1b61f5cf 100644 --- a/tests/integration/test_s3.py +++ b/tests/integration/test_s3.py @@ -25,7 +25,7 @@ import logging from tarfile import TarFile from contextlib import closing -from nose.plugins.attrib import attr +import pytest import urllib3 from botocore.endpoint import Endpoint @@ -323,7 +323,7 @@ class TestS3Objects(TestS3BaseWithBucket): Bucket=self.bucket_name, Key=key_name) self.assert_status_code(response, 204) - @attr('slow') + @pytest.mark.slow def test_can_paginate(self): for i in range(5): key_name = 'key%s' % i @@ -339,7 +339,7 @@ class TestS3Objects(TestS3BaseWithBucket): for el in responses] self.assertEqual(key_names, ['key0', 'key1', 'key2', 'key3', 'key4']) - @attr('slow') + @pytest.mark.slow def test_can_paginate_with_page_size(self): for i in range(5): key_name = 'key%s' % i @@ -356,7 +356,7 @@ class TestS3Objects(TestS3BaseWithBucket): for el in data] self.assertEqual(key_names, ['key0', 'key1', 'key2', 'key3', 'key4']) - @attr('slow') + @pytest.mark.slow def test_result_key_iters(self): for i in range(5): key_name = 'key/%s/%s' % (i, i) @@ -379,7 +379,7 @@ class TestS3Objects(TestS3BaseWithBucket): self.assertIn('Contents', response) self.assertIn('CommonPrefixes', response) - @attr('slow') + @pytest.mark.slow def test_can_get_and_put_object(self): 
self.create_object('foobarbaz', body='body contents') time.sleep(3) @@ -929,7 +929,7 @@ class TestS3SigV4Client(BaseS3ClientTest): Key='foo.txt', Body=body) self.assert_status_code(response, 200) - @attr('slow') + @pytest.mark.slow def test_paginate_list_objects_unicode(self): key_names = [ u'non-ascii-key-\xe4\xf6\xfc-01.txt', @@ -952,7 +952,7 @@ class TestS3SigV4Client(BaseS3ClientTest): self.assertEqual(key_names, key_refs) - @attr('slow') + @pytest.mark.slow def test_paginate_list_objects_safe_chars(self): key_names = [ u'-._~safe-chars-key-01.txt', diff --git a/tests/integration/test_smoke.py b/tests/integration/test_smoke.py index 4972b562..43177454 100644 --- a/tests/integration/test_smoke.py +++ b/tests/integration/test_smoke.py @@ -14,7 +14,8 @@ import os from pprint import pformat import warnings import logging -from nose.tools import assert_equal, assert_true + +import pytest from tests import mock, ClientHTTPStubber from botocore import xform_name @@ -245,60 +246,55 @@ def _list_services(dict_entries): return [key for key in dict_entries if key in wanted_services] -def test_can_make_request_with_client(): - # Same as test_can_make_request, but with Client objects - # instead of service/operations. 
- session = botocore.session.get_session() +@pytest.fixture() +def botocore_session(): + return botocore.session.get_session() + + +def _smoke_tests(): for service_name in _list_services(SMOKE_TESTS): - client = _get_client(session, service_name) for operation_name in SMOKE_TESTS[service_name]: kwargs = SMOKE_TESTS[service_name][operation_name] - method_name = xform_name(operation_name) - yield _make_client_call, client, method_name, kwargs + yield service_name, operation_name, kwargs -def _make_client_call(client, operation_name, kwargs): - method = getattr(client, operation_name) - with warnings.catch_warnings(record=True) as caught_warnings: - response = method(**kwargs) - assert_equal(len(caught_warnings), 0, - "Warnings were emitted during smoke test: %s" - % caught_warnings) - assert_true('Errors' not in response) - - -def test_can_make_request_and_understand_errors_with_client(): - session = botocore.session.get_session() +def _error_tests(): for service_name in _list_services(ERROR_TESTS): - client = _get_client(session, service_name) for operation_name in ERROR_TESTS[service_name]: kwargs = ERROR_TESTS[service_name][operation_name] - method_name = xform_name(operation_name) - yield _make_error_client_call, client, method_name, kwargs + yield service_name, operation_name, kwargs -def _make_error_client_call(client, operation_name, kwargs): - method = getattr(client, operation_name) - try: +@pytest.mark.parametrize("service_name, operation_name, kwargs", _smoke_tests()) +def test_can_make_request_with_client( + botocore_session, service_name, operation_name, kwargs +): + # Same as test_can_make_request, but with Client objects + # instead of service/operations. 
+ client = _get_client(botocore_session, service_name) + method = getattr(client, xform_name(operation_name)) + with warnings.catch_warnings(record=True) as caught_warnings: response = method(**kwargs) - except ClientError as e: - pass - else: - raise AssertionError("Expected client error was not raised " - "for %s.%s" % (client, operation_name)) + err_msg = f"Warnings were emitted during smoke test: {caught_warnings}" + assert len(caught_warnings) == 0, err_msg + assert 'Errors' not in response -def test_client_can_retry_request_properly(): - session = botocore.session.get_session() - for service_name in _list_services(SMOKE_TESTS): - client = _get_client(session, service_name) - for operation_name in SMOKE_TESTS[service_name]: - kwargs = SMOKE_TESTS[service_name][operation_name] - yield (_make_client_call_with_errors, client, - operation_name, kwargs) +@pytest.mark.parametrize("service_name, operation_name, kwargs", _error_tests()) +def test_can_make_request_and_understand_errors_with_client( + botocore_session, service_name, operation_name, kwargs +): + client = _get_client(botocore_session, service_name) + method = getattr(client, xform_name(operation_name)) + with pytest.raises(ClientError): + response = method(**kwargs) -def _make_client_call_with_errors(client, operation_name, kwargs): +@pytest.mark.parametrize("service_name, operation_name, kwargs", _smoke_tests()) +def test_client_can_retry_request_properly( + botocore_session, service_name, operation_name, kwargs +): + client = _get_client(botocore_session, service_name) operation = getattr(client, xform_name(operation_name)) exception = ConnectionClosedError(endpoint_url='https://mock.eror') with ClientHTTPStubber(client, strict=False) as http_stubber: diff --git a/tests/integration/test_utils.py b/tests/integration/test_utils.py index c0f47ffd..7ea41803 100644 --- a/tests/integration/test_utils.py +++ b/tests/integration/test_utils.py @@ -10,45 +10,36 @@ # distributed on an "AS IS" BASIS, WITHOUT 
WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import pytest + import botocore.session from botocore.utils import ArgumentGenerator -class ArgumentGeneratorError(AssertionError): - def __init__(self, service_name, operation_name, - generated, message): - full_msg = ( - 'Error generating skeleton for %s:%s, %s\nActual:\n%s' % ( - service_name, operation_name, message, generated)) - super(AssertionError, self).__init__(full_msg) +@pytest.fixture(scope="module") +def generator(): + return ArgumentGenerator() -def test_can_generate_all_inputs(): +def _all_inputs(): session = botocore.session.get_session() - generator = ArgumentGenerator() for service_name in session.get_available_services(): service_model = session.get_service_model(service_name) for operation_name in service_model.operation_names: operation_model = service_model.operation_model(operation_name) input_shape = operation_model.input_shape if input_shape is not None and input_shape.members: - yield (_test_can_generate_skeleton, generator, - input_shape, service_name, operation_name) + yield input_shape, service_name, operation_name -def _test_can_generate_skeleton(generator, shape, service_name, - operation_name): - generated = generator.generate_skeleton(shape) +@pytest.mark.parametrize("input_shape, service_name, operation_name", _all_inputs()) +def test_can_generate_all_inputs(generator, input_shape, service_name, operation_name): + generated = generator.generate_skeleton(input_shape) # Do some basic sanity checks to make sure the generated shape # looks right. We're mostly just ensuring that the generate_skeleton # doesn't throw an exception. 
- if not isinstance(generated, dict): - raise ArgumentGeneratorError( - service_name, operation_name, - generated, 'expected a dict') + assert isinstance(generated, dict) + # The generated skeleton also shouldn't be empty (the test # generator has already filtered out input_shapes of None). - if len(generated) == 0: - raise ArgumentGeneratorError( - service_name, operation_name, - generated, "generated arguments were empty") + assert len(generated) > 0 diff --git a/tests/integration/test_waiters.py b/tests/integration/test_waiters.py index b27b3b0b..0ba6ae38 100644 --- a/tests/integration/test_waiters.py +++ b/tests/integration/test_waiters.py @@ -10,16 +10,15 @@ # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. -from tests import unittest, random_chars +import pytest -from nose.plugins.attrib import attr +from tests import unittest, random_chars import botocore.session from botocore.exceptions import WaiterError -# This is the same test as above, except using the client interface. -@attr('slow') +@pytest.mark.slow class TestWaiterForDynamoDB(unittest.TestCase): def setUp(self): self.session = botocore.session.get_session() diff --git a/tests/unit/auth/test_sigv4.py b/tests/unit/auth/test_sigv4.py index ae65dd33..2e629ea1 100644 --- a/tests/unit/auth/test_sigv4.py +++ b/tests/unit/auth/test_sigv4.py @@ -18,8 +18,8 @@ AWS provides a test suite for signature version 4: https://github.com/awslabs/aws-c-auth/tree/v0.3.15/tests/aws-sig-v4-test-suite This module contains logic to run these tests. The test files were -placed in ./aws4_testsuite, and we're using nose's test generators to -dynamically generate testcases based on these files. +placed in ./aws4_testsuite, and we're using those to dynamically +generate testcases based on these files. 
""" import os @@ -28,6 +28,8 @@ import io import datetime import re +import pytest + from tests import mock import botocore.auth @@ -80,13 +82,7 @@ class RawHTTPRequest(six.moves.BaseHTTPServer.BaseHTTPRequestHandler): self.error_message = message -def test_generator(): - datetime_patcher = mock.patch.object( - botocore.auth.datetime, 'datetime', - mock.Mock(wraps=datetime.datetime) - ) - mocked_datetime = datetime_patcher.start() - mocked_datetime.utcnow.return_value = DATE +def generate_test_cases(): for (dirpath, dirnames, filenames) in os.walk(TESTSUITE_DIR): if not any(f.endswith('.req') for f in filenames): continue @@ -96,10 +92,20 @@ def test_generator(): log.debug("Skipping test: %s", test_case) continue - if HAS_CRT: - yield (_test_crt_signature_version_4, test_case) - else: - yield (_test_signature_version_4, test_case) + yield test_case + + +@pytest.mark.parametrize("test_case", generate_test_cases()) +def test_signature_version_4(test_case): + datetime_patcher = mock.patch.object( + botocore.auth.datetime, 'datetime', + mock.Mock(wraps=datetime.datetime) + ) + mocked_datetime = datetime_patcher.start() + mocked_datetime.utcnow.return_value = DATE + + _test_signature_version_4(test_case) + datetime_patcher.stop() @@ -132,7 +138,7 @@ def create_request_from_raw_request(raw_request): def _test_signature_version_4(test_case): - test_case = _SignatureTestCase(test_case) + test_case = SignatureTestCase(test_case) request = create_request_from_raw_request(test_case.raw_request) auth = botocore.auth.SigV4Auth(test_case.credentials, SERVICE, REGION) @@ -156,7 +162,7 @@ def _test_signature_version_4(test_case): def _test_crt_signature_version_4(test_case): - test_case = _SignatureTestCase(test_case) + test_case = SignatureTestCase(test_case) request = create_request_from_raw_request(test_case.raw_request) # Use CRT logging to diagnose interim steps (canonical request, etc) @@ -178,7 +184,7 @@ def assert_equal(actual, expected, raw_request, part): raise 
AssertionError(message) -class _SignatureTestCase(object): +class SignatureTestCase(object): def __init__(self, test_case): filepath = os.path.join(TESTSUITE_DIR, test_case, os.path.basename(test_case)) diff --git a/tests/unit/crt/auth/test_crt_sigv4.py b/tests/unit/crt/auth/test_crt_sigv4.py index 7a97215c..e3c30c18 100644 --- a/tests/unit/crt/auth/test_crt_sigv4.py +++ b/tests/unit/crt/auth/test_crt_sigv4.py @@ -1,4 +1,48 @@ -# This will run the CRT version of the test_generator -# on import. When we're off nose, we should split this -# yield test into two and have the CRT portion here. -from tests.unit.auth.test_sigv4 import test_generator +import datetime + +import pytest + +from tests import mock, requires_crt +from tests.unit.auth.test_sigv4 import ( + DATE, + SERVICE, + REGION, + SignatureTestCase, + assert_equal, + create_request_from_raw_request, + generate_test_cases, +) + +import botocore + + +def _test_crt_signature_version_4(test_case): + test_case = SignatureTestCase(test_case) + request = create_request_from_raw_request(test_case.raw_request) + + # Use CRT logging to diagnose interim steps (canonical request, etc) + # import awscrt.io + # awscrt.io.init_logging(awscrt.io.LogLevel.Trace, 'stdout') + auth = botocore.crt.auth.CrtSigV4Auth(test_case.credentials, SERVICE, REGION) + auth.add_auth(request) + actual_auth_header = request.headers["Authorization"] + assert_equal( + actual_auth_header, + test_case.authorization_header, + test_case.raw_request, + "authheader", + ) + + +@requires_crt() +@pytest.mark.parametrize("test_case", generate_test_cases()) +def test_signature_version_4(test_case): + datetime_patcher = mock.patch.object( + botocore.auth.datetime, "datetime", mock.Mock(wraps=datetime.datetime) + ) + mocked_datetime = datetime_patcher.start() + mocked_datetime.utcnow.return_value = DATE + + _test_crt_signature_version_4(test_case) + + datetime_patcher.stop() diff --git a/tests/unit/response_parsing/test_response_parsing.py 
b/tests/unit/response_parsing/test_response_parsing.py index b182b214..548c2a66 100644 --- a/tests/unit/response_parsing/test_response_parsing.py +++ b/tests/unit/response_parsing/test_response_parsing.py @@ -17,6 +17,9 @@ import json import pprint import logging import difflib + +import pytest + from tests import create_session import botocore.session @@ -102,7 +105,7 @@ def _convert_bytes_to_str(parsed): return parsed -def test_xml_parsing(): +def _xml_test_cases(): for dp in ['responses', 'errors']: data_path = os.path.join(os.path.dirname(__file__), 'xml') data_path = os.path.join(data_path, dp) @@ -119,8 +122,17 @@ def test_xml_parsing(): expected = _get_expected_parsed_result(xmlfile) operation_model = _get_operation_model(service_model, xmlfile) raw_response_body = _get_raw_response_body(xmlfile) - yield _test_parsed_response, xmlfile, raw_response_body, \ - operation_model, expected + yield xmlfile, raw_response_body, operation_model, expected + + +@pytest.mark.parametrize( + "xmlfile, raw_response_body, operation_model, expected", + _xml_test_cases() +) +def test_xml_parsing(xmlfile, raw_response_body, operation_model, expected): + _test_parsed_response( + xmlfile, raw_response_body, operation_model, expected + ) def _get_raw_response_body(xmlfile): @@ -153,7 +165,7 @@ def _get_expected_parsed_result(filename): return json.load(f) -def test_json_errors_parsing(): +def _json_test_cases(): # The outputs/ directory has sample output responses # For each file in outputs/ there's a corresponding file # in expected/ that has the expected parsed response. 
@@ -179,148 +191,16 @@ def test_json_errors_parsing(): operation_model = service_model.operation_model(op_name) with open(raw_response_file, 'rb') as f: raw_response_body = f.read() - yield _test_parsed_response, raw_response_file, \ - raw_response_body, operation_model, expected + yield raw_response_file, raw_response_body, operation_model, expected -def _uhg_test_json_parsing(): - input_path = os.path.join(os.path.dirname(__file__), 'json') - input_path = os.path.join(input_path, 'inputs') - output_path = os.path.join(os.path.dirname(__file__), 'json') - output_path = os.path.join(output_path, 'outputs') - session = botocore.session.get_session() - jsonfiles = glob.glob('%s/*.json' % input_path) - service_names = set() - for fn in jsonfiles: - service_names.add(os.path.split(fn)[1].split('-')[0]) - for service_name in service_names: - service_model = session.get_service_model(service_name) - service_json_files = glob.glob('%s/%s-*.json' % (input_path, - service_name)) - for jsonfile in service_json_files: - expected = _get_expected_parsed_result(jsonfile) - operation_model = _get_operation_model(service_model, jsonfile) - with open(jsonfile, 'rb') as f: - raw_response_body = f.read() - yield _test_parsed_response, jsonfile, \ - raw_response_body, operation_model, expected - # TODO: handle the __headers crap. 
- - -#class TestHeaderParsing(unittest.TestCase): -# -# maxDiff = None -# -# def setUp(self): -# self.session = botocore.session.get_session() -# self.s3 = self.session.get_service('s3') -# -# def test_put_object(self): -# http_response = Mock() -# http_response.encoding = 'utf-8' -# http_response.headers = CaseInsensitiveDict( -# {'Date': 'Thu, 22 Aug 2013 02:11:57 GMT', -# 'Content-Length': '0', -# 'x-amz-request-id': '2B74ECB010FF029E', -# 'ETag': '"b081e66e7e0c314285c655cafb4d1e71"', -# 'x-amz-id-2': 'bKECRRBFttBRVbJPIVBLQwwipI0i+s9HMvNFdttR17ouR0pvQSKEJUR+1c6cW1nQ', -# 'Server': 'AmazonS3', -# 'content-type': 'text/xml'}) -# http_response.content = '' -# put_object = self.s3.get_operation('PutObject') -# expected = {"ETag": '"b081e66e7e0c314285c655cafb4d1e71"'} -# response_data = get_response(self.session, put_object, http_response)[1] -# self.assertEqual(response_data, expected) -# -# def test_head_object(self): -# http_response = Mock() -# http_response.encoding = 'utf-8' -# http_response.headers = CaseInsensitiveDict( -# {'Date': 'Thu, 22 Aug 2013 02:11:57 GMT', -# 'Content-Length': '265', -# 'x-amz-request-id': '2B74ECB010FF029E', -# 'ETag': '"40d06eb6194712ac1c915783004ef730"', -# 'Server': 'AmazonS3', -# 'content-type': 'binary/octet-stream', -# 'Content-Type': 'binary/octet-stream', -# 'accept-ranges': 'bytes', -# 'Last-Modified': 'Tue, 20 Aug 2013 18:33:25 GMT', -# 'x-amz-server-side-encryption': 'AES256', -# 'x-amz-meta-mykey1': 'value1', -# 'x-amz-meta-mykey2': 'value2', -# }) -# http_response.content = '' -# http_response.request.method = 'HEAD' -# put_object = self.s3.get_operation('HeadObject') -# expected = {"AcceptRanges": "bytes", -# "ContentType": "binary/octet-stream", -# "LastModified": "Tue, 20 Aug 2013 18:33:25 GMT", -# "ContentLength": "265", -# "ETag": '"40d06eb6194712ac1c915783004ef730"', -# "ServerSideEncryption": "AES256", -# "Metadata": { -# 'mykey1': 'value1', -# 'mykey2': 'value2', -# }} -# response_data = 
get_response(self.session, put_object, -# http_response)[1] -# self.assertEqual(response_data, expected) -# -# def test_list_objects_with_invalid_content_length(self): -# http_response = Mock() -# http_response.encoding = 'utf-8' -# http_response.headers = CaseInsensitiveDict( -# {'Date': 'Thu, 22 Aug 2013 02:11:57 GMT', -# # We say we have 265 bytes but we're returning 0, -# # this should raise an exception because this is not -# # a HEAD request. -# 'Content-Length': '265', -# 'x-amz-request-id': '2B74ECB010FF029E', -# 'ETag': '"40d06eb6194712ac1c915783004ef730"', -# 'Server': 'AmazonS3', -# 'content-type': 'binary/octet-stream', -# 'Content-Type': 'binary/octet-stream', -# 'accept-ranges': 'bytes', -# 'Last-Modified': 'Tue, 20 Aug 2013 18:33:25 GMT', -# 'x-amz-server-side-encryption': 'AES256' -# }) -# http_response.content = '' -# http_response.request.method = 'GET' -# list_objects = self.s3.get_operation('ListObjects') -# expected = {"AcceptRanges": "bytes", -# "ContentType": "binary/octet-stream", -# "LastModified": "Tue, 20 Aug 2013 18:33:25 GMT", -# "ContentLength": "265", -# "ETag": '"40d06eb6194712ac1c915783004ef730"', -# "ServerSideEncryption": "AES256" -# } -# with self.assertRaises(IncompleteReadError): -# response_data = get_response(self.session, list_objects, -# http_response)[1] -# -# def test_head_object_with_json(self): -# http_response = Mock() -# http_response.encoding = 'utf-8' -# http_response.headers = CaseInsensitiveDict( -# {'Date': 'Thu, 22 Aug 2013 02:11:57 GMT', -# 'Content-Length': '0', -# 'x-amz-request-id': '2B74ECB010FF029E', -# 'ETag': '"40d06eb6194712ac1c915783004ef730"', -# 'Server': 'AmazonS3', -# 'content-type': 'application/json', -# 'Content-Type': 'application/json', -# 'accept-ranges': 'bytes', -# 'Last-Modified': 'Tue, 20 Aug 2013 18:33:25 GMT', -# 'x-amz-server-side-encryption': 'AES256'}) -# http_response.content = '' -# put_object = self.s3.get_operation('HeadObject') -# expected = {"AcceptRanges": "bytes", -# 
"ContentType": "application/json", -# "LastModified": "Tue, 20 Aug 2013 18:33:25 GMT", -# "ContentLength": "0", -# "ETag": '"40d06eb6194712ac1c915783004ef730"', -# "ServerSideEncryption": "AES256" -# } -# response_data = get_response(self.session, put_object, -# http_response)[1] -# self.assertEqual(response_data, expected) +@pytest.mark.parametrize( + "raw_response_file, raw_response_body, operation_model, expected", + _json_test_cases() +) +def test_json_errors_parsing( + raw_response_file, raw_response_body, operation_model, expected +): + _test_parsed_response( + raw_response_file, raw_response_body, operation_model, expected + ) diff --git a/tests/unit/retries/test_special.py b/tests/unit/retries/test_special.py index 48bbd657..ed3d8831 100644 --- a/tests/unit/retries/test_special.py +++ b/tests/unit/retries/test_special.py @@ -1,8 +1,6 @@ from tests import mock from tests import unittest -from nose.tools import assert_equal, assert_is_instance - from botocore.compat import six from botocore.awsrequest import AWSResponse from botocore.retries import standard, special diff --git a/tests/unit/retries/test_standard.py b/tests/unit/retries/test_standard.py index c6c81aab..b4bf2760 100644 --- a/tests/unit/retries/test_standard.py +++ b/tests/unit/retries/test_standard.py @@ -1,8 +1,8 @@ +import pytest + from tests import mock from tests import unittest -from nose.tools import assert_equal, assert_is_instance - from botocore.retries import standard from botocore.retries import quota from botocore import model @@ -150,43 +150,46 @@ SERVICE_DESCRIPTION_WITH_RETRIES = { }, } - -def test_can_detect_retryable_transient_errors(): +@pytest.mark.parametrize('case', RETRYABLE_TRANSIENT_ERRORS) +def test_can_detect_retryable_transient_errors(case): transient_checker = standard.TransientRetryableChecker() - for case in RETRYABLE_TRANSIENT_ERRORS: - yield (_verify_retryable, transient_checker, None) + case + _verify_retryable(transient_checker, None, *case) -def 
test_can_detect_retryable_throttled_errors(): +@pytest.mark.parametrize('case', RETRYABLE_THROTTLED_RESPONSES) +def test_can_detect_retryable_throttled_errors(case): throttled_checker = standard.ThrottledRetryableChecker() - for case in RETRYABLE_THROTTLED_RESPONSES: - yield (_verify_retryable, throttled_checker, None) + case + _verify_retryable(throttled_checker, None, *case) -def test_can_detect_modeled_retryable_errors(): +@pytest.mark.parametrize('case', RETRYABLE_MODELED_ERRORS) +def test_can_detect_modeled_retryable_errors(case): modeled_retry_checker = standard.ModeledRetryableChecker() - test_params = (_verify_retryable, modeled_retry_checker, - get_operation_model_with_retries()) - for case in RETRYABLE_MODELED_ERRORS: - test_case = test_params + case - yield test_case + _verify_retryable( + modeled_retry_checker, get_operation_model_with_retries(), *case + ) -def test_standard_retry_conditions(): - # This is verifying that the high level object used for checking - # retry conditions still handles all the individual testcases. +@pytest.mark.parametrize('case', + [ + case for case in + RETRYABLE_TRANSIENT_ERRORS + + RETRYABLE_THROTTLED_RESPONSES + + RETRYABLE_MODELED_ERRORS + if case[2] + ] +) +def test_standard_retry_conditions(case): + """This is verifying that the high level object used for checking + retry conditions still handles all the individual testcases. + + It's possible that cases that are retryable for an individual checker + aren't retryable for a different checker. We need to filter out all + the False cases (if case[2]). + """ standard_checker = standard.StandardRetryConditions() op_model = get_operation_model_with_retries() - all_cases = ( - RETRYABLE_TRANSIENT_ERRORS + RETRYABLE_THROTTLED_RESPONSES + - RETRYABLE_MODELED_ERRORS) - # It's possible that cases that are retryable for an individual checker - # are retryable for a different checker. We need to filter out all - # the False cases. 
- all_cases = [c for c in all_cases if c[2]] - test_params = (_verify_retryable, standard_checker, op_model) - for case in all_cases: - yield test_params + case + _verify_retryable(standard_checker, op_model, *case) def get_operation_model_with_retries(): @@ -213,7 +216,7 @@ def _verify_retryable(checker, operation_model, http_response=http_response, caught_exception=caught_exception, ) - assert_equal(checker.is_retryable(context), is_retryable) + assert checker.is_retryable(context) == is_retryable def arbitrary_retry_context(): @@ -233,36 +236,38 @@ def test_can_honor_max_attempts(): checker = standard.MaxAttemptsChecker(max_attempts=3) context = arbitrary_retry_context() context.attempt_number = 1 - assert_equal(checker.is_retryable(context), True) + assert checker.is_retryable(context) is True context.attempt_number = 2 - assert_equal(checker.is_retryable(context), True) + assert checker.is_retryable(context) is True context.attempt_number = 3 - assert_equal(checker.is_retryable(context), False) + assert checker.is_retryable(context) is False def test_max_attempts_adds_metadata_key_when_reached(): checker = standard.MaxAttemptsChecker(max_attempts=3) context = arbitrary_retry_context() context.attempt_number = 3 - assert_equal(checker.is_retryable(context), False) - assert_equal(context.get_retry_metadata(), {'MaxAttemptsReached': True}) + assert checker.is_retryable(context) is False + assert context.get_retry_metadata() == {'MaxAttemptsReached': True} def test_can_create_default_retry_handler(): mock_client = mock.Mock() mock_client.meta.service_model.service_id = model.ServiceId('my-service') - assert_is_instance(standard.register_retry_handler(mock_client), - standard.RetryHandler) + assert isinstance( + standard.register_retry_handler(mock_client), + standard.RetryHandler + ) call_args_list = mock_client.meta.events.register.call_args_list # We should have registered the retry quota to after-calls first_call = call_args_list[0][0] second_call = 
call_args_list[1][0] # Not sure if there's a way to verify the class associated with the # bound method matches what we expect. - assert_equal(first_call[0], 'after-call.my-service') - assert_equal(second_call[0], 'needs-retry.my-service') + assert first_call[0] == 'after-call.my-service' + assert second_call[0] == 'needs-retry.my-service' class TestRetryHandler(unittest.TestCase): diff --git a/tests/unit/test_compat.py b/tests/unit/test_compat.py index 04af7310..f86eed69 100644 --- a/tests/unit/test_compat.py +++ b/tests/unit/test_compat.py @@ -12,7 +12,7 @@ # language governing permissions and limitations under the License. import datetime -from nose.tools import assert_equal, assert_raises +import pytest from botocore.exceptions import MD5UnavailableError from botocore.compat import ( @@ -97,7 +97,13 @@ class TestGetMD5(unittest.TestCase): get_md5() -def test_compat_shell_split_windows(): +@pytest.fixture +def shell_split_runner(): + # Single runner fixture for all tests + return ShellSplitTestRunner() + + +def get_windows_test_cases(): windows_cases = { r'': [], r'spam \\': [r'spam', '\\\\'], @@ -120,14 +126,21 @@ def test_compat_shell_split_windows(): r'a\\\"b c d': [r'a\"b', r'c', r'd'], r'a\\\\"b c" d e': [r'a\\b c', r'd', r'e'] } - runner = ShellSplitTestRunner() - for input_string, expected_output in windows_cases.items(): - yield runner.assert_equal, input_string, expected_output, "win32" - - yield runner.assert_raises, r'"', ValueError, "win32" + return windows_cases.items() -def test_compat_shell_split_unix(): +@pytest.mark.parametrize("input_string, expected_output", get_windows_test_cases()) +def test_compat_shell_split_windows( + shell_split_runner, input_string, expected_output +): + shell_split_runner.assert_equal(input_string, expected_output, "win32") + + +def test_compat_shell_split_windows_raises_error(shell_split_runner): + shell_split_runner.assert_raises(r'"', ValueError, "win32") + + +def get_unix_test_cases(): unix_cases = { r'': [], 
r'spam \\': [r'spam', '\\'], @@ -150,21 +163,38 @@ def test_compat_shell_split_unix(): r'a\\\"b c d': [r'a\"b', r'c', r'd'], r'a\\\\"b c" d e': [r'a\\b c', r'd', r'e'] } - runner = ShellSplitTestRunner() - for input_string, expected_output in unix_cases.items(): - yield runner.assert_equal, input_string, expected_output, "linux2" - yield runner.assert_equal, input_string, expected_output, "darwin" + return unix_cases.items() - yield runner.assert_raises, r'"', ValueError, "linux2" - yield runner.assert_raises, r'"', ValueError, "darwin" + +@pytest.mark.parametrize("input_string, expected_output", get_unix_test_cases()) +def test_compat_shell_split_unix_linux2( + shell_split_runner, input_string, expected_output +): + shell_split_runner.assert_equal(input_string, expected_output, "linux2") + + +@pytest.mark.parametrize("input_string, expected_output", get_unix_test_cases()) +def test_compat_shell_split_unix_darwin( + shell_split_runner, input_string, expected_output +): + shell_split_runner.assert_equal(input_string, expected_output, "darwin") + + +def test_compat_shell_split_unix_linux2_raises_error(shell_split_runner): + shell_split_runner.assert_raises(r'"', ValueError, "linux2") + + +def test_compat_shell_split_unix_darwin_raises_error(shell_split_runner): + shell_split_runner.assert_raises(r'"', ValueError, "darwin") class ShellSplitTestRunner(object): def assert_equal(self, s, expected, platform): - assert_equal(compat_shell_split(s, platform), expected) + assert compat_shell_split(s, platform) == expected def assert_raises(self, s, exception_cls, platform): - assert_raises(exception_cls, compat_shell_split, s, platform) + with pytest.raises(exception_cls): + compat_shell_split(s, platform) class TestTimezoneOperations(unittest.TestCase): diff --git a/tests/unit/test_config_provider.py b/tests/unit/test_config_provider.py index 73471803..c3ccc141 100644 --- a/tests/unit/test_config_provider.py +++ b/tests/unit/test_config_provider.py @@ -10,9 +10,10 @@ # 
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. +import pytest + from tests import mock from tests import unittest -from nose.tools import assert_equal import botocore import botocore.session as session @@ -447,15 +448,12 @@ def assert_chain_does_provide(providers, expected_value): provider = ChainProvider( providers=providers, ) - value = provider.provide() - assert_equal(value, expected_value) + assert provider.provide() == expected_value -def test_chain_provider(): - # Each case is a tuple with the first element being the expected return - # value form the ChainProvider. The second value being a list of return - # values from the individual providers that are in the chain. - cases = [ +@pytest.mark.parametrize( + 'case', + ( (None, []), (None, [None]), ('foo', ['foo']), @@ -466,11 +464,16 @@ def test_chain_provider(): ('bar', [None, 'bar', None]), ('foo', ['foo', 'bar', None]), ('foo', ['foo', 'bar', 'baz']), - ] - for case in cases: - yield assert_chain_does_provide, \ - _make_providers_that_return(case[1]), \ - case[0] + ) +) +def test_chain_provider(case): + # Each case is a tuple with the first element being the expected return + # value from the ChainProvider. The second value being a list of return + # values from the individual providers that are in the chain. + assert_chain_does_provide( + _make_providers_that_return(case[1]), + case[0] + ) class TestChainProvider(unittest.TestCase): diff --git a/tests/unit/test_eventstream.py b/tests/unit/test_eventstream.py index 6d574468..678c9329 100644 --- a/tests/unit/test_eventstream.py +++ b/tests/unit/test_eventstream.py @@ -11,7 +11,7 @@ # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. """Unit tests for the binary event stream decoder. 
""" -from nose.tools import assert_equal, raises +import pytest from tests import mock @@ -240,18 +240,12 @@ NEGATIVE_CASES = [ def assert_message_equal(message_a, message_b): """Asserts all fields for two messages are equal. """ - assert_equal( - message_a.prelude.total_length, - message_b.prelude.total_length - ) - assert_equal( - message_a.prelude.headers_length, - message_b.prelude.headers_length - ) - assert_equal(message_a.prelude.crc, message_b.prelude.crc) - assert_equal(message_a.headers, message_b.headers) - assert_equal(message_a.payload, message_b.payload) - assert_equal(message_a.crc, message_b.crc) + assert message_a.prelude.total_length == message_b.prelude.total_length + assert message_a.prelude.headers_length == message_b.prelude.headers_length + assert message_a.prelude.crc == message_b.prelude.crc + assert message_a.headers == message_b.headers + assert message_a.payload == message_b.payload + assert message_a.crc == message_b.crc def test_partial_message(): @@ -262,7 +256,7 @@ def test_partial_message(): mid_point = 15 event_buffer.add_data(data[:mid_point]) messages = list(event_buffer) - assert_equal(messages, []) + assert messages == [] event_buffer.add_data(data[mid_point:len(data)]) for message in event_buffer: assert_message_equal(message, EMPTY_MESSAGE[1]) @@ -277,10 +271,10 @@ def check_message_decodes(encoded, decoded): assert_message_equal(messages[0], decoded) -def test_positive_cases(): +@pytest.mark.parametrize("encoded, decoded", POSITIVE_CASES) +def test_positive_cases(encoded, decoded): """Test that all positive cases decode how we expect. 
""" - for (encoded, decoded) in POSITIVE_CASES: - yield check_message_decodes, encoded, decoded + check_message_decodes(encoded, decoded) def test_all_positive_cases(): @@ -298,11 +292,11 @@ def test_all_positive_cases(): assert_message_equal(expected, decoded) -def test_negative_cases(): +@pytest.mark.parametrize("encoded, exception", NEGATIVE_CASES) +def test_negative_cases(encoded, exception): """Test that all negative cases raise the expected exception. """ - for (encoded, exception) in NEGATIVE_CASES: - test_function = raises(exception)(check_message_decodes) - yield test_function, encoded, None + with pytest.raises(exception): + check_message_decodes(encoded, None) def test_header_parser(): @@ -329,87 +323,88 @@ def test_header_parser(): parser = EventStreamHeaderParser() headers = parser.parse(headers_data) - assert_equal(headers, expected_headers) + assert headers == expected_headers def test_message_prelude_properties(): """Test that calculated properties from the payload are correct. 
""" # Total length: 40, Headers Length: 15, random crc prelude = MessagePrelude(40, 15, 0x00000000) - assert_equal(prelude.payload_length, 9) - assert_equal(prelude.headers_end, 27) - assert_equal(prelude.payload_end, 36) + assert prelude.payload_length == 9 + assert prelude.headers_end == 27 + assert prelude.payload_end == 36 def test_message_to_response_dict(): response_dict = PAYLOAD_ONE_STR_HEADER[1].to_response_dict() - assert_equal(response_dict['status_code'], 200) + assert response_dict['status_code'] ==200 + expected_headers = {'content-type': 'application/json'} - assert_equal(response_dict['headers'], expected_headers) - assert_equal(response_dict['body'], b"{'foo':'bar'}") + assert response_dict['headers'] == expected_headers + assert response_dict['body'] == b"{'foo':'bar'}" def test_message_to_response_dict_error(): response_dict = ERROR_EVENT_MESSAGE[1].to_response_dict() - assert_equal(response_dict['status_code'], 400) + assert response_dict['status_code'] == 400 headers = { ':message-type': 'error', ':error-code': 'code', ':error-message': 'message', } - assert_equal(response_dict['headers'], headers) - assert_equal(response_dict['body'], b'') + assert response_dict['headers'] == headers + assert response_dict['body'] == b'' def test_unpack_uint8(): (value, bytes_consumed) = DecodeUtils.unpack_uint8(b'\xDE') - assert_equal(bytes_consumed, 1) - assert_equal(value, 0xDE) + assert bytes_consumed == 1 + assert value == 0xDE def test_unpack_uint32(): (value, bytes_consumed) = DecodeUtils.unpack_uint32(b'\xDE\xAD\xBE\xEF') - assert_equal(bytes_consumed, 4) - assert_equal(value, 0xDEADBEEF) + assert bytes_consumed == 4 + assert value == 0xDEADBEEF def test_unpack_int8(): (value, bytes_consumed) = DecodeUtils.unpack_int8(b'\xFE') - assert_equal(bytes_consumed, 1) - assert_equal(value, -2) + assert bytes_consumed == 1 + assert value == -2 def test_unpack_int16(): (value, bytes_consumed) = DecodeUtils.unpack_int16(b'\xFF\xFE') - assert_equal(bytes_consumed, 
2) - assert_equal(value, -2) + assert bytes_consumed == 2 + assert value == -2 def test_unpack_int32(): (value, bytes_consumed) = DecodeUtils.unpack_int32(b'\xFF\xFF\xFF\xFE') - assert_equal(bytes_consumed, 4) - assert_equal(value, -2) + assert bytes_consumed == 4 + assert value == -2 def test_unpack_int64(): test_bytes = b'\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFE' (value, bytes_consumed) = DecodeUtils.unpack_int64(test_bytes) - assert_equal(bytes_consumed, 8) - assert_equal(value, -2) + assert bytes_consumed == 8 + assert value == -2 def test_unpack_array_short(): test_bytes = b'\x00\x10application/json' (value, bytes_consumed) = DecodeUtils.unpack_byte_array(test_bytes) - assert_equal(bytes_consumed, 18) - assert_equal(value, b'application/json') + assert bytes_consumed == 18 + assert value == b'application/json' def test_unpack_byte_array_int(): (value, array_bytes_consumed) = DecodeUtils.unpack_byte_array( b'\x00\x00\x00\x10application/json', length_byte_size=4) - assert_equal(array_bytes_consumed, 20) - assert_equal(value, b'application/json') + assert array_bytes_consumed == 20 + assert value == b'application/json' def test_unpack_utf8_string(): @@ -417,14 +412,14 @@ def test_unpack_utf8_string(): utf8_string = b'\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e' encoded = length + utf8_string (value, bytes_consumed) = DecodeUtils.unpack_utf8_string(encoded) - assert_equal(bytes_consumed, 11) - assert_equal(value, utf8_string.decode('utf-8')) + assert bytes_consumed == 11 + assert value == utf8_string.decode('utf-8') def test_unpack_prelude(): data = b'\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03' prelude = DecodeUtils.unpack_prelude(data) - assert_equal(prelude, ((1, 2, 3), 12)) + assert prelude == ((1, 2, 3), 12) def create_mock_raw_stream(*data): @@ -445,7 +440,7 @@ def test_event_stream_wrapper_iteration(): output_shape = mock.Mock() event_stream = EventStream(raw_stream, output_shape, parser, '') events = list(event_stream) - assert_equal(len(events), 1) + assert 
len(events) == 1 response_dict = { 'headers': {'event-id': 0x0000a00c}, @@ -455,14 +450,14 @@ def test_event_stream_wrapper_iteration(): parser.parse.assert_called_with(response_dict, output_shape) -@raises(EventStreamError) def test_eventstream_wrapper_iteration_error(): raw_stream = create_mock_raw_stream(ERROR_EVENT_MESSAGE[0]) parser = mock.Mock(spec=EventStreamXMLParser) parser.parse.return_value = {} output_shape = mock.Mock() event_stream = EventStream(raw_stream, output_shape, parser, '') - list(event_stream) + with pytest.raises(EventStreamError): + list(event_stream) def test_event_stream_wrapper_close(): @@ -492,7 +487,6 @@ def test_event_stream_initial_response(): assert event.payload == payload -@raises(NoInitialResponseError) def test_event_stream_initial_response_wrong_type(): raw_stream = create_mock_raw_stream( b"\x00\x00\x00+\x00\x00\x00\x0e4\x8b\xec{\x08event-id\x04\x00", @@ -501,13 +495,14 @@ def test_event_stream_initial_response_wrong_type(): parser = mock.Mock(spec=EventStreamXMLParser) output_shape = mock.Mock() event_stream = EventStream(raw_stream, output_shape, parser, '') - event_stream.get_initial_response() + with pytest.raises(NoInitialResponseError): + event_stream.get_initial_response() -@raises(NoInitialResponseError) def test_event_stream_initial_response_no_event(): raw_stream = create_mock_raw_stream(b'') parser = mock.Mock(spec=EventStreamXMLParser) output_shape = mock.Mock() event_stream = EventStream(raw_stream, output_shape, parser, '') - event_stream.get_initial_response() + with pytest.raises(NoInitialResponseError): + event_stream.get_initial_response() diff --git a/tests/unit/test_exceptions.py b/tests/unit/test_exceptions.py index e147697a..a81fbf85 100644 --- a/tests/unit/test_exceptions.py +++ b/tests/unit/test_exceptions.py @@ -14,8 +14,6 @@ import pickle from tests import unittest -from nose.tools import assert_equal - import botocore.awsrequest import botocore.session from botocore import exceptions @@ -24,7 +22,7 
@@ from botocore import exceptions def test_client_error_can_handle_missing_code_or_message(): response = {'Error': {}} expect = 'An error occurred (Unknown) when calling the blackhole operation: Unknown' - assert_equal(str(exceptions.ClientError(response, 'blackhole')), expect) + assert str(exceptions.ClientError(response, 'blackhole')) == expect def test_client_error_has_operation_name_set(): @@ -36,7 +34,7 @@ def test_client_error_has_operation_name_set(): def test_client_error_set_correct_operation_name(): response = {'Error': {}} exception = exceptions.ClientError(response, 'blackhole') - assert_equal(exception.operation_name, 'blackhole') + assert exception.operation_name == 'blackhole' def test_retry_info_added_when_present(): diff --git a/tests/unit/test_http_client_exception_mapping.py b/tests/unit/test_http_client_exception_mapping.py index 9eee38c4..0c8d168d 100644 --- a/tests/unit/test_http_client_exception_mapping.py +++ b/tests/unit/test_http_client_exception_mapping.py @@ -1,27 +1,21 @@ -from nose.tools import assert_raises +import pytest from botocore import exceptions as botocore_exceptions from botocore.vendored.requests import exceptions as requests_exceptions from botocore.vendored.requests.packages.urllib3 import exceptions as urllib3_exceptions -EXCEPTION_MAPPING = [ - (botocore_exceptions.ReadTimeoutError, requests_exceptions.ReadTimeout), - (botocore_exceptions.ReadTimeoutError, urllib3_exceptions.ReadTimeoutError), - (botocore_exceptions.ConnectTimeoutError, requests_exceptions.ConnectTimeout), - (botocore_exceptions.ProxyConnectionError, requests_exceptions.ProxyError), - (botocore_exceptions.SSLError, requests_exceptions.SSLError), -] - -def _raise_exception(exception): - raise exception(endpoint_url=None, proxy_url=None, error=None) - - -def _test_exception_mapping(new_exception, old_exception): +@pytest.mark.parametrize( + "new_exception, old_exception", + ( + (botocore_exceptions.ReadTimeoutError, requests_exceptions.ReadTimeout), + 
(botocore_exceptions.ReadTimeoutError, urllib3_exceptions.ReadTimeoutError), + (botocore_exceptions.ConnectTimeoutError, requests_exceptions.ConnectTimeout), + (botocore_exceptions.ProxyConnectionError, requests_exceptions.ProxyError), + (botocore_exceptions.SSLError, requests_exceptions.SSLError), + ), +) +def test_http_client_exception_mapping(new_exception, old_exception): # assert that the new exception can still be caught by the old vendored one - assert_raises(old_exception, _raise_exception, new_exception) - - -def test_http_client_exception_mapping(): - for new_exception, old_exception in EXCEPTION_MAPPING: - yield _test_exception_mapping, new_exception, old_exception + with pytest.raises(old_exception): + raise new_exception(endpoint_url=None, proxy_url=None, error=None) diff --git a/tests/unit/test_http_session.py b/tests/unit/test_http_session.py index 33cf62cd..92780522 100644 --- a/tests/unit/test_http_session.py +++ b/tests/unit/test_http_session.py @@ -1,9 +1,10 @@ import socket -from tests import mock, unittest -from nose.tools import raises +import pytest from urllib3.exceptions import NewConnectionError, ProtocolError +from tests import mock, unittest + from botocore.vendored import six from botocore.awsrequest import AWSRequest from botocore.awsrequest import AWSHTTPConnectionPool, AWSHTTPSConnectionPool @@ -389,15 +390,15 @@ class TestURLLib3Session(unittest.TestCase): session = URLLib3Session() session.send(self.request.prepare()) - @raises(EndpointConnectionError) def test_catches_new_connection_error(self): error = NewConnectionError(None, None) - self.make_request_with_error(error) + with pytest.raises(EndpointConnectionError): + self.make_request_with_error(error) - @raises(ConnectionClosedError) def test_catches_bad_status_line(self): error = ProtocolError(None) - self.make_request_with_error(error) + with pytest.raises(ConnectionClosedError): + self.make_request_with_error(error) def test_aws_connection_classes_are_used(self): session = 
URLLib3Session() diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py index 258906a6..0fedbab8 100644 --- a/tests/unit/test_model.py +++ b/tests/unit/test_model.py @@ -1,3 +1,5 @@ +import pytest + from tests import unittest from botocore import model @@ -5,30 +7,11 @@ from botocore.compat import OrderedDict from botocore.exceptions import MissingServiceIdError -def test_missing_model_attribute_raises_exception(): - # We're using a nose test generator here to cut down - # on the duplication. The property names below - # all have the same test logic. +@pytest.mark.parametrize("property_name", ['api_version', 'protocol']) +def test_missing_model_attribute_raises_exception(property_name): service_model = model.ServiceModel({'metadata': {'endpointPrefix': 'foo'}}) - property_names = ['api_version', 'protocol'] - - def _test_attribute_raise_exception(attr_name): - try: - getattr(service_model, attr_name) - except model.UndefinedModelAttributeError: - # This is what we expect, so the test passes. - pass - except Exception as e: - raise AssertionError("Expected UndefinedModelAttributeError to " - "be raised, but %s was raised instead" % - (e.__class__)) - else: - raise AssertionError( - "Expected UndefinedModelAttributeError to " - "be raised, but no exception was raised for: %s" % attr_name) - - for name in property_names: - yield _test_attribute_raise_exception, name + with pytest.raises(model.UndefinedModelAttributeError): + getattr(service_model, property_name) class TestServiceId(unittest.TestCase): diff --git a/tests/unit/test_parsers.py b/tests/unit/test_parsers.py index d2e14535..29ea92ad 100644 --- a/tests/unit/test_parsers.py +++ b/tests/unit/test_parsers.py @@ -12,9 +12,10 @@ # language governing permissions and limitations under the License. 
from tests import unittest, RawResponse import datetime +import itertools from dateutil.tz import tzutc -from nose.tools import assert_equal +import pytest from botocore import parsers from botocore import model @@ -392,7 +393,7 @@ class TestTaggedUnions(unittest.TestCase): 'unknown to client'), captured_log.records[0].getMessage()) - def test_base_json_parser_handles_unknwon_member(self): + def test_base_json_parser_handles_unknown_member(self): parser = parsers.JSONParser() response = b'{"Foo": "mystring"}' headers = {'x-amzn-requestid': 'request-id'} @@ -1424,27 +1425,26 @@ class TestParseErrorResponses(unittest.TestCase): # still populate an empty string. self.assertEqual(error['Message'], '') +def _generic_test_bodies(): + generic_html_body = ( + 'Http/1.1 Service Unavailable' + ).encode('utf-8') + empty_body = b'' + none_body = None -def test_can_handle_generic_error_message(): + return [generic_html_body, empty_body, none_body] + +@pytest.mark.parametrize("parser, body", + itertools.product( + parsers.PROTOCOL_PARSERS.values(), + _generic_test_bodies() + ), +) +def test_can_handle_generic_error_message(parser, body): # There are times when you can get a service to respond with a generic # html error page. We should be able to handle this case. - for parser_cls in parsers.PROTOCOL_PARSERS.values(): - generic_html_body = ( - 'Http/1.1 Service Unavailable' - ).encode('utf-8') - empty_body = b'' - none_body = None - yield _assert_parses_generic_error, parser_cls(), generic_html_body - yield _assert_parses_generic_error, parser_cls(), empty_body - yield _assert_parses_generic_error, parser_cls(), none_body - - -def _assert_parses_generic_error(parser, body): - # There are times when you can get a service to respond with a generic - # html error page. We should be able to handle this case. 
- parsed = parser.parse({ - 'body': body, 'headers': {}, 'status_code': 503}, None) - assert_equal( - parsed['Error'], - {'Code': '503', 'Message': 'Service Unavailable'}) - assert_equal(parsed['ResponseMetadata']['HTTPStatusCode'], 503) + parsed = parser().parse( + {'body': body, 'headers': {}, 'status_code': 503}, None + ) + assert parsed['Error'] == {'Code': '503', 'Message': 'Service Unavailable'} + assert parsed['ResponseMetadata']['HTTPStatusCode'] == 503 diff --git a/tests/unit/test_protocols.py b/tests/unit/test_protocols.py index f2c65a43..c33cfea9 100644 --- a/tests/unit/test_protocols.py +++ b/tests/unit/test_protocols.py @@ -16,8 +16,8 @@ This is a test runner for all the JSON tests defined in ``tests/unit/protocols/``, including both the input/output tests. -You can use the normal ``nosetests tests/unit/test_protocols.py`` to run -this test. In addition, there are several env vars you can use during +You can use the normal ``python -m pytest tests/unit/test_protocols.py`` +to run this test. In addition, there are several env vars you can use during development. Tests are broken down by filename, test suite, testcase. When a test fails @@ -37,25 +37,28 @@ failed test. To run tests from only a single file, you can set the BOTOCORE_TEST env var:: - BOTOCORE_TEST=tests/unit/compliance/input/json.json nosetests tests/unit/test_protocols.py + BOTOCORE_TEST=tests/unit/compliance/input/json.json pytest tests/unit/test_protocols.py To run a single test suite you can set the BOTOCORE_TEST_ID env var: BOTOCORE_TEST=tests/unit/compliance/input/json.json BOTOCORE_TEST_ID=5 \ - nosetests tests/unit/test_protocols.py + pytest tests/unit/test_protocols.py To run a single test case in a suite (useful when debugging a single test), you can set the BOTOCORE_TEST_ID env var with the ``suite_id:test_id`` syntax. 
- BOTOCORE_TEST_ID=5:1 nosetests test/unit/test_protocols.py + BOTOCORE_TEST_ID=5:1 pytest test/unit/test_protocols.py """ import os import copy +from enum import Enum from base64 import b64decode from dateutil.tz import tzutc +import pytest + from botocore.awsrequest import HeadersDict from botocore.compat import json, OrderedDict, urlsplit from botocore.eventstream import EventStream @@ -69,8 +72,6 @@ from botocore.awsrequest import prepare_request_dict from calendar import timegm from botocore.model import NoShapeFoundError -from nose.tools import assert_equal as _assert_equal - TEST_DIR = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'protocols') @@ -94,19 +95,34 @@ PROTOCOL_TEST_BLACKLIST = [ ] -def test_compliance(): +class TestType(Enum): + # Tell test runner to ignore this class + __test__ = False + + INPUT = "input" + OUTPUT = "output" + + +def _compliance_tests(test_type=None): + inp = test_type is None or test_type is TestType.INPUT + out = test_type is None or test_type is TestType.OUTPUT + for full_path in _walk_files(): if full_path.endswith('.json'): for model, case, basename in _load_cases(full_path): if model.get('description') in PROTOCOL_TEST_BLACKLIST: continue - if 'params' in case: - yield _test_input, model, case, basename - elif 'response' in case: - yield _test_output, model, case, basename + if 'params' in case and inp: + yield model, case, basename + elif 'response' in case and out: + yield model, case, basename -def _test_input(json_description, case, basename): +@pytest.mark.parametrize( + "json_description, case, basename", + _compliance_tests(TestType.INPUT) +) +def test_input_compliance(json_description, case, basename): service_description = copy.deepcopy(json_description) service_description['operations'] = { case.get('name', 'OperationName'): case, @@ -152,8 +168,11 @@ class MockRawResponse(object): def stream(self): yield self._data - -def _test_output(json_description, case, basename): +@pytest.mark.parametrize( + 
"json_description, case, basename", + _compliance_tests(TestType.OUTPUT) +) +def test_output_compliance(json_description, case, basename): service_description = copy.deepcopy(json_description) operation_name = case.get('name', 'OperationName') service_description['operations'] = { @@ -322,7 +341,7 @@ def assert_equal(first, second, prefix): # A better assert equals. It allows you to just provide # prefix instead of the entire message. try: - _assert_equal(first, second) + assert first == second except Exception: try: better = "%s (actual != expected)\n%s !=\n%s" % ( diff --git a/tests/unit/test_session.py b/tests/unit/test_session.py index 87879c2f..5431a069 100644 --- a/tests/unit/test_session.py +++ b/tests/unit/test_session.py @@ -19,6 +19,8 @@ import logging import tempfile import shutil +import pytest + import botocore.session import botocore.exceptions @@ -718,21 +720,29 @@ class TestSessionComponent(BaseSessionTest): self.assertIs( self.session._get_internal_component('internal'), component) with self.assertRaises(ValueError): - self.session.get_component('internal') + # get_component has been deprecated to the public + with pytest.warns(DeprecationWarning): + self.session.get_component('internal') def test_internal_endpoint_resolver_is_same_as_deprecated_public(self): endpoint_resolver = self.session._get_internal_component( 'endpoint_resolver') - self.assertIs( - self.session.get_component('endpoint_resolver'), endpoint_resolver) + # get_component has been deprecated to the public + with pytest.warns(DeprecationWarning): + self.assertIs( + self.session.get_component('endpoint_resolver'), + endpoint_resolver + ) def test_internal_exceptions_factory_is_same_as_deprecated_public(self): exceptions_factory = self.session._get_internal_component( 'exceptions_factory') - self.assertIs( - self.session.get_component('exceptions_factory'), - exceptions_factory - ) + # get_component has been deprecated to the public + with pytest.warns(DeprecationWarning): + 
self.assertIs( + self.session.get_component('exceptions_factory'), + exceptions_factory + ) class TestClientMonitoring(BaseSessionTest): diff --git a/tests/unit/test_session_legacy.py b/tests/unit/test_session_legacy.py index 4c685d99..0c7d5f25 100644 --- a/tests/unit/test_session_legacy.py +++ b/tests/unit/test_session_legacy.py @@ -19,6 +19,8 @@ import logging import tempfile import shutil +import pytest + import botocore.session import botocore.exceptions from botocore.model import ServiceModel @@ -702,21 +704,29 @@ class TestSessionComponent(BaseSessionTest): self.assertIs( self.session._get_internal_component('internal'), component) with self.assertRaises(ValueError): - self.session.get_component('internal') + # get_component has been deprecated to the public + with pytest.warns(DeprecationWarning): + self.session.get_component('internal') def test_internal_endpoint_resolver_is_same_as_deprecated_public(self): endpoint_resolver = self.session._get_internal_component( 'endpoint_resolver') - self.assertIs( - self.session.get_component('endpoint_resolver'), endpoint_resolver) + # get_component has been deprecated to the public + with pytest.warns(DeprecationWarning): + self.assertIs( + self.session.get_component('endpoint_resolver'), + endpoint_resolver + ) def test_internal_exceptions_factory_is_same_as_deprecated_public(self): exceptions_factory = self.session._get_internal_component( 'exceptions_factory') - self.assertIs( - self.session.get_component('exceptions_factory'), - exceptions_factory - ) + # get_component has been deprecated to the public + with pytest.warns(DeprecationWarning): + self.assertIs( + self.session.get_component('exceptions_factory'), + exceptions_factory + ) class TestComponentLocator(unittest.TestCase):