New upstream version 1.4.1

Eric Evans 2016-11-08 18:23:44 -06:00
parent 57f5c10fc5
commit 76cdadddd4
80 changed files with 7421 additions and 1496 deletions


@ -9,4 +9,4 @@
"description": "Supports low-level clients for most services",
"type": "feature"
}
]


@ -14,4 +14,4 @@
"description": "Update to Botocore 0.94.0",
"type": "feature"
}
]


@ -19,4 +19,4 @@
"description": "Update to Botocore 0.97.0",
"type": "feature"
}
]


@ -9,4 +9,4 @@
"description": "Update to Botocore 0.99.0",
"type": "feature"
}
]


@ -4,4 +4,4 @@
"description": "Update to Botocore 0.100.0.",
"type": "feature"
}
]


@ -14,4 +14,4 @@
"description": "Update to Botocore 0.102.0.",
"type": "feature"
}
]


@ -14,4 +14,4 @@
"description": "Update to Botocore 0.103.0.",
"type": "feature"
}
]


@ -9,4 +9,4 @@
"description": "Add resource model for Amazon DynamoDB.",
"type": "feature"
}
]


@ -4,4 +4,4 @@
"description": "Update to Botocore 0.107.0.",
"type": "feature"
}
]


@ -14,4 +14,4 @@
"description": "Add support for passing of ``botocore.client.Config`` object to instantiation of clients.",
"type": "feature"
}
]
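
For reference, the feature above lets a ``botocore.client.Config`` be passed straight to client instantiation. A minimal sketch, with the signature version and region as purely illustrative values:

    import boto3
    from botocore.client import Config

    # A Config passed at instantiation overrides session-level defaults
    # for this one client.
    s3 = boto3.client(
        's3',
        config=Config(signature_version='s3v4', region_name='us-east-1'),
    )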


@ -9,4 +9,4 @@
"description": "Add batch writer. (`issue 118 <https://github.com/boto/boto3/pull/118>`__)",
"type": "feature"
}
]
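
A minimal sketch of the batch writer referenced above (table name and item shape are made up):

    import boto3

    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('my-table')  # hypothetical table

    # batch_writer buffers put_item/delete_item calls and flushes them
    # in BatchWriteItem requests of up to 25 items, resending any
    # unprocessed items automatically.
    with table.batch_writer() as batch:
        for i in range(100):
            batch.put_item(Item={'pk': str(i), 'value': i})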


@ -19,4 +19,4 @@
"description": "Make requests with a customized HTTP user-agent",
"type": "feature"
}
]


@ -4,4 +4,4 @@
"description": "Update resource model. (`issue 129 <https://github.com/boto/boto3/pull/129>`__)",
"type": "feature"
}
]


@ -1,7 +1,7 @@
[
{
"category": "Installation",
"description": "Fix regression when installing via older versions of pip on python 2.6. (`issue 132 <https://github.com/boto/boto3/pull/132`__)",
"description": "Fix regression when installing via older versions of pip on python 2.6. (`issue 132 <https://github.com/boto/boto3/pull/132>`__)",
"type": "bugfix"
}
]


@ -9,4 +9,4 @@
"description": "``s3.Bucket.load`` (`issue 128 <https://github.com/boto/boto3/pull/128>`__)",
"type": "bugfix"
}
]


@ -4,4 +4,4 @@
"description": "Update to Botocore 0.76.0.",
"type": "feature"
}
]


@ -19,4 +19,4 @@
"description": "Fix an issue accessing SQS message bodies (`issue 33 <https://github.com/boto/boto3/issues/33>`__)",
"type": "bugfix"
}
]
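
The fixed accessor is the ``body`` attribute on received messages. A minimal sketch, with a made-up queue name:

    import boto3

    sqs = boto3.resource('sqs')
    queue = sqs.get_queue_by_name(QueueName='my-queue')  # hypothetical queue

    for message in queue.receive_messages(MaxNumberOfMessages=10):
        print(message.body)  # the raw message body as a string
        message.delete()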


@ -9,4 +9,4 @@
"description": "Update to Botocore 0.78.0",
"type": "feature"
}
]


@ -24,4 +24,4 @@
"description": "Update to Botocore 0.80.0",
"type": "feature"
}
]


@ -29,4 +29,4 @@
"description": "Update to Botocore 0.86.0",
"type": "feature"
}
]


@ -19,4 +19,4 @@
"description": "Update to Botocore 0.87.0",
"type": "feature"
}
]


@ -4,4 +4,4 @@
"description": "Update to Botocore 0.92.0",
"type": "feature"
}
]


@ -1,7 +1,7 @@
[
{
"category": "``EC2.Vpc.filter``",
"description": "Fix issue with clobbering of ``Filtering`` paramter. (`issue 154 `https://github.com/boto/boto3/pull/154`__)",
"description": "Fix issue with clobbering of ``Filtering`` paramter. (`issue 154 <https://github.com/boto/boto3/pull/154>`__)",
"type": "bugfix"
}
]


@ -4,4 +4,4 @@
"description": "Fix issue when creating multiple tags. (`issue 160 <https://github.com/boto/boto3/pull/160>`__)",
"type": "bugfix"
}
]


@ -1,7 +1,7 @@
[
{
"category": "``session.Session``",
"description": "Add ``events`` property to access session's event emitter. (`issue 204 <https://github.com/boto/boto3/pull/204`__)",
"description": "Add ``events`` property to access session's event emitter. (`issue 204 <https://github.com/boto/boto3/pull/204>`__)",
"type": "feature"
},
{
@ -14,4 +14,4 @@
"description": "Fix misspelling of error class to ``DynamoDBOperationNotSupportedError``. (`issue 218 <https://github.com/boto/boto3/pull/218>`__)",
"type": "bugfix"
}
]
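
The ``events`` property added above exposes the session's botocore event emitter. A sketch of registering a handler; the event name and header are illustrative:

    import boto3

    session = boto3.session.Session()

    def add_trace_header(request, **kwargs):
        # 'request-created' handlers receive the prepared request.
        request.headers['X-Trace-Id'] = 'example'

    # Fire for every S3 request created through this session.
    session.events.register('request-created.s3', add_trace_header)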


@ -9,4 +9,4 @@
"description": "Add support for customizable timeouts.",
"type": "feature"
}
]


@ -9,4 +9,4 @@
"description": "Both S3 Bucket and Object obtain upload_file() and download_file() (`issue 243 <https://github.com/boto/boto3/pull/243>`__)",
"type": "feature"
}
]
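
With this change the managed transfer methods hang off the resource objects as well as the client. A minimal sketch; bucket, keys, and paths are made up:

    import boto3

    s3 = boto3.resource('s3')
    bucket = s3.Bucket('my-bucket')  # hypothetical bucket

    bucket.upload_file('/tmp/report.csv', 'reports/report.csv')
    bucket.download_file('reports/report.csv', '/tmp/report-copy.csv')

    # Object offers the same pair, with the key already bound:
    obj = s3.Object('my-bucket', 'reports/report.csv')
    obj.download_file('/tmp/report-second-copy.csv')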


@ -19,4 +19,4 @@
"description": "Fix model issue where creating key pair does not have a ``key_material`` on ``KeyPair`` resource. (`issue 290 <https://github.com/boto/boto3/pull/290>`__)",
"type": "bugfix"
}
]


@ -4,4 +4,4 @@
"description": "Fix issue in formatting that broke PyPI distributable",
"type": "bugfix"
}
]


@ -14,4 +14,4 @@
"description": "Add docstrings for resource collections and waiters (`issue 267 <https://github.com/boto/boto3/pull/267>`__, `issue 261 <https://github.com/boto/boto3/pull/261>`__)",
"type": "feature"
}
]


@ -24,4 +24,4 @@
"description": "Progress callback will be triggered when rewinding stream. (`issue 395 <https://github.com/boto/boto3/pull/395>`__)",
"type": "bugfix"
}
]


@ -9,4 +9,4 @@
"description": "Fix issue with hanging downloads. (`issue 471 <https://github.com/boto/boto3/pull/471>`__)",
"type": "bugfix"
}
]


@ -4,4 +4,4 @@
"description": "Forward ``extra_args`` when using multipart downloads. (`issue 503 <https://github.com/boto/boto3/pull/503>`__)",
"type": "bugfix"
}
]


@ -4,4 +4,4 @@
"description": "Update resource model to include ``Route`` resources. (`issue 532 <https://github.com/boto/boto3/pull/532>`__)",
"type": "feature"
}
]


@ -19,4 +19,4 @@
"description": "Add get_available_subresources to Resources (`#113 <https://github.com/boto/boto3/issues/113>`__)",
"type": "feature"
}
]

.changes/1.4.0.json (new file)

@ -0,0 +1,37 @@
[
{
"category": "DynamoDB",
"description": "Add request auto de-duplication based on specified primary keys for batch_writer. (`#605 <https://github.com/boto/boto3/issues/605>`__)",
"type": "feature"
},
{
"category": "s3",
"description": "Add managed file-like object uploads to S3 client, Bucket, and Object.",
"type": "feature"
},
{
"category": "Session",
"description": "Fixed Session.__repr__ region argument name.",
"type": "bugfix"
},
{
"category": "s3",
"description": "Add managed copies to S3 client, Bucket, and Object.",
"type": "feature"
},
{
"category": "s3",
"description": "Add managed downloads to file-like objects in the S3 client, Bucket, and Object.",
"type": "feature"
},
{
"category": "s3",
"description": "Port ``s3.transfer`` module to use ``s3transfer`` package. Please refer to `Upgrading Notes <https://boto3.readthedocs.io/en/latest/guide/upgrading.html>`_ when upgrading. In porting the logic over, various performance issues and bugs were fixed.",
"type": "bugfix"
},
{
"category": "s3",
"description": "Add ``io_chunksize`` parameter to ``TransferConfig``",
"type": "feature"
}
]
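
Taken together, the S3 entries above add up to the following surface. A minimal sketch, with bucket names, keys, and the chunk size purely illustrative:

    import boto3
    from boto3.s3.transfer import TransferConfig

    s3 = boto3.client('s3')

    # Managed transfers to and from file-like objects:
    with open('/tmp/archive.bin', 'rb') as f:
        s3.upload_fileobj(f, 'my-bucket', 'archive.bin')
    with open('/tmp/archive-copy.bin', 'wb') as f:
        s3.download_fileobj('my-bucket', 'archive.bin', f)

    # Managed (multipart-capable) copy between buckets:
    s3.copy({'Bucket': 'my-bucket', 'Key': 'archive.bin'},
            'other-bucket', 'archive.bin')

    # The new io_chunksize knob on TransferConfig:
    config = TransferConfig(io_chunksize=256 * 1024)
    s3.upload_file('/tmp/archive.bin', 'my-bucket', 'archive.bin',
                   Config=config)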

.changes/1.4.1.json (new file)

@ -0,0 +1,17 @@
[
{
"category": "Session",
"description": "Expose available_profiles property for Session (``#704 <https://github.com/boto/boto3/issues/704>`__)",
"type": "feature"
},
{
"category": "s3",
"description": "Fix issue when transfers would not exit quickly from signals",
"type": "bugfix"
},
{
"category": "``sqs.Queue``",
"description": "Fix issue in DeadLetterSourceQueues collection",
"type": "bugfix"
}
]
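
Two of these entries are directly visible from user code. A minimal sketch; the queue name is made up:

    import boto3

    # available_profiles lists profile names from the shared credentials
    # and config files without creating any clients.
    session = boto3.session.Session()
    print(session.available_profiles)

    # The repaired DeadLetterSourceQueues collection on sqs.Queue:
    sqs = boto3.resource('sqs')
    dlq = sqs.get_queue_by_name(QueueName='my-dlq')  # hypothetical queue
    for source in dlq.dead_letter_source_queues.all():
        print(source.url)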


@ -2,6 +2,26 @@
CHANGELOG
=========
1.4.1
=====
* feature:Session: Expose available_profiles property for Session (``#704 <https://github.com/boto/boto3/issues/704>`__)
* bugfix:s3: Fix issue when transfers would not exit quickly from signals
* bugfix:``sqs.Queue``: Fix issue in DeadLetterSourceQueues collection
1.4.0
=====
* feature:DynamoDB: Add request auto de-duplication based on specified primary keys for batch_writer. (`#605 <https://github.com/boto/boto3/issues/605>`__)
* feature:s3: Add managed file-like object uploads to S3 client, Bucket, and Object.
* bugfix:Session: Fixed Session.__repr__ region argument name.
* feature:s3: Add managed copies to S3 client, Bucket, and Object.
* feature:s3: Add managed downloads to file-like objects in the S3 client, Bucket, and Object.
* bugfix:s3: Port ``s3.transfer`` module to use ``s3transfer`` package. Please refer to `Upgrading Notes <https://boto3.readthedocs.io/en/latest/guide/upgrading.html>`_ when upgrading. In porting the logic over, various performance issues and bugs were fixed.
* feature:s3: Add ``io_chunksize`` parameter to ``TransferConfig``
1.3.1
=====
@ -80,7 +100,7 @@ CHANGELOG
1.1.2
=====
-* feature:``session.Session``: Add ``events`` property to access session's event emitter. (`issue 204 <https://github.com/boto/boto3/pull/204`__)
+* feature:``session.Session``: Add ``events`` property to access session's event emitter. (`issue 204 <https://github.com/boto/boto3/pull/204>`__)
* bugfix:``Glacier.Account``: Fix issue with resource model. (`issue 196 <https://github.com/boto/boto3/pull/196>`__)
* bugfix:``DynamoDB``: Fix misspelling of error class to ``DynamoDBOperationNotSupportedError``. (`issue 218 <https://github.com/boto/boto3/pull/218>`__)
@ -94,7 +114,7 @@ CHANGELOG
1.1.0
=====
-* bugfix:``EC2.Vpc.filter``: Fix issue with clobbering of ``Filtering`` paramter. (`issue 154 `https://github.com/boto/boto3/pull/154`__)
+* bugfix:``EC2.Vpc.filter``: Fix issue with clobbering of ``Filtering`` paramter. (`issue 154 <https://github.com/boto/boto3/pull/154>`__)
0.0.22
@ -107,7 +127,7 @@ CHANGELOG
0.0.21
======
-* bugfix:Installation: Fix regression when installing via older versions of pip on python 2.6. (`issue 132 <https://github.com/boto/boto3/pull/132`__)
+* bugfix:Installation: Fix regression when installing via older versions of pip on python 2.6. (`issue 132 <https://github.com/boto/boto3/pull/132>`__)
0.0.20


@ -1,5 +1,5 @@
For more information, please see the official docs at
-http://boto3.readthedocs.org/
+https://boto3.readthedocs.io/
Contributing Code
-----------------


@ -8,16 +8,18 @@ Boto3 is the Amazon Web Services (AWS) Software Development Kit (SDK) for
Python, which allows Python developers to write software that makes use
of services like Amazon S3 and Amazon EC2. You can find the latest, most
up to date, documentation at `Read the Docs`_, including a list of
-services that are supported.
+services that are supported. To see only those features which have been
+released, check out the `stable docs`_.
.. _boto: https://docs.pythonboto.org/
-.. _`Read the Docs`: https://boto3.readthedocs.org/en/latest/
+.. _`stable docs`: https://boto3.readthedocs.io/en/stable/
+.. _`Read the Docs`: https://boto3.readthedocs.io/en/latest/
.. |Build Status| image:: http://img.shields.io/travis/boto/boto3/develop.svg?style=flat
:target: https://travis-ci.org/boto/boto3
:alt: Build Status
.. |Docs| image:: https://readthedocs.org/projects/boto3/badge/?version=latest&style=flat
-:target: https://boto3.readthedocs.org/en/latest/
+:target: https://boto3.readthedocs.io/en/latest/
:alt: Read the docs
.. |Downloads| image:: http://img.shields.io/pypi/dm/boto3.svg?style=flat
:target: https://pypi.python.org/pypi/boto3/


@ -17,12 +17,13 @@ from boto3.session import Session
__author__ = 'Amazon Web Services'
-__version__ = '1.3.1'
+__version__ = '1.4.1'
# The default Boto3 session; autoloaded when needed.
DEFAULT_SESSION = None
def setup_default_session(**kwargs):
"""
Set up a default session, passing through any parameters to the session
@ -32,6 +33,7 @@ def setup_default_session(**kwargs):
global DEFAULT_SESSION
DEFAULT_SESSION = Session(**kwargs)
def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None):
"""
Add a stream handler for the given name and level to the logging module.
@ -58,6 +60,7 @@ def set_stream_logger(name='boto3', level=logging.DEBUG, format_string=None):
handler.setFormatter(formatter)
logger.addHandler(handler)
def _get_default_session():
"""
Get the default session, creating one if needed.
@ -70,6 +73,7 @@ def _get_default_session():
return DEFAULT_SESSION
def client(*args, **kwargs):
"""
Create a low-level service client by name using the default session.
@ -78,6 +82,7 @@ def client(*args, **kwargs):
"""
return _get_default_session().client(*args, **kwargs)
def resource(*args, **kwargs):
"""
Create a resource service client by name using the default session.
@ -86,6 +91,7 @@ def resource(*args, **kwargs):
"""
return _get_default_session().resource(*args, **kwargs)
# Set up logging to ``/dev/null`` like a library is supposed to.
# http://docs.python.org/3.3/howto/logging.html#configuring-logging-for-a-library
class NullHandler(logging.Handler):
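
The module-level helpers in this file all delegate to a lazily created default ``Session``, roughly as follows:

    import logging
    import boto3

    # Stream debug-style logging for a boto3 sub-namespace to stdout.
    boto3.set_stream_logger('boto3.resources', logging.INFO)

    # Both calls create the default session on first use, then forward
    # to Session.client / Session.resource.
    s3 = boto3.client('s3')
    ec2 = boto3.resource('ec2')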


@ -36,7 +36,8 @@
"type": "Stack",
"identifiers": [
{ "target": "Name", "source": "response", "path": "Stacks[].StackName" }
-]
+],
+"path": "Stacks[]"
}
}
}
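
The added ``"path": "Stacks[]"`` points the resource definition at the response payload, so ``Stack`` objects built from it come back pre-populated from ``DescribeStacks`` rather than needing a separate ``load()``. A sketch of the visible effect, assuming the usual collection:

    import boto3

    cloudformation = boto3.resource('cloudformation')

    # Each Stack already carries its describe data, so the attribute
    # access below does not trigger an extra call per stack.
    for stack in cloudformation.stacks.all():
        print(stack.name, stack.stack_status)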


@ -0,0 +1,2567 @@
{
"service": {
"actions": {
"CreateDhcpOptions": {
"request": { "operation": "CreateDhcpOptions" },
"resource": {
"type": "DhcpOptions",
"identifiers": [
{ "target": "Id", "source": "response", "path": "DhcpOptions.DhcpOptionsId" }
],
"path": "DhcpOptions"
}
},
"CreateInstances": {
"request": { "operation": "RunInstances" },
"resource": {
"type": "Instance",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Instances[].InstanceId" }
],
"path": "Instances[]"
}
},
"CreateInternetGateway": {
"request": { "operation": "CreateInternetGateway" },
"resource": {
"type": "InternetGateway",
"identifiers": [
{ "target": "Id", "source": "response", "path": "InternetGateway.InternetGatewayId" }
],
"path": "InternetGateway"
}
},
"CreateKeyPair": {
"request": { "operation": "CreateKeyPair" },
"resource": {
"type": "KeyPair",
"identifiers": [
{ "target": "Name", "source": "response", "path": "KeyName" }
],
"path": "@"
}
},
"CreateNetworkAcl": {
"request": { "operation": "CreateNetworkAcl" },
"resource": {
"type": "NetworkAcl",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkAcl.NetworkAclId" }
],
"path": "NetworkAcl"
}
},
"CreateNetworkInterface": {
"request": { "operation": "CreateNetworkInterface" },
"resource": {
"type": "NetworkInterface",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkInterface.NetworkInterfaceId" }
],
"path": "NetworkInterface"
}
},
"CreatePlacementGroup": {
"request": { "operation": "CreatePlacementGroup" },
"resource": {
"type": "PlacementGroup",
"identifiers": [
{ "target": "Name", "source": "requestParameter", "path": "GroupName" }
]
}
},
"CreateRouteTable": {
"request": { "operation": "CreateRouteTable" },
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "response", "path": "RouteTable.RouteTableId" }
],
"path": "RouteTable"
}
},
"CreateSecurityGroup": {
"request": { "operation": "CreateSecurityGroup" },
"resource": {
"type": "SecurityGroup",
"identifiers": [
{ "target": "Id", "source": "response", "path": "GroupId" }
]
}
},
"CreateSnapshot": {
"request": { "operation": "CreateSnapshot" },
"resource": {
"type": "Snapshot",
"identifiers": [
{ "target": "Id", "source": "response", "path": "SnapshotId" }
],
"path": "@"
}
},
"CreateSubnet": {
"request": { "operation": "CreateSubnet" },
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Subnet.SubnetId" }
],
"path": "Subnet"
}
},
"CreateTags": {
"request": { "operation": "CreateTags" }
},
"CreateVolume": {
"request": { "operation": "CreateVolume" },
"resource": {
"type": "Volume",
"identifiers": [
{ "target": "Id", "source": "response", "path": "VolumeId" }
],
"path": "@"
}
},
"CreateVpc": {
"request": { "operation": "CreateVpc" },
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Vpc.VpcId" }
],
"path": "Vpc"
}
},
"CreateVpcPeeringConnection": {
"request": { "operation": "CreateVpcPeeringConnection" },
"resource": {
"type": "VpcPeeringConnection",
"identifiers": [
{ "target": "Id", "source": "response", "path": "VpcPeeringConnection.VpcPeeringConnectionId" }
],
"path": "VpcPeeringConnection"
}
},
"DisassociateRouteTable": {
"request": { "operation": "DisassociateRouteTable" }
},
"ImportKeyPair": {
"request": { "operation": "ImportKeyPair" },
"resource": {
"type": "KeyPairInfo",
"identifiers": [
{ "target": "Name", "source": "response", "path": "KeyName" }
]
}
},
"RegisterImage": {
"request": { "operation": "RegisterImage" },
"resource": {
"type": "Image",
"identifiers": [
{ "target": "Id", "source": "response", "path": "ImageId" }
]
}
}
},
"has": {
"DhcpOptions": {
"resource": {
"type": "DhcpOptions",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"Image": {
"resource": {
"type": "Image",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"Instance": {
"resource": {
"type": "Instance",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"InternetGateway": {
"resource": {
"type": "InternetGateway",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"KeyPair": {
"resource": {
"type": "KeyPairInfo",
"identifiers": [
{ "target": "Name", "source": "input" }
]
}
},
"NetworkAcl": {
"resource": {
"type": "NetworkAcl",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"NetworkInterface": {
"resource": {
"type": "NetworkInterface",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"PlacementGroup": {
"resource": {
"type": "PlacementGroup",
"identifiers": [
{ "target": "Name", "source": "input" }
]
}
},
"RouteTable": {
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"RouteTableAssociation": {
"resource": {
"type": "RouteTableAssociation",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"SecurityGroup": {
"resource": {
"type": "SecurityGroup",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"Snapshot": {
"resource": {
"type": "Snapshot",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"Subnet": {
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"Volume": {
"resource": {
"type": "Volume",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"Vpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"VpcPeeringConnection": {
"resource": {
"type": "VpcPeeringConnection",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
}
},
"hasMany": {
"ClassicAddresses": {
"request": {
"operation": "DescribeAddresses",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "domain" },
{ "target": "Filters[0].Values[0]", "source": "string", "value": "standard" }
]
},
"resource": {
"type": "ClassicAddress",
"identifiers": [
{ "target": "PublicIp", "source": "response", "path": "Addresses[].PublicIp" }
],
"path": "Addresses[]"
}
},
"DhcpOptionsSets": {
"request": { "operation": "DescribeDhcpOptions" },
"resource": {
"type": "DhcpOptions",
"identifiers": [
{ "target": "Id", "source": "response", "path": "DhcpOptions[].DhcpOptionsId" }
],
"path": "DhcpOptions[]"
}
},
"Images": {
"request": { "operation": "DescribeImages" },
"resource": {
"type": "Image",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Images[].ImageId" }
],
"path": "Images[]"
}
},
"Instances": {
"request": { "operation": "DescribeInstances" },
"resource": {
"type": "Instance",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Reservations[].Instances[].InstanceId" }
],
"path": "Reservations[].Instances[]"
}
},
"InternetGateways": {
"request": { "operation": "DescribeInternetGateways" },
"resource": {
"type": "InternetGateway",
"identifiers": [
{ "target": "Id", "source": "response", "path": "InternetGateways[].InternetGatewayId" }
],
"path": "InternetGateways[]"
}
},
"KeyPairs": {
"request": { "operation": "DescribeKeyPairs" },
"resource": {
"type": "KeyPairInfo",
"identifiers": [
{ "target": "Name", "source": "response", "path": "KeyPairs[].KeyName" }
],
"path": "KeyPairs[]"
}
},
"NetworkAcls": {
"request": { "operation": "DescribeNetworkAcls" },
"resource": {
"type": "NetworkAcl",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkAcls[].NetworkAclId" }
],
"path": "NetworkAcls[]"
}
},
"NetworkInterfaces": {
"request": { "operation": "DescribeNetworkInterfaces" },
"resource": {
"type": "NetworkInterface",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkInterfaces[].NetworkInterfaceId" }
],
"path": "NetworkInterfaces[]"
}
},
"PlacementGroups": {
"request": { "operation": "DescribePlacementGroups" },
"resource": {
"type": "PlacementGroup",
"identifiers": [
{ "target": "Name", "source": "response", "path": "PlacementGroups[].GroupName" }
],
"path": "PlacementGroups[]"
}
},
"RouteTables": {
"request": { "operation": "DescribeRouteTables" },
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "response", "path": "RouteTables[].RouteTableId" }
],
"path": "RouteTables[]"
}
},
"SecurityGroups": {
"request": { "operation": "DescribeSecurityGroups" },
"resource": {
"type": "SecurityGroup",
"identifiers": [
{ "target": "Id", "source": "response", "path": "SecurityGroups[].GroupId" }
],
"path": "SecurityGroups[]"
}
},
"Snapshots": {
"request": { "operation": "DescribeSnapshots" },
"resource": {
"type": "Snapshot",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Snapshots[].SnapshotId" }
],
"path": "Snapshots[]"
}
},
"Subnets": {
"request": { "operation": "DescribeSubnets" },
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Subnets[].SubnetId" }
],
"path": "Subnets[]"
}
},
"Volumes": {
"request": { "operation": "DescribeVolumes" },
"resource": {
"type": "Volume",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Volumes[].VolumeId" }
],
"path": "Volumes[]"
}
},
"VpcAddresses": {
"request": {
"operation": "DescribeAddresses",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "domain" },
{ "target": "Filters[0].Values[0]", "source": "string", "value": "vpc" }
]
},
"resource": {
"type": "VpcAddress",
"identifiers": [
{ "target": "AllocationId", "source": "response", "path": "Addresses[].AllocationId" }
],
"path": "Addresses[]"
}
},
"VpcPeeringConnections": {
"request": { "operation": "DescribeVpcPeeringConnections" },
"resource": {
"type": "VpcPeeringConnection",
"identifiers": [
{ "target": "Id", "source": "response", "path": "VpcPeeringConnections[].VpcPeeringConnectionId" }
],
"path": "VpcPeeringConnections[]"
}
},
"Vpcs": {
"request": { "operation": "DescribeVpcs" },
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Vpcs[].VpcId" }
],
"path": "Vpcs[]"
}
}
}
},
"resources": {
"ClassicAddress": {
"identifiers": [
{
"name": "PublicIp"
}
],
"shape": "Address",
"load": {
"request": {
"operation": "DescribeAddresses",
"params": [
{ "target": "PublicIps[]", "source": "identifier", "name": "PublicIp" }
]
},
"path": "Addresses[0]"
},
"actions": {
"Associate": {
"request": {
"operation": "AssociateAddress",
"params": [
{ "target": "PublicIp", "source": "identifier", "name": "PublicIp" }
]
}
},
"Disassociate": {
"request": {
"operation": "DisassociateAddress",
"params": [
{ "target": "PublicIp", "source": "data", "path": "PublicIp" }
]
}
},
"Release": {
"request": {
"operation": "ReleaseAddress",
"params": [
{ "target": "PublicIp", "source": "data", "path": "PublicIp" }
]
}
}
}
},
"DhcpOptions": {
"identifiers": [
{
"name": "Id",
"memberName": "DhcpOptionsId"
}
],
"shape": "DhcpOptions",
"load": {
"request": {
"operation": "DescribeDhcpOptions",
"params": [
{ "target": "DhcpOptionsIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "DhcpOptions[0]"
},
"actions": {
"AssociateWithVpc": {
"request": {
"operation": "AssociateDhcpOptions",
"params": [
{ "target": "DhcpOptionsId", "source": "identifier", "name": "Id" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteDhcpOptions",
"params": [
{ "target": "DhcpOptionsId", "source": "identifier", "name": "Id" }
]
}
}
}
},
"Image": {
"identifiers": [
{
"name": "Id",
"memberName": "ImageId"
}
],
"shape": "Image",
"load": {
"request": {
"operation": "DescribeImages",
"params": [
{ "target": "ImageIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "Images[0]"
},
"actions": {
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Deregister": {
"request": {
"operation": "DeregisterImage",
"params": [
{ "target": "ImageId", "source": "identifier", "name": "Id" }
]
}
},
"DescribeAttribute": {
"request": {
"operation": "DescribeImageAttribute",
"params": [
{ "target": "ImageId", "source": "identifier", "name": "Id" }
]
}
},
"ModifyAttribute": {
"request": {
"operation": "ModifyImageAttribute",
"params": [
{ "target": "ImageId", "source": "identifier", "name": "Id" }
]
}
},
"ResetAttribute": {
"request": {
"operation": "ResetImageAttribute",
"params": [
{ "target": "ImageId", "source": "identifier", "name": "Id" }
]
}
}
}
},
"Instance": {
"identifiers": [
{
"name": "Id",
"memberName": "InstanceId"
}
],
"shape": "Instance",
"load": {
"request": {
"operation": "DescribeInstances",
"params": [
{ "target": "InstanceIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "Reservations[0].Instances[0]"
},
"actions": {
"AttachClassicLinkVpc": {
"request": {
"operation": "AttachClassicLinkVpc",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"AttachVolume": {
"request": {
"operation": "AttachVolume",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"ConsoleOutput": {
"request": {
"operation": "GetConsoleOutput",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"CreateImage": {
"request": {
"operation": "CreateImage",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Image",
"identifiers": [
{ "target": "Id", "source": "response", "path": "ImageId" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"DescribeAttribute": {
"request": {
"operation": "DescribeInstanceAttribute",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"DetachClassicLinkVpc": {
"request": {
"operation": "DetachClassicLinkVpc",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"DetachVolume": {
"request": {
"operation": "DetachVolume",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"ModifyAttribute": {
"request": {
"operation": "ModifyInstanceAttribute",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"Monitor": {
"request": {
"operation": "MonitorInstances",
"params": [
{ "target": "InstanceIds[0]", "source": "identifier", "name": "Id" }
]
}
},
"PasswordData": {
"request": {
"operation": "GetPasswordData",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"Reboot": {
"request": {
"operation": "RebootInstances",
"params": [
{ "target": "InstanceIds[0]", "source": "identifier", "name": "Id" }
]
}
},
"ReportStatus": {
"request": {
"operation": "ReportInstanceStatus",
"params": [
{ "target": "Instances[0]", "source": "identifier", "name": "Id" }
]
}
},
"ResetAttribute": {
"request": {
"operation": "ResetInstanceAttribute",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"ResetKernel": {
"request": {
"operation": "ResetInstanceAttribute",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" },
{ "target": "Attribute", "source": "string", "value": "kernel" }
]
}
},
"ResetRamdisk": {
"request": {
"operation": "ResetInstanceAttribute",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" },
{ "target": "Attribute", "source": "string", "value": "ramdisk" }
]
}
},
"ResetSourceDestCheck": {
"request": {
"operation": "ResetInstanceAttribute",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" },
{ "target": "Attribute", "source": "string", "value": "sourceDestCheck" }
]
}
},
"Start": {
"request": {
"operation": "StartInstances",
"params": [
{ "target": "InstanceIds[0]", "source": "identifier", "name": "Id" }
]
}
},
"Stop": {
"request": {
"operation": "StopInstances",
"params": [
{ "target": "InstanceIds[0]", "source": "identifier", "name": "Id" }
]
}
},
"Terminate": {
"request": {
"operation": "TerminateInstances",
"params": [
{ "target": "InstanceIds[0]", "source": "identifier", "name": "Id" }
]
}
},
"Unmonitor": {
"request": {
"operation": "UnmonitorInstances",
"params": [
{ "target": "InstanceIds[0]", "source": "identifier", "name": "Id" }
]
}
}
},
"batchActions": {
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[]", "source": "identifier", "name": "Id" }
]
}
},
"Monitor": {
"request": {
"operation": "MonitorInstances",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
]
}
},
"Reboot": {
"request": {
"operation": "RebootInstances",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
]
}
},
"Start": {
"request": {
"operation": "StartInstances",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
]
}
},
"Stop": {
"request": {
"operation": "StopInstances",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
]
}
},
"Terminate": {
"request": {
"operation": "TerminateInstances",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
]
}
},
"Unmonitor": {
"request": {
"operation": "UnmonitorInstances",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
]
}
}
},
"waiters": {
"Exists": {
"waiterName": "InstanceExists",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
],
"path": "Reservations[0].Instances[0]"
},
"Running": {
"waiterName": "InstanceRunning",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
],
"path": "Reservations[0].Instances[0]"
},
"Stopped": {
"waiterName": "InstanceStopped",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
],
"path": "Reservations[0].Instances[0]"
},
"Terminated": {
"waiterName": "InstanceTerminated",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
],
"path": "Reservations[0].Instances[0]"
}
},
"has": {
"ClassicAddress": {
"resource": {
"type": "ClassicAddress",
"identifiers": [
{ "target": "PublicIp", "source": "data", "path": "PublicIpAddress" }
]
}
},
"Image": {
"resource": {
"type": "Image",
"identifiers": [
{ "target": "Id", "source": "data", "path": "ImageId" }
]
}
},
"KeyPair": {
"resource": {
"type": "KeyPairInfo",
"identifiers": [
{ "target": "Name", "source": "data", "path": "KeyName" }
]
}
},
"NetworkInterfaces": {
"resource": {
"type": "NetworkInterface",
"identifiers": [
{ "target": "Id", "source": "data", "path": "NetworkInterfaces[].NetworkInterfaceId" }
],
"path": "NetworkInterfaces[]"
}
},
"PlacementGroup": {
"resource": {
"type": "PlacementGroup",
"identifiers": [
{ "target": "Name", "source": "data", "path": "Placement.GroupName" }
]
}
},
"Subnet": {
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "data", "path": "SubnetId" }
]
}
},
"Vpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "data", "path": "VpcId" }
]
}
}
},
"hasMany": {
"Volumes": {
"request": {
"operation": "DescribeVolumes",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "attachment.instance-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Volume",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Volumes[].VolumeId" }
],
"path": "Volumes[]"
}
},
"VpcAddresses": {
"request": {
"operation": "DescribeAddresses",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "instance-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "VpcAddress",
"identifiers": [
{ "target": "AllocationId", "source": "response", "path": "Addresses[].AllocationId" }
],
"path": "Addresses[]"
}
}
}
},
"InternetGateway": {
"identifiers": [
{
"name": "Id",
"memberName": "InternetGatewayId"
}
],
"shape": "InternetGateway",
"load": {
"request": {
"operation": "DescribeInternetGateways",
"params": [
{ "target": "InternetGatewayIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "InternetGateways[0]"
},
"actions": {
"AttachToVpc": {
"request": {
"operation": "AttachInternetGateway",
"params": [
{ "target": "InternetGatewayId", "source": "identifier", "name": "Id" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteInternetGateway",
"params": [
{ "target": "InternetGatewayId", "source": "identifier", "name": "Id" }
]
}
},
"DetachFromVpc": {
"request": {
"operation": "DetachInternetGateway",
"params": [
{ "target": "InternetGatewayId", "source": "identifier", "name": "Id" }
]
}
}
}
},
"KeyPair": {
"identifiers": [
{
"name": "Name",
"memberName": "KeyName"
}
],
"shape": "KeyPair",
"actions": {
"Delete": {
"request": {
"operation": "DeleteKeyPair",
"params": [
{ "target": "KeyName", "source": "identifier", "name": "Name" }
]
}
}
}
},
"KeyPairInfo": {
"identifiers": [
{
"name": "Name",
"memberName": "KeyName"
}
],
"shape": "KeyPairInfo",
"load": {
"request": {
"operation": "DescribeKeyPairs",
"params": [
{ "target": "KeyNames[0]", "source": "identifier", "name": "Name" }
]
},
"path": "KeyPairs[0]"
},
"actions": {
"Delete": {
"request": {
"operation": "DeleteKeyPair",
"params": [
{ "target": "KeyName", "source": "identifier", "name": "Name" }
]
}
}
}
},
"NetworkAcl": {
"identifiers": [
{
"name": "Id",
"memberName": "NetworkAclId"
}
],
"shape": "NetworkAcl",
"load": {
"request": {
"operation": "DescribeNetworkAcls",
"params": [
{ "target": "NetworkAclIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "NetworkAcls[0]"
},
"actions": {
"CreateEntry": {
"request": {
"operation": "CreateNetworkAclEntry",
"params": [
{ "target": "NetworkAclId", "source": "identifier", "name": "Id" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteNetworkAcl",
"params": [
{ "target": "NetworkAclId", "source": "identifier", "name": "Id" }
]
}
},
"DeleteEntry": {
"request": {
"operation": "DeleteNetworkAclEntry",
"params": [
{ "target": "NetworkAclId", "source": "identifier", "name": "Id" }
]
}
},
"ReplaceAssociation": {
"request": {
"operation": "ReplaceNetworkAclAssociation",
"params": [
{ "target": "NetworkAclId", "source": "identifier", "name": "Id" }
]
}
},
"ReplaceEntry": {
"request": {
"operation": "ReplaceNetworkAclEntry",
"params": [
{ "target": "NetworkAclId", "source": "identifier", "name": "Id" }
]
}
}
},
"has": {
"Vpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "data", "path": "VpcId" }
]
}
}
}
},
"NetworkInterface": {
"identifiers": [
{
"name": "Id",
"memberName": "NetworkInterfaceId"
}
],
"shape": "NetworkInterface",
"load": {
"request": {
"operation": "DescribeNetworkInterfaces",
"params": [
{ "target": "NetworkInterfaceIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "NetworkInterfaces[0]"
},
"actions": {
"AssignPrivateIpAddresses": {
"request": {
"operation": "AssignPrivateIpAddresses",
"params": [
{ "target": "NetworkInterfaceId", "source": "identifier", "name": "Id" }
]
}
},
"Attach": {
"request": {
"operation": "AttachNetworkInterface",
"params": [
{ "target": "NetworkInterfaceId", "source": "identifier", "name": "Id" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteNetworkInterface",
"params": [
{ "target": "NetworkInterfaceId", "source": "identifier", "name": "Id" }
]
}
},
"DescribeAttribute": {
"request": {
"operation": "DescribeNetworkInterfaceAttribute",
"params": [
{ "target": "NetworkInterfaceId", "source": "identifier", "name": "Id" }
]
}
},
"Detach": {
"request": {
"operation": "DetachNetworkInterface",
"params": [
{ "target": "AttachmentId", "source": "data", "path": "Attachment.AttachmentId" }
]
}
},
"ModifyAttribute": {
"request": {
"operation": "ModifyNetworkInterfaceAttribute",
"params": [
{ "target": "NetworkInterfaceId", "source": "identifier", "name": "Id" }
]
}
},
"ResetAttribute": {
"request": {
"operation": "ResetNetworkInterfaceAttribute",
"params": [
{ "target": "NetworkInterfaceId", "source": "identifier", "name": "Id" }
]
}
},
"UnassignPrivateIpAddresses": {
"request": {
"operation": "UnassignPrivateIpAddresses",
"params": [
{ "target": "NetworkInterfaceId", "source": "identifier", "name": "Id" }
]
}
}
},
"has": {
"Association": {
"resource": {
"type": "NetworkInterfaceAssociation",
"identifiers": [
{ "target": "Id", "source": "data", "path": "Association.AssociationId" }
],
"path": "Association"
}
},
"Subnet": {
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "data", "path": "SubnetId" }
]
}
},
"Vpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "data", "path": "VpcId" }
]
}
}
}
},
"NetworkInterfaceAssociation": {
"identifiers": [
{
"name": "Id"
}
],
"shape": "InstanceNetworkInterfaceAssociation",
"load": {
"request": {
"operation": "DescribeNetworkInterfaces",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "association.association-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"path": "NetworkInterfaces[0].Association"
},
"actions": {
"Delete": {
"request": {
"operation": "DisassociateAddress",
"params": [
{ "target": "AssociationId", "source": "identifier", "name": "Id" }
]
}
}
},
"has": {
"Address": {
"resource": {
"type": "VpcAddress",
"identifiers": [
{ "target": "AllocationId", "source": "data", "path": "AllocationId" }
]
}
}
}
},
"PlacementGroup": {
"identifiers": [
{
"name": "Name",
"memberName": "GroupName"
}
],
"shape": "PlacementGroup",
"load": {
"request": {
"operation": "DescribePlacementGroups",
"params": [
{ "target": "GroupNames[0]", "source": "identifier", "name": "Name" }
]
},
"path": "PlacementGroups[0]"
},
"actions": {
"Delete": {
"request": {
"operation": "DeletePlacementGroup",
"params": [
{ "target": "GroupName", "source": "identifier", "name": "Name" }
]
}
}
},
"hasMany": {
"Instances": {
"request": {
"operation": "DescribeInstances",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "placement-group-name" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Name" }
]
},
"resource": {
"type": "Instance",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Reservations[].Instances[].InstanceId" }
],
"path": "Reservations[].Instances[]"
}
}
}
},
"Route": {
"identifiers": [
{ "name": "RouteTableId" },
{
"name": "DestinationCidrBlock",
"memberName": "DestinationCidrBlock"
}
],
"shape": "Route",
"actions": {
"Delete": {
"request": {
"operation": "DeleteRoute",
"params": [
{ "target": "RouteTableId", "source": "identifier", "name": "RouteTableId" },
{ "target": "DestinationCidrBlock", "source": "identifier", "name": "DestinationCidrBlock" }
]
}
},
"Replace": {
"request": {
"operation": "ReplaceRoute",
"params": [
{ "target": "RouteTableId", "source": "identifier", "name": "RouteTableId" },
{ "target": "DestinationCidrBlock", "source": "identifier", "name": "DestinationCidrBlock" }
]
}
}
},
"has": {
"RouteTable": {
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "identifier", "name": "RouteTableId" }
]
}
}
}
},
"RouteTable": {
"identifiers": [
{
"name": "Id",
"memberName": "RouteTableId"
}
],
"shape": "RouteTable",
"load": {
"request": {
"operation": "DescribeRouteTables",
"params": [
{ "target": "RouteTableIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "RouteTables[0]"
},
"actions": {
"AssociateWithSubnet": {
"request": {
"operation": "AssociateRouteTable",
"params": [
{ "target": "RouteTableId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "RouteTableAssociation",
"identifiers": [
{ "target": "Id", "source": "response", "path": "AssociationId" }
]
}
},
"CreateRoute": {
"request": {
"operation": "CreateRoute",
"params": [
{ "target": "RouteTableId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Route",
"identifiers": [
{ "target": "RouteTableId", "source": "identifier", "name": "Id" },
{ "target": "DestinationCidrBlock", "source": "requestParameter", "path": "DestinationCidrBlock" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteRouteTable",
"params": [
{ "target": "RouteTableId", "source": "identifier", "name": "Id" }
]
}
}
},
"has": {
"Routes": {
"resource": {
"type": "Route",
"identifiers": [
{ "target": "RouteTableId", "source": "identifier", "name": "Id" },
{ "target": "DestinationCidrBlock", "source": "data", "path": "Routes[].DestinationCidrBlock" }
],
"path": "Routes[]"
}
},
"Vpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "data", "path": "VpcId" }
]
}
}
},
"hasMany": {
"Associations": {
"request": {
"operation": "DescribeRouteTables",
"params": [
{ "target": "RouteTableIds[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "RouteTableAssociation",
"identifiers": [
{ "target": "Id", "source": "response", "path": "RouteTables[0].Associations[].RouteTableAssociationId" }
],
"path": "RouteTables[0].Associations[]"
}
}
}
},
"RouteTableAssociation": {
"identifiers": [
{
"name": "Id",
"memberName": "RouteTableAssociationId"
}
],
"shape": "RouteTableAssociation",
"actions": {
"Delete": {
"request": {
"operation": "DisassociateRouteTable",
"params": [
{ "target": "AssociationId", "source": "identifier", "name": "Id" }
]
}
},
"ReplaceSubnet": {
"request": {
"operation": "ReplaceRouteTableAssociation",
"params": [
{ "target": "AssociationId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "RouteTableAssociation",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NewAssociationId" }
]
}
}
},
"has": {
"RouteTable": {
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "data", "path": "RouteTableId" }
]
}
},
"Subnet": {
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "data", "path": "SubnetId" }
]
}
}
}
},
"SecurityGroup": {
"identifiers": [
{
"name": "Id",
"memberName": "GroupId"
}
],
"shape": "SecurityGroup",
"load": {
"request": {
"operation": "DescribeSecurityGroups",
"params": [
{ "target": "GroupIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "SecurityGroups[0]"
},
"actions": {
"AuthorizeEgress": {
"request": {
"operation": "AuthorizeSecurityGroupEgress",
"params": [
{ "target": "GroupId", "source": "identifier", "name": "Id" }
]
}
},
"AuthorizeIngress": {
"request": {
"operation": "AuthorizeSecurityGroupIngress",
"params": [
{ "target": "GroupId", "source": "identifier", "name": "Id" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteSecurityGroup",
"params": [
{ "target": "GroupId", "source": "identifier", "name": "Id" }
]
}
},
"RevokeEgress": {
"request": {
"operation": "RevokeSecurityGroupEgress",
"params": [
{ "target": "GroupId", "source": "identifier", "name": "Id" }
]
}
},
"RevokeIngress": {
"request": {
"operation": "RevokeSecurityGroupIngress",
"params": [
{ "target": "GroupId", "source": "identifier", "name": "Id" }
]
}
}
}
},
"Snapshot": {
"identifiers": [
{
"name": "Id",
"memberName": "SnapshotId"
}
],
"shape": "Snapshot",
"load": {
"request": {
"operation": "DescribeSnapshots",
"params": [
{ "target": "SnapshotIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "Snapshots[0]"
},
"actions": {
"Copy": {
"request": {
"operation": "CopySnapshot",
"params": [
{ "target": "SourceSnapshotId", "source": "identifier", "name": "Id" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteSnapshot",
"params": [
{ "target": "SnapshotId", "source": "identifier", "name": "Id" }
]
}
},
"DescribeAttribute": {
"request": {
"operation": "DescribeSnapshotAttribute",
"params": [
{ "target": "SnapshotId", "source": "identifier", "name": "Id" }
]
}
},
"ModifyAttribute": {
"request": {
"operation": "ModifySnapshotAttribute",
"params": [
{ "target": "SnapshotId", "source": "identifier", "name": "Id" }
]
}
},
"ResetAttribute": {
"request": {
"operation": "ResetSnapshotAttribute",
"params": [
{ "target": "SnapshotId", "source": "identifier", "name": "Id" }
]
}
}
},
"waiters": {
"Completed": {
"waiterName": "SnapshotCompleted",
"params": [
{ "target": "SnapshotIds[]", "source": "identifier", "name": "Id" }
],
"path": "Snapshots[]"
}
},
"has": {
"Volume": {
"resource": {
"type": "Volume",
"identifiers": [
{ "target": "Id", "source": "data", "path": "VolumeId" }
]
}
}
}
},
"Subnet": {
"identifiers": [
{
"name": "Id",
"memberName": "SubnetId"
}
],
"shape": "Subnet",
"load": {
"request": {
"operation": "DescribeSubnets",
"params": [
{ "target": "SubnetIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "Subnets[0]"
},
"actions": {
"CreateInstances": {
"request": {
"operation": "RunInstances",
"params": [
{ "target": "SubnetId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Instance",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Instances[].InstanceId" }
],
"path": "Instances[]"
}
},
"CreateNetworkInterface": {
"request": {
"operation": "CreateNetworkInterface",
"params": [
{ "target": "SubnetId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "NetworkInterface",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkInterface.NetworkInterfaceId" }
],
"path": "NetworkInterface"
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteSubnet",
"params": [
{ "target": "SubnetId", "source": "identifier", "name": "Id" }
]
}
}
},
"has": {
"Vpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "data", "path": "VpcId" }
]
}
}
},
"hasMany": {
"Instances": {
"request": {
"operation": "DescribeInstances",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "subnet-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Instance",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Reservations[].Instances[].InstanceId" }
],
"path": "Reservations[].Instances[]"
}
},
"NetworkInterfaces": {
"request": {
"operation": "DescribeNetworkInterfaces",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "subnet-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "NetworkInterface",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkInterfaces[].NetworkInterfaceId" }
],
"path": "NetworkInterfaces[]"
}
}
}
},
"Tag": {
"identifiers": [
{
"name": "ResourceId",
"memberName": "ResourceId"
},
{
"name": "Key",
"memberName": "Key"
},
{
"name": "Value",
"memberName": "Value"
}
],
"shape": "TagDescription",
"load": {
"request": {
"operation": "DescribeTags",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "key" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Key" },
{ "target": "Filters[1].Name", "source": "string", "value": "value" },
{ "target": "Filters[1].Values[0]", "source": "identifier", "name": "Value" }
]
},
"path": "Tags[0]"
},
"actions": {
"Delete": {
"request": {
"operation": "DeleteTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "ResourceId" },
{ "target": "Tags[0].Key", "source": "identifier", "name": "Key" },
{ "target": "Tags[0].Value", "source": "identifier", "name": "Value" }
]
}
}
},
"batchActions": {
"Delete": {
"request": {
"operation": "DeleteTags",
"params": [
{ "target": "Resources[]", "source": "identifier", "name": "ResourceId" },
{ "target": "Tags[*].Key", "source": "identifier", "name": "Key" },
{ "target": "Tags[*].Value", "source": "identifier", "name": "Value" }
]
}
}
}
},
"Volume": {
"identifiers": [
{
"name": "Id",
"memberName": "VolumeId"
}
],
"shape": "Volume",
"load": {
"request": {
"operation": "DescribeVolumes",
"params": [
{ "target": "VolumeIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "Volumes[0]"
},
"actions": {
"AttachToInstance": {
"request": {
"operation": "AttachVolume",
"params": [
{ "target": "VolumeId", "source": "identifier", "name": "Id" }
]
}
},
"CreateSnapshot": {
"request": {
"operation": "CreateSnapshot",
"params": [
{ "target": "VolumeId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Snapshot",
"identifiers": [
{ "target": "Id", "source": "response", "path": "SnapshotId" }
],
"path": "@"
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteVolume",
"params": [
{ "target": "VolumeId", "source": "identifier", "name": "Id" }
]
}
},
"DescribeAttribute": {
"request": {
"operation": "DescribeVolumeAttribute",
"params": [
{ "target": "VolumeId", "source": "identifier", "name": "Id" }
]
}
},
"DescribeStatus": {
"request": {
"operation": "DescribeVolumeStatus",
"params": [
{ "target": "VolumeIds[0]", "source": "identifier", "name": "Id" }
]
}
},
"DetachFromInstance": {
"request": {
"operation": "DetachVolume",
"params": [
{ "target": "VolumeId", "source": "identifier", "name": "Id" }
]
}
},
"EnableIo": {
"request": {
"operation": "EnableVolumeIO",
"params": [
{ "target": "VolumeId", "source": "identifier", "name": "Id" }
]
}
},
"ModifyAttribute": {
"request": {
"operation": "ModifyVolumeAttribute",
"params": [
{ "target": "VolumeId", "source": "identifier", "name": "Id" }
]
}
}
},
"hasMany": {
"Snapshots": {
"request": {
"operation": "DescribeSnapshots",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "volume-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Snapshot",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Snapshots[].SnapshotId" }
],
"path": "Snapshots[]"
}
}
}
},
"Vpc": {
"identifiers": [
{
"name": "Id",
"memberName": "VpcId"
}
],
"shape": "Vpc",
"load": {
"request": {
"operation": "DescribeVpcs",
"params": [
{ "target": "VpcIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "Vpcs[0]"
},
"actions": {
"AssociateDhcpOptions": {
"request": {
"operation": "AssociateDhcpOptions",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"AttachClassicLinkInstance": {
"request": {
"operation": "AttachClassicLinkVpc",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"AttachInternetGateway": {
"request": {
"operation": "AttachInternetGateway",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"CreateNetworkAcl": {
"request": {
"operation": "CreateNetworkAcl",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "NetworkAcl",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkAcl.NetworkAclId" }
],
"path": "NetworkAcl"
}
},
"CreateRouteTable": {
"request": {
"operation": "CreateRouteTable",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "response", "path": "RouteTable.RouteTableId" }
],
"path": "RouteTable"
}
},
"CreateSecurityGroup": {
"request": {
"operation": "CreateSecurityGroup",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "SecurityGroup",
"identifiers": [
{ "target": "Id", "source": "response", "path": "GroupId" }
]
}
},
"CreateSubnet": {
"request": {
"operation": "CreateSubnet",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Subnet.SubnetId" }
],
"path": "Subnet"
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteVpc",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"DescribeAttribute": {
"request": {
"operation": "DescribeVpcAttribute",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"DetachClassicLinkInstance": {
"request": {
"operation": "DetachClassicLinkVpc",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"DetachInternetGateway": {
"request": {
"operation": "DetachInternetGateway",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"DisableClassicLink": {
"request": {
"operation": "DisableVpcClassicLink",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"EnableClassicLink": {
"request": {
"operation": "EnableVpcClassicLink",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"ModifyAttribute": {
"request": {
"operation": "ModifyVpcAttribute",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"RequestVpcPeeringConnection": {
"request": {
"operation": "CreateVpcPeeringConnection",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "VpcPeeringConnection",
"identifiers": [
{ "target": "Id", "source": "response", "path": "VpcPeeringConnection.VpcPeeringConnectionId" }
],
"path": "VpcPeeringConnection"
}
}
},
"has": {
"DhcpOptions": {
"resource": {
"type": "DhcpOptions",
"identifiers": [
{ "target": "Id", "source": "data", "path": "DhcpOptionsId" }
]
}
}
},
"hasMany": {
"AcceptedVpcPeeringConnections": {
"request": {
"operation": "DescribeVpcPeeringConnections",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "accepter-vpc-info.vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "VpcPeeringConnection",
"identifiers": [
{ "target": "Id", "source": "response", "path": "VpcPeeringConnections[].VpcPeeringConnectionId" }
],
"path": "VpcPeeringConnections[]"
}
},
"Instances": {
"request": {
"operation": "DescribeInstances",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Instance",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Reservations[].Instances[].InstanceId" }
],
"path": "Reservations[].Instances[]"
}
},
"InternetGateways": {
"request": {
"operation": "DescribeInternetGateways",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "attachment.vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "InternetGateway",
"identifiers": [
{ "target": "Id", "source": "response", "path": "InternetGateways[].InternetGatewayId" }
],
"path": "InternetGateways[]"
}
},
"NetworkAcls": {
"request": {
"operation": "DescribeNetworkAcls",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "NetworkAcl",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkAcls[].NetworkAclId" }
],
"path": "NetworkAcls[]"
}
},
"NetworkInterfaces": {
"request": {
"operation": "DescribeNetworkInterfaces",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "NetworkInterface",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkInterfaces[].NetworkInterfaceId" }
],
"path": "NetworkInterfaces[]"
}
},
"RequestedVpcPeeringConnections": {
"request": {
"operation": "DescribeVpcPeeringConnections",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "requester-vpc-info.vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "VpcPeeringConnection",
"identifiers": [
{ "target": "Id", "source": "response", "path": "VpcPeeringConnections[].VpcPeeringConnectionId" }
],
"path": "VpcPeeringConnections[]"
}
},
"RouteTables": {
"request": {
"operation": "DescribeRouteTables",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "response", "path": "RouteTables[].RouteTableId" }
],
"path": "RouteTables[]"
}
},
"SecurityGroups": {
"request": {
"operation": "DescribeSecurityGroups",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "SecurityGroup",
"identifiers": [
{ "target": "Id", "source": "response", "path": "SecurityGroups[].GroupId" }
],
"path": "SecurityGroups[]"
}
},
"Subnets": {
"request": {
"operation": "DescribeSubnets",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Subnets[].SubnetId" }
],
"path": "Subnets[]"
}
}
}
},
"VpcPeeringConnection": {
"identifiers": [
{
"name": "Id",
"memberName": "VpcPeeringConnectionId"
}
],
"shape": "VpcPeeringConnection",
"load": {
"request": {
"operation": "DescribeVpcPeeringConnections",
"params": [
{ "target": "VpcPeeringConnectionIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "VpcPeeringConnections[0]"
},
"actions": {
"Accept": {
"request": {
"operation": "AcceptVpcPeeringConnection",
"params": [
{ "target": "VpcPeeringConnectionId", "source": "identifier", "name": "Id" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteVpcPeeringConnection",
"params": [
{ "target": "VpcPeeringConnectionId", "source": "identifier", "name": "Id" }
]
}
},
"Reject": {
"request": {
"operation": "RejectVpcPeeringConnection",
"params": [
{ "target": "VpcPeeringConnectionId", "source": "identifier", "name": "Id" }
]
}
}
},
"waiters": {
"Exists": {
"waiterName": "VpcPeeringConnectionExists",
"params": [
{ "target": "VpcPeeringConnectionIds[]", "source": "identifier", "name": "Id" }
],
"path": "VpcPeeringConnections[0]"
}
},
"has": {
"AccepterVpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "data", "path": "AccepterVpcInfo.VpcId" }
]
}
},
"RequesterVpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "data", "path": "RequesterVpcInfo.VpcId" }
]
}
}
}
},
"VpcAddress": {
"identifiers": [
{
"name": "AllocationId"
}
],
"shape": "Address",
"load": {
"request": {
"operation": "DescribeAddresses",
"params": [
{ "target": "AllocationIds[0]", "source": "identifier", "name": "AllocationId" }
]
},
"path": "Addresses[0]"
},
"actions": {
"Associate": {
"request": {
"operation": "AssociateAddress",
"params": [
{ "target": "AllocationId", "source": "identifier", "name": "AllocationId" }
]
}
},
"Release": {
"request": {
"operation": "ReleaseAddress",
"params": [
{ "target": "AllocationId", "source": "data", "path": "AllocationId" }
]
}
}
},
"has": {
"Association": {
"resource": {
"type": "NetworkInterfaceAssociation",
"identifiers": [
{ "target": "Id", "source": "data", "path": "AssociationId" }
]
}
}
}
}
}
}
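Taken together, the definitions above are consumed by boto3's resource factory: each entry under "actions" becomes a method on the generated class, and each "has" reference becomes a lazily-built attribute. A minimal sketch of how the VpcPeeringConnection and VpcAddress models at the end of this file surface in Python (all ids are hypothetical placeholders):

import boto3

ec2 = boto3.resource('ec2')

# The "Accept" action maps to AcceptVpcPeeringConnection, injecting the
# resource's Id identifier as the VpcPeeringConnectionId parameter.
pcx = ec2.VpcPeeringConnection('pcx-11111111')  # hypothetical id
pcx.accept()

# "has" references resolve through the data paths above
# (AccepterVpcInfo.VpcId / RequesterVpcInfo.VpcId) into Vpc resources.
print(pcx.requester_vpc.id, pcx.accepter_vpc.id)

# VpcAddress is keyed by AllocationId; "Associate" maps to AssociateAddress.
addr = ec2.VpcAddress('eipalloc-22222222')  # hypothetical id
addr.associate(InstanceId='i-33333333')     # hypothetical id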

View file

@@ -0,0 +1,2567 @@
{
"service": {
"actions": {
"CreateDhcpOptions": {
"request": { "operation": "CreateDhcpOptions" },
"resource": {
"type": "DhcpOptions",
"identifiers": [
{ "target": "Id", "source": "response", "path": "DhcpOptions.DhcpOptionsId" }
],
"path": "DhcpOptions"
}
},
"CreateInstances": {
"request": { "operation": "RunInstances" },
"resource": {
"type": "Instance",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Instances[].InstanceId" }
],
"path": "Instances[]"
}
},
"CreateInternetGateway": {
"request": { "operation": "CreateInternetGateway" },
"resource": {
"type": "InternetGateway",
"identifiers": [
{ "target": "Id", "source": "response", "path": "InternetGateway.InternetGatewayId" }
],
"path": "InternetGateway"
}
},
"CreateKeyPair": {
"request": { "operation": "CreateKeyPair" },
"resource": {
"type": "KeyPair",
"identifiers": [
{ "target": "Name", "source": "response", "path": "KeyName" }
],
"path": "@"
}
},
"CreateNetworkAcl": {
"request": { "operation": "CreateNetworkAcl" },
"resource": {
"type": "NetworkAcl",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkAcl.NetworkAclId" }
],
"path": "NetworkAcl"
}
},
"CreateNetworkInterface": {
"request": { "operation": "CreateNetworkInterface" },
"resource": {
"type": "NetworkInterface",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkInterface.NetworkInterfaceId" }
],
"path": "NetworkInterface"
}
},
"CreatePlacementGroup": {
"request": { "operation": "CreatePlacementGroup" },
"resource": {
"type": "PlacementGroup",
"identifiers": [
{ "target": "Name", "source": "requestParameter", "path": "GroupName" }
]
}
},
"CreateRouteTable": {
"request": { "operation": "CreateRouteTable" },
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "response", "path": "RouteTable.RouteTableId" }
],
"path": "RouteTable"
}
},
"CreateSecurityGroup": {
"request": { "operation": "CreateSecurityGroup" },
"resource": {
"type": "SecurityGroup",
"identifiers": [
{ "target": "Id", "source": "response", "path": "GroupId" }
]
}
},
"CreateSnapshot": {
"request": { "operation": "CreateSnapshot" },
"resource": {
"type": "Snapshot",
"identifiers": [
{ "target": "Id", "source": "response", "path": "SnapshotId" }
],
"path": "@"
}
},
"CreateSubnet": {
"request": { "operation": "CreateSubnet" },
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Subnet.SubnetId" }
],
"path": "Subnet"
}
},
"CreateTags": {
"request": { "operation": "CreateTags" }
},
"CreateVolume": {
"request": { "operation": "CreateVolume" },
"resource": {
"type": "Volume",
"identifiers": [
{ "target": "Id", "source": "response", "path": "VolumeId" }
],
"path": "@"
}
},
"CreateVpc": {
"request": { "operation": "CreateVpc" },
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Vpc.VpcId" }
],
"path": "Vpc"
}
},
"CreateVpcPeeringConnection": {
"request": { "operation": "CreateVpcPeeringConnection" },
"resource": {
"type": "VpcPeeringConnection",
"identifiers": [
{ "target": "Id", "source": "response", "path": "VpcPeeringConnection.VpcPeeringConnectionId" }
],
"path": "VpcPeeringConnection"
}
},
"DisassociateRouteTable": {
"request": { "operation": "DisassociateRouteTable" }
},
"ImportKeyPair": {
"request": { "operation": "ImportKeyPair" },
"resource": {
"type": "KeyPairInfo",
"identifiers": [
{ "target": "Name", "source": "response", "path": "KeyName" }
]
}
},
"RegisterImage": {
"request": { "operation": "RegisterImage" },
"resource": {
"type": "Image",
"identifiers": [
{ "target": "Id", "source": "response", "path": "ImageId" }
]
}
}
},
"has": {
"DhcpOptions": {
"resource": {
"type": "DhcpOptions",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"Image": {
"resource": {
"type": "Image",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"Instance": {
"resource": {
"type": "Instance",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"InternetGateway": {
"resource": {
"type": "InternetGateway",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"KeyPair": {
"resource": {
"type": "KeyPairInfo",
"identifiers": [
{ "target": "Name", "source": "input" }
]
}
},
"NetworkAcl": {
"resource": {
"type": "NetworkAcl",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"NetworkInterface": {
"resource": {
"type": "NetworkInterface",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"PlacementGroup": {
"resource": {
"type": "PlacementGroup",
"identifiers": [
{ "target": "Name", "source": "input" }
]
}
},
"RouteTable": {
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"RouteTableAssociation": {
"resource": {
"type": "RouteTableAssociation",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"SecurityGroup": {
"resource": {
"type": "SecurityGroup",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"Snapshot": {
"resource": {
"type": "Snapshot",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"Subnet": {
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"Volume": {
"resource": {
"type": "Volume",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"Vpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
},
"VpcPeeringConnection": {
"resource": {
"type": "VpcPeeringConnection",
"identifiers": [
{ "target": "Id", "source": "input" }
]
}
}
},
"hasMany": {
"ClassicAddresses": {
"request": {
"operation": "DescribeAddresses",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "domain" },
{ "target": "Filters[0].Values[0]", "source": "string", "value": "standard" }
]
},
"resource": {
"type": "ClassicAddress",
"identifiers": [
{ "target": "PublicIp", "source": "response", "path": "Addresses[].PublicIp" }
],
"path": "Addresses[]"
}
},
"DhcpOptionsSets": {
"request": { "operation": "DescribeDhcpOptions" },
"resource": {
"type": "DhcpOptions",
"identifiers": [
{ "target": "Id", "source": "response", "path": "DhcpOptions[].DhcpOptionsId" }
],
"path": "DhcpOptions[]"
}
},
"Images": {
"request": { "operation": "DescribeImages" },
"resource": {
"type": "Image",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Images[].ImageId" }
],
"path": "Images[]"
}
},
"Instances": {
"request": { "operation": "DescribeInstances" },
"resource": {
"type": "Instance",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Reservations[].Instances[].InstanceId" }
],
"path": "Reservations[].Instances[]"
}
},
"InternetGateways": {
"request": { "operation": "DescribeInternetGateways" },
"resource": {
"type": "InternetGateway",
"identifiers": [
{ "target": "Id", "source": "response", "path": "InternetGateways[].InternetGatewayId" }
],
"path": "InternetGateways[]"
}
},
"KeyPairs": {
"request": { "operation": "DescribeKeyPairs" },
"resource": {
"type": "KeyPairInfo",
"identifiers": [
{ "target": "Name", "source": "response", "path": "KeyPairs[].KeyName" }
],
"path": "KeyPairs[]"
}
},
"NetworkAcls": {
"request": { "operation": "DescribeNetworkAcls" },
"resource": {
"type": "NetworkAcl",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkAcls[].NetworkAclId" }
],
"path": "NetworkAcls[]"
}
},
"NetworkInterfaces": {
"request": { "operation": "DescribeNetworkInterfaces" },
"resource": {
"type": "NetworkInterface",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkInterfaces[].NetworkInterfaceId" }
],
"path": "NetworkInterfaces[]"
}
},
"PlacementGroups": {
"request": { "operation": "DescribePlacementGroups" },
"resource": {
"type": "PlacementGroup",
"identifiers": [
{ "target": "Name", "source": "response", "path": "PlacementGroups[].GroupName" }
],
"path": "PlacementGroups[]"
}
},
"RouteTables": {
"request": { "operation": "DescribeRouteTables" },
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "response", "path": "RouteTables[].RouteTableId" }
],
"path": "RouteTables[]"
}
},
"SecurityGroups": {
"request": { "operation": "DescribeSecurityGroups" },
"resource": {
"type": "SecurityGroup",
"identifiers": [
{ "target": "Id", "source": "response", "path": "SecurityGroups[].GroupId" }
],
"path": "SecurityGroups[]"
}
},
"Snapshots": {
"request": { "operation": "DescribeSnapshots" },
"resource": {
"type": "Snapshot",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Snapshots[].SnapshotId" }
],
"path": "Snapshots[]"
}
},
"Subnets": {
"request": { "operation": "DescribeSubnets" },
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Subnets[].SubnetId" }
],
"path": "Subnets[]"
}
},
"Volumes": {
"request": { "operation": "DescribeVolumes" },
"resource": {
"type": "Volume",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Volumes[].VolumeId" }
],
"path": "Volumes[]"
}
},
"VpcAddresses": {
"request": {
"operation": "DescribeAddresses",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "domain" },
{ "target": "Filters[0].Values[0]", "source": "string", "value": "vpc" }
]
},
"resource": {
"type": "VpcAddress",
"identifiers": [
{ "target": "AllocationId", "source": "response", "path": "Addresses[].AllocationId" }
],
"path": "Addresses[]"
}
},
"VpcPeeringConnections": {
"request": { "operation": "DescribeVpcPeeringConnections" },
"resource": {
"type": "VpcPeeringConnection",
"identifiers": [
{ "target": "Id", "source": "response", "path": "VpcPeeringConnections[].VpcPeeringConnectionId" }
],
"path": "VpcPeeringConnections[]"
}
},
"Vpcs": {
"request": { "operation": "DescribeVpcs" },
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Vpcs[].VpcId" }
],
"path": "Vpcs[]"
}
}
}
},
"resources": {
"ClassicAddress": {
"identifiers": [
{
"name": "PublicIp"
}
],
"shape": "Address",
"load": {
"request": {
"operation": "DescribeAddresses",
"params": [
{ "target": "PublicIps[]", "source": "identifier", "name": "PublicIp" }
]
},
"path": "Addresses[0]"
},
"actions": {
"Associate": {
"request": {
"operation": "AssociateAddress",
"params": [
{ "target": "PublicIp", "source": "identifier", "name": "PublicIp" }
]
}
},
"Disassociate": {
"request": {
"operation": "DisassociateAddress",
"params": [
{ "target": "PublicIp", "source": "data", "path": "PublicIp" }
]
}
},
"Release": {
"request": {
"operation": "ReleaseAddress",
"params": [
{ "target": "PublicIp", "source": "data", "path": "PublicIp" }
]
}
}
}
},
"DhcpOptions": {
"identifiers": [
{
"name": "Id",
"memberName": "DhcpOptionsId"
}
],
"shape": "DhcpOptions",
"load": {
"request": {
"operation": "DescribeDhcpOptions",
"params": [
{ "target": "DhcpOptionsIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "DhcpOptions[0]"
},
"actions": {
"AssociateWithVpc": {
"request": {
"operation": "AssociateDhcpOptions",
"params": [
{ "target": "DhcpOptionsId", "source": "identifier", "name": "Id" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteDhcpOptions",
"params": [
{ "target": "DhcpOptionsId", "source": "identifier", "name": "Id" }
]
}
}
}
},
"Image": {
"identifiers": [
{
"name": "Id",
"memberName": "ImageId"
}
],
"shape": "Image",
"load": {
"request": {
"operation": "DescribeImages",
"params": [
{ "target": "ImageIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "Images[0]"
},
"actions": {
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Deregister": {
"request": {
"operation": "DeregisterImage",
"params": [
{ "target": "ImageId", "source": "identifier", "name": "Id" }
]
}
},
"DescribeAttribute": {
"request": {
"operation": "DescribeImageAttribute",
"params": [
{ "target": "ImageId", "source": "identifier", "name": "Id" }
]
}
},
"ModifyAttribute": {
"request": {
"operation": "ModifyImageAttribute",
"params": [
{ "target": "ImageId", "source": "identifier", "name": "Id" }
]
}
},
"ResetAttribute": {
"request": {
"operation": "ResetImageAttribute",
"params": [
{ "target": "ImageId", "source": "identifier", "name": "Id" }
]
}
}
}
},
"Instance": {
"identifiers": [
{
"name": "Id",
"memberName": "InstanceId"
}
],
"shape": "Instance",
"load": {
"request": {
"operation": "DescribeInstances",
"params": [
{ "target": "InstanceIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "Reservations[0].Instances[0]"
},
"actions": {
"AttachClassicLinkVpc": {
"request": {
"operation": "AttachClassicLinkVpc",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"AttachVolume": {
"request": {
"operation": "AttachVolume",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"ConsoleOutput": {
"request": {
"operation": "GetConsoleOutput",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"CreateImage": {
"request": {
"operation": "CreateImage",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Image",
"identifiers": [
{ "target": "Id", "source": "response", "path": "ImageId" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"DescribeAttribute": {
"request": {
"operation": "DescribeInstanceAttribute",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"DetachClassicLinkVpc": {
"request": {
"operation": "DetachClassicLinkVpc",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"DetachVolume": {
"request": {
"operation": "DetachVolume",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"ModifyAttribute": {
"request": {
"operation": "ModifyInstanceAttribute",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"Monitor": {
"request": {
"operation": "MonitorInstances",
"params": [
{ "target": "InstanceIds[0]", "source": "identifier", "name": "Id" }
]
}
},
"PasswordData": {
"request": {
"operation": "GetPasswordData",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"Reboot": {
"request": {
"operation": "RebootInstances",
"params": [
{ "target": "InstanceIds[0]", "source": "identifier", "name": "Id" }
]
}
},
"ReportStatus": {
"request": {
"operation": "ReportInstanceStatus",
"params": [
{ "target": "Instances[0]", "source": "identifier", "name": "Id" }
]
}
},
"ResetAttribute": {
"request": {
"operation": "ResetInstanceAttribute",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" }
]
}
},
"ResetKernel": {
"request": {
"operation": "ResetInstanceAttribute",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" },
{ "target": "Attribute", "source": "string", "value": "kernel" }
]
}
},
"ResetRamdisk": {
"request": {
"operation": "ResetInstanceAttribute",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" },
{ "target": "Attribute", "source": "string", "value": "ramdisk" }
]
}
},
"ResetSourceDestCheck": {
"request": {
"operation": "ResetInstanceAttribute",
"params": [
{ "target": "InstanceId", "source": "identifier", "name": "Id" },
{ "target": "Attribute", "source": "string", "value": "sourceDestCheck" }
]
}
},
"Start": {
"request": {
"operation": "StartInstances",
"params": [
{ "target": "InstanceIds[0]", "source": "identifier", "name": "Id" }
]
}
},
"Stop": {
"request": {
"operation": "StopInstances",
"params": [
{ "target": "InstanceIds[0]", "source": "identifier", "name": "Id" }
]
}
},
"Terminate": {
"request": {
"operation": "TerminateInstances",
"params": [
{ "target": "InstanceIds[0]", "source": "identifier", "name": "Id" }
]
}
},
"Unmonitor": {
"request": {
"operation": "UnmonitorInstances",
"params": [
{ "target": "InstanceIds[0]", "source": "identifier", "name": "Id" }
]
}
}
},
"batchActions": {
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[]", "source": "identifier", "name": "Id" }
]
}
},
"Monitor": {
"request": {
"operation": "MonitorInstances",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
]
}
},
"Reboot": {
"request": {
"operation": "RebootInstances",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
]
}
},
"Start": {
"request": {
"operation": "StartInstances",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
]
}
},
"Stop": {
"request": {
"operation": "StopInstances",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
]
}
},
"Terminate": {
"request": {
"operation": "TerminateInstances",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
]
}
},
"Unmonitor": {
"request": {
"operation": "UnmonitorInstances",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
]
}
}
},
"waiters": {
"Exists": {
"waiterName": "InstanceExists",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
],
"path": "Reservations[0].Instances[0]"
},
"Running": {
"waiterName": "InstanceRunning",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
],
"path": "Reservations[0].Instances[0]"
},
"Stopped": {
"waiterName": "InstanceStopped",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
],
"path": "Reservations[0].Instances[0]"
},
"Terminated": {
"waiterName": "InstanceTerminated",
"params": [
{ "target": "InstanceIds[]", "source": "identifier", "name": "Id" }
],
"path": "Reservations[0].Instances[0]"
}
},
"has": {
"ClassicAddress": {
"resource": {
"type": "ClassicAddress",
"identifiers": [
{ "target": "PublicIp", "source": "data", "path": "PublicIpAddress" }
]
}
},
"Image": {
"resource": {
"type": "Image",
"identifiers": [
{ "target": "Id", "source": "data", "path": "ImageId" }
]
}
},
"KeyPair": {
"resource": {
"type": "KeyPairInfo",
"identifiers": [
{ "target": "Name", "source": "data", "path": "KeyName" }
]
}
},
"NetworkInterfaces": {
"resource": {
"type": "NetworkInterface",
"identifiers": [
{ "target": "Id", "source": "data", "path": "NetworkInterfaces[].NetworkInterfaceId" }
],
"path": "NetworkInterfaces[]"
}
},
"PlacementGroup": {
"resource": {
"type": "PlacementGroup",
"identifiers": [
{ "target": "Name", "source": "data", "path": "Placement.GroupName" }
]
}
},
"Subnet": {
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "data", "path": "SubnetId" }
]
}
},
"Vpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "data", "path": "VpcId" }
]
}
}
},
"hasMany": {
"Volumes": {
"request": {
"operation": "DescribeVolumes",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "attachment.instance-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Volume",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Volumes[].VolumeId" }
],
"path": "Volumes[]"
}
},
"VpcAddresses": {
"request": {
"operation": "DescribeAddresses",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "instance-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "VpcAddress",
"identifiers": [
{ "target": "AllocationId", "source": "response", "path": "Addresses[].AllocationId" }
],
"path": "Addresses[]"
}
}
}
},
"InternetGateway": {
"identifiers": [
{
"name": "Id",
"memberName": "InternetGatewayId"
}
],
"shape": "InternetGateway",
"load": {
"request": {
"operation": "DescribeInternetGateways",
"params": [
{ "target": "InternetGatewayIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "InternetGateways[0]"
},
"actions": {
"AttachToVpc": {
"request": {
"operation": "AttachInternetGateway",
"params": [
{ "target": "InternetGatewayId", "source": "identifier", "name": "Id" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteInternetGateway",
"params": [
{ "target": "InternetGatewayId", "source": "identifier", "name": "Id" }
]
}
},
"DetachFromVpc": {
"request": {
"operation": "DetachInternetGateway",
"params": [
{ "target": "InternetGatewayId", "source": "identifier", "name": "Id" }
]
}
}
}
},
"KeyPair": {
"identifiers": [
{
"name": "Name",
"memberName": "KeyName"
}
],
"shape": "KeyPair",
"actions": {
"Delete": {
"request": {
"operation": "DeleteKeyPair",
"params": [
{ "target": "KeyName", "source": "identifier", "name": "Name" }
]
}
}
}
},
"KeyPairInfo": {
"identifiers": [
{
"name": "Name",
"memberName": "KeyName"
}
],
"shape": "KeyPairInfo",
"load": {
"request": {
"operation": "DescribeKeyPairs",
"params": [
{ "target": "KeyNames[0]", "source": "identifier", "name": "Name" }
]
},
"path": "KeyPairs[0]"
},
"actions": {
"Delete": {
"request": {
"operation": "DeleteKeyPair",
"params": [
{ "target": "KeyName", "source": "identifier", "name": "Name" }
]
}
}
}
},
"NetworkAcl": {
"identifiers": [
{
"name": "Id",
"memberName": "NetworkAclId"
}
],
"shape": "NetworkAcl",
"load": {
"request": {
"operation": "DescribeNetworkAcls",
"params": [
{ "target": "NetworkAclIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "NetworkAcls[0]"
},
"actions": {
"CreateEntry": {
"request": {
"operation": "CreateNetworkAclEntry",
"params": [
{ "target": "NetworkAclId", "source": "identifier", "name": "Id" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteNetworkAcl",
"params": [
{ "target": "NetworkAclId", "source": "identifier", "name": "Id" }
]
}
},
"DeleteEntry": {
"request": {
"operation": "DeleteNetworkAclEntry",
"params": [
{ "target": "NetworkAclId", "source": "identifier", "name": "Id" }
]
}
},
"ReplaceAssociation": {
"request": {
"operation": "ReplaceNetworkAclAssociation",
"params": [
{ "target": "NetworkAclId", "source": "identifier", "name": "Id" }
]
}
},
"ReplaceEntry": {
"request": {
"operation": "ReplaceNetworkAclEntry",
"params": [
{ "target": "NetworkAclId", "source": "identifier", "name": "Id" }
]
}
}
},
"has": {
"Vpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "data", "path": "VpcId" }
]
}
}
}
},
"NetworkInterface": {
"identifiers": [
{
"name": "Id",
"memberName": "NetworkInterfaceId"
}
],
"shape": "NetworkInterface",
"load": {
"request": {
"operation": "DescribeNetworkInterfaces",
"params": [
{ "target": "NetworkInterfaceIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "NetworkInterfaces[0]"
},
"actions": {
"AssignPrivateIpAddresses": {
"request": {
"operation": "AssignPrivateIpAddresses",
"params": [
{ "target": "NetworkInterfaceId", "source": "identifier", "name": "Id" }
]
}
},
"Attach": {
"request": {
"operation": "AttachNetworkInterface",
"params": [
{ "target": "NetworkInterfaceId", "source": "identifier", "name": "Id" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteNetworkInterface",
"params": [
{ "target": "NetworkInterfaceId", "source": "identifier", "name": "Id" }
]
}
},
"DescribeAttribute": {
"request": {
"operation": "DescribeNetworkInterfaceAttribute",
"params": [
{ "target": "NetworkInterfaceId", "source": "identifier", "name": "Id" }
]
}
},
"Detach": {
"request": {
"operation": "DetachNetworkInterface",
"params": [
{ "target": "AttachmentId", "source": "data", "path": "Attachment.AttachmentId" }
]
}
},
"ModifyAttribute": {
"request": {
"operation": "ModifyNetworkInterfaceAttribute",
"params": [
{ "target": "NetworkInterfaceId", "source": "identifier", "name": "Id" }
]
}
},
"ResetAttribute": {
"request": {
"operation": "ResetNetworkInterfaceAttribute",
"params": [
{ "target": "NetworkInterfaceId", "source": "identifier", "name": "Id" }
]
}
},
"UnassignPrivateIpAddresses": {
"request": {
"operation": "UnassignPrivateIpAddresses",
"params": [
{ "target": "NetworkInterfaceId", "source": "identifier", "name": "Id" }
]
}
}
},
"has": {
"Association": {
"resource": {
"type": "NetworkInterfaceAssociation",
"identifiers": [
{ "target": "Id", "source": "data", "path": "Association.AssociationId" }
],
"path": "Association"
}
},
"Subnet": {
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "data", "path": "SubnetId" }
]
}
},
"Vpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "data", "path": "VpcId" }
]
}
}
}
},
"NetworkInterfaceAssociation": {
"identifiers": [
{
"name": "Id"
}
],
"shape": "InstanceNetworkInterfaceAssociation",
"load": {
"request": {
"operation": "DescribeNetworkInterfaces",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "association.association-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"path": "NetworkInterfaces[0].Association"
},
"actions": {
"Delete": {
"request": {
"operation": "DisassociateAddress",
"params": [
{ "target": "AssociationId", "source": "identifier", "name": "Id" }
]
}
}
},
"has": {
"Address": {
"resource": {
"type": "VpcAddress",
"identifiers": [
{ "target": "AllocationId", "source": "data", "path": "AllocationId" }
]
}
}
}
},
"PlacementGroup": {
"identifiers": [
{
"name": "Name",
"memberName": "GroupName"
}
],
"shape": "PlacementGroup",
"load": {
"request": {
"operation": "DescribePlacementGroups",
"params": [
{ "target": "GroupNames[0]", "source": "identifier", "name": "Name" }
]
},
"path": "PlacementGroups[0]"
},
"actions": {
"Delete": {
"request": {
"operation": "DeletePlacementGroup",
"params": [
{ "target": "GroupName", "source": "identifier", "name": "Name" }
]
}
}
},
"hasMany": {
"Instances": {
"request": {
"operation": "DescribeInstances",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "placement-group-name" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Name" }
]
},
"resource": {
"type": "Instance",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Reservations[].Instances[].InstanceId" }
],
"path": "Reservations[].Instances[]"
}
}
}
},
"Route": {
"identifiers": [
{ "name": "RouteTableId" },
{
"name": "DestinationCidrBlock",
"memberName": "DestinationCidrBlock"
}
],
"shape": "Route",
"actions": {
"Delete": {
"request": {
"operation": "DeleteRoute",
"params": [
{ "target": "RouteTableId", "source": "identifier", "name": "RouteTableId" },
{ "target": "DestinationCidrBlock", "source": "identifier", "name": "DestinationCidrBlock" }
]
}
},
"Replace": {
"request": {
"operation": "ReplaceRoute",
"params": [
{ "target": "RouteTableId", "source": "identifier", "name": "RouteTableId" },
{ "target": "DestinationCidrBlock", "source": "identifier", "name": "DestinationCidrBlock" }
]
}
}
},
"has": {
"RouteTable": {
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "identifier", "name": "RouteTableId" }
]
}
}
}
},
"RouteTable": {
"identifiers": [
{
"name": "Id",
"memberName": "RouteTableId"
}
],
"shape": "RouteTable",
"load": {
"request": {
"operation": "DescribeRouteTables",
"params": [
{ "target": "RouteTableIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "RouteTables[0]"
},
"actions": {
"AssociateWithSubnet": {
"request": {
"operation": "AssociateRouteTable",
"params": [
{ "target": "RouteTableId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "RouteTableAssociation",
"identifiers": [
{ "target": "Id", "source": "response", "path": "AssociationId" }
]
}
},
"CreateRoute": {
"request": {
"operation": "CreateRoute",
"params": [
{ "target": "RouteTableId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Route",
"identifiers": [
{ "target": "RouteTableId", "source": "identifier", "name": "Id" },
{ "target": "DestinationCidrBlock", "source": "requestParameter", "path": "DestinationCidrBlock" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteRouteTable",
"params": [
{ "target": "RouteTableId", "source": "identifier", "name": "Id" }
]
}
}
},
"has": {
"Routes": {
"resource": {
"type": "Route",
"identifiers": [
{ "target": "RouteTableId", "source": "identifier", "name": "Id" },
{ "target": "DestinationCidrBlock", "source": "data", "path": "Routes[].DestinationCidrBlock" }
],
"path": "Routes[]"
}
},
"Vpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "data", "path": "VpcId" }
]
}
}
},
"hasMany": {
"Associations": {
"request": {
"operation": "DescribeRouteTables",
"params": [
{ "target": "RouteTableIds[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "RouteTableAssociation",
"identifiers": [
{ "target": "Id", "source": "response", "path": "RouteTables[0].Associations[].RouteTableAssociationId" }
],
"path": "RouteTables[0].Associations[]"
}
}
}
},
"RouteTableAssociation": {
"identifiers": [
{
"name": "Id",
"memberName": "RouteTableAssociationId"
}
],
"shape": "RouteTableAssociation",
"actions": {
"Delete": {
"request": {
"operation": "DisassociateRouteTable",
"params": [
{ "target": "AssociationId", "source": "identifier", "name": "Id" }
]
}
},
"ReplaceSubnet": {
"request": {
"operation": "ReplaceRouteTableAssociation",
"params": [
{ "target": "AssociationId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "RouteTableAssociation",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NewAssociationId" }
]
}
}
},
"has": {
"RouteTable": {
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "data", "path": "RouteTableId" }
]
}
},
"Subnet": {
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "data", "path": "SubnetId" }
]
}
}
}
},
"SecurityGroup": {
"identifiers": [
{
"name": "Id",
"memberName": "GroupId"
}
],
"shape": "SecurityGroup",
"load": {
"request": {
"operation": "DescribeSecurityGroups",
"params": [
{ "target": "GroupIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "SecurityGroups[0]"
},
"actions": {
"AuthorizeEgress": {
"request": {
"operation": "AuthorizeSecurityGroupEgress",
"params": [
{ "target": "GroupId", "source": "identifier", "name": "Id" }
]
}
},
"AuthorizeIngress": {
"request": {
"operation": "AuthorizeSecurityGroupIngress",
"params": [
{ "target": "GroupId", "source": "identifier", "name": "Id" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteSecurityGroup",
"params": [
{ "target": "GroupId", "source": "identifier", "name": "Id" }
]
}
},
"RevokeEgress": {
"request": {
"operation": "RevokeSecurityGroupEgress",
"params": [
{ "target": "GroupId", "source": "identifier", "name": "Id" }
]
}
},
"RevokeIngress": {
"request": {
"operation": "RevokeSecurityGroupIngress",
"params": [
{ "target": "GroupId", "source": "identifier", "name": "Id" }
]
}
}
}
},
"Snapshot": {
"identifiers": [
{
"name": "Id",
"memberName": "SnapshotId"
}
],
"shape": "Snapshot",
"load": {
"request": {
"operation": "DescribeSnapshots",
"params": [
{ "target": "SnapshotIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "Snapshots[0]"
},
"actions": {
"Copy": {
"request": {
"operation": "CopySnapshot",
"params": [
{ "target": "SourceSnapshotId", "source": "identifier", "name": "Id" }
]
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteSnapshot",
"params": [
{ "target": "SnapshotId", "source": "identifier", "name": "Id" }
]
}
},
"DescribeAttribute": {
"request": {
"operation": "DescribeSnapshotAttribute",
"params": [
{ "target": "SnapshotId", "source": "identifier", "name": "Id" }
]
}
},
"ModifyAttribute": {
"request": {
"operation": "ModifySnapshotAttribute",
"params": [
{ "target": "SnapshotId", "source": "identifier", "name": "Id" }
]
}
},
"ResetAttribute": {
"request": {
"operation": "ResetSnapshotAttribute",
"params": [
{ "target": "SnapshotId", "source": "identifier", "name": "Id" }
]
}
}
},
"waiters": {
"Completed": {
"waiterName": "SnapshotCompleted",
"params": [
{ "target": "SnapshotIds[]", "source": "identifier", "name": "Id" }
],
"path": "Snapshots[]"
}
},
"has": {
"Volume": {
"resource": {
"type": "Volume",
"identifiers": [
{ "target": "Id", "source": "data", "path": "VolumeId" }
]
}
}
}
},
"Subnet": {
"identifiers": [
{
"name": "Id",
"memberName": "SubnetId"
}
],
"shape": "Subnet",
"load": {
"request": {
"operation": "DescribeSubnets",
"params": [
{ "target": "SubnetIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "Subnets[0]"
},
"actions": {
"CreateInstances": {
"request": {
"operation": "RunInstances",
"params": [
{ "target": "SubnetId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Instance",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Instances[].InstanceId" }
],
"path": "Instances[]"
}
},
"CreateNetworkInterface": {
"request": {
"operation": "CreateNetworkInterface",
"params": [
{ "target": "SubnetId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "NetworkInterface",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkInterface.NetworkInterfaceId" }
],
"path": "NetworkInterface"
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteSubnet",
"params": [
{ "target": "SubnetId", "source": "identifier", "name": "Id" }
]
}
}
},
"has": {
"Vpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "data", "path": "VpcId" }
]
}
}
},
"hasMany": {
"Instances": {
"request": {
"operation": "DescribeInstances",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "subnet-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Instance",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Reservations[].Instances[].InstanceId" }
],
"path": "Reservations[].Instances[]"
}
},
"NetworkInterfaces": {
"request": {
"operation": "DescribeNetworkInterfaces",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "subnet-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "NetworkInterface",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkInterfaces[].NetworkInterfaceId" }
],
"path": "NetworkInterfaces[]"
}
}
}
},
"Tag": {
"identifiers": [
{
"name": "ResourceId",
"memberName": "ResourceId"
},
{
"name": "Key",
"memberName": "Key"
},
{
"name": "Value",
"memberName": "Value"
}
],
"shape": "TagDescription",
"load": {
"request": {
"operation": "DescribeTags",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "key" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Key" },
{ "target": "Filters[1].Name", "source": "string", "value": "value" },
{ "target": "Filters[1].Values[0]", "source": "identifier", "name": "Value" }
]
},
"path": "Tags[0]"
},
"actions": {
"Delete": {
"request": {
"operation": "DeleteTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "ResourceId" },
{ "target": "Tags[0].Key", "source": "identifier", "name": "Key" },
{ "target": "Tags[0].Value", "source": "identifier", "name": "Value" }
]
}
}
},
"batchActions": {
"Delete": {
"request": {
"operation": "DeleteTags",
"params": [
{ "target": "Resources[]", "source": "identifier", "name": "ResourceId" },
{ "target": "Tags[*].Key", "source": "identifier", "name": "Key" },
{ "target": "Tags[*].Value", "source": "identifier", "name": "Value" }
]
}
}
}
},
"Volume": {
"identifiers": [
{
"name": "Id",
"memberName": "VolumeId"
}
],
"shape": "Volume",
"load": {
"request": {
"operation": "DescribeVolumes",
"params": [
{ "target": "VolumeIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "Volumes[0]"
},
"actions": {
"AttachToInstance": {
"request": {
"operation": "AttachVolume",
"params": [
{ "target": "VolumeId", "source": "identifier", "name": "Id" }
]
}
},
"CreateSnapshot": {
"request": {
"operation": "CreateSnapshot",
"params": [
{ "target": "VolumeId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Snapshot",
"identifiers": [
{ "target": "Id", "source": "response", "path": "SnapshotId" }
],
"path": "@"
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteVolume",
"params": [
{ "target": "VolumeId", "source": "identifier", "name": "Id" }
]
}
},
"DescribeAttribute": {
"request": {
"operation": "DescribeVolumeAttribute",
"params": [
{ "target": "VolumeId", "source": "identifier", "name": "Id" }
]
}
},
"DescribeStatus": {
"request": {
"operation": "DescribeVolumeStatus",
"params": [
{ "target": "VolumeIds[0]", "source": "identifier", "name": "Id" }
]
}
},
"DetachFromInstance": {
"request": {
"operation": "DetachVolume",
"params": [
{ "target": "VolumeId", "source": "identifier", "name": "Id" }
]
}
},
"EnableIo": {
"request": {
"operation": "EnableVolumeIO",
"params": [
{ "target": "VolumeId", "source": "identifier", "name": "Id" }
]
}
},
"ModifyAttribute": {
"request": {
"operation": "ModifyVolumeAttribute",
"params": [
{ "target": "VolumeId", "source": "identifier", "name": "Id" }
]
}
}
},
"hasMany": {
"Snapshots": {
"request": {
"operation": "DescribeSnapshots",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "volume-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Snapshot",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Snapshots[].SnapshotId" }
],
"path": "Snapshots[]"
}
}
}
},
"Vpc": {
"identifiers": [
{
"name": "Id",
"memberName": "VpcId"
}
],
"shape": "Vpc",
"load": {
"request": {
"operation": "DescribeVpcs",
"params": [
{ "target": "VpcIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "Vpcs[0]"
},
"actions": {
"AssociateDhcpOptions": {
"request": {
"operation": "AssociateDhcpOptions",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"AttachClassicLinkInstance": {
"request": {
"operation": "AttachClassicLinkVpc",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"AttachInternetGateway": {
"request": {
"operation": "AttachInternetGateway",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"CreateNetworkAcl": {
"request": {
"operation": "CreateNetworkAcl",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "NetworkAcl",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkAcl.NetworkAclId" }
],
"path": "NetworkAcl"
}
},
"CreateRouteTable": {
"request": {
"operation": "CreateRouteTable",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "response", "path": "RouteTable.RouteTableId" }
],
"path": "RouteTable"
}
},
"CreateSecurityGroup": {
"request": {
"operation": "CreateSecurityGroup",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "SecurityGroup",
"identifiers": [
{ "target": "Id", "source": "response", "path": "GroupId" }
]
}
},
"CreateSubnet": {
"request": {
"operation": "CreateSubnet",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Subnet.SubnetId" }
],
"path": "Subnet"
}
},
"CreateTags": {
"request": {
"operation": "CreateTags",
"params": [
{ "target": "Resources[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Tag",
"identifiers": [
{ "target": "ResourceId", "source": "identifier", "name": "Id" },
{ "target": "Key", "source": "requestParameter", "path": "Tags[].Key" },
{ "target": "Value", "source": "requestParameter", "path": "Tags[].Value" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteVpc",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"DescribeAttribute": {
"request": {
"operation": "DescribeVpcAttribute",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"DetachClassicLinkInstance": {
"request": {
"operation": "DetachClassicLinkVpc",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"DetachInternetGateway": {
"request": {
"operation": "DetachInternetGateway",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"DisableClassicLink": {
"request": {
"operation": "DisableVpcClassicLink",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"EnableClassicLink": {
"request": {
"operation": "EnableVpcClassicLink",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"ModifyAttribute": {
"request": {
"operation": "ModifyVpcAttribute",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
}
},
"RequestVpcPeeringConnection": {
"request": {
"operation": "CreateVpcPeeringConnection",
"params": [
{ "target": "VpcId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "VpcPeeringConnection",
"identifiers": [
{ "target": "Id", "source": "response", "path": "VpcPeeringConnection.VpcPeeringConnectionId" }
],
"path": "VpcPeeringConnection"
}
}
},
"has": {
"DhcpOptions": {
"resource": {
"type": "DhcpOptions",
"identifiers": [
{ "target": "Id", "source": "data", "path": "DhcpOptionsId" }
]
}
}
},
"hasMany": {
"AcceptedVpcPeeringConnections": {
"request": {
"operation": "DescribeVpcPeeringConnections",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "accepter-vpc-info.vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "VpcPeeringConnection",
"identifiers": [
{ "target": "Id", "source": "response", "path": "VpcPeeringConnections[].VpcPeeringConnectionId" }
],
"path": "VpcPeeringConnections[]"
}
},
"Instances": {
"request": {
"operation": "DescribeInstances",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Instance",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Reservations[].Instances[].InstanceId" }
],
"path": "Reservations[].Instances[]"
}
},
"InternetGateways": {
"request": {
"operation": "DescribeInternetGateways",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "attachment.vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "InternetGateway",
"identifiers": [
{ "target": "Id", "source": "response", "path": "InternetGateways[].InternetGatewayId" }
],
"path": "InternetGateways[]"
}
},
"NetworkAcls": {
"request": {
"operation": "DescribeNetworkAcls",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "NetworkAcl",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkAcls[].NetworkAclId" }
],
"path": "NetworkAcls[]"
}
},
"NetworkInterfaces": {
"request": {
"operation": "DescribeNetworkInterfaces",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "NetworkInterface",
"identifiers": [
{ "target": "Id", "source": "response", "path": "NetworkInterfaces[].NetworkInterfaceId" }
],
"path": "NetworkInterfaces[]"
}
},
"RequestedVpcPeeringConnections": {
"request": {
"operation": "DescribeVpcPeeringConnections",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "requester-vpc-info.vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "VpcPeeringConnection",
"identifiers": [
{ "target": "Id", "source": "response", "path": "VpcPeeringConnections[].VpcPeeringConnectionId" }
],
"path": "VpcPeeringConnections[]"
}
},
"RouteTables": {
"request": {
"operation": "DescribeRouteTables",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "response", "path": "RouteTables[].RouteTableId" }
],
"path": "RouteTables[]"
}
},
"SecurityGroups": {
"request": {
"operation": "DescribeSecurityGroups",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "SecurityGroup",
"identifiers": [
{ "target": "Id", "source": "response", "path": "SecurityGroups[].GroupId" }
],
"path": "SecurityGroups[]"
}
},
"Subnets": {
"request": {
"operation": "DescribeSubnets",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "vpc-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Subnet",
"identifiers": [
{ "target": "Id", "source": "response", "path": "Subnets[].SubnetId" }
],
"path": "Subnets[]"
}
}
}
},
"VpcPeeringConnection": {
"identifiers": [
{
"name": "Id",
"memberName": "VpcPeeringConnectionId"
}
],
"shape": "VpcPeeringConnection",
"load": {
"request": {
"operation": "DescribeVpcPeeringConnections",
"params": [
{ "target": "VpcPeeringConnectionIds[0]", "source": "identifier", "name": "Id" }
]
},
"path": "VpcPeeringConnections[0]"
},
"actions": {
"Accept": {
"request": {
"operation": "AcceptVpcPeeringConnection",
"params": [
{ "target": "VpcPeeringConnectionId", "source": "identifier", "name": "Id" }
]
}
},
"Delete": {
"request": {
"operation": "DeleteVpcPeeringConnection",
"params": [
{ "target": "VpcPeeringConnectionId", "source": "identifier", "name": "Id" }
]
}
},
"Reject": {
"request": {
"operation": "RejectVpcPeeringConnection",
"params": [
{ "target": "VpcPeeringConnectionId", "source": "identifier", "name": "Id" }
]
}
}
},
"waiters": {
"Exists": {
"waiterName": "VpcPeeringConnectionExists",
"params": [
{ "target": "VpcPeeringConnectionIds[]", "source": "identifier", "name": "Id" }
],
"path": "VpcPeeringConnections[0]"
}
},
"has": {
"AccepterVpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "data", "path": "AccepterVpcInfo.VpcId" }
]
}
},
"RequesterVpc": {
"resource": {
"type": "Vpc",
"identifiers": [
{ "target": "Id", "source": "data", "path": "RequesterVpcInfo.VpcId" }
]
}
}
}
},
"VpcAddress": {
"identifiers": [
{
"name": "AllocationId"
}
],
"shape": "Address",
"load": {
"request": {
"operation": "DescribeAddresses",
"params": [
{ "target": "AllocationIds[0]", "source": "identifier", "name": "AllocationId" }
]
},
"path": "Addresses[0]"
},
"actions": {
"Associate": {
"request": {
"operation": "AssociateAddress",
"params": [
{ "target": "AllocationId", "source": "identifier", "name": "AllocationId" }
]
}
},
"Release": {
"request": {
"operation": "ReleaseAddress",
"params": [
{ "target": "AllocationId", "source": "data", "path": "AllocationId" }
]
}
}
},
"has": {
"Association": {
"resource": {
"type": "NetworkInterfaceAssociation",
"identifiers": [
{ "target": "Id", "source": "data", "path": "AssociationId" }
]
}
}
}
}
}
}
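
Taken together, the definitions above are what drive boto3's generated EC2 resource classes: each ``hasMany`` entry becomes a filtered collection on ``Vpc``, and each action on ``VpcPeeringConnection`` maps to the named operation with the ``Id`` identifier bound. A minimal sketch of how this surfaces in Python, using placeholder resource IDs::

    import boto3

    ec2 = boto3.resource('ec2')

    # The collections defined above issue Describe* calls with a
    # vpc-id filter bound to this VPC's Id identifier.
    vpc = ec2.Vpc('vpc-12345678')  # placeholder ID
    for sg in vpc.security_groups.all():
        print(sg.group_id)
    for subnet in vpc.subnets.all():
        print(subnet.subnet_id)

    # Accept/Delete/Reject map to the corresponding API operations,
    # and the "Exists" waiter becomes wait_until_exists().
    pcx = ec2.VpcPeeringConnection('pcx-11223344')  # placeholder ID
    pcx.wait_until_exists()
    print('%s -> %s' % (pcx.requester_vpc.id, pcx.accepter_vpc.id))
    pcx.accept()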

View file

@ -222,7 +222,7 @@
"resource": {
"type": "Queue",
"identifiers": [
{ "target": "Url", "source": "response", "path": "QueueUrls[]" }
{ "target": "Url", "source": "response", "path": "queueUrls[]" }
]
}
}
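
This path controls how each ``Url`` identifier is pulled out of the ``ListQueues`` response when the service-level ``queues`` collection is iterated; usage on the caller's side is unchanged. A minimal sketch::

    import boto3

    sqs = boto3.resource('sqs')
    # Each URL matched by the response path becomes a Queue resource.
    for queue in sqs.queues.all():
        print(queue.url)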

View file

@ -128,7 +128,8 @@ def document_load_reload_action(section, action_name, resource_name,
"""
description = (
'Calls :py:meth:`%s.Client.%s` to update the attributes of the'
' %s resource' % (
' %s resource. Note that the load and reload methods are '
'the same method and can be used interchangeably.' % (
get_service_module_name(service_model),
xform_name(load_model.request.operation),
resource_name)

View file

@ -10,7 +10,6 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.docs.utils import py_type_name
from botocore.docs.params import ResponseParamsDocumenter
from boto3.docs.utils import get_identifier_description

View file

@ -11,7 +11,6 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from collections import namedtuple
import functools
import re
from boto3.exceptions import DynamoDBOperationNotSupportedError
@ -73,21 +72,21 @@ class AttributeBase(object):
raise DynamoDBOperationNotSupportedError('NOT', self)
def eq(self, value):
"""Creates a condtion where the attribute is equal to the value.
"""Creates a condition where the attribute is equal to the value.
:param value: The value that the attribute is equal to.
"""
return Equals(self, value)
def lt(self, value):
"""Creates a condtion where the attribute is less than the value.
"""Creates a condition where the attribute is less than the value.
:param value: The value that the attribute is less than.
"""
return LessThan(self, value)
def lte(self, value):
"""Creates a condtion where the attribute is less than or equal to the
"""Creates a condition where the attribute is less than or equal to the
value.
:param value: The value that the attribute is less than or equal to.
@ -95,14 +94,14 @@ class AttributeBase(object):
return LessThanEquals(self, value)
def gt(self, value):
"""Creates a condtion where the attribute is greater than the value.
"""Creates a condition where the attribute is greater than the value.
:param value: The value that the attribute is greater than.
"""
return GreaterThan(self, value)
def gte(self, value):
"""Creates a condtion where the attribute is greater than or equal to
"""Creates a condition where the attribute is greater than or equal to
the value.
:param value: The value that the attribute is greater than or equal to.
@ -110,15 +109,15 @@ class AttributeBase(object):
return GreaterThanEquals(self, value)
def begins_with(self, value):
"""Creates a condtion where the attribute begins with the value.
"""Creates a condition where the attribute begins with the value.
:param value: The value that the attribute begins with.
"""
return BeginsWith(self, value)
def between(self, low_value, high_value):
"""Creates a condtion where the attribute is between the low value and
the high value.
"""Creates a condition where the attribute is greater than or equal
to the low value and less than or equal to the high value.
:param low_value: The value that the attribute is greater than.
:param high_value: The value that the attribute is less than.
@ -229,14 +228,14 @@ class Key(AttributeBase):
class Attr(AttributeBase):
"""Represents an DynamoDB item's attribute."""
def ne(self, value):
"""Creates a condtion where the attribute is not equal to the value
"""Creates a condition where the attribute is not equal to the value
:param value: The value that the attribute is not equal to.
"""
return NotEquals(self, value)
def is_in(self, value):
"""Creates a condtion where the attribute is in the value,
"""Creates a condition where the attribute is in the value,
:type value: list
:param value: The value that the attribute is in.
@ -244,11 +243,11 @@ class Attr(AttributeBase):
return In(self, value)
def exists(self):
"""Creates a condtion where the attribute exists."""
"""Creates a condition where the attribute exists."""
return AttributeExists(self)
def not_exists(self):
"""Creates a condtion where the attribute does not exist."""
"""Creates a condition where the attribute does not exist."""
return AttributeNotExists(self)
def contains(self, value):
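
For context, these condition builders are consumed by ``Table.query`` and ``Table.scan`` via ``KeyConditionExpression`` and ``FilterExpression``. A minimal sketch, assuming a table named ``mytable`` with a ``username`` hash key::

    import boto3
    from boto3.dynamodb.conditions import Attr, Key

    table = boto3.resource('dynamodb').Table('mytable')  # assumed name

    # Key conditions use the comparators defined on AttributeBase.
    response = table.query(
        KeyConditionExpression=Key('username').eq('johndoe'))

    # Attr adds the non-key conditions (ne, exists, contains, ...);
    # conditions compose with the &, | and ~ operators.
    filtered = table.scan(
        FilterExpression=Attr('age').between(18, 65) & Attr('email').exists())
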

View file

@ -29,7 +29,7 @@ class TableResource(object):
def __init__(self, *args, **kwargs):
super(TableResource, self).__init__(*args, **kwargs)
def batch_writer(self):
def batch_writer(self, overwrite_by_pkeys=None):
"""Create a batch writer object.
This method creates a context manager for writing
@ -39,7 +39,9 @@ class TableResource(object):
in batches. In addition, the batch writer will also automatically
handle any unprocessed items and resend them as needed. All you need
to do is call ``put_item`` for any items you want to add, and
``delete_item`` for any items you want to delete.
``delete_item`` for any items you want to delete. In addition, you can
specify ``overwrite_by_pkeys`` if the batch might contain duplicated
requests and you want this writer to handle de-duplication for you.
Example usage::
@ -50,13 +52,20 @@ class TableResource(object):
# You can also delete_items in a batch.
batch.delete_item(Key={'HashKey': 'SomeHashKey'})
:type overwrite_by_pkeys: list(string)
:param overwrite_by_pkeys: De-duplicate request items in the buffer
if they match a new request item on the specified primary keys,
e.g. ``["partition_key1", "sort_key2", "sort_key3"]``
"""
return BatchWriter(self.name, self.meta.client)
return BatchWriter(self.name, self.meta.client,
overwrite_by_pkeys=overwrite_by_pkeys)
class BatchWriter(object):
"""Automatically handle batch writes to DynamoDB for a single table."""
def __init__(self, table_name, client, flush_amount=25):
def __init__(self, table_name, client, flush_amount=25,
overwrite_by_pkeys=None):
"""
:type table_name: str
@ -78,21 +87,47 @@ class BatchWriter(object):
a local buffer before sending a batch_write_item
request to DynamoDB.
:type overwrite_by_pkeys: list(string)
:param overwrite_by_pkeys: De-duplicate request items in the buffer
if they match a new request item on the specified primary keys,
e.g. ``["partition_key1", "sort_key2", "sort_key3"]``
"""
self._table_name = table_name
self._client = client
self._items_buffer = []
self._flush_amount = flush_amount
self._overwrite_by_pkeys = overwrite_by_pkeys
def put_item(self, Item):
self._items_buffer.append({'PutRequest': {'Item': Item}})
self._flush_if_needed()
self._add_request_and_process({'PutRequest': {'Item': Item}})
def delete_item(self, Key):
self._items_buffer.append({'DeleteRequest': {'Key': Key}})
self._add_request_and_process({'DeleteRequest': {'Key': Key}})
def _add_request_and_process(self, request):
if self._overwrite_by_pkeys:
self._remove_dup_pkeys_request_if_any(request)
self._items_buffer.append(request)
self._flush_if_needed()
def _remove_dup_pkeys_request_if_any(self, request):
pkey_values_new = self._extract_pkey_values(request)
for item in self._items_buffer:
if self._extract_pkey_values(item) == pkey_values_new:
self._items_buffer.remove(item)
logger.debug("With overwrite_by_pkeys enabled, skipping "
"request:%s", item)
def _extract_pkey_values(self, request):
if request.get('PutRequest'):
return [request['PutRequest']['Item'][key]
for key in self._overwrite_by_pkeys]
elif request.get('DeleteRequest'):
return [request['DeleteRequest']['Key'][key]
for key in self._overwrite_by_pkeys]
return None
def _flush_if_needed(self):
if len(self._items_buffer) >= self._flush_amount:
self._flush()
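
A short sketch of the new de-duplication hook, assuming a table keyed on a ``pk`` partition key and an ``sk`` sort key; with ``overwrite_by_pkeys`` set, the second put below replaces the first in the buffer instead of producing a duplicate key within one batch::

    import boto3

    table = boto3.resource('dynamodb').Table('mytable')  # assumed name

    with table.batch_writer(overwrite_by_pkeys=['pk', 'sk']) as batch:
        batch.put_item(Item={'pk': 'user#1', 'sk': 'profile', 'version': 1})
        # Same primary key: the request buffered above is removed first.
        batch.put_item(Item={'pk': 'user#1', 'sk': 'profile', 'version': 2})
        batch.delete_item(Key={'pk': 'user#2', 'sk': 'profile'})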

View file

@ -71,9 +71,9 @@ class DynamoDBHighLevelResource(object):
'listed in the '
':ref:`DynamoDB Reference Guide<ref_valid_dynamodb_types>`.'
),
new_example_value = (
'\'string\'|123|Binary(b\'bytes\')|True|None|set([\'string\'])|'
'set([123])|set([Binary(b\'bytes\')])|[]|{}')
new_example_value=(
'\'string\'|123|Binary(b\'bytes\')|True|None|set([\'string\'])'
'|set([123])|set([Binary(b\'bytes\')])|[]|{}')
)
key_expression_shape_docs = DocumentModifiedShape(
@ -90,7 +90,7 @@ class DynamoDBHighLevelResource(object):
new_example_value='Key(\'mykey\').eq(\'myvalue\')'
)
cond_expression_shape_docs = DocumentModifiedShape(
con_expression_shape_docs = DocumentModifiedShape(
'ConditionExpression',
new_type=(
'condition from :py:class:`boto3.dynamodb.conditions.Attr` '
@ -116,7 +116,7 @@ class DynamoDBHighLevelResource(object):
self.meta.client.meta.events.register(
'docs.*.dynamodb.*.complete-section',
cond_expression_shape_docs.replace_documentation_for_matching_shape,
con_expression_shape_docs.replace_documentation_for_matching_shape,
unique_id='dynamodb-cond-expression-docs')

View file

@ -59,3 +59,48 @@ restoration is finished.
# Print out objects whose restoration is complete
elif 'ongoing-request="false"' in obj.restore:
print('Restoration complete: %s' % obj.key)
Uploading/downloading files using SSE Customer Keys
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This example shows how to use SSE-C to upload objects using
server side encryption with a customer provided key.
First, we'll need a 32-byte key. For this example, we'll
randomly generate a key, but you can use any 32-byte key
you want. Remember, you must use the same key to download
the object. If you lose the encryption key, you lose
the object.
Also note that we don't have to provide the ``SSECustomerKeyMD5``;
Boto3 will automatically compute this value for us.
.. code-block:: python
import boto3
import os
BUCKET = 'your-bucket-name'
KEY = os.urandom(32)
s3 = boto3.client('s3')
print("Uploading S3 object with SSE-C")
s3.put_object(Bucket=BUCKET,
Key='encrypt-key',
Body=b'foobar',
SSECustomerKey=KEY,
SSECustomerAlgorithm='AES256')
print("Done")
# Getting the object:
print("Getting S3 object...")
# Note how we're using the same ``KEY`` we
# created earlier.
response = s3.get_object(Bucket=BUCKET,
Key='encrypt-key',
SSECustomerKey=KEY,
SSECustomerAlgorithm='AES256')
print("Done, response body:")
print(response['Body'].read())

View file

@ -94,6 +94,7 @@ class DynamoDBOperationNotSupportedError(Boto3Error):
# FIXME: Backward compatibility
DynanmoDBOperationNotSupportedError = DynamoDBOperationNotSupportedError
class DynamoDBNeedsConditionError(Boto3Error):
"""Raised when input is not a condition"""
def __init__(self, value):

View file

@ -78,7 +78,7 @@ class ServiceAction(object):
params.update(kwargs)
logger.info('Calling %s:%s with %r', parent.meta.service_name,
operation_name, params)
operation_name, params)
response = getattr(parent.meta.client, operation_name)(**params)
@ -110,7 +110,8 @@ class BatchAction(ServiceAction):
Perform the batch action's operation on every page of results
from the collection.
:type parent: :py:class:`~boto3.resources.collection.ResourceCollection`
:type parent:
:py:class:`~boto3.resources.collection.ResourceCollection`
:param parent: The collection iterator to which this action
is attached.
:rtype: list(dict)
@ -209,7 +210,8 @@ class CustomModeledAction(object):
function, event_emitter):
"""
:type action_name: str
:param action_name: The name of the action to inject, e.g. 'delete_tags'
:param action_name: The name of the action to inject, e.g.
'delete_tags'
:type action_model: dict
:param action_model: A JSON definition of the action, as if it were

View file

@ -40,7 +40,7 @@ class ResourceFactory(object):
self._emitter = emitter
def load_from_definition(self, resource_name,
single_resource_json_definition, service_context):
single_resource_json_definition, service_context):
"""
Loads a resource from a model, creating a new
:py:class:`~boto3.resources.base.ServiceResource` subclass
@ -62,8 +62,6 @@ class ResourceFactory(object):
:rtype: Subclass of :py:class:`~boto3.resources.base.ServiceResource`
:return: The service or resource class.
"""
logger.debug('Loading %s:%s', service_context.service_name,
resource_name)
@ -114,7 +112,7 @@ class ResourceFactory(object):
attrs=attrs, resource_model=resource_model,
service_context=service_context)
#References and Subresources
# References and Subresources
self._load_has_relations(
attrs=attrs, resource_name=resource_name,
resource_model=resource_model, service_context=service_context
@ -183,8 +181,9 @@ class ResourceFactory(object):
shape = service_context.service_model.shape_for(
resource_model.shape)
identifiers = dict((i.member_name, i)
for i in resource_model.identifiers if i.member_name)
identifiers = dict(
(i.member_name, i)
for i in resource_model.identifiers if i.member_name)
attributes = resource_model.get_attributes(shape)
for name, (orig_name, member) in attributes.items():
if name in identifiers:
@ -243,7 +242,7 @@ class ResourceFactory(object):
# This is a sub-resource class you can create
# by passing in an identifier, e.g. s3.Bucket(name).
attrs[subresource.name] = self._create_class_partial(
subresource_model=subresource,
subresource_model=subresource,
resource_name=resource_name,
service_context=service_context
)
@ -280,7 +279,7 @@ class ResourceFactory(object):
resource_waiter_model=waiter,
resource_name=resource_name,
service_context=service_context
)
)
def _create_identifier(factory_self, identifier, resource_name):
"""
@ -340,7 +339,8 @@ class ResourceFactory(object):
self.load()
else:
raise ResourceLoadException(
'{0} has no load method'.format(self.__class__.__name__))
'{0} has no load method'.format(
self.__class__.__name__))
return self.meta.data.get(name)
@ -364,6 +364,7 @@ class ResourceFactory(object):
"""
waiter = WaiterAction(resource_waiter_model,
waiter_resource_name=resource_waiter_model.name)
def do_waiter(self, *args, **kwargs):
waiter(self, *args, **kwargs)
@ -445,9 +446,10 @@ class ResourceFactory(object):
class' constructor.
"""
name = subresource_model.resource.type
# We need a new method here because we want access to the
# instance's client.
def create_resource(self, *args, **kwargs):
# We need a new method here because we want access to the
# instance's client.
positional_args = []
# We lazy-load the class to handle circular references.
@ -469,7 +471,7 @@ class ResourceFactory(object):
positional_args.append(value)
return partial(resource_cls, *positional_args,
client=self.meta.client)(*args, **kwargs)
client=self.meta.client)(*args, **kwargs)
create_resource.__name__ = str(name)
create_resource.__doc__ = docstring.SubResourceDocstring(

View file

@ -73,7 +73,6 @@ class Action(object):
self.path = definition.get('path')
class DefinitionWithParams(object):
"""
An item which has parameters exposed via the ``params`` property.

View file

@ -14,7 +14,6 @@
import jmespath
from botocore import xform_name
from ..exceptions import ResourceLoadException
from .params import get_data_member
@ -242,13 +241,14 @@ class ResourceHandler(object):
response_item = None
if search_response:
response_item = search_response[i]
response.append(self.handle_response_item(resource_cls,
parent, identifiers, response_item))
response.append(
self.handle_response_item(resource_cls, parent,
identifiers, response_item))
elif all_not_none(identifiers.values()):
# All identifiers must always exist, otherwise the resource
# cannot be instantiated.
response = self.handle_response_item(resource_cls,
parent, identifiers, search_response)
response = self.handle_response_item(
resource_cls, parent, identifiers, search_response)
else:
# The response should be empty, but that may mean an
# empty dict, list, or None based on whether we make
@ -258,8 +258,9 @@ class ResourceHandler(object):
if self.operation_name is not None:
# A remote service call was made, so try and determine
# its shape.
response = build_empty_response(self.search_path,
self.operation_name, self.service_context.service_model)
response = build_empty_response(
self.search_path, self.operation_name,
self.service_context.service_model)
return response

View file

@ -10,15 +10,20 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from boto3.s3.transfer import S3Transfer
from boto3 import utils
from s3transfer.manager import TransferManager, TransferConfig
from botocore.exceptions import ClientError
from boto3.s3.transfer import S3Transfer, ProgressCallbackInvoker
from boto3 import utils
def inject_s3_transfer_methods(class_attributes, **kwargs):
utils.inject_attribute(class_attributes, 'upload_file', upload_file)
utils.inject_attribute(class_attributes, 'download_file', download_file)
utils.inject_attribute(class_attributes, 'copy', copy)
utils.inject_attribute(class_attributes, 'upload_fileobj', upload_fileobj)
utils.inject_attribute(
class_attributes, 'download_fileobj', download_fileobj)
def inject_bucket_methods(class_attributes, **kwargs):
@ -26,12 +31,22 @@ def inject_bucket_methods(class_attributes, **kwargs):
utils.inject_attribute(class_attributes, 'upload_file', bucket_upload_file)
utils.inject_attribute(
class_attributes, 'download_file', bucket_download_file)
utils.inject_attribute(class_attributes, 'copy', bucket_copy)
utils.inject_attribute(
class_attributes, 'upload_fileobj', bucket_upload_fileobj)
utils.inject_attribute(
class_attributes, 'download_fileobj', bucket_download_fileobj)
def inject_object_methods(class_attributes, **kwargs):
utils.inject_attribute(class_attributes, 'upload_file', object_upload_file)
utils.inject_attribute(
class_attributes, 'download_file', object_download_file)
utils.inject_attribute(class_attributes, 'copy', object_copy)
utils.inject_attribute(
class_attributes, 'upload_fileobj', object_upload_fileobj)
utils.inject_attribute(
class_attributes, 'download_fileobj', object_download_fileobj)
def inject_object_summary_methods(class_attributes, **kwargs):
@ -39,7 +54,10 @@ def inject_object_summary_methods(class_attributes, **kwargs):
def bucket_load(self, *args, **kwargs):
"""Calls s3.Client.list_buckets() to update the attributes of the Bucket resource."""
"""
Calls s3.Client.list_buckets() to update the attributes of the Bucket
resource.
"""
# The docstring above is phrased this way to match what the autogenerated
# docs produce.
@ -56,7 +74,10 @@ def bucket_load(self, *args, **kwargs):
def object_summary_load(self, *args, **kwargs):
"""Calls s3.Client.head_object to update the attributes of the ObjectSummary resource."""
"""
Calls s3.Client.head_object to update the attributes of the ObjectSummary
resource.
"""
response = self.meta.client.head_object(
Bucket=self.bucket_name, Key=self.key)
if 'ContentLength' in response:
@ -78,10 +99,10 @@ def upload_file(self, Filename, Bucket, Key, ExtraArgs=None,
except that parameters are capitalized. Detailed examples can be found at
:ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
"""
transfer = S3Transfer(self, Config)
return transfer.upload_file(
filename=Filename, bucket=Bucket, key=Key,
extra_args=ExtraArgs, callback=Callback)
with S3Transfer(self, Config) as transfer:
return transfer.upload_file(
filename=Filename, bucket=Bucket, key=Key,
extra_args=ExtraArgs, callback=Callback)
def download_file(self, Bucket, Key, Filename, ExtraArgs=None,
@ -98,10 +119,10 @@ def download_file(self, Bucket, Key, Filename, ExtraArgs=None,
except that parameters are capitalized. Detailed examples can be found at
:ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
"""
transfer = S3Transfer(self, Config)
return transfer.download_file(
bucket=Bucket, key=Key, filename=Filename,
extra_args=ExtraArgs, callback=Callback)
with S3Transfer(self, Config) as transfer:
return transfer.download_file(
bucket=Bucket, key=Key, filename=Filename,
extra_args=ExtraArgs, callback=Callback)
def bucket_upload_file(self, Filename, Key,
@ -178,3 +199,450 @@ def object_download_file(self, Filename,
return self.meta.client.download_file(
Bucket=self.bucket_name, Key=self.key, Filename=Filename,
ExtraArgs=ExtraArgs, Callback=Callback, Config=Config)
def copy(self, CopySource, Bucket, Key, ExtraArgs=None, Callback=None,
SourceClient=None, Config=None):
"""Copy an object from one S3 location to another.
This is a managed transfer which will perform a multipart copy in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
copy_source = {
'Bucket': 'mybucket',
'Key': 'mykey'
}
s3.meta.client.copy(copy_source, 'otherbucket', 'otherkey')
:type CopySource: dict
:param CopySource: The name of the source bucket, key name of the
source object, and optional version ID of the source object. The
dictionary format is:
``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}``. Note
that the ``VersionId`` key is optional and may be omitted.
:type Bucket: str
:param Bucket: The name of the bucket to copy to
:type Key: str
:param Key: The name of the key to copy to
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the copy.
:type SourceClient: botocore or boto3 Client
:param SourceClient: The client to be used for operations that
may happen at the source object. For example, this client is
used for the head_object that determines the size of the copy.
If no client is provided, the current client is used as the client
for the source object.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
copy.
"""
subscribers = None
if Callback is not None:
subscribers = [ProgressCallbackInvoker(Callback)]
config = Config
if config is None:
config = TransferConfig()
with TransferManager(self, config) as manager:
future = manager.copy(
copy_source=CopySource, bucket=Bucket, key=Key,
extra_args=ExtraArgs, subscribers=subscribers,
source_client=SourceClient)
return future.result()
def bucket_copy(self, CopySource, Key, ExtraArgs=None, Callback=None,
SourceClient=None, Config=None):
"""Copy an object from one S3 location to an object in this bucket.
This is a managed transfer which will perform a multipart copy in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
copy_source = {
'Bucket': 'mybucket',
'Key': 'mykey'
}
bucket = s3.Bucket('otherbucket')
bucket.copy(copy_source, 'otherkey')
:type CopySource: dict
:param CopySource: The name of the source bucket, key name of the
source object, and optional version ID of the source object. The
dictionary format is:
``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}``. Note
that the ``VersionId`` key is optional and may be omitted.
:type Key: str
:param Key: The name of the key to copy to
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the copy.
:type SourceClient: botocore or boto3 Client
:param SourceClient: The client to be used for operations that
may happen at the source object. For example, this client is
used for the head_object that determines the size of the copy.
If no client is provided, the current client is used as the client
for the source object.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
copy.
"""
return self.meta.client.copy(
CopySource=CopySource, Bucket=self.name, Key=Key, ExtraArgs=ExtraArgs,
Callback=Callback, SourceClient=SourceClient, Config=Config)
def object_copy(self, CopySource, ExtraArgs=None, Callback=None,
SourceClient=None, Config=None):
"""Copy an object from one S3 location to this object.
This is a managed transfer which will perform a multipart copy in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
copy_source = {
'Bucket': 'mybucket',
'Key': 'mykey'
}
bucket = s3.Bucket('otherbucket')
obj = bucket.Object('otherkey')
obj.copy(copy_source)
:type CopySource: dict
:param CopySource: The name of the source bucket, key name of the
source object, and optional version ID of the source object. The
dictionary format is:
``{'Bucket': 'bucket', 'Key': 'key', 'VersionId': 'id'}``. Note
that the ``VersionId`` key is optional and may be omitted.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the copy.
:type SourceClient: botocore or boto3 Client
:param SourceClient: The client to be used for operations that
may happen at the source object. For example, this client is
used for the head_object that determines the size of the copy.
If no client is provided, the current client is used as the client
for the source object.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
copy.
"""
return self.meta.client.copy(
CopySource=CopySource, Bucket=self.bucket_name, Key=self.key,
ExtraArgs=ExtraArgs, Callback=Callback, SourceClient=SourceClient,
Config=Config)
def upload_fileobj(self, Fileobj, Bucket, Key, ExtraArgs=None,
Callback=None, Config=None):
"""Upload a file-like object to S3.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart upload in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'rb') as data:
s3.upload_fileobj(data, 'mybucket', 'mykey')
:type Fileobj: a file-like object
:param Fileobj: A file-like object to upload. At a minimum, it must
implement the `read` method, and must return bytes.
:type Bucket: str
:param Bucket: The name of the bucket to upload to.
:type Key: str
:param Key: The name of the key to upload to.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the upload.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
upload.
"""
if not hasattr(Fileobj, 'read'):
raise ValueError('Fileobj must implement read')
subscribers = None
if Callback is not None:
subscribers = [ProgressCallbackInvoker(Callback)]
config = Config
if config is None:
config = TransferConfig()
with TransferManager(self, config) as manager:
future = manager.upload(
fileobj=Fileobj, bucket=Bucket, key=Key,
extra_args=ExtraArgs, subscribers=subscribers)
return future.result()
def bucket_upload_fileobj(self, Fileobj, Key, ExtraArgs=None,
Callback=None, Config=None):
"""Upload a file-like object to this bucket.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart upload in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
bucket = s3.Bucket('mybucket')
with open('filename', 'rb') as data:
bucket.upload_fileobj(data, 'mykey')
:type Fileobj: a file-like object
:param Fileobj: A file-like object to upload. At a minimum, it must
implement the `read` method, and must return bytes.
:type Key: str
:param Key: The name of the key to upload to.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the upload.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
upload.
"""
return self.meta.client.upload_fileobj(
Fileobj=Fileobj, Bucket=self.name, Key=Key, ExtraArgs=ExtraArgs,
Callback=Callback, Config=Config)
def object_upload_fileobj(self, Fileobj, ExtraArgs=None, Callback=None,
Config=None):
"""Upload a file-like object to this object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart upload in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
bucket = s3.Bucket('mybucket')
obj = bucket.Object('mykey')
with open('filename', 'rb') as data:
obj.upload_fileobj(data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to upload. At a minimum, it must
implement the `read` method, and must return bytes.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the upload.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
upload.
"""
return self.meta.client.upload_fileobj(
Fileobj=Fileobj, Bucket=self.bucket_name, Key=self.key,
ExtraArgs=ExtraArgs, Callback=Callback, Config=Config)
def download_fileobj(self, Bucket, Key, Fileobj, ExtraArgs=None,
Callback=None, Config=None):
"""Download an object from S3 to a file-like object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.client('s3')
with open('filename', 'wb') as data:
s3.download_fileobj('mybucket', 'mykey', data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to download into. At a minimum, it must
implement the `write` method and must accept bytes.
:type Bucket: str
:param Bucket: The name of the bucket to download from.
:type Key: str
:param Key: The name of the key to download from.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download.
"""
if not hasattr(Fileobj, 'write'):
raise ValueError('Fileobj must implement write')
subscribers = None
if Callback is not None:
subscribers = [ProgressCallbackInvoker(Callback)]
config = Config
if config is None:
config = TransferConfig()
with TransferManager(self, config) as manager:
future = manager.download(
bucket=Bucket, key=Key, fileobj=Fileobj,
extra_args=ExtraArgs, subscribers=subscribers)
return future.result()
def bucket_download_fileobj(self, Key, Fileobj, ExtraArgs=None,
Callback=None, Config=None):
"""Download an object from this bucket to a file-like-object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
bucket = s3.Bucket('mybucket')
with open('filename', 'wb') as data:
bucket.download_fileobj('mykey', data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to download into. At a minimum, it must
implement the `write` method and must accept bytes.
:type Key: str
:param Key: The name of the key to download from.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download.
"""
return self.meta.client.download_fileobj(
Bucket=self.name, Key=Key, Fileobj=Fileobj, ExtraArgs=ExtraArgs,
Callback=Callback, Config=Config)
def object_download_fileobj(self, Fileobj, ExtraArgs=None, Callback=None,
Config=None):
"""Download this object from S3 to a file-like object.
The file-like object must be in binary mode.
This is a managed transfer which will perform a multipart download in
multiple threads if necessary.
Usage::
import boto3
s3 = boto3.resource('s3')
bucket = s3.Bucket('mybucket')
obj = bucket.Object('mykey')
with open('filename', 'wb') as data:
obj.download_fileobj(data)
:type Fileobj: a file-like object
:param Fileobj: A file-like object to download into. At a minimum, it must
implement the `write` method and must accept bytes.
:type ExtraArgs: dict
:param ExtraArgs: Extra arguments that may be passed to the
client operation.
:type Callback: method
:param Callback: A method which takes a number of bytes transferred to
be periodically called during the download.
:type Config: boto3.s3.transfer.TransferConfig
:param Config: The transfer configuration to be used when performing the
download.
"""
return self.meta.client.download_fileobj(
Bucket=self.bucket_name, Key=self.key, Fileobj=Fileobj,
ExtraArgs=ExtraArgs, Callback=Callback, Config=Config)
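
All of the injected methods above accept a ``Callback`` that is wrapped in a ``ProgressCallbackInvoker`` subscriber internally. A sketch of tracking upload progress this way; the bucket and file names are placeholders::

    import os
    import sys
    import threading

    import boto3

    class ProgressTracker(object):
        def __init__(self, filename):
            self._size = float(os.path.getsize(filename))
            self._seen = 0
            self._lock = threading.Lock()

        def __call__(self, bytes_amount):
            # May be called from multiple transfer threads.
            with self._lock:
                self._seen += bytes_amount
                sys.stdout.write(
                    '\r%.2f%%' % (100 * self._seen / self._size))
                sys.stdout.flush()

    s3 = boto3.client('s3')
    s3.upload_file('bigfile.bin', 'mybucket', 'bigfile.bin',
                   Callback=ProgressTracker('bigfile.bin'))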

View file

@ -18,7 +18,6 @@ uploads/downloads. It handles several things for the user:
* Automatically switching to multipart transfers when
a file is over a specific size threshold
* Uploading/downloading a file in parallel
* Throttling based on max bandwidth
* Progress callbacks to monitor transfers
* Retries. While botocore handles retries for streaming uploads,
it is not possible for it to handle retries for streaming
@ -30,7 +29,6 @@ to configure many aspects of the transfer process including:
* Multipart threshold size
* Max parallel downloads
* Max bandwidth
* Socket timeouts
* Retry amounts
@ -94,8 +92,9 @@ to the user:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\\r%s %s / %s (%.2f%%)" % (self._filename, self._seen_so_far,
self._size, percentage))
"\\r%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
sys.stdout.flush()
@ -123,499 +122,111 @@ transfer. For example:
"""
import os
import math
import functools
import logging
import socket
import threading
import random
import string
import boto3
from concurrent import futures
from botocore.exceptions import ClientError
from botocore.compat import six
from botocore.vendored.requests.packages.urllib3.exceptions import \
ReadTimeoutError
from botocore.exceptions import IncompleteReadError
from s3transfer.exceptions import RetriesExceededError as \
S3TransferRetriesExceededError
from s3transfer.manager import TransferConfig as S3TransferConfig
from s3transfer.manager import TransferManager
from s3transfer.subscribers import BaseSubscriber
from s3transfer.utils import OSUtils
import boto3.compat
from boto3.exceptions import RetriesExceededError, S3UploadFailedError
logger = logging.getLogger(__name__)
queue = six.moves.queue
KB = 1024
MB = KB * KB
MB = 1024 * 1024
SHUTDOWN_SENTINEL = object()
S3_RETRYABLE_ERRORS = (
socket.timeout, boto3.compat.SOCKET_ERROR,
ReadTimeoutError, IncompleteReadError
)
class TransferConfig(S3TransferConfig):
ALIAS = {
'max_concurrency': 'max_request_concurrency',
'max_io_queue': 'max_io_queue_size'
}
def random_file_extension(num_digits=8):
return ''.join(random.choice(string.hexdigits) for _ in range(num_digits))
def disable_upload_callbacks(request, operation_name, **kwargs):
if operation_name in ['PutObject', 'UploadPart'] and \
hasattr(request.body, 'disable_callback'):
request.body.disable_callback()
def enable_upload_callbacks(request, operation_name, **kwargs):
if operation_name in ['PutObject', 'UploadPart'] and \
hasattr(request.body, 'enable_callback'):
request.body.enable_callback()
class QueueShutdownError(Exception):
pass
class ReadFileChunk(object):
def __init__(self, fileobj, start_byte, chunk_size, full_file_size,
callback=None, enable_callback=True):
"""
Given a file object shown below::
|___________________________________________________|
0 | | full_file_size
|----chunk_size---|
start_byte
:type fileobj: file
:param fileobj: File like object
:type start_byte: int
:param start_byte: The first byte from which to start reading.
:type chunk_size: int
:param chunk_size: The max chunk size to read. Trying to read
pass the end of the chunk size will behave like you've
reached the end of the file.
:type full_file_size: int
:param full_file_size: The entire content length associated
with ``fileobj``.
:type callback: function(amount_read)
:param callback: Called whenever data is read from this object.
"""
self._fileobj = fileobj
self._start_byte = start_byte
self._size = self._calculate_file_size(
self._fileobj, requested_size=chunk_size,
start_byte=start_byte, actual_file_size=full_file_size)
self._fileobj.seek(self._start_byte)
self._amount_read = 0
self._callback = callback
self._callback_enabled = enable_callback
@classmethod
def from_filename(cls, filename, start_byte, chunk_size, callback=None,
enable_callback=True):
"""Convenience factory function to create from a filename.
:type start_byte: int
:param start_byte: The first byte from which to start reading.
:type chunk_size: int
:param chunk_size: The max chunk size to read. Trying to read
pass the end of the chunk size will behave like you've
reached the end of the file.
:type full_file_size: int
:param full_file_size: The entire content length associated
with ``fileobj``.
:type callback: function(amount_read)
:param callback: Called whenever data is read from this object.
:type enable_callback: bool
:param enable_callback: Indicate whether to invoke callback
during read() calls.
:rtype: ``ReadFileChunk``
:return: A new instance of ``ReadFileChunk``
"""
f = open(filename, 'rb')
file_size = os.fstat(f.fileno()).st_size
return cls(f, start_byte, chunk_size, file_size, callback,
enable_callback)
def _calculate_file_size(self, fileobj, requested_size, start_byte,
actual_file_size):
max_chunk_size = actual_file_size - start_byte
return min(max_chunk_size, requested_size)
def read(self, amount=None):
if amount is None:
amount_to_read = self._size - self._amount_read
else:
amount_to_read = min(self._size - self._amount_read, amount)
data = self._fileobj.read(amount_to_read)
self._amount_read += len(data)
if self._callback is not None and self._callback_enabled:
self._callback(len(data))
return data
def enable_callback(self):
self._callback_enabled = True
def disable_callback(self):
self._callback_enabled = False
def seek(self, where):
self._fileobj.seek(self._start_byte + where)
if self._callback is not None and self._callback_enabled:
# To also rewind the callback() for an accurate progress report
self._callback(where - self._amount_read)
self._amount_read = where
def close(self):
self._fileobj.close()
def tell(self):
return self._amount_read
def __len__(self):
# __len__ is defined because requests will try to determine the length
# of the stream to set a content length. In the normal case
# of the file it will just stat the file, but we need to change that
# behavior. By providing a __len__, requests will use that instead
# of stat'ing the file.
return self._size
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
def __iter__(self):
# This is a workaround for http://bugs.python.org/issue17575
# Basically httplib will try to iterate over the contents, even
# if its a file like object. This wasn't noticed because we've
# already exhausted the stream so iterating over the file immediately
# stops, which is what we're simulating here.
return iter([])
class StreamReaderProgress(object):
"""Wrapper for a read only stream that adds progress callbacks."""
def __init__(self, stream, callback=None):
self._stream = stream
self._callback = callback
def read(self, *args, **kwargs):
value = self._stream.read(*args, **kwargs)
if self._callback is not None:
self._callback(len(value))
return value
class OSUtils(object):
def get_file_size(self, filename):
return os.path.getsize(filename)
def open_file_chunk_reader(self, filename, start_byte, size, callback):
return ReadFileChunk.from_filename(filename, start_byte,
size, callback,
enable_callback=False)
def open(self, filename, mode):
return open(filename, mode)
def remove_file(self, filename):
"""Remove a file, noop if file does not exist."""
# Unlike os.remove, if the file does not exist,
# then this method does nothing.
try:
os.remove(filename)
except OSError:
pass
def rename_file(self, current_filename, new_filename):
boto3.compat.rename_file(current_filename, new_filename)
class MultipartUploader(object):
# These are the extra_args that need to be forwarded onto
# subsequent upload_parts.
UPLOAD_PART_ARGS = [
'SSECustomerKey',
'SSECustomerAlgorithm',
'SSECustomerKeyMD5',
'RequestPayer',
]
def __init__(self, client, config, osutil,
executor_cls=futures.ThreadPoolExecutor):
self._client = client
self._config = config
self._os = osutil
self._executor_cls = executor_cls
def _extra_upload_part_args(self, extra_args):
# Only the args in UPLOAD_PART_ARGS actually need to be passed
# onto the upload_part calls.
upload_parts_args = {}
for key, value in extra_args.items():
if key in self.UPLOAD_PART_ARGS:
upload_parts_args[key] = value
return upload_parts_args
def upload_file(self, filename, bucket, key, callback, extra_args):
response = self._client.create_multipart_upload(Bucket=bucket,
Key=key, **extra_args)
upload_id = response['UploadId']
try:
parts = self._upload_parts(upload_id, filename, bucket, key,
callback, extra_args)
except Exception as e:
logger.debug("Exception raised while uploading parts, "
"aborting multipart upload.", exc_info=True)
self._client.abort_multipart_upload(
Bucket=bucket, Key=key, UploadId=upload_id)
raise S3UploadFailedError(
"Failed to upload %s to %s: %s" % (
filename, '/'.join([bucket, key]), e))
self._client.complete_multipart_upload(
Bucket=bucket, Key=key, UploadId=upload_id,
MultipartUpload={'Parts': parts})
def _upload_parts(self, upload_id, filename, bucket, key, callback,
extra_args):
upload_parts_extra_args = self._extra_upload_part_args(extra_args)
parts = []
part_size = self._config.multipart_chunksize
num_parts = int(
math.ceil(self._os.get_file_size(filename) / float(part_size)))
max_workers = self._config.max_concurrency
with self._executor_cls(max_workers=max_workers) as executor:
upload_partial = functools.partial(
self._upload_one_part, filename, bucket, key, upload_id,
part_size, upload_parts_extra_args, callback)
for part in executor.map(upload_partial, range(1, num_parts + 1)):
parts.append(part)
return parts
def _upload_one_part(self, filename, bucket, key,
upload_id, part_size, extra_args,
callback, part_number):
open_chunk_reader = self._os.open_file_chunk_reader
with open_chunk_reader(filename, part_size * (part_number - 1),
part_size, callback) as body:
response = self._client.upload_part(
Bucket=bucket, Key=key,
UploadId=upload_id, PartNumber=part_number, Body=body,
**extra_args)
etag = response['ETag']
return {'ETag': etag, 'PartNumber': part_number}
class ShutdownQueue(queue.Queue):
"""A queue implementation that can be shutdown.
Shutting down a queue means that this class adds a
trigger_shutdown method that will trigger all subsequent
calls to put() to fail with a ``QueueShutdownError``.
It purposefully deviates from queue.Queue, and is *not* meant
to be a drop in replacement for ``queue.Queue``.
"""
def _init(self, maxsize):
self._shutdown = False
self._shutdown_lock = threading.Lock()
# queue.Queue is an old style class so we don't use super().
return queue.Queue._init(self, maxsize)
def trigger_shutdown(self):
with self._shutdown_lock:
self._shutdown = True
logger.debug("The IO queue is now shutdown.")
def put(self, item):
# Note: this is not sufficient, it's still possible to deadlock!
# Need to hook into the condition vars used by this class.
with self._shutdown_lock:
if self._shutdown:
raise QueueShutdownError("Cannot put item to queue when "
"queue has been shutdown.")
return queue.Queue.put(self, item)
class MultipartDownloader(object):
def __init__(self, client, config, osutil,
executor_cls=futures.ThreadPoolExecutor):
self._client = client
self._config = config
self._os = osutil
self._executor_cls = executor_cls
self._ioqueue = ShutdownQueue(self._config.max_io_queue)
def download_file(self, bucket, key, filename, object_size,
extra_args, callback=None):
with self._executor_cls(max_workers=2) as controller:
# 1 thread for the future that manages the uploading of files
# 1 thread for the future that manages IO writes.
download_parts_handler = functools.partial(
self._download_file_as_future,
bucket, key, filename, object_size, extra_args, callback)
parts_future = controller.submit(download_parts_handler)
io_writes_handler = functools.partial(
self._perform_io_writes, filename)
io_future = controller.submit(io_writes_handler)
results = futures.wait([parts_future, io_future],
return_when=futures.FIRST_EXCEPTION)
self._process_future_results(results)
def _process_future_results(self, futures):
finished, unfinished = futures
for future in finished:
future.result()
def _download_file_as_future(self, bucket, key, filename, object_size,
extra_args, callback):
part_size = self._config.multipart_chunksize
num_parts = int(math.ceil(object_size / float(part_size)))
max_workers = self._config.max_concurrency
download_partial = functools.partial(
self._download_range, bucket, key, filename,
part_size, num_parts, extra_args, callback)
try:
with self._executor_cls(max_workers=max_workers) as executor:
list(executor.map(download_partial, range(num_parts)))
finally:
self._ioqueue.put(SHUTDOWN_SENTINEL)
def _calculate_range_param(self, part_size, part_index, num_parts):
start_range = part_index * part_size
if part_index == num_parts - 1:
end_range = ''
else:
end_range = start_range + part_size - 1
range_param = 'bytes=%s-%s' % (start_range, end_range)
return range_param
def _download_range(self, bucket, key, filename,
part_size, num_parts,
extra_args, callback, part_index):
try:
range_param = self._calculate_range_param(
part_size, part_index, num_parts)
max_attempts = self._config.num_download_attempts
last_exception = None
for i in range(max_attempts):
try:
logger.debug("Making get_object call.")
response = self._client.get_object(
Bucket=bucket, Key=key, Range=range_param,
**extra_args)
streaming_body = StreamReaderProgress(
response['Body'], callback)
buffer_size = 1024 * 16
current_index = part_size * part_index
for chunk in iter(lambda: streaming_body.read(buffer_size),
b''):
self._ioqueue.put((current_index, chunk))
current_index += len(chunk)
return
except S3_RETRYABLE_ERRORS as e:
logger.debug("Retrying exception caught (%s), "
"retrying request, (attempt %s / %s)", e, i,
max_attempts, exc_info=True)
last_exception = e
continue
raise RetriesExceededError(last_exception)
finally:
logger.debug("EXITING _download_range for part: %s", part_index)
def _perform_io_writes(self, filename):
try:
self._loop_on_io_writes(filename)
except Exception as e:
logger.debug("Caught exception in IO thread: %s",
e, exc_info=True)
self._ioqueue.trigger_shutdown()
raise
def _loop_on_io_writes(self, filename):
with self._os.open(filename, 'wb') as f:
while True:
task = self._ioqueue.get()
if task is SHUTDOWN_SENTINEL:
logger.debug("Shutdown sentinel received in IO handler, "
"shutting down IO handler.")
return
else:
offset, data = task
f.seek(offset)
f.write(data)
class TransferConfig(object):
def __init__(self,
multipart_threshold=8 * MB,
max_concurrency=10,
multipart_chunksize=8 * MB,
num_download_attempts=5,
max_io_queue=100):
self.multipart_threshold = multipart_threshold
self.max_concurrency = max_concurrency
self.multipart_chunksize = multipart_chunksize
self.num_download_attempts = num_download_attempts
self.max_io_queue = max_io_queue
max_io_queue=100,
io_chunksize=256 * KB):
"""Configuration object for managed S3 transfers
:param multipart_threshold: The transfer size threshold for which
multipart uploads, downloads, and copies will automatically be
triggered.
:param max_concurrency: The maximum number of threads that will be
making requests to perform a transfer.
:param multipart_chunksize: The partition size of each part for a
multipart transfer.
:param num_download_attempts: The number of download attempts that
will be retried upon errors with downloading an object in S3.
Note that these retries account for errors that occur when
streaming down the data from s3 (i.e. socket errors and read
timeouts that occur after receiving an OK response from s3).
Other retryable exceptions such as throttling errors and 5xx
errors are already retried by botocore (this default is 5). This
does not take into account the number of exceptions retried by
botocore.
:param max_io_queue: The maximum amount of read parts that can be
queued in memory to be written for a download. The size of each
of these read parts is at most the size of ``io_chunksize``.
:param io_chunksize: The max size of each chunk in the io queue.
Currently, this is the size used when ``read`` is called on the
downloaded stream as well.
"""
super(TransferConfig, self).__init__(
multipart_threshold=multipart_threshold,
max_request_concurrency=max_concurrency,
multipart_chunksize=multipart_chunksize,
num_download_attempts=num_download_attempts,
max_io_queue_size=max_io_queue,
io_chunksize=io_chunksize,
)
# Some of the argument names are not the same as the inherited
# S3TransferConfig so we add aliases so you can still access the
# old version of the names.
for alias in self.ALIAS:
setattr(self, alias, getattr(self, self.ALIAS[alias]))
def __setattr__(self, name, value):
# If the alias name is used, make sure we set the name that it points
# to as that is what actually is used in governing the TransferManager.
if name in self.ALIAS:
super(TransferConfig, self).__setattr__(self.ALIAS[name], value)
# Always set the value of the actual name provided.
super(TransferConfig, self).__setattr__(name, value)
class S3Transfer(object):
ALLOWED_DOWNLOAD_ARGS = TransferManager.ALLOWED_DOWNLOAD_ARGS
ALLOWED_UPLOAD_ARGS = TransferManager.ALLOWED_UPLOAD_ARGS
ALLOWED_DOWNLOAD_ARGS = [
'VersionId',
'SSECustomerAlgorithm',
'SSECustomerKey',
'SSECustomerKeyMD5',
'RequestPayer',
]
ALLOWED_UPLOAD_ARGS = [
'ACL',
'CacheControl',
'ContentDisposition',
'ContentEncoding',
'ContentLanguage',
'ContentType',
'Expires',
'GrantFullControl',
'GrantRead',
'GrantReadACP',
'GrantWriteACL',
'Metadata',
'RequestPayer',
'ServerSideEncryption',
'StorageClass',
'SSECustomerAlgorithm',
'SSECustomerKey',
'SSECustomerKeyMD5',
'SSEKMSKeyId',
]
def __init__(self, client, config=None, osutil=None):
self._client = client
def __init__(self, client=None, config=None, osutil=None, manager=None):
if not client and not manager:
raise ValueError(
'Either a boto3.Client or s3transfer.manager.TransferManager '
'must be provided'
)
if manager and any([client, config, osutil]):
raise ValueError(
'Manager cannot be provided with client, config, '
'nor osutil. These parameters are mutually exclusive.'
)
if config is None:
config = TransferConfig()
self._config = config
if osutil is None:
osutil = OSUtils()
self._osutil = osutil
if manager:
self._manager = manager
else:
self._manager = TransferManager(client, config, osutil)
def upload_file(self, filename, bucket, key,
callback=None, extra_args=None):
@ -624,31 +235,22 @@ class S3Transfer(object):
Variants have also been injected into S3 client, Bucket and Object.
You don't have to use S3Transfer.upload_file() directly.
"""
if extra_args is None:
extra_args = {}
self._validate_all_known_args(extra_args, self.ALLOWED_UPLOAD_ARGS)
events = self._client.meta.events
events.register_first('request-created.s3',
disable_upload_callbacks,
unique_id='s3upload-callback-disable')
events.register_last('request-created.s3',
enable_upload_callbacks,
unique_id='s3upload-callback-enable')
if self._osutil.get_file_size(filename) >= \
self._config.multipart_threshold:
self._multipart_upload(filename, bucket, key, callback, extra_args)
else:
self._put_object(filename, bucket, key, callback, extra_args)
if not isinstance(filename, six.string_types):
raise ValueError('Filename must be a string')
def _put_object(self, filename, bucket, key, callback, extra_args):
# We're using open_file_chunk_reader so we can take advantage of the
# progress callback functionality.
open_chunk_reader = self._osutil.open_file_chunk_reader
with open_chunk_reader(filename, 0,
self._osutil.get_file_size(filename),
callback=callback) as body:
self._client.put_object(Bucket=bucket, Key=key, Body=body,
**extra_args)
subscribers = self._get_subscribers(callback)
future = self._manager.upload(
filename, bucket, key, extra_args, subscribers)
try:
future.result()
# If a client error was raised, add the backwards compatibility layer
# that raises a S3UploadFailedError. These specific errors were only
# ever thrown for upload_parts but now can be thrown for any related
# client error.
except ClientError as e:
raise S3UploadFailedError(
"Failed to upload %s to %s: %s" % (
filename, '/'.join([bucket, key]), e))
def download_file(self, bucket, key, filename, extra_args=None,
callback=None):
@ -657,77 +259,42 @@ class S3Transfer(object):
Variants have also been injected into S3 client, Bucket and Object.
You don't have to use S3Transfer.download_file() directly.
"""
# This method will issue a ``head_object`` request to determine
# the size of the S3 object. This is used to determine if the
# object is downloaded in parallel.
if extra_args is None:
extra_args = {}
self._validate_all_known_args(extra_args, self.ALLOWED_DOWNLOAD_ARGS)
object_size = self._object_size(bucket, key, extra_args)
temp_filename = filename + os.extsep + random_file_extension()
if not isinstance(filename, six.string_types):
raise ValueError('Filename must be a string')
subscribers = self._get_subscribers(callback)
future = self._manager.download(
bucket, key, filename, extra_args, subscribers)
try:
self._download_file(bucket, key, temp_filename, object_size,
extra_args, callback)
except Exception:
logger.debug("Exception caught in download_file, removing partial "
"file: %s", temp_filename, exc_info=True)
self._osutil.remove_file(temp_filename)
raise
else:
self._osutil.rename_file(temp_filename, filename)
future.result()
# This is for backwards compatibility where when retries are
# exceeded we need to throw the same error from boto3 instead of
# s3transfer's built in RetriesExceededError as current users are
# catching the boto3 one instead of the s3transfer exception to do
# their own retries.
except S3TransferRetriesExceededError as e:
raise RetriesExceededError(e.last_exception)
def _download_file(self, bucket, key, filename, object_size,
extra_args, callback):
if object_size >= self._config.multipart_threshold:
self._ranged_download(bucket, key, filename, object_size,
extra_args, callback)
else:
self._get_object(bucket, key, filename, extra_args, callback)
def _get_subscribers(self, callback):
if not callback:
return None
return [ProgressCallbackInvoker(callback)]
def _validate_all_known_args(self, actual, allowed):
for kwarg in actual:
if kwarg not in allowed:
raise ValueError(
"Invalid extra_args key '%s', "
"must be one of: %s" % (
kwarg, ', '.join(allowed)))
def __enter__(self):
return self
def _ranged_download(self, bucket, key, filename, object_size,
extra_args, callback):
downloader = MultipartDownloader(self._client, self._config,
self._osutil)
downloader.download_file(bucket, key, filename, object_size,
extra_args, callback)
def __exit__(self, *args):
self._manager.__exit__(*args)
def _get_object(self, bucket, key, filename, extra_args, callback):
# precondition: num_download_attempts > 0
max_attempts = self._config.num_download_attempts
last_exception = None
for i in range(max_attempts):
try:
return self._do_get_object(bucket, key, filename,
extra_args, callback)
except S3_RETRYABLE_ERRORS as e:
logger.debug("Retrying exception caught (%s), "
"retrying request, (attempt %s / %s)", e, i,
max_attempts, exc_info=True)
last_exception = e
continue
raise RetriesExceededError(last_exception)
def _do_get_object(self, bucket, key, filename, extra_args, callback):
response = self._client.get_object(Bucket=bucket, Key=key,
**extra_args)
streaming_body = StreamReaderProgress(
response['Body'], callback)
with self._osutil.open(filename, 'wb') as f:
for chunk in iter(lambda: streaming_body.read(8192), b''):
f.write(chunk)
def _object_size(self, bucket, key, extra_args):
    return self._client.head_object(
        Bucket=bucket, Key=key, **extra_args)['ContentLength']
def _multipart_upload(self, filename, bucket, key, callback, extra_args):
    uploader = MultipartUploader(self._client, self._config, self._osutil)
    uploader.upload_file(filename, bucket, key, callback, extra_args)
class ProgressCallbackInvoker(BaseSubscriber):
    """A back-compat wrapper to invoke a provided callback via a subscriber
    :param callback: A callable that takes a single positional argument for
        how many bytes were transferred.
    """
    def __init__(self, callback):
        self._callback = callback
    def on_progress(self, bytes_transferred, **kwargs):
        self._callback(bytes_transferred)

View file

@ -69,8 +69,8 @@ class Session(object):
self._session.set_config_variable('profile', profile_name)
if aws_access_key_id or aws_secret_access_key or aws_session_token:
self._session.set_credentials(aws_access_key_id,
aws_secret_access_key, aws_session_token)
self._session.set_credentials(
aws_access_key_id, aws_secret_access_key, aws_session_token)
if region_name is not None:
self._session.set_config_variable('region', region_name)
@ -81,7 +81,8 @@ class Session(object):
self._register_default_handlers()
def __repr__(self):
return 'Session(region={0})'.format(
return '{0}(region_name={1})'.format(
self.__class__.__name__,
repr(self._session.get_config_variable('region')))
@property
@ -105,6 +106,13 @@ class Session(object):
"""
return self._session.get_component('event_emitter')
@property
def available_profiles(self):
"""
The profiles available to the session credentials
"""
return self._session.available_profiles
def _setup_loader(self):
"""
Setup loader paths so that we can load resources.
@ -197,26 +205,28 @@ class Session(object):
of the client.
:type use_ssl: boolean
:param use_ssl: Whether or not to use SSL. By default, SSL is used. Note that
not all services support non-ssl connections.
:param use_ssl: Whether or not to use SSL. By default, SSL is used.
Note that not all services support non-ssl connections.
:type verify: boolean/string
:param verify: Whether or not to verify SSL certificates. By default SSL certificates
are verified. You can provide the following values:
:param verify: Whether or not to verify SSL certificates. By default
SSL certificates are verified. You can provide the following
values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
use. You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
use. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
:type endpoint_url: string
:param endpoint_url: The complete URL to use for the constructed client.
Normally, botocore will automatically construct the appropriate URL
to use when communicating with a service. You can specify a
complete URL (including the "http/https" scheme) to override this
behavior. If this value is provided, then ``use_ssl`` is ignored.
:param endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You
can specify a complete URL (including the "http/https" scheme)
to override this behavior. If this value is provided,
then ``use_ssl`` is ignored.
:type aws_access_key_id: string
:param aws_access_key_id: The access key to use when creating
@ -237,7 +247,10 @@ class Session(object):
:param config: Advanced client configuration options. If region_name
is specified in the client config, its value will take precedence
over environment variables and configuration values, but not over
a region_name value passed explicitly to the method.
a region_name value passed explicitly to the method. See
`botocore config documentation
<https://botocore.readthedocs.io/en/stable/reference/config.html>`_
for more details.
:return: Service client instance
@ -272,26 +285,28 @@ class Session(object):
of the client.
:type use_ssl: boolean
:param use_ssl: Whether or not to use SSL. By default, SSL is used. Note that
not all services support non-ssl connections.
:param use_ssl: Whether or not to use SSL. By default, SSL is used.
Note that not all services support non-ssl connections.
:type verify: boolean/string
:param verify: Whether or not to verify SSL certificates. By default SSL certificates
are verified. You can provide the following values:
:param verify: Whether or not to verify SSL certificates. By default
SSL certificates are verified. You can provide the following
values:
* False - do not validate SSL certificates. SSL will still be
used (unless use_ssl is False), but SSL certificates
will not be verified.
* path/to/cert/bundle.pem - A filename of the CA cert bundle to
use. You can specify this argument if you want to use a different
CA cert bundle than the one used by botocore.
use. You can specify this argument if you want to use a
different CA cert bundle than the one used by botocore.
:type endpoint_url: string
:param endpoint_url: The complete URL to use for the constructed client.
Normally, botocore will automatically construct the appropriate URL
to use when communicating with a service. You can specify a
complete URL (including the "http/https" scheme) to override this
behavior. If this value is provided, then ``use_ssl`` is ignored.
:param endpoint_url: The complete URL to use for the constructed
client. Normally, botocore will automatically construct the
appropriate URL to use when communicating with a service. You
can specify a complete URL (including the "http/https" scheme)
to override this behavior. If this value is provided,
then ``use_ssl`` is ignored.
:type aws_access_key_id: string
:param aws_access_key_id: The access key to use when creating
@ -314,20 +329,23 @@ class Session(object):
over environment variables and configuration values, but not over
a region_name value passed explicitly to the method. If
user_agent_extra is specified in the client config, it overrides
the default user_agent_extra provided by the resource API.
the default user_agent_extra provided by the resource API. See
`botocore config documentation
<https://botocore.readthedocs.io/en/stable/reference/config.html>`_
for more details.
:return: Subclass of :py:class:`~boto3.resources.base.ServiceResource`
"""
try:
resource_model = self._loader.load_service_model(
service_name, 'resources-1', api_version)
except UnknownServiceError as e:
except UnknownServiceError:
available = self.get_available_resources()
has_low_level_client = (
service_name in self.get_available_services())
raise ResourceNotExistsError(service_name, available,
has_low_level_client)
except DataNotFoundError as e:
except DataNotFoundError:
# This is because we've provided an invalid API version.
available_api_versions = self._loader.list_api_versions(
service_name, 'resources-1')
@ -377,7 +395,7 @@ class Session(object):
service_name=service_name, service_model=service_model,
resource_json_definitions=resource_model['resources'],
service_waiter_model=boto3.utils.LazyLoadedWaiterModel(
self._session, service_name, api_version)
self._session, service_name, api_version)
)
# Create the service resource class.

View file

@ -4,3 +4,17 @@
{{ super() }}
<!--REGION_DISCLAIMER_DO_NOT_REMOVE-->
{%- endblock %}
{%- block footer %}
{{ super() }}
<script type="text/javascript" src="https://media.amazonwebservices.com/js/sitecatalyst/s_code.min.js"></script>
<script type="text/javascript">
s.prop66 = 'AWS SDK for Python (boto3)';
s.eVar66 = 'D=c66';
s.prop65 = 'API Reference';
s.eVar65 = 'D=c65';
var s_code = s.t();
if (s_code) document.write(s_code);
</script>
{%- endblock %}

View file

@ -43,10 +43,10 @@ which are passed into the underlying service operation. Use the
:py:meth:`~boto3.resources.collection.Collection.filter` method to filter
the results::
# S3 list all keys with the prefix '/photos'
# S3 list all keys with the prefix 'photos/'
s3 = boto3.resource('s3')
for bucket in s3.buckets.all():
for obj in bucket.objects.filter(Prefix='/photos'):
for obj in bucket.objects.filter(Prefix='photos/'):
print('{0}:{1}'.format(bucket.name, obj.key))
.. warning::

View file

@ -62,14 +62,14 @@ For example::
's3',
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
aws_session_token=SESSION_TOKN,
aws_session_token=SESSION_TOKEN,
)
# Or via the Session
session = boto3.Session(
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
aws_session_token=SESSION_TOKN,
aws_session_token=SESSION_TOKEN,
)
where ``ACCESS_KEY``, ``SECRET_KEY`` and ``SESSION_TOKEN`` are variables
@ -262,7 +262,8 @@ Boto2 Config
~~~~~~~~~~~~
Boto3 will attempt to load credentials from the Boto2 config file.
It will check ``/etc/boto.cfg`` and ``~/.boto``. Note that
It first checks the file pointed to by ``BOTO_CONFIG`` if set, otherwise
it will check ``/etc/boto.cfg`` and ``~/.boto``. Note that
*only* the ``[Credentials]`` section of the boto config file is used.
All other configuration data in the boto config file is ignored.
Example::
@ -350,6 +351,11 @@ Environment Variable Configuration
is ``~/.aws/credentials``. You only need to set this variable if
you want to change this location.
``BOTO_CONFIG``
The location of the boto2 credentials file. This is not set by default.
You only need to set this variable if you want to use credentials stored in
boto2 format in a location other than ``/etc/boto.cfg`` or ``~/.boto``.
``AWS_CA_BUNDLE``
The path to a custom certificate bundle to use when establishing
SSL/TLS connections. Boto3 includes a bundled CA bundle it will
@ -397,7 +403,7 @@ If your profile name has spaces, you'll need to surround this value in quotes:
in the ``~/.aws/config`` file:
``region``
The default region to use, e.g. ``us-west-2``, ``us-east-1``, etc.
The default region to use, e.g. ``us-west-2``, ``us-east-1``, etc. When specifying a region inline during client initialization, this property is named ``region_name``.
``aws_access_key_id``
The access key to use.
``aws_secret_access_key``
@ -417,6 +423,15 @@ in the ``~/.aws/config`` file:
The number of attempts to make before giving up when retrieving data from
the instance metadata service. See the docs above on
``AWS_METADATA_SERVICE_NUM_ATTEMPTS`` for more information.
``parameter_validation``
Whether to perform parameter validation (default is ``true``; parameters
are validated by default). This is a boolean value that can be either
``true`` or ``false``. Whenever you make an API call using a client,
the parameters you provide are run through a set of validation checks,
including (but not limited to): required parameters present, type
checking, no unknown parameters, and minimum length checks. You should
generally leave parameter validation enabled; a sketch of disabling it
programmatically follows this list.
``role_arn``
The ARN of the role you want to assume.
``source_profile``
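As referenced under ``parameter_validation`` above, validation can also be
toggled per client in code. A minimal sketch using botocore's ``Config``
object, the programmatic counterpart of the config-file entry (the service
name here is only illustrative)::

import boto3
from botocore.config import Config

# Skip client-side parameter validation for this client only; invalid
# parameters are then rejected by the service rather than locally.
s3 = boto3.client('s3', config=Config(parameter_validation=False))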

View file

@ -274,6 +274,64 @@ table.
}
)
The batch writer can help to de-duplicate requests by specifying ``overwrite_by_pkeys=['partition_key', 'sort_key']``
if you want to work around the restriction that a single batch write request may not contain duplicate keys, which otherwise fails with
``botocore.exceptions.ClientError: An error occurred (ValidationException) when calling the BatchWriteItem operation: Provided list of item keys contains duplicates``.
It will drop request items already in the buffer whose (composite) primary key values are
the same as a newly added one's, which is eventually consistent with a stream of individual
put/delete operations on the same item.
::
with table.batch_writer(overwrite_by_pkeys=['partition_key', 'sort_key']) as batch:
batch.put_item(
Item={
'partition_key': 'p1',
'sort_key': 's1',
'other': '111',
}
)
batch.put_item(
Item={
'partition_key': 'p1',
'sort_key': 's1',
'other': '222',
}
)
batch.delete_item(
Key={
'partition_key': 'p1',
'sort_key': 's2'
}
)
batch.put_item(
Item={
'partition_key': 'p1',
'sort_key': 's2',
'other': '444',
}
)
After de-duplication, the buffered requests are equivalent to:
::
batch.put_item(
Item={
'partition_key': 'p1',
'sort_key': 's1',
'other': '222',
}
)
batch.put_item(
Item={
'partition_key': 'p1',
'sort_key': 's2',
'other': '444',
}
)
Querying and Scanning
---------------------

View file

@ -10,6 +10,7 @@ Migration Guides
new
migration
upgrading
General Feature Guides

View file

@ -50,7 +50,7 @@ Once configured, you may begin using Boto 3::
for bucket in boto3.resource('s3').buckets.all():
print(bucket.name)
See the :ref:`tutorial_list` and `Boto 3 Documentation <http://boto3.readthedocs.org/>`__ for more information.
See the :ref:`tutorial_list` and `Boto 3 Documentation <https://boto3.readthedocs.io/>`__ for more information.
The rest of this document will describe specific common usage scenarios of Boto 2 code and how to accomplish the same tasks with Boto 3.

View file

@ -5,7 +5,7 @@ What's New
Boto 3 is a ground-up rewrite of Boto. It uses a data-driven approach to
generate classes at runtime from JSON description files that are shared
between SDKs in various languages. This includes descriptions for a
high level, object oriented interface similar to those availabe in
high level, object oriented interface similar to those available in
previous versions of Boto.
Because Boto 3 is generated from these shared JSON files, we get

View file

@ -37,45 +37,404 @@ config parameter when you create your client or resource.::
Using the Transfer Manager
--------------------------
The `s3 transfer manager`_ provides you with less painful multipart uploads and
downloads. Its functions are automatically added into the client when you create
it, so there is no need to create your own transfer manager. Below you will see
several examples of how to use it.
The methods on the base client are :py:meth:`S3.Client.upload_file` and
:py:meth:`S3.Client.download_file`::
``boto3`` provides interfaces for managing various types of transfers with
S3. Functionality includes:
* Automatically managing multipart and non-multipart uploads
* Automatically managing multipart and non-multipart downloads
* Automatically managing multipart and non-multipart copies
* Uploading from:
  * a file name
  * a readable file-like object
* Downloading to:
  * a file name
  * a writeable file-like object
* Tracking progress of individual transfers
* Managing retries of transfers
* Configuring various transfer settings such as:
  * Max request concurrency
  * Multipart transfer thresholds
  * Multipart transfer part sizes
  * Number of download retry attempts
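As a quick, minimal sketch of the capabilities listed above (the bucket,
key, and file names are placeholders)::

import boto3
from boto3.s3.transfer import TransferConfig

# Get the service client
s3 = boto3.client('s3')

# Tune the managed transfer methods: request concurrency and the size
# threshold at which multipart transfers kick in.
config = TransferConfig(max_concurrency=10,
                        multipart_threshold=8 * 1024 * 1024)

# Managed upload from a file name, then a managed download back to disk.
s3.upload_file('tmp.txt', 'bucket-name', 'key-name', Config=config)
s3.download_file('bucket-name', 'key-name', 'tmp-copy.txt', Config=config)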
Uploads
~~~~~~~
The managed upload methods are exposed in both the client and resource
interfaces of ``boto3``:
* :py:class:`S3.Client` method to upload a file by name: :py:meth:`S3.Client.upload_file`
* :py:class:`S3.Client` method to upload a readable file-like object: :py:meth:`S3.Client.upload_fileobj`
* :py:class:`S3.Bucket` method to upload a file by name: :py:meth:`S3.Bucket.upload_file`
* :py:class:`S3.Bucket` method to upload a readable file-like object: :py:meth:`S3.Bucket.upload_fileobj`
* :py:class:`S3.Object` method to upload a file by name: :py:meth:`S3.Object.upload_file`
* :py:class:`S3.Object` method to upload a readable file-like object: :py:meth:`S3.Object.upload_fileobj`
.. note::
Even though there is an ``upload_file`` and ``upload_fileobj`` method for
a variety of classes, they all share the exact same functionality.
Other than for convenience, there are no benefits from using one method from
one class over using the same method for a different class.
import boto3
# Get the service client
s3 = boto3.client('s3')
# Upload tmp.txt to bucket-name
s3.upload_file("tmp.txt", "bucket-name", "tmp.txt")
# Download tmp.txt as tmp2.txt
s3.download_file("bucket-name", "tmp.txt", "tmp2.txt")
If you happen to be using the resource model, the same functions are accessed
through :py:meth:`S3.Object.upload_file` and
:py:meth:`S3.Object.download_file`::
import boto3
# Get the service resource
s3 = boto3.resource('s3')
# Get bucket-name
bucket = s3.Bucket('bucket-name')
# Get the object representation
obj = bucket.Object('tmp.txt')
# Upload tmp.txt
obj.upload_file('tmp.txt')
# Download tmp.txt as tmp2.txt
obj.download_file('tmp2.txt')
To upload a file by name, use one of the ``upload_file`` methods::
import boto3
# Get the service client
s3 = boto3.client('s3')
# Upload tmp.txt to bucket-name at key-name
s3.upload_file("tmp.txt", "bucket-name", "key-name")
To upload a readable file-like object, use one of the ``upload_fileobj``
methods. Note that this file-like object **must** produce binary when read
from, **not** text::
import boto3
# Get the service client
s3 = boto3.client('s3')
# Upload a file-like object to bucket-name at key-name
with open("tmp.txt", "rb") as f:
    s3.upload_fileobj(f, "bucket-name", "key-name")
To upload a file using any extra parameters such as user metadata, use the
``ExtraArgs`` parameter::
import boto3
# Get the service client
s3 = boto3.client('s3')
# Upload tmp.txt to bucket-name at key-name
s3.upload_file(
    "tmp.txt", "bucket-name", "key-name",
    ExtraArgs={"Metadata": {"mykey": "myvalue"}}
)
All valid ``ExtraArgs`` are listed at :py:attr:`boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS`
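Because the list is a plain class attribute, it can also be inspected at
runtime; a minimal sketch::

import boto3.s3.transfer

# Print the ExtraArgs keys supported by the managed upload methods.
print(boto3.s3.transfer.S3Transfer.ALLOWED_UPLOAD_ARGS)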
To track the progress of a transfer, a progress callback can be provided such
that the callback gets invoked each time progress is made on the transfer::
import os
import sys
import threading
import boto3
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\r%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
sys.stdout.flush()
# Get the service client
s3 = boto3.client('s3')
# Upload tmp.txt to bucket-name at key-name
s3.upload_file(
"tmp.txt", "bucket-name", "key-name",
Callback=ProgressPercentage("tmp.txt"))
Downloads
~~~~~~~~~
The managed download methods are exposed in both the client and resource
interfaces of ``boto3``:
* :py:class:`S3.Client` method to download an object to a file by name: :py:meth:`S3.Client.download_file`
* :py:class:`S3.Client` method to download an object to a writeable file-like object: :py:meth:`S3.Client.download_fileobj`
* :py:class:`S3.Bucket` method to download an object to a file by name: :py:meth:`S3.Bucket.download_file`
* :py:class:`S3.Bucket` method to download an object to a writeable file-like object: :py:meth:`S3.Bucket.download_fileobj`
* :py:class:`S3.Object` method to download an object to a file by name: :py:meth:`S3.Object.download_file`
* :py:class:`S3.Object` method to download an object to a writeable file-like object: :py:meth:`S3.Object.download_fileobj`
.. note::
Even though there is a ``download_file`` and ``download_fileobj`` method for
a variety of classes, they all share the exact same functionality.
Other than for convenience, there are no benefits from using one method from
one class over using the same method for a different class.
To download to a file by name, use one of the ``download_file``
methods::
import boto3
# Get the service client
s3 = boto3.client('s3')
# Download object at bucket-name with key-name to tmp.txt
s3.download_file("bucket-name", "key-name", "tmp.txt")
To download to a writeable file-like object, use one of the
``download_fileobj`` methods. Note that this file-like object **must**
allow binary to be written to it, **not** just text::
import boto3
# Get the service client
s3 = boto3.client('s3')
# Download object at bucket-name with key-name to file-like object
with open("tmp.txt", "wb") as f:
s3.download_fileobj("bucket-name", "key-name", f)
To download using any extra parameters such as version ids, use the
``ExtraArgs`` parameter::
import boto3
# Get the service client
s3 = boto3.client('s3')
# Download object at bucket-name with key-name to tmp.txt
s3.download_file(
"bucket-name", "key-name", "tmp.txt",
ExtraArgs={"VersionId": "my-version-id"}
)
All valid ``ExtraArgs`` are listed at :py:attr:`boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS`
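As with uploads, the allowed keys are a plain class attribute, so an
``ExtraArgs`` dict can be checked before use; a minimal sketch (the version
id is a placeholder)::

import boto3.s3.transfer

extra_args = {'VersionId': 'my-version-id'}
allowed = boto3.s3.transfer.S3Transfer.ALLOWED_DOWNLOAD_ARGS
# Every key must be in the allowed list, or the download will raise a
# ValueError before any request is made.
assert all(key in allowed for key in extra_args)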
To track the progress of a transfer, a progress callback can be provided such
that the callback gets invoked each time progress is made on the transfer::
import sys
import threading
import boto3
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
sys.stdout.write(
"\r%s --> %s bytes transferred" % (
self._filename, self._seen_so_far))
sys.stdout.flush()
# Get the service client
s3 = boto3.client('s3')
# Download object at bucket-name with key-name to tmp.txt
s3.download_file(
"bucket-name", "key-name", "tmp.txt",
Callback=ProgressPercentage("tmp.txt"))
Copies
~~~~~~
The managed copy methods are exposed in both the client and resource
interfaces of ``boto3``:
* :py:class:`S3.Client` method to copy an s3 object: :py:meth:`S3.Client.copy`
* :py:class:`S3.Bucket` method to copy an s3 object: :py:meth:`S3.Bucket.copy`
* :py:class:`S3.Object` method to copy an s3 object: :py:meth:`S3.Object.copy`
.. note::
Even though there is a ``copy`` method for a variety of classes,
they all share the exact same functionality.
Other than for convenience, there are no benefits from using one method from
one class over using the same method for a different class.
To do a managed copy, use one of the ``copy`` methods::
import boto3
# Get the service client
s3 = boto3.client('s3')
# Copies object located in mybucket at mykey
# to the location otherbucket at otherkey
copy_source = {
'Bucket': 'mybucket',
'Key': 'mykey'
}
s3.copy(copy_source, 'otherbucket', 'otherkey')
To do a managed copy where the region of the source bucket is different than
the region of the final bucket, provide a ``SourceClient`` that shares the
same region as the source bucket::
import boto3
# Get a service client for us-west-2 region
s3 = boto3.client('s3', 'us-west-2')
# Get a service client for the eu-central-1 region
source_client = boto3.client('s3', 'eu-central-1')
# Copies object located in mybucket at mykey in eu-central-1 region
# to the location otherbucket at otherkey in the us-west-2 region
copy_source = {
'Bucket': 'mybucket',
'Key': 'mykey'
}
s3.copy(copy_source, 'otherbucket', 'otherkey', SourceClient=source_client)
To copy using any extra parameters such as replacing user metadata on an
existing object, use the ``ExtraArgs`` parameter::
import boto3
# Get the service client
s3 = boto3.client('s3')
# Copies object located in mybucket at mykey
# to the location otherbucket at otherkey
copy_source = {
'Bucket': 'mybucket',
'Key': 'mykey'
}
s3.copy(
copy_source, 'otherbucket', 'otherkey',
ExtraArgs={
"Metadata": {
"my-new-key": "my-new-value"
},
"MetadataDirective": "REPLACE"
}
)
To track the progress of a transfer, a progress callback can be provided such
that the callback gets invoked each time progress is made on the transfer::
import sys
import threading
import boto3
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
sys.stdout.write(
"\r%s --> %s bytes transferred" % (
self._filename, self._seen_so_far))
sys.stdout.flush()
# Get the service client
s3 = boto3.client('s3')
# Copies object located in mybucket at mykey
# to the location otherbucket at otherkey
copy_source = {
'Bucket': 'mybucket',
'Key': 'mykey'
}
s3.copy(copy_source, 'otherbucket', 'otherkey',
Callback=ProgressPercentage("otherbucket/otherkey"))
Note that the granularity of these callbacks will be much coarser than for
the upload and download methods, because copies are done entirely server
side and so there is no local file to track the streaming of data.
Configuration Settings
~~~~~~~~~~~~~~~~~~~~~~
To configure the various managed transfer methods, a
:py:class:`boto3.s3.transfer.TransferConfig` object can be provided to
the ``Config`` parameter. Please note that the default configuration should
be well-suited for most scenarios and a ``Config`` should only be provided
for specific use cases. Here are some common use cases for configuring the
managed s3 transfer methods:
To ensure that multipart uploads only happen when absolutely necessary, you
can use the ``multipart_threshold`` configuration parameter::
import boto3
from boto3.s3.transfer import TransferConfig
# Get the service client
s3 = boto3.client('s3')
GB = 1024 ** 3
# Ensure that multipart uploads only happen if the size of a transfer
# is larger than S3's size limit for nonmultipart uploads, which is 5 GB.
config = TransferConfig(multipart_threshold=5 * GB)
# Upload tmp.txt to bucket-name at key-name
s3.upload_file("tmp.txt", "bucket-name", "key-name", Config=config)
Depending on your connection speed, you may want to limit or increase
potential bandwidth usage. Setting ``max_concurrency`` can help tune
potential bandwidth usage by decreasing or increasing the maximum
number of concurrent S3 transfer-related API requests::
import boto3
from boto3.s3.transfer import TransferConfig
# Get the service client
s3 = boto3.client('s3')
# Decrease the max concurrency from 10 to 5 to potentially consume
# less downstream bandwidth.
config = TransferConfig(max_concurrency=5)
# Download object at bucket-name with key-name to tmp.txt with the
# set configuration
s3.download_file("bucket-name", "key-name", "tmp.txt", Config=config)
# Increase the max concurrency to 20 to potentially consume more
# downstream bandwidth.
config = TransferConfig(max_concurrency=20)
# Download object at bucket-name with key-name to tmp.txt with the
# set configuration
s3.download_file("bucket-name", "key-name", "tmp.txt", Config=config)
Generating Presigned URLs
@ -176,11 +535,12 @@ conditions when you generate the POST data.::
# Generate the POST attributes
post = s3.generate_presigned_post(
Bucket='bucket-name',
Key='key-name'
Key='key-name',
Fields=fields,
Conditions=conditions
)
Note: if your bucket is new and you require CORS, it is advised that
you use path style addressing (which is set by default in signature version 4).
.. _s3 transfer manager: http://boto3.readthedocs.org/en/latest/reference/customizations/s3.html#module-boto3.s3.transfer
.. _virtual host addressing: http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html

View file

@ -0,0 +1,29 @@
===============
Upgrading Notes
===============
Notes to refer to when upgrading ``boto3`` versions.
1.4.0
=====
* Logic from the `s3transfer <https://github.com/boto/s3transfer>`_ package
was ported into the ``boto3.s3.transfer`` module. In upgrading to this
new version of ``boto3``, code that relies on the public classes and
interfaces of ``boto3.s3.transfer``, such as
:py:class:`boto3.s3.transfer.S3Transfer` and
:py:class:`boto3.s3.transfer.TransferConfig`, should not be affected.
However, code that relies on the internal classes and functionality of the
``boto3.s3.transfer`` module may be affected in upgrading:
* Removed internal classes such as ``MultipartUploader``,
``MultipartDownloader``, ``ReadFileChunk``, etc. All of the managed
transfer logic now lives inside of ``s3transfer`` and as a result these
internal classes are no longer used and are essentially dead code.
* Custom implementations of ``OSUtils`` may see the
``open_file_chunk_reader`` method no longer being called when uploads
occur. If this was for the purpose of being able to provide file-like
objects for transfers, use the newly added ``upload_fileobj``
and ``download_fileobj`` methods that support both nonmultipart and
multipart transfers.
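For example, a minimal sketch of the ``upload_fileobj`` and
``download_fileobj`` methods mentioned above (the bucket and key names are
placeholders)::

import io

import boto3

# Get the service client
s3 = boto3.client('s3')

# Upload from any readable, binary file-like object.
s3.upload_fileobj(io.BytesIO(b'my data'), 'bucket-name', 'key-name')

# Download into any writeable, binary file-like object.
buf = io.BytesIO()
s3.download_fileobj('bucket-name', 'key-name', buf)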

View file

@ -4,17 +4,22 @@
S3 Customization Reference
==========================
S3 Command Injection
--------------------
.. automodule:: boto3.s3.inject
:members:
:undoc-members:
:inherited-members:
S3 Transfers
------------
.. automodule:: boto3.s3.transfer
.. note::
All classes documented below are considered public and thus will not be
exposed to breaking changes. If a class from the ``boto3.s3.transfer``
module is not documented below, it is considered internal and users
should be very cautious about using it directly, because breaking changes may
be introduced from version to version of the library.
.. autoclass:: boto3.s3.transfer.TransferConfig
:members:
:undoc-members:
.. autoclass:: boto3.s3.transfer.S3Transfer
:members:
:undoc-members:

View file

@ -1,5 +1,6 @@
-e git://github.com/boto/botocore.git@develop#egg=botocore
-e git://github.com/boto/jmespath.git@develop#egg=jmespath
-e git://github.com/boto/s3transfer.git@develop#egg=s3transfer
nose==1.3.3
mock==1.3.0
wheel==0.24.0

View file

@ -47,7 +47,7 @@ import subprocess
import argparse
VALID_CHARS = set(string.letters + string.digits)
VALID_CHARS = set(string.ascii_letters + string.digits)
CHANGES_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'.changes'
@ -148,12 +148,13 @@ def write_new_change(parsed_values):
filename = '{type_name}-{summary}'.format(
type_name=parsed_values['type'],
summary=short_summary)
possible_filename = os.path.join(dirname, filename) + '.json'
possible_filename = os.path.join(
dirname, '%s-%s.json' % (filename, str(random.randint(1, 100000))))
while os.path.isfile(possible_filename):
possible_filename = os.path.join(
dirname, '%s-%s.json' % (filename, str(random.randint(1, 100000))))
with open(possible_filename, 'w') as f:
f.write(json.dumps(parsed_values, indent=2))
f.write(json.dumps(parsed_values, indent=2) + "\n")
def parse_filled_in_contents(contents):

View file

@ -5,4 +5,4 @@ universal = 1
requires-dist =
botocore>=1.4.1,<1.5.0
jmespath>=0.7.1,<1.0.0
futures>=2.2.0,<4.0.0; python_version=="2.6" or python_version=="2.7"
s3transfer>=0.1.0,<0.2.0

View file

@ -17,15 +17,10 @@ VERSION_RE = re.compile(r'''__version__ = ['"]([0-9.]+)['"]''')
requires = [
'botocore>=1.4.1,<1.5.0',
'jmespath>=0.7.1,<1.0.0',
's3transfer>=0.1.0,<0.2.0'
]
if sys.version_info[0] == 2:
# concurrent.futures is only in python3, so for
# python2 we need to install the backport.
requires.append('futures>=2.2.0,<4.0.0')
def get_version():
init = open(os.path.join(ROOT, 'boto3', '__init__.py')).read()
return VERSION_RE.search(init).group(1)
@ -48,10 +43,6 @@ setup(
},
include_package_data=True,
install_requires=requires,
extras_require={
':python_version=="2.6" or python_version=="2.7"': [
'futures>=2.2.0,<4.0.0']
},
license="Apache License 2.0",
classifiers=[
'Development Status :: 5 - Production/Stable',

View file

@ -27,7 +27,7 @@ class TestDynamoDBCustomizations(BaseDocsFunctionalTests):
self.assert_contains_lines_in_order([
'.. py:class:: DynamoDB.Table(name)',
' * :py:meth:`batch_writer()`',
' .. py:method:: batch_writer()'],
' .. py:method:: batch_writer(overwrite_by_pkeys=None)'],
self.generated_contents
)

View file

@ -12,9 +12,12 @@
# language governing permissions and limitations under the License.
from tests import unittest
import botocore.stub
from botocore.stub import Stubber
from botocore.compat import six
import boto3.session
from boto3.s3.transfer import TransferConfig
class TestS3MethodInjection(unittest.TestCase):
@ -25,6 +28,8 @@ class TestS3MethodInjection(unittest.TestCase):
'upload_file was not injected onto S3 client')
self.assertTrue(hasattr(client, 'download_file'),
'download_file was not injected onto S3 client')
self.assertTrue(hasattr(client, 'copy'),
'copy was not injected onto S3 client')
def test_bucket_resource_has_load_method(self):
session = boto3.session.Session(region_name='us-west-2')
@ -38,6 +43,8 @@ class TestS3MethodInjection(unittest.TestCase):
'upload_file was not injected onto S3 bucket')
self.assertTrue(hasattr(bucket, 'download_file'),
'download_file was not injected onto S3 bucket')
self.assertTrue(hasattr(bucket, 'copy'),
'copy was not injected onto S3 bucket')
def test_transfer_methods_injected_to_object(self):
obj = boto3.resource('s3').Object('my_bucket', 'my_key')
@ -45,6 +52,444 @@ class TestS3MethodInjection(unittest.TestCase):
'upload_file was not injected onto S3 object')
self.assertTrue(hasattr(obj, 'download_file'),
'download_file was not injected onto S3 object')
self.assertTrue(hasattr(obj, 'copy'),
'copy was not injected onto S3 object')
class BaseTransferTest(unittest.TestCase):
def setUp(self):
self.session = boto3.session.Session(
aws_access_key_id='foo', aws_secret_access_key='bar',
region_name='us-west-2')
self.s3 = self.session.resource('s3')
self.stubber = Stubber(self.s3.meta.client)
self.bucket = 'mybucket'
self.key = 'mykey'
self.upload_id = 'uploadid'
self.etag = '"example0etag"'
self.progress = 0
self.progress_times_called = 0
def stub_head(self, content_length=4, expected_params=None):
head_response = {
'AcceptRanges': 'bytes',
'ContentLength': content_length,
'ContentType': 'binary/octet-stream',
'ETag': self.etag,
'Metadata': {},
'ResponseMetadata': {
'HTTPStatusCode': 200,
}
}
if expected_params is None:
expected_params = {
'Bucket': self.bucket,
'Key': self.key
}
self.stubber.add_response(
method='head_object', service_response=head_response,
expected_params=expected_params)
def stub_create_multipart_upload(self):
# Add the response and assert params for CreateMultipartUpload
create_upload_response = {
"Bucket": self.bucket,
"Key": self.key,
"UploadId": self.upload_id
}
expected_params = {
"Bucket": self.bucket,
"Key": self.key,
}
self.stubber.add_response(
method='create_multipart_upload',
service_response=create_upload_response,
expected_params=expected_params)
def stub_complete_multipart_upload(self, parts):
complete_upload_response = {
"Location": "us-west-2",
"Bucket": self.bucket,
"Key": self.key,
"ETag": self.etag
}
expected_params = {
"Bucket": self.bucket,
"Key": self.key,
"MultipartUpload": {
"Parts": parts
},
"UploadId": self.upload_id
}
self.stubber.add_response(
method='complete_multipart_upload',
service_response=complete_upload_response,
expected_params=expected_params)
class TestCopy(BaseTransferTest):
def setUp(self):
super(TestCopy, self).setUp()
self.copy_source = {'Bucket': 'foo', 'Key': 'bar'}
def stub_single_part_copy(self):
self.stub_head(expected_params=self.copy_source)
self.stub_copy_object()
def stub_multipart_copy(self, part_size, num_parts):
# Set the HEAD to return the total size
total_size = part_size * num_parts
self.stub_head(
content_length=total_size, expected_params=self.copy_source)
self.stub_create_multipart_upload()
# Add the responses for each UploadPartCopy
parts = []
for i in range(num_parts):
# Fill in the parts
part_number = i + 1
copy_range = "bytes=%s-%s" % (
i * part_size,
i * part_size + (part_size - 1)
)
self.stub_copy_part(part_number=part_number, copy_range=copy_range)
parts.append({'ETag': self.etag, 'PartNumber': part_number})
self.stub_complete_multipart_upload(parts)
def stub_copy_object(self):
copy_response = {
'CopyObjectResult': {
'ETag': self.etag
},
'ResponseMetadata': {
'HTTPStatusCode': 200
}
}
expected_params = {
"Bucket": self.bucket,
"Key": self.key,
"CopySource": self.copy_source
}
self.stubber.add_response(
method='copy_object', service_response=copy_response,
expected_params=expected_params)
def stub_copy_part(self, part_number, copy_range):
copy_part_response = {
"CopyPartResult": {
"ETag": self.etag
},
'ResponseMetadata': {
'HTTPStatusCode': 200
}
}
expected_params = {
"Bucket": self.bucket,
"Key": self.key,
"CopySource": self.copy_source,
"UploadId": self.upload_id,
"PartNumber": part_number,
"CopySourceRange": copy_range
}
self.stubber.add_response(
method='upload_part_copy', service_response=copy_part_response,
expected_params=expected_params)
def test_client_copy(self):
self.stub_single_part_copy()
with self.stubber:
response = self.s3.meta.client.copy(
self.copy_source, self.bucket, self.key)
# The response will be none on a successful transfer.
self.assertIsNone(response)
def test_bucket_copy(self):
self.stub_single_part_copy()
bucket = self.s3.Bucket(self.bucket)
with self.stubber:
response = bucket.copy(self.copy_source, self.key)
# The response will be none on a successful transfer.
self.assertIsNone(response)
def test_object_copy(self):
self.stub_single_part_copy()
obj = self.s3.Object(self.bucket, self.key)
with self.stubber:
response = obj.copy(self.copy_source)
self.assertIsNone(response)
def test_copy_progress(self):
chunksize = 8 * (1024 ** 2)
self.stub_multipart_copy(chunksize, 3)
transfer_config = TransferConfig(
multipart_chunksize=chunksize, multipart_threshold=1,
max_concurrency=1)
def progress_callback(amount):
self.progress += amount
self.progress_times_called += 1
with self.stubber:
self.s3.meta.client.copy(
Bucket=self.bucket, Key=self.key, CopySource=self.copy_source,
Config=transfer_config, Callback=progress_callback)
# Assert that the progress callback was called the correct number of
# times with the correct amounts.
self.assertEqual(self.progress_times_called, 3)
self.assertEqual(self.progress, chunksize * 3)
class TestUploadFileobj(BaseTransferTest):
def setUp(self):
super(TestUploadFileobj, self).setUp()
self.contents = six.BytesIO(b'foo\n')
def stub_put_object(self):
put_object_response = {
"ETag": self.etag,
"ResponseMetadata": {
"HTTPStatusCode": 200
}
}
expected_params = {
"Bucket": self.bucket,
"Key": self.key,
"Body": botocore.stub.ANY
}
self.stubber.add_response(
method='put_object', service_response=put_object_response,
expected_params=expected_params)
def stub_upload_part(self, part_number):
upload_part_response = {
'ETag': self.etag,
'ResponseMetadata': {
'HTTPStatusCode': 200
}
}
expected_params = {
"Bucket": self.bucket,
"Key": self.key,
"Body": botocore.stub.ANY,
"PartNumber": part_number,
"UploadId": self.upload_id
}
self.stubber.add_response(
method='upload_part', service_response=upload_part_response,
expected_params=expected_params)
def stub_multipart_upload(self, num_parts):
self.stub_create_multipart_upload()
# Add the responses for each UploadPart
parts = []
for i in range(num_parts):
# Fill in the parts
part_number = i + 1
self.stub_upload_part(part_number=part_number)
parts.append({'ETag': self.etag, 'PartNumber': part_number})
self.stub_complete_multipart_upload(parts)
def test_client_upload(self):
self.stub_put_object()
with self.stubber:
# The stubber will assert that all the right parameters are called.
self.s3.meta.client.upload_fileobj(
Fileobj=self.contents, Bucket=self.bucket, Key=self.key)
self.stubber.assert_no_pending_responses()
def test_raises_value_error_on_invalid_fileobj(self):
with self.stubber:
with self.assertRaises(ValueError):
self.s3.meta.client.upload_fileobj(
Fileobj='foo', Bucket=self.bucket, Key=self.key)
def test_bucket_upload(self):
self.stub_put_object()
bucket = self.s3.Bucket(self.bucket)
with self.stubber:
# The stubber will assert that all the right parameters are called.
bucket.upload_fileobj(Fileobj=self.contents, Key=self.key)
self.stubber.assert_no_pending_responses()
def test_object_upload(self):
self.stub_put_object()
obj = self.s3.Object(self.bucket, self.key)
with self.stubber:
# The stubber will assert that all the right parameters are called.
obj.upload_fileobj(Fileobj=self.contents)
self.stubber.assert_no_pending_responses()
def test_multipart_upload(self):
chunksize = 8 * (1024 ** 2)
contents = six.BytesIO(b'0' * (chunksize * 3))
self.stub_multipart_upload(num_parts=3)
transfer_config = TransferConfig(
multipart_chunksize=chunksize, multipart_threshold=1,
max_concurrency=1)
with self.stubber:
# The stubber will assert that all the right parameters are called.
self.s3.meta.client.upload_fileobj(
Fileobj=contents, Bucket=self.bucket, Key=self.key,
Config=transfer_config)
self.stubber.assert_no_pending_responses()
class TestDownloadFileobj(BaseTransferTest):
def setUp(self):
super(TestDownloadFileobj, self).setUp()
self.contents = b'foo'
self.fileobj = six.BytesIO()
def stub_single_part_download(self):
self.stub_head(content_length=len(self.contents))
self.stub_get_object(self.contents)
def stub_get_object(self, full_contents, start_byte=0, end_byte=None):
"""
Stubs out the get_object operation.
:param full_contents: The FULL contents of the object
:param start_byte: The first byte to grab.
:param end_byte: The last byte to grab.
"""
get_object_response = {}
expected_params = {}
contents = full_contents
end_byte_range = end_byte
# If the start byte is set and the end byte is not, the end byte is
# the last byte.
if start_byte != 0 and end_byte is None:
end_byte = len(full_contents) - 1
# The range on get object where the end byte is the last byte
# should set the input range as e.g. Range='bytes=3-'
if end_byte == len(full_contents) - 1:
end_byte_range = ''
# If this is a ranged get, ContentRange needs to be returned,
# contents needs to be pruned, and Range needs to be an expected param.
if end_byte is not None:
contents = full_contents[start_byte:end_byte+1]
part_range = 'bytes=%s-%s' % (start_byte, end_byte_range)
content_range = 'bytes=%s-%s/%s' % (
start_byte, end_byte, len(full_contents))
get_object_response['ContentRange'] = content_range
expected_params['Range'] = part_range
get_object_response.update({
"AcceptRanges": "bytes",
"ETag": self.etag,
"ContentLength": len(contents),
"ContentType": "binary/octet-stream",
"Body": six.BytesIO(contents),
"ResponseMetadata": {
"HTTPStatusCode": 200
}
})
expected_params.update({
"Bucket": self.bucket,
"Key": self.key
})
self.stubber.add_response(
method='get_object', service_response=get_object_response,
expected_params=expected_params)
def stub_multipart_download(self, contents, part_size, num_parts):
self.stub_head(content_length=len(contents))
for i in range(num_parts):
start_byte = i * part_size
end_byte = (i + 1) * part_size - 1
self.stub_get_object(
full_contents=contents, start_byte=start_byte,
end_byte=end_byte)
def test_client_download(self):
self.stub_single_part_download()
with self.stubber:
self.s3.meta.client.download_fileobj(
Bucket=self.bucket, Key=self.key, Fileobj=self.fileobj)
self.assertEqual(self.fileobj.getvalue(), self.contents)
self.stubber.assert_no_pending_responses()
def test_raises_value_error_on_invalid_fileobj(self):
with self.stubber:
with self.assertRaises(ValueError):
self.s3.meta.client.download_fileobj(
Bucket=self.bucket, Key=self.key, Fileobj='foo')
def test_bucket_download(self):
self.stub_single_part_download()
bucket = self.s3.Bucket(self.bucket)
with self.stubber:
bucket.download_fileobj(Key=self.key, Fileobj=self.fileobj)
self.assertEqual(self.fileobj.getvalue(), self.contents)
self.stubber.assert_no_pending_responses()
def test_object_download(self):
self.stub_single_part_download()
obj = self.s3.Object(self.bucket, self.key)
with self.stubber:
obj.download_fileobj(Fileobj=self.fileobj)
self.assertEqual(self.fileobj.getvalue(), self.contents)
self.stubber.assert_no_pending_responses()
def test_multipart_download(self):
self.contents = b'A' * 55
self.stub_multipart_download(
contents=self.contents, part_size=5, num_parts=11)
transfer_config = TransferConfig(
multipart_chunksize=5, multipart_threshold=1,
max_concurrency=1)
with self.stubber:
self.s3.meta.client.download_fileobj(
Bucket=self.bucket, Key=self.key, Fileobj=self.fileobj,
Config=transfer_config)
self.assertEqual(self.fileobj.getvalue(), self.contents)
self.stubber.assert_no_pending_responses()
def test_download_progress(self):
self.contents = b'A' * 55
self.stub_multipart_download(
contents=self.contents, part_size=5, num_parts=11)
transfer_config = TransferConfig(
multipart_chunksize=5, multipart_threshold=1,
max_concurrency=1)
def progress_callback(amount):
self.progress += amount
self.progress_times_called += 1
with self.stubber:
self.s3.meta.client.download_fileobj(
Bucket=self.bucket, Key=self.key, Fileobj=self.fileobj,
Config=transfer_config, Callback=progress_callback)
# Assert that the progress callback was called the correct number of
# times with the correct amounts.
self.assertEqual(self.progress_times_called, 11)
self.assertEqual(self.progress, 55)
self.stubber.assert_no_pending_responses()
class TestS3ObjectSummary(unittest.TestCase):

View file

@ -237,7 +237,6 @@ class TestS3Resource(unittest.TestCase):
self.assertEqual(len(versions), 0)
class TestS3Transfers(unittest.TestCase):
"""Tests for the high level boto3.s3.transfer module."""
@ -253,6 +252,7 @@ class TestS3Transfers(unittest.TestCase):
def setUp(self):
self.files = FileCreator()
self.progress = 0
def tearDown(self):
self.files.remove_all()
@ -281,6 +281,61 @@ class TestS3Transfers(unittest.TestCase):
if g['Permission'] == 'READ']
self.assertIn('groups/global/AllUsers', public_read[0])
def test_copy(self):
self.client.put_object(
Bucket=self.bucket_name, Key='foo', Body='beach')
self.addCleanup(self.delete_object, 'foo')
self.client.copy(
CopySource={'Bucket': self.bucket_name, 'Key': 'foo'},
Bucket=self.bucket_name, Key='bar'
)
self.addCleanup(self.delete_object, 'bar')
self.object_exists('bar')
def test_upload_fileobj(self):
fileobj = six.BytesIO(b'foo')
self.client.upload_fileobj(
Fileobj=fileobj, Bucket=self.bucket_name, Key='foo')
self.addCleanup(self.delete_object, 'foo')
self.object_exists('foo')
def test_upload_fileobj_progress(self):
# This has to be an integration test because the fileobj will never
# actually be read from when using the stubber and therefore the
# progress callbacks will not be invoked.
chunksize = 5 * (1024 ** 2)
config = boto3.s3.transfer.TransferConfig(
multipart_chunksize=chunksize,
multipart_threshold=chunksize,
max_concurrency=1
)
fileobj = six.BytesIO(b'0' * (chunksize * 3))
def progress_callback(amount):
self.progress += amount
self.client.upload_fileobj(
Fileobj=fileobj, Bucket=self.bucket_name, Key='foo',
Config=config, Callback=progress_callback)
self.addCleanup(self.delete_object, 'foo')
self.object_exists('foo')
self.assertEqual(self.progress, chunksize * 3)
def test_download_fileobj(self):
fileobj = six.BytesIO()
self.client.put_object(
Bucket=self.bucket_name, Key='foo', Body=b'beach')
self.addCleanup(self.delete_object, 'foo')
self.client.download_fileobj(
Bucket=self.bucket_name, Key='foo', Fileobj=fileobj)
self.assertEqual(fileobj.getvalue(), b'beach')
def test_upload_below_threshold(self):
config = boto3.s3.transfer.TransferConfig(
multipart_threshold=2 * 1024 * 1024)

View file

@ -285,3 +285,104 @@ class BaseTransformationTest(unittest.TestCase):
}
self.assert_batch_write_calls_are([first_batch, second_batch,
third_batch])
def test_auto_dedup_for_dup_requests(self):
with BatchWriter(self.table_name, self.client,
flush_amount=5, overwrite_by_pkeys=["pkey", "skey"]) as b:
# dup 1
b.put_item(Item={
'pkey': 'foo1',
'skey': 'bar1',
'other': 'other1'
})
b.put_item(Item={
'pkey': 'foo1',
'skey': 'bar1',
'other': 'other2'
})
# dup 2
b.delete_item(Key={
'pkey': 'foo1',
'skey': 'bar2',
})
b.put_item(Item={
'pkey': 'foo1',
'skey': 'bar2',
'other': 'other3'
})
# dup 3
b.put_item(Item={
'pkey': 'foo2',
'skey': 'bar2',
'other': 'other3'
})
b.delete_item(Key={
'pkey': 'foo2',
'skey': 'bar2',
})
# dup 4
b.delete_item(Key={
'pkey': 'foo2',
'skey': 'bar3',
})
b.delete_item(Key={
'pkey': 'foo2',
'skey': 'bar3',
})
# 5
b.delete_item(Key={
'pkey': 'foo3',
'skey': 'bar3',
})
# 2nd batch
b.put_item(Item={
'pkey': 'foo1',
'skey': 'bar1',
'other': 'other1'
})
b.put_item(Item={
'pkey': 'foo1',
'skey': 'bar1',
'other': 'other2'
})
first_batch = {
'RequestItems': {
self.table_name: [
{'PutRequest': { 'Item': {
'pkey': 'foo1',
'skey': 'bar1',
'other': 'other2'
}}},
{'PutRequest': { 'Item': {
'pkey': 'foo1',
'skey': 'bar2',
'other': 'other3'
}}},
{'DeleteRequest': {'Key': {
'pkey': 'foo2',
'skey': 'bar2',
}}},
{'DeleteRequest': {'Key': {
'pkey': 'foo2',
'skey': 'bar3',
}}},
{'DeleteRequest': {'Key': {
'pkey': 'foo3',
'skey': 'bar3',
}}},
]
}
}
second_batch = {
'RequestItems': {
self.table_name: [
{'PutRequest': { 'Item': {
'pkey': 'foo1',
'skey': 'bar1',
'other': 'other2'
}}},
]
}
}
self.assert_batch_write_calls_are([first_batch, second_batch])

View file

@ -10,12 +10,13 @@
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
import mock
from botocore.exceptions import ClientError
from botocore.compat import six
from boto3.s3 import inject
from tests import unittest
class TestInjectTransferMethods(unittest.TestCase):
@ -30,7 +31,9 @@ class TestInjectTransferMethods(unittest.TestCase):
inject.upload_file(mock.sentinel.CLIENT,
Filename='filename',
Bucket='bucket', Key='key')
transfer.return_value.upload_file.assert_called_with(
transfer_in_context_manager = \
transfer.return_value.__enter__.return_value
transfer_in_context_manager.upload_file.assert_called_with(
filename='filename', bucket='bucket', key='key',
extra_args=None, callback=None)
@ -40,7 +43,9 @@ class TestInjectTransferMethods(unittest.TestCase):
mock.sentinel.CLIENT,
Bucket='bucket', Key='key',
Filename='filename')
transfer.return_value.download_file.assert_called_with(
transfer_in_context_manager = \
transfer.return_value.__enter__.return_value
transfer_in_context_manager.download_file.assert_called_with(
bucket='bucket', key='key', filename='filename',
extra_args=None, callback=None)
@ -81,6 +86,7 @@ class TestBucketTransferMethods(unittest.TestCase):
def setUp(self):
self.bucket = mock.Mock(name='my_bucket')
self.copy_source = {'Bucket': 'foo', 'Key': 'bar'}
def test_upload_file_proxies_to_meta_client(self):
inject.bucket_upload_file(self.bucket, Filename='foo', Key='key')
@ -94,11 +100,32 @@ class TestBucketTransferMethods(unittest.TestCase):
Bucket=self.bucket.name, Key='key', Filename='foo',
ExtraArgs=None, Callback=None, Config=None)
def test_copy(self):
inject.bucket_copy(self.bucket, self.copy_source, Key='key')
self.bucket.meta.client.copy.assert_called_with(
CopySource=self.copy_source, Bucket=self.bucket.name, Key='key',
ExtraArgs=None, Callback=None, SourceClient=None, Config=None)
def test_upload_fileobj(self):
fileobj = six.BytesIO(b'foo')
inject.bucket_upload_fileobj(self.bucket, Key='key', Fileobj=fileobj)
self.bucket.meta.client.upload_fileobj.assert_called_with(
Bucket=self.bucket.name, Fileobj=fileobj, Key='key',
ExtraArgs=None, Callback=None, Config=None)
def test_download_fileobj(self):
obj = six.BytesIO()
inject.bucket_download_fileobj(self.bucket, Key='key', Fileobj=obj)
self.bucket.meta.client.download_fileobj.assert_called_with(
Bucket=self.bucket.name, Key='key', Fileobj=obj, ExtraArgs=None,
Callback=None, Config=None)
class TestObjectTransferMethods(unittest.TestCase):
def setUp(self):
self.obj = mock.Mock(bucket_name='my_bucket', key='my_key')
self.copy_source = {'Bucket': 'foo', 'Key': 'bar'}
def test_upload_file_proxies_to_meta_client(self):
inject.object_upload_file(self.obj, Filename='foo')
@ -112,6 +139,27 @@ class TestObjectTransferMethods(unittest.TestCase):
Bucket=self.obj.bucket_name, Key=self.obj.key, Filename='foo',
ExtraArgs=None, Callback=None, Config=None)
def test_copy(self):
inject.object_copy(self.obj, self.copy_source)
self.obj.meta.client.copy.assert_called_with(
CopySource=self.copy_source, Bucket=self.obj.bucket_name,
Key=self.obj.key, ExtraArgs=None, Callback=None,
SourceClient=None, Config=None)
def test_upload_fileobj(self):
fileobj = six.BytesIO(b'foo')
inject.object_upload_fileobj(self.obj, Fileobj=fileobj)
self.obj.meta.client.upload_fileobj.assert_called_with(
Bucket=self.obj.bucket_name, Fileobj=fileobj, Key=self.obj.key,
ExtraArgs=None, Callback=None, Config=None)
def test_download_fileobj(self):
fileobj = six.BytesIO()
inject.object_download_fileobj(self.obj, Fileobj=fileobj)
self.obj.meta.client.download_fileobj.assert_called_with(
Bucket=self.obj.bucket_name, Key=self.obj.key, Fileobj=fileobj,
ExtraArgs=None, Callback=None, Config=None)
class TestObejctSummaryLoad(unittest.TestCase):
def setUp(self):

View file

@ -10,757 +10,180 @@
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import tempfile
import shutil
import socket
from tests import unittest
from contextlib import closing
import mock
from botocore.stub import Stubber
from botocore.session import Session
from botocore.vendored import six
from concurrent import futures
from s3transfer.manager import TransferManager
from boto3.exceptions import RetriesExceededError
from boto3.exceptions import S3UploadFailedError
from boto3.s3.transfer import ReadFileChunk, StreamReaderProgress
from boto3.s3.transfer import S3Transfer
from boto3.s3.transfer import OSUtils, TransferConfig
from boto3.s3.transfer import MultipartDownloader, MultipartUploader
from boto3.s3.transfer import ShutdownQueue
from boto3.s3.transfer import QueueShutdownError
from boto3.s3.transfer import random_file_extension
from boto3.s3.transfer import disable_upload_callbacks, enable_upload_callbacks
class InMemoryOSLayer(OSUtils):
def __init__(self, filemap):
self.filemap = filemap
def get_file_size(self, filename):
return len(self.filemap[filename])
def open_file_chunk_reader(self, filename, start_byte, size, callback):
return closing(six.BytesIO(self.filemap[filename]))
def open(self, filename, mode):
if 'wb' in mode:
fileobj = six.BytesIO()
self.filemap[filename] = fileobj
return closing(fileobj)
else:
return closing(self.filemap[filename])
def remove_file(self, filename):
if filename in self.filemap:
del self.filemap[filename]
def rename_file(self, current_filename, new_filename):
if current_filename in self.filemap:
self.filemap[new_filename] = self.filemap.pop(
current_filename)
class SequentialExecutor(object):
def __init__(self, max_workers):
pass
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
pass
# The real map() interface actually takes *args, but we specifically do
# _not_ use this interface.
def map(self, function, args):
results = []
for arg in args:
results.append(function(arg))
return results
def submit(self, function):
future = futures.Future()
future.set_result(function())
return future
class TestOSUtils(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_get_file_size(self):
with mock.patch('os.path.getsize') as m:
OSUtils().get_file_size('myfile')
m.assert_called_with('myfile')
def test_open_file_chunk_reader(self):
with mock.patch('boto3.s3.transfer.ReadFileChunk') as m:
OSUtils().open_file_chunk_reader('myfile', 0, 100, None)
m.from_filename.assert_called_with('myfile', 0, 100,
None, enable_callback=False)
def test_open_file(self):
fileobj = OSUtils().open(os.path.join(self.tempdir, 'foo'), 'w')
self.assertTrue(hasattr(fileobj, 'write'))
def test_remove_file_ignores_errors(self):
with mock.patch('os.remove') as remove:
remove.side_effect = OSError('fake error')
OSUtils().remove_file('foo')
remove.assert_called_with('foo')
def test_remove_file_proxies_remove_file(self):
with mock.patch('os.remove') as remove:
OSUtils().remove_file('foo')
remove.assert_called_with('foo')
def test_rename_file(self):
with mock.patch('boto3.compat.rename_file') as rename_file:
OSUtils().rename_file('foo', 'newfoo')
rename_file.assert_called_with('foo', 'newfoo')
class TestReadFileChunk(unittest.TestCase):
def setUp(self):
self.tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tempdir)
def test_read_entire_chunk(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'onetwothreefourfivesixseveneightnineten')
chunk = ReadFileChunk.from_filename(
filename, start_byte=0, chunk_size=3)
self.assertEqual(chunk.read(), b'one')
self.assertEqual(chunk.read(), b'')
def test_read_with_amount_size(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'onetwothreefourfivesixseveneightnineten')
chunk = ReadFileChunk.from_filename(
filename, start_byte=11, chunk_size=4)
self.assertEqual(chunk.read(1), b'f')
self.assertEqual(chunk.read(1), b'o')
self.assertEqual(chunk.read(1), b'u')
self.assertEqual(chunk.read(1), b'r')
self.assertEqual(chunk.read(1), b'')
def test_reset_stream_emulation(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'onetwothreefourfivesixseveneightnineten')
chunk = ReadFileChunk.from_filename(
filename, start_byte=11, chunk_size=4)
self.assertEqual(chunk.read(), b'four')
chunk.seek(0)
self.assertEqual(chunk.read(), b'four')
def test_read_past_end_of_file(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'onetwothreefourfivesixseveneightnineten')
chunk = ReadFileChunk.from_filename(
filename, start_byte=36, chunk_size=100000)
self.assertEqual(chunk.read(), b'ten')
self.assertEqual(chunk.read(), b'')
self.assertEqual(len(chunk), 3)
def test_tell_and_seek(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'onetwothreefourfivesixseveneightnineten')
chunk = ReadFileChunk.from_filename(
filename, start_byte=36, chunk_size=100000)
self.assertEqual(chunk.tell(), 0)
self.assertEqual(chunk.read(), b'ten')
self.assertEqual(chunk.tell(), 3)
chunk.seek(0)
self.assertEqual(chunk.tell(), 0)
def test_file_chunk_supports_context_manager(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'abc')
with ReadFileChunk.from_filename(filename,
start_byte=0,
chunk_size=2) as chunk:
val = chunk.read()
self.assertEqual(val, b'ab')
def test_iter_is_always_empty(self):
# This tests the workaround for the httplib bug (see
# the source for more info).
filename = os.path.join(self.tempdir, 'foo')
open(filename, 'wb').close()
chunk = ReadFileChunk.from_filename(
filename, start_byte=0, chunk_size=10)
self.assertEqual(list(chunk), [])
class TestReadFileChunkWithCallback(TestReadFileChunk):
def setUp(self):
super(TestReadFileChunkWithCallback, self).setUp()
self.filename = os.path.join(self.tempdir, 'foo')
with open(self.filename, 'wb') as f:
f.write(b'abc')
self.amounts_seen = []
def callback(self, amount):
self.amounts_seen.append(amount)
def test_callback_is_invoked_on_read(self):
chunk = ReadFileChunk.from_filename(
self.filename, start_byte=0, chunk_size=3, callback=self.callback)
chunk.read(1)
chunk.read(1)
chunk.read(1)
self.assertEqual(self.amounts_seen, [1, 1, 1])
def test_callback_can_be_disabled(self):
chunk = ReadFileChunk.from_filename(
self.filename, start_byte=0, chunk_size=3, callback=self.callback)
chunk.disable_callback()
# Now reading from the ReadFileChunk should not invoke
# the callback.
chunk.read()
self.assertEqual(self.amounts_seen, [])
def test_callback_will_also_be_triggered_by_seek(self):
chunk = ReadFileChunk.from_filename(
self.filename, start_byte=0, chunk_size=3, callback=self.callback)
chunk.read(2)
chunk.seek(0)
chunk.read(2)
chunk.seek(1)
chunk.read(2)
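        # Seeking backwards reports a negative amount so that aggregated
        # progress stays correct when a body is rewound and re-read.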
self.assertEqual(self.amounts_seen, [2, -2, 2, -1, 2])
class TestStreamReaderProgress(unittest.TestCase):
def test_proxies_to_wrapped_stream(self):
original_stream = six.StringIO('foobarbaz')
wrapped = StreamReaderProgress(original_stream)
self.assertEqual(wrapped.read(), 'foobarbaz')
def test_callback_invoked(self):
amounts_seen = []
def callback(amount):
amounts_seen.append(amount)
original_stream = six.StringIO('foobarbaz')
wrapped = StreamReaderProgress(original_stream, callback)
self.assertEqual(wrapped.read(), 'foobarbaz')
self.assertEqual(amounts_seen, [9])
class TestMultipartUploader(unittest.TestCase):
def test_multipart_upload_uses_correct_client_calls(self):
client = mock.Mock()
uploader = MultipartUploader(
client, TransferConfig(),
InMemoryOSLayer({'filename': b'foobar'}), SequentialExecutor)
client.create_multipart_upload.return_value = {'UploadId': 'upload_id'}
client.upload_part.return_value = {'ETag': 'first'}
uploader.upload_file('filename', 'bucket', 'key', None, {})
# We need to check both the sequence of calls (create/upload/complete)
# as well as the params passed between the calls, including
# 1. The upload_id was plumbed through
# 2. The collected etags were added to the complete call.
client.create_multipart_upload.assert_called_with(
Bucket='bucket', Key='key')
        # The 6-byte body fits in a single part, so only PartNumber=1
        # is uploaded.
client.upload_part.assert_called_with(
Body=mock.ANY, Bucket='bucket',
UploadId='upload_id', Key='key', PartNumber=1)
client.complete_multipart_upload.assert_called_with(
MultipartUpload={'Parts': [{'PartNumber': 1, 'ETag': 'first'}]},
Bucket='bucket',
UploadId='upload_id',
Key='key')
def test_multipart_upload_injects_proper_kwargs(self):
client = mock.Mock()
uploader = MultipartUploader(
client, TransferConfig(),
InMemoryOSLayer({'filename': b'foobar'}), SequentialExecutor)
client.create_multipart_upload.return_value = {'UploadId': 'upload_id'}
client.upload_part.return_value = {'ETag': 'first'}
extra_args = {
'SSECustomerKey': 'fakekey',
'SSECustomerAlgorithm': 'AES256',
'StorageClass': 'REDUCED_REDUNDANCY'
}
uploader.upload_file('filename', 'bucket', 'key', None, extra_args)
client.create_multipart_upload.assert_called_with(
Bucket='bucket', Key='key',
# The initial call should inject all the storage class params.
SSECustomerKey='fakekey',
SSECustomerAlgorithm='AES256',
StorageClass='REDUCED_REDUNDANCY')
client.upload_part.assert_called_with(
Body=mock.ANY, Bucket='bucket',
UploadId='upload_id', Key='key', PartNumber=1,
# We only have to forward certain **extra_args in subsequent
# UploadPart calls.
SSECustomerKey='fakekey',
SSECustomerAlgorithm='AES256',
)
client.complete_multipart_upload.assert_called_with(
MultipartUpload={'Parts': [{'PartNumber': 1, 'ETag': 'first'}]},
Bucket='bucket',
UploadId='upload_id',
Key='key')
def test_multipart_upload_is_aborted_on_error(self):
# If the create_multipart_upload succeeds and any upload_part
# fails, then abort_multipart_upload will be called.
client = mock.Mock()
uploader = MultipartUploader(
client, TransferConfig(),
InMemoryOSLayer({'filename': b'foobar'}), SequentialExecutor)
client.create_multipart_upload.return_value = {'UploadId': 'upload_id'}
client.upload_part.side_effect = Exception(
"Some kind of error occurred.")
with self.assertRaises(S3UploadFailedError):
uploader.upload_file('filename', 'bucket', 'key', None, {})
client.abort_multipart_upload.assert_called_with(
Bucket='bucket', Key='key', UploadId='upload_id')
class TestMultipartDownloader(unittest.TestCase):
maxDiff = None
def test_multipart_download_uses_correct_client_calls(self):
client = mock.Mock()
response_body = b'foobarbaz'
client.get_object.return_value = {'Body': six.BytesIO(response_body)}
downloader = MultipartDownloader(client, TransferConfig(),
InMemoryOSLayer({}),
SequentialExecutor)
downloader.download_file('bucket', 'key', 'filename',
len(response_body), {})
client.get_object.assert_called_with(
Range='bytes=0-',
Bucket='bucket',
Key='key'
)
def test_multipart_download_with_multiple_parts(self):
client = mock.Mock()
response_body = b'foobarbaz'
client.get_object.return_value = {'Body': six.BytesIO(response_body)}
# For testing purposes, we're testing with a multipart threshold
# of 4 bytes and a chunksize of 4 bytes. Given b'foobarbaz',
# this should result in 3 calls. In python slices this would be:
# r[0:4], r[4:8], r[8:9]. But the Range param will be slightly
# different because they use inclusive ranges.
config = TransferConfig(multipart_threshold=4,
multipart_chunksize=4)
downloader = MultipartDownloader(client, config,
InMemoryOSLayer({}),
SequentialExecutor)
downloader.download_file('bucket', 'key', 'filename',
len(response_body), {})
# We're storing these in **extra because the assertEqual
# below is really about verifying we have the correct value
# for the Range param.
extra = {'Bucket': 'bucket', 'Key': 'key'}
self.assertEqual(client.get_object.call_args_list,
# Note these are inclusive ranges.
[mock.call(Range='bytes=0-3', **extra),
mock.call(Range='bytes=4-7', **extra),
mock.call(Range='bytes=8-', **extra)])
def test_multipart_download_with_multiple_parts_and_extra_args(self):
client = Session().create_client('s3')
stubber = Stubber(client)
response_body = b'foobarbaz'
response = {'Body': six.BytesIO(response_body)}
expected_params = {
'Range': mock.ANY, 'Bucket': mock.ANY, 'Key': mock.ANY,
'RequestPayer': 'requester'}
stubber.add_response('get_object', response, expected_params)
stubber.activate()
downloader = MultipartDownloader(
client, TransferConfig(), InMemoryOSLayer({}), SequentialExecutor)
downloader.download_file(
'bucket', 'key', 'filename', len(response_body),
{'RequestPayer': 'requester'})
stubber.assert_no_pending_responses()
def test_retry_on_failures_from_stream_reads(self):
# If we get an exception during a call to the response body's .read()
# method, we should retry the request.
client = mock.Mock()
response_body = b'foobarbaz'
stream_with_errors = mock.Mock()
stream_with_errors.read.side_effect = [
socket.timeout("fake error"),
response_body
]
client.get_object.return_value = {'Body': stream_with_errors}
config = TransferConfig(multipart_threshold=4,
multipart_chunksize=4)
downloader = MultipartDownloader(client, config,
InMemoryOSLayer({}),
SequentialExecutor)
downloader.download_file('bucket', 'key', 'filename',
len(response_body), {})
# We're storing these in **extra because the assertEqual
# below is really about verifying we have the correct value
# for the Range param.
extra = {'Bucket': 'bucket', 'Key': 'key'}
self.assertEqual(client.get_object.call_args_list,
# The first call to range=0-3 fails because of the
# side_effect above where we make the .read() raise a
                         # socket.timeout.
# The second call to range=0-3 then succeeds.
[mock.call(Range='bytes=0-3', **extra),
mock.call(Range='bytes=0-3', **extra),
mock.call(Range='bytes=4-7', **extra),
mock.call(Range='bytes=8-', **extra)])
def test_exception_raised_on_exceeded_retries(self):
client = mock.Mock()
response_body = b'foobarbaz'
stream_with_errors = mock.Mock()
stream_with_errors.read.side_effect = socket.timeout("fake error")
client.get_object.return_value = {'Body': stream_with_errors}
config = TransferConfig(multipart_threshold=4,
multipart_chunksize=4)
downloader = MultipartDownloader(client, config,
InMemoryOSLayer({}),
SequentialExecutor)
with self.assertRaises(RetriesExceededError):
downloader.download_file('bucket', 'key', 'filename',
len(response_body), {})
def test_io_thread_failure_triggers_shutdown(self):
client = mock.Mock()
response_body = b'foobarbaz'
client.get_object.return_value = {'Body': six.BytesIO(response_body)}
os_layer = mock.Mock()
mock_fileobj = mock.MagicMock()
mock_fileobj.__enter__.return_value = mock_fileobj
mock_fileobj.write.side_effect = Exception("fake IO error")
os_layer.open.return_value = mock_fileobj
downloader = MultipartDownloader(client, TransferConfig(),
os_layer, SequentialExecutor)
# We're verifying that the exception raised from the IO future
        # propagates back up via download_file().
with self.assertRaisesRegexp(Exception, "fake IO error"):
downloader.download_file('bucket', 'key', 'filename',
len(response_body), {})
def test_io_thread_fails_to_open_triggers_shutdown_error(self):
client = mock.Mock()
client.get_object.return_value = {
'Body': six.BytesIO(b'asdf')
}
os_layer = mock.Mock(spec=OSUtils)
os_layer.open.side_effect = IOError("Can't open file")
downloader = MultipartDownloader(
client, TransferConfig(),
os_layer, SequentialExecutor)
# We're verifying that the exception raised from the IO future
        # propagates back up via download_file().
with self.assertRaisesRegexp(IOError, "Can't open file"):
downloader.download_file('bucket', 'key', 'filename',
len(b'asdf'), {})
def test_download_futures_fail_triggers_shutdown(self):
class FailedDownloadParts(SequentialExecutor):
def __init__(self, max_workers):
self.is_first = True
def submit(self, function):
future = super(FailedDownloadParts, self).submit(function)
if self.is_first:
# This is the download_parts_thread.
future.set_exception(
Exception("fake download parts error"))
self.is_first = False
return future
client = mock.Mock()
response_body = b'foobarbaz'
client.get_object.return_value = {'Body': six.BytesIO(response_body)}
downloader = MultipartDownloader(client, TransferConfig(),
InMemoryOSLayer({}),
FailedDownloadParts)
with self.assertRaisesRegexp(Exception, "fake download parts error"):
downloader.download_file('bucket', 'key', 'filename',
len(response_body), {})
from boto3.s3.transfer import OSUtils, TransferConfig, ProgressCallbackInvoker
from boto3.s3.transfer import ClientError, S3TransferRetriesExceededError
from s3transfer.manager import TransferManager
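# The tests from here on target the 1.4.x S3Transfer, which delegates the
# actual work to an s3transfer TransferManager instead of issuing client
# calls itself.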
class TestTransferConfig(unittest.TestCase):
def assert_value_of_actual_and_alias(self, config, actual, alias,
ref_value):
# Ensure that the name set in the underlying TransferConfig (i.e.
# the actual) is the correct value.
self.assertEqual(getattr(config, actual), ref_value)
# Ensure that backcompat name (i.e. the alias) is the correct value.
self.assertEqual(getattr(config, alias), ref_value)
    def test_alias_max_concurrency(self):
ref_value = 10
config = TransferConfig(max_concurrency=ref_value)
self.assert_value_of_actual_and_alias(
config, 'max_request_concurrency', 'max_concurrency', ref_value)
# Set a new value using the alias
new_value = 15
config.max_concurrency = new_value
# Make sure it sets the value for both the alias and the actual
# value that will be used in the TransferManager
self.assert_value_of_actual_and_alias(
config, 'max_request_concurrency', 'max_concurrency', new_value)
def test_alias_max_io_queue(self):
ref_value = 10
config = TransferConfig(max_io_queue=ref_value)
self.assert_value_of_actual_and_alias(
config, 'max_io_queue_size', 'max_io_queue', ref_value)
# Set a new value using the alias
new_value = 15
config.max_io_queue = new_value
# Make sure it sets the value for both the alias and the actual
# value that will be used in the TransferManager
self.assert_value_of_actual_and_alias(
config, 'max_io_queue_size', 'max_io_queue', new_value)
class TestProgressCallbackInvoker(unittest.TestCase):
def test_on_progress(self):
callback = mock.Mock()
subscriber = ProgressCallbackInvoker(callback)
subscriber.on_progress(bytes_transferred=1)
callback.assert_called_with(1)
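# ProgressCallbackInvoker adapts a plain ``callback(bytes_transferred)``
# function to the subscriber interface s3transfer expects, which is why
# on_progress simply forwards the byte count.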
class TestS3Transfer(unittest.TestCase):
def setUp(self):
self.client = mock.Mock()
self.random_file_patch = mock.patch(
'boto3.s3.transfer.random_file_extension')
self.random_file = self.random_file_patch.start()
self.random_file.return_value = 'RANDOM'
self.manager = mock.Mock(TransferManager(self.client))
self.transfer = S3Transfer(manager=self.manager)
self.callback = mock.Mock()
def tearDown(self):
self.random_file_patch.stop()
def assert_callback_wrapped_in_subscriber(self, call_args):
subscribers = call_args[0][4]
# Make sure only one subscriber was passed in.
self.assertEqual(len(subscribers), 1)
subscriber = subscribers[0]
# Make sure that the subscriber is of the correct type
self.assertIsInstance(subscriber, ProgressCallbackInvoker)
# Make sure that the on_progress method() calls out to the wrapped
# callback by actually invoking it.
subscriber.on_progress(bytes_transferred=1)
self.callback.assert_called_with(1)
def test_callback_handlers_register_on_put_item(self):
osutil = InMemoryOSLayer({'smallfile': b'foobar'})
transfer = S3Transfer(self.client, osutil=osutil)
transfer.upload_file('smallfile', 'bucket', 'key')
events = self.client.meta.events
events.register_first.assert_called_with(
'request-created.s3',
disable_upload_callbacks,
unique_id='s3upload-callback-disable',
)
events.register_last.assert_called_with(
'request-created.s3',
enable_upload_callbacks,
unique_id='s3upload-callback-enable',
)
def test_upload_below_multipart_threshold_uses_put_object(self):
fake_files = {
'smallfile': b'foobar',
}
osutil = InMemoryOSLayer(fake_files)
transfer = S3Transfer(self.client, osutil=osutil)
transfer.upload_file('smallfile', 'bucket', 'key')
self.client.put_object.assert_called_with(
Bucket='bucket', Key='key', Body=mock.ANY
)
    def test_extra_args_on_uploaded_passed_to_api_call(self):
        extra_args = {'ACL': 'public-read'}
        fake_files = {
            'smallfile': b'hello world'
        }
        osutil = InMemoryOSLayer(fake_files)
        transfer = S3Transfer(self.client, osutil=osutil)
        transfer.upload_file('smallfile', 'bucket', 'key',
                             extra_args=extra_args)
        self.client.put_object.assert_called_with(
            Bucket='bucket', Key='key', Body=mock.ANY,
            ACL='public-read'
        )
    def test_upload_file(self):
        extra_args = {'ACL': 'public-read'}
        self.transfer.upload_file('smallfile', 'bucket', 'key',
                                  extra_args=extra_args)
        self.manager.upload.assert_called_with(
            'smallfile', 'bucket', 'key', extra_args, None)
def test_uses_multipart_upload_when_over_threshold(self):
with mock.patch('boto3.s3.transfer.MultipartUploader') as uploader:
fake_files = {
'smallfile': b'foobar',
}
osutil = InMemoryOSLayer(fake_files)
config = TransferConfig(multipart_threshold=2,
multipart_chunksize=2)
transfer = S3Transfer(self.client, osutil=osutil, config=config)
transfer.upload_file('smallfile', 'bucket', 'key')
uploader.return_value.upload_file.assert_called_with(
'smallfile', 'bucket', 'key', None, {})
def test_uses_multipart_download_when_over_threshold(self):
with mock.patch('boto3.s3.transfer.MultipartDownloader') as downloader:
osutil = InMemoryOSLayer({})
over_multipart_threshold = 100 * 1024 * 1024
transfer = S3Transfer(self.client, osutil=osutil)
callback = mock.sentinel.CALLBACK
self.client.head_object.return_value = {
'ContentLength': over_multipart_threshold,
}
transfer.download_file('bucket', 'key', 'filename',
callback=callback)
downloader.return_value.download_file.assert_called_with(
            # Note how we're downloading to a temporary random file.
'bucket', 'key', 'filename.RANDOM', over_multipart_threshold,
{}, callback)
def test_download_file_with_invalid_extra_args(self):
below_threshold = 20
osutil = InMemoryOSLayer({})
transfer = S3Transfer(self.client, osutil=osutil)
self.client.head_object.return_value = {
'ContentLength': below_threshold}
with self.assertRaises(ValueError):
transfer.download_file('bucket', 'key', '/tmp/smallfile',
extra_args={'BadValue': 'foo'})
def test_upload_file_with_invalid_extra_args(self):
osutil = InMemoryOSLayer({})
transfer = S3Transfer(self.client, osutil=osutil)
bad_args = {"WebsiteRedirectLocation": "/foo"}
with self.assertRaises(ValueError):
transfer.upload_file('bucket', 'key', '/tmp/smallfile',
extra_args=bad_args)
    def test_download_file_forwards_extra_args(self):
        extra_args = {
            'SSECustomerKey': 'foo',
            'SSECustomerAlgorithm': 'AES256',
        }
        below_threshold = 20
        osutil = InMemoryOSLayer({'smallfile': b'hello world'})
        transfer = S3Transfer(self.client, osutil=osutil)
        self.client.head_object.return_value = {
            'ContentLength': below_threshold}
        self.client.get_object.return_value = {
            'Body': six.BytesIO(b'foobar')
        }
        transfer.download_file('bucket', 'key', '/tmp/smallfile',
                               extra_args=extra_args)
        # Note that we need to invoke the HeadObject call
        # and the GetObject call with the extra_args.
        # This is necessary: trying to HeadObject an SSE object
        # will return a 400 if you don't provide the required
        # params.
        self.client.get_object.assert_called_with(
            Bucket='bucket', Key='key', SSECustomerAlgorithm='AES256',
            SSECustomerKey='foo')
    def test_download_file(self):
        extra_args = {
            'SSECustomerKey': 'foo',
            'SSECustomerAlgorithm': 'AES256',
        }
        self.transfer.download_file('bucket', 'key', '/tmp/smallfile',
                                    extra_args=extra_args)
        self.manager.download.assert_called_with(
            'bucket', 'key', '/tmp/smallfile', extra_args, None)
def test_upload_wraps_callback(self):
self.transfer.upload_file(
'smallfile', 'bucket', 'key', callback=self.callback)
self.assert_callback_wrapped_in_subscriber(
self.manager.upload.call_args)
def test_get_object_stream_is_retried_and_succeeds(self):
below_threshold = 20
osutil = InMemoryOSLayer({'smallfile': b'hello world'})
transfer = S3Transfer(self.client, osutil=osutil)
self.client.head_object.return_value = {
'ContentLength': below_threshold}
self.client.get_object.side_effect = [
# First request fails.
socket.timeout("fake error"),
# Second succeeds.
{'Body': six.BytesIO(b'foobar')}
]
        transfer.download_file('bucket', 'key', '/tmp/smallfile')
        self.assertEqual(self.client.get_object.call_count, 2)
    def test_download_wraps_callback(self):
        self.transfer.download_file(
            'bucket', 'key', '/tmp/smallfile', callback=self.callback)
        self.assert_callback_wrapped_in_subscriber(
            self.manager.download.call_args)
    def test_get_object_stream_uses_all_retries_and_errors_out(self):
        below_threshold = 20
        osutil = InMemoryOSLayer({})
        transfer = S3Transfer(self.client, osutil=osutil)
        self.client.head_object.return_value = {
            'ContentLength': below_threshold}
        # Here we're raising an exception every single time, which
        # will exhaust our retry count and propagate a
        # RetriesExceededError.
        self.client.get_object.side_effect = socket.timeout("fake error")
        with self.assertRaises(RetriesExceededError):
            transfer.download_file('bucket', 'key', 'smallfile')
        self.assertEqual(self.client.get_object.call_count, 5)
        # We should have also cleaned up the in progress file
        # we were downloading to.
        self.assertEqual(osutil.filemap, {})
    def test_propagation_of_retry_error(self):
        future = mock.Mock()
        future.result.side_effect = S3TransferRetriesExceededError(Exception())
        self.manager.download.return_value = future
        with self.assertRaises(RetriesExceededError):
            self.transfer.download_file('bucket', 'key', '/tmp/smallfile')
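    # Both retry tests assert boto3's RetriesExceededError: the manager-based
    # S3Transfer translates s3transfer's internal retry exception into the
    # same public exception the older implementation raised.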
def test_download_below_multipart_threshold(self):
below_threshold = 20
osutil = InMemoryOSLayer({'smallfile': b'hello world'})
transfer = S3Transfer(self.client, osutil=osutil)
self.client.head_object.return_value = {
'ContentLength': below_threshold}
self.client.get_object.return_value = {
'Body': six.BytesIO(b'foobar')
}
transfer.download_file('bucket', 'key', 'smallfile')
self.client.get_object.assert_called_with(Bucket='bucket', Key='key')
    def test_propagation_s3_upload_failed_error(self):
future = mock.Mock()
future.result.side_effect = ClientError({'Error': {}}, 'op_name')
self.manager.upload.return_value = future
with self.assertRaises(S3UploadFailedError):
self.transfer.upload_file('smallfile', 'bucket', 'key')
def test_can_create_with_just_client(self):
transfer = S3Transfer(client=mock.Mock())
self.assertIsInstance(transfer, S3Transfer)
def test_can_create_with_extra_configurations(self):
transfer = S3Transfer(
client=mock.Mock(), config=TransferConfig(), osutil=OSUtils())
self.assertIsInstance(transfer, S3Transfer)
    def test_client_or_manager_is_required(self):
        with self.assertRaises(ValueError):
            S3Transfer()
    def test_client_and_manager_are_mutually_exclusive(self):
        with self.assertRaises(ValueError):
            S3Transfer(self.client, manager=self.manager)
    def test_config_and_manager_are_mutually_exclusive(self):
        with self.assertRaises(ValueError):
            S3Transfer(config=mock.Mock(), manager=self.manager)
    def test_osutil_and_manager_are_mutually_exclusive(self):
        with self.assertRaises(ValueError):
            S3Transfer(osutil=mock.Mock(), manager=self.manager)
    def test_upload_requires_string_filename(self):
        transfer = S3Transfer(client=mock.Mock())
        with self.assertRaises(ValueError):
            transfer.upload_file(filename=object(), bucket='foo', key='bar')
    def test_download_requires_string_filename(self):
        transfer = S3Transfer(client=mock.Mock())
        with self.assertRaises(ValueError):
            transfer.download_file(bucket='foo', key='bar', filename=object())
    def test_context_manager(self):
        manager = mock.Mock()
        manager.__exit__ = mock.Mock()
        with S3Transfer(manager=manager):
            pass
        # The underlying transfer manager should have had its __exit__
        # called as well.
        self.assertEqual(
            manager.__exit__.call_args, mock.call(None, None, None))
    def test_context_manager_with_errors(self):
        manager = mock.Mock()
        manager.__exit__ = mock.Mock()
        raised_exception = ValueError()
        with self.assertRaises(type(raised_exception)):
            with S3Transfer(manager=manager):
                raise raised_exception
        # The underlying transfer manager should have had its __exit__
        # called as well, and the error should be passed on to it.
        self.assertEqual(
            manager.__exit__.call_args,
            mock.call(type(raised_exception), raised_exception, mock.ANY))
class TestShutdownQueue(unittest.TestCase):
    def test_handles_normal_put_get_requests(self):
        q = ShutdownQueue()
        q.put('foo')
        self.assertEqual(q.get(), 'foo')
    def test_put_raises_error_on_shutdown(self):
        q = ShutdownQueue()
        q.trigger_shutdown()
        with self.assertRaises(QueueShutdownError):
            q.put('foo')
class TestRandomFileExtension(unittest.TestCase):
    def test_has_proper_length(self):
        self.assertEqual(
            len(random_file_extension(num_digits=4)), 4)
class TestCallbackHandlers(unittest.TestCase):
def setUp(self):
self.request = mock.Mock()
def test_disable_request_on_put_object(self):
disable_upload_callbacks(self.request,
'PutObject')
self.request.body.disable_callback.assert_called_with()
def test_disable_request_on_upload_part(self):
disable_upload_callbacks(self.request,
'UploadPart')
self.request.body.disable_callback.assert_called_with()
def test_enable_object_on_put_object(self):
enable_upload_callbacks(self.request,
'PutObject')
self.request.body.enable_callback.assert_called_with()
def test_enable_object_on_upload_part(self):
enable_upload_callbacks(self.request,
'UploadPart')
self.request.body.enable_callback.assert_called_with()
def test_dont_disable_if_missing_interface(self):
del self.request.body.disable_callback
disable_upload_callbacks(self.request,
'PutObject')
self.assertEqual(self.request.body.method_calls, [])
def test_dont_enable_if_missing_interface(self):
del self.request.body.enable_callback
enable_upload_callbacks(self.request,
'PutObject')
self.assertEqual(self.request.body.method_calls, [])
def test_dont_disable_if_wrong_operation(self):
disable_upload_callbacks(self.request,
'OtherOperation')
self.assertFalse(
self.request.body.disable_callback.called)
def test_dont_enable_if_wrong_operation(self):
enable_upload_callbacks(self.request,
'OtherOperation')
self.assertFalse(
self.request.body.enable_callback.called)

View file

@ -22,6 +22,7 @@ from tests import mock, BaseTestCase
class TestSession(BaseTestCase):
def test_repr(self):
bc_session = self.bc_session_cls.return_value
bc_session.get_credentials.return_value.access_key = 'abc123'
@ -29,7 +30,19 @@ class TestSession(BaseTestCase):
session = Session('abc123', region_name='us-west-2')
        self.assertEqual(repr(session), 'Session(region_name=\'us-west-2\')')
def test_repr_on_subclasses(self):
bc_session = self.bc_session_cls.return_value
bc_session.get_credentials.return_value.access_key = 'abc123'
bc_session.get_config_variable.return_value = 'us-west-2'
class MySession(Session):
pass
session = MySession('abc123', region_name='us-west-2')
self.assertEqual(repr(session), 'MySession(region_name=\'us-west-2\')')
def test_can_access_region_name(self):
bc_session = self.bc_session_cls.return_value
@ -44,7 +57,7 @@ class TestSession(BaseTestCase):
Session()
self.assertTrue(self.bc_session_cls.called,
'Botocore session was not created')
def test_credentials_can_be_set(self):
bc_session = self.bc_session_cls.return_value
@ -55,9 +68,9 @@ class TestSession(BaseTestCase):
aws_session_token='token')
self.assertTrue(self.bc_session_cls.called,
'Botocore session was not created')
self.assertTrue(bc_session.set_credentials.called,
'Botocore session set_credentials not called from constructor')
bc_session.set_credentials.assert_called_with(
'key', 'secret', 'token')
@ -103,6 +116,13 @@ class TestSession(BaseTestCase):
self.assertEqual(session.profile_name, 'default')
def test_available_profiles(self):
bc_session = mock.Mock()
        bc_session.available_profiles.return_value = ['foo', 'bar']
session = Session(botocore_session=bc_session)
profiles = session.available_profiles
self.assertEqual(len(profiles.return_value), 2)
def test_custom_session(self):
bc_session = self.bc_session_cls()
self.bc_session_cls.reset_mock()
@ -158,7 +178,7 @@ class TestSession(BaseTestCase):
session.get_available_services()
self.assertTrue(bc_session.get_available_services.called,
'Botocore session get_available_services not called')
def test_get_available_resources(self):
mock_bc_session = mock.Mock()
@ -195,7 +215,7 @@ class TestSession(BaseTestCase):
client = session.client('sqs', region_name='us-west-2')
self.assertTrue(client,
'No low-level client was returned')
def test_create_client_with_args(self):
bc_session = self.bc_session_cls.return_value
@ -213,7 +233,8 @@ class TestSession(BaseTestCase):
mock_bc_session = mock.Mock()
loader = mock.Mock(spec=loaders.Loader)
loader.determine_latest_version.return_value = '2014-11-02'
loader.load_service_model.return_value = {
'resources': [], 'service': []}
mock_bc_session.get_component.return_value = loader
session = Session(botocore_session=mock_bc_session)
session.resource_factory.load_from_definition = mock.Mock()
@ -234,7 +255,8 @@ class TestSession(BaseTestCase):
mock_bc_session = mock.Mock()
loader = mock.Mock(spec=loaders.Loader)
loader.determine_latest_version.return_value = '2014-11-02'
loader.load_service_model.return_value = {
'resources': [], 'service': []}
mock_bc_session.get_component.return_value = loader
session = Session(botocore_session=mock_bc_session)
session.resource_factory.load_from_definition = mock.Mock()
@ -256,7 +278,8 @@ class TestSession(BaseTestCase):
mock_bc_session = mock.Mock()
loader = mock.Mock(spec=loaders.Loader)
loader.determine_latest_version.return_value = '2014-11-02'
loader.load_service_model.return_value = {
'resources': [], 'service': []}
mock_bc_session.get_component.return_value = loader
session = Session(botocore_session=mock_bc_session)
session.resource_factory.load_from_definition = mock.Mock()
@ -278,7 +301,8 @@ class TestSession(BaseTestCase):
mock_bc_session = mock.Mock()
loader = mock.Mock(spec=loaders.Loader)
loader.determine_latest_version.return_value = '2014-11-02'
loader.load_service_model.return_value = {
'resources': [], 'service': []}
mock_bc_session.get_component.return_value = loader
session = Session(botocore_session=mock_bc_session)
session.resource_factory.load_from_definition = mock.Mock()