Imported Upstream version 1.3.1

Eric Evans 2016-05-21 21:03:29 -05:00
parent 0b92fa3b17
commit 57f5c10fc5
102 changed files with 3412 additions and 633 deletions

.changes/0.0.1.json Normal file

@@ -0,0 +1,12 @@
[
{
"category": "Resources",
"description": "Supports S3, EC2, SQS, SNS, and IAM resources",
"type": "feature"
},
{
"category": "Clients",
"description": "Supports low-level clients for most services",
"type": "feature"
}
]

.changes/0.0.10.json Normal file

@@ -0,0 +1,17 @@
[
{
"category": "Documentation",
"description": "Name collisions are now handled at the resource model layer instead of the factory, meaning that the documentation now uses the correct names. (`issue 67 <https://github.com/boto/boto3/pull/67>`__)",
"type": "bugfix"
},
{
"category": "Session",
"description": "Add a ``region_name`` option when creating a session. (`issue 69 <https://github.com/boto/boto3/pull/69>`__, `issue 21 <https://github.com/boto/boto3/issues/21>`__)",
"type": "feature"
},
{
"category": "Botocore",
"description": "Update to Botocore 0.94.0",
"type": "feature"
}
]
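
A minimal sketch of the ``region_name`` option added above; the region and service names are placeholder choices, not part of this commit::

    import boto3

    # Clients and resources created from this session default to the
    # region given at construction time.
    session = boto3.session.Session(region_name='us-west-2')
    sqs = session.client('sqs')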

.changes/0.0.11.json Normal file

@@ -0,0 +1,22 @@
[
{
"category": "Resources",
"description": "Add Amazon EC2 support for ClassicLink actions and add a delete action to EC2 ``Volume`` resources.",
"type": "feature"
},
{
"category": "Resources",
"description": "Add a ``load`` operation and ``user`` reference to AWS IAM's ``CurrentUser`` resource. (`issue 72 <https://github.com/boto/boto3/pull/72>`__,",
"type": "feature"
},
{
"category": "Resources",
"description": "Add resources for AWS IAM managed policies. (`issue 71 <https://github.com/boto/boto3/pull/71>`__)",
"type": "feature"
},
{
"category": "Botocore",
"description": "Update to Botocore 0.97.0",
"type": "feature"
}
]

.changes/0.0.12.json Normal file

@@ -0,0 +1,12 @@
[
{
"category": "Resources",
"description": "Add the ability to load resource data from a ``has`` relationship. This saves a call to ``load`` when available, and otherwise fixes a problem where there was no way to get at certain resource data. (`issue 74 <https://github.com/boto/boto3/pull/72>`__,",
"type": "feature"
},
{
"category": "Botocore",
"description": "Update to Botocore 0.99.0",
"type": "feature"
}
]

.changes/0.0.13.json Normal file

@@ -0,0 +1,7 @@
[
{
"category": "Botocore",
"description": "Update to Botocore 0.100.0.",
"type": "feature"
}
]

.changes/0.0.14.json Normal file

@@ -0,0 +1,17 @@
[
{
"category": "Resources",
"description": "Update to the latest resource models for",
"type": "feature"
},
{
"category": "Amazon S3",
"description": "Add an ``upload_file`` and ``download_file`` to S3 clients that transparently handle parallel multipart transfers.",
"type": "feature"
},
{
"category": "Botocore",
"description": "Update to Botocore 0.102.0.",
"type": "feature"
}
]
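
A short sketch of the transfer methods this entry adds; the bucket, key, and file paths are placeholders::

    import boto3

    s3 = boto3.client('s3')
    # Large files are split into parts and transferred in parallel;
    # small files fall back to a single request automatically.
    s3.upload_file('/tmp/archive.tar.gz', 'my-bucket', 'backups/archive.tar.gz')
    s3.download_file('my-bucket', 'backups/archive.tar.gz', '/tmp/restore.tar.gz')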

.changes/0.0.15.json Normal file

@@ -0,0 +1,17 @@
[
{
"category": "Packaging",
"description": "Fix an issue with the Amazon S3 ``upload_file`` and ``download_file`` customization. (`issue 85 <https://github.com/boto/boto3/pull/85>`__)",
"type": "bugfix"
},
{
"category": "Resource",
"description": "Fix an issue with the Amazon S3 ``BucketNofitication`` resource.",
"type": "bugfix"
},
{
"category": "Botocore",
"description": "Update to Botocore 0.103.0.",
"type": "feature"
}
]

.changes/0.0.16.json Normal file

@@ -0,0 +1,12 @@
[
{
"category": "Packaging",
"description": "Fix release sdist and whl files from 0.0.15.",
"type": "bugfix"
},
{
"category": "Amazon Dynamodb",
"description": "Add resource model for Amazon DynamoDB.",
"type": "feature"
}
]

.changes/0.0.17.json Normal file

@@ -0,0 +1,7 @@
[
{
"category": "Botocore",
"description": "Update to Botocore 0.107.0.",
"type": "feature"
}
]

.changes/0.0.18.json Normal file

@@ -0,0 +1,17 @@
[
{
"category": "DynamoDB",
"description": "Add document level interface for Table resource (`issue 103 <https://github.com/boto/boto3/pull/103>`__)",
"type": "feature"
},
{
"category": "DynamoDB",
"description": "Add ConditionExpression interface for querying and filtering Table resource. (`issue 103 <https://github.com/boto/boto3/pull/103>`__)",
"type": "feature"
},
{
"category": "Clients",
"description": "Add support for passing of ``botocore.client.Config`` object to instantiation of clients.",
"type": "feature"
}
]
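
A sketch of the three interfaces above, assuming a hypothetical ``users`` table with a ``username`` hash key::

    import boto3
    from botocore.client import Config
    from boto3.dynamodb.conditions import Key, Attr

    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('users')

    # Document-level interface: plain Python dicts in and out.
    table.put_item(Item={'username': 'janedoe', 'age': 30})

    # ConditionExpression interface for query and scan.
    by_key = table.query(KeyConditionExpression=Key('username').eq('janedoe'))
    adults = table.scan(FilterExpression=Attr('age').gte(21))

    # Clients now accept a botocore.client.Config at instantiation.
    sqs = boto3.client('sqs', config=Config(connect_timeout=5, read_timeout=60))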

.changes/0.0.19.json Normal file

@@ -0,0 +1,12 @@
[
{
"category": "Collections",
"description": "Remove the ``page_count`` and ``limit`` arguments from ``all()``. Undocument support for the two arguments in the ``filter()`` method. (`issue 119 <https://github.com/boto/boto3/pull/119>`__)",
"type": "breakingchange"
},
{
"category": "DynamoDB",
"description": "Add batch writer. (`issue 118 <https://github.com/boto/boto3/pull/118>`__)",
"type": "feature"
}
]

.changes/0.0.2.json Normal file

@@ -0,0 +1,22 @@
[
{
"category": "Resources",
"description": "Adds resources for `AWS CloudFormation <http://aws.amazon.com/cloudformation/>`_ and `AWS OpsWorks <http://aws.amazon.com/opsworks/>`_.",
"type": "feature"
},
{
"category": "Botocore",
"description": "Update to Botocore 0.73.0 and JMESPath 0.5.0",
"type": "feature"
},
{
"category": "Clients",
"description": "Adds support for `AWS CodeDeploy <http://aws.amazon.com/codedeploy/>`_, `AWS Config <http://aws.amazon.com/config/>`_, `AWS KMS <http://aws.amazon.com/kms/>`_, `AWS Lambda <http://aws.amazon.com/lambda/>`_.",
"type": "feature"
},
{
"category": "UserAgent",
"description": "Make requests with a customized HTTP user-agent",
"type": "feature"
}
]

.changes/0.0.20.json Normal file

@@ -0,0 +1,7 @@
[
{
"category": "ec2",
"description": "Update resource model. (`issue 129 <https://github.com/boto/boto3/pull/129>`__)",
"type": "feature"
}
]

.changes/0.0.21.json Normal file

@@ -0,0 +1,7 @@
[
{
"category": "Installation",
"description": "Fix regression when installing via older versions of pip on python 2.6. (`issue 132 <https://github.com/boto/boto3/pull/132`__)",
"type": "bugfix"
}
]

.changes/0.0.22.json Normal file

@@ -0,0 +1,12 @@
[
{
"category": "``s3.client.upload_file``",
"description": "Fix double invocation of callbacks when using signature version 4. (`issue 133 <https://github.com/boto/boto3/pull/133>`__)",
"type": "bugfix"
},
{
"category": "",
"description": "``s3.Bucket.load`` (`issue 128 <https://github.com/boto/boto3/pull/128>`__)",
"type": "bugfix"
}
]

.changes/0.0.3.json Normal file

@@ -0,0 +1,7 @@
[
{
"category": "Botocore",
"description": "Update to Botocore 0.76.0.",
"type": "feature"
}
]

.changes/0.0.4.json Normal file

@@ -0,0 +1,22 @@
[
{
"category": "Botocore",
"description": "Update to Botocore 0.77.0",
"type": "feature"
},
{
"category": "EC2",
"description": "Update `Amazon EC2 <http",
"type": "feature"
},
{
"category": "Resources",
"description": "Support `belongsTo` resource reference as well as `path` specified in an action's resource definition.",
"type": "feature"
},
{
"category": "SQS",
"description": "Fix an issue accessing SQS message bodies (`issue 33 <https://github.com/boto/boto3/issues/33>`__)",
"type": "bugfix"
}
]

.changes/0.0.5.json Normal file

@@ -0,0 +1,12 @@
[
{
"category": "Resources",
"description": "Add support for batch actions on collections. (`issue 32 <https://github.com/boto/boto3/pull/32>`__)",
"type": "feature"
},
{
"category": "Botocore",
"description": "Update to Botocore 0.78.0",
"type": "feature"
}
]

.changes/0.0.6.json Normal file

@@ -0,0 +1,27 @@
[
{
"category": "Amazon SQS",
"description": "Add ``purge`` action to queue resources",
"type": "feature"
},
{
"category": "Waiters",
"description": "Add documentation for client and resource waiters (`issue 44 <https://github.com/boto/boto3/pull/44>`__)",
"type": "feature"
},
{
"category": "Waiters",
"description": "Add support for resource waiters (`issue 43 <https://github.com/boto/boto3/pull/43>`__)",
"type": "feature"
},
{
"category": "Installation",
"description": "Remove dependency on the unused ``six`` module (`issue 42 <https://github.com/boto/boto3/pull/42>`__)",
"type": "bugfix"
},
{
"category": "Botocore",
"description": "Update to Botocore 0.80.0",
"type": "feature"
}
]

.changes/0.0.7.json Normal file

@@ -0,0 +1,32 @@
[
{
"category": "Resources",
"description": "Enable support for Amazon Glacier.",
"type": "feature"
},
{
"category": "Resources",
"description": "Support plural references and nested JMESPath queries for data members when building parameters and identifiers. (`issue 52 <https://github.com/boto/boto3/pull/52>`__)",
"type": "feature"
},
{
"category": "Resources",
"description": "Update to the latest resource JSON format. This is a **backward-incompatible** change as not all resources are exposed at the service level anymore. For example, ``s3.Object('bucket', 'key')`` is now ``s3.Bucket('bucket').Object('key')``. (`issue 51 <https://github.com/boto/boto3/pull/51>`__)",
"type": "feature"
},
{
"category": "Resources",
"description": "Make ``resource.meta`` a proper object. This allows you to do things like ``resource.meta.client``. This is a **backward- incompatible** change. (`issue 45 <https://github.com/boto/boto3/pull/45>`__)",
"type": "feature"
},
{
"category": "Dependency",
"description": "Update to JMESPath 0.6.1",
"type": "feature"
},
{
"category": "Botocore",
"description": "Update to Botocore 0.86.0",
"type": "feature"
}
]

.changes/0.0.8.json Normal file

@@ -0,0 +1,22 @@
[
{
"category": "Resources",
"description": "Fix Amazon S3 resource identifier order. (`issue 62 <https://github.com/boto/boto3/pull/62>`__)",
"type": "bugfix"
},
{
"category": "Resources",
"description": "Fix collection resource hydration path. (`issue 61 <https://github.com/boto/boto3/pull/61>`__)",
"type": "bugfix"
},
{
"category": "Resources",
"description": "Re-enable service-level access to all resources, allowing e.g. ``obj = s3.Object('bucket', 'key')``. (`issue 60 <https://github.com/boto/boto3/pull/60>`__)",
"type": "bugfix"
},
{
"category": "Botocore",
"description": "Update to Botocore 0.87.0",
"type": "feature"
}
]

.changes/0.0.9.json Normal file

@@ -0,0 +1,7 @@
[
{
"category": "Botocore",
"description": "Update to Botocore 0.92.0",
"type": "feature"
}
]

.changes/1.1.0.json Normal file

@@ -0,0 +1,7 @@
[
{
"category": "``EC2.Vpc.filter``",
"description": "Fix issue with clobbering of ``Filtering`` paramter. (`issue 154 `https://github.com/boto/boto3/pull/154`__)",
"type": "bugfix"
}
]

.changes/1.1.1.json Normal file

@@ -0,0 +1,7 @@
[
{
"category": "``EC2.ServiceResource.create_tags``",
"description": "Fix issue when creating multiple tags. (`issue 160 <https://github.com/boto/boto3/pull/160>`__)",
"type": "bugfix"
}
]

.changes/1.1.2.json Normal file

@@ -0,0 +1,17 @@
[
{
"category": "``session.Session``",
"description": "Add ``events`` property to access session's event emitter. (`issue 204 <https://github.com/boto/boto3/pull/204`__)",
"type": "feature"
},
{
"category": "``Glacier.Account``",
"description": "Fix issue with resource model. (`issue 196 <https://github.com/boto/boto3/pull/196>`__)",
"type": "bugfix"
},
{
"category": "``DynamoDB``",
"description": "Fix misspelling of error class to ``DynamoDBOperationNotSupportedError``. (`issue 218 <https://github.com/boto/boto3/pull/218>`__)",
"type": "bugfix"
}
]
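
A sketch of the ``events`` property above, using a hypothetical handler that fills in a default bucket; the event name follows botocore's ``provide-client-params`` convention::

    import boto3

    def add_default_bucket(params, **kwargs):
        # Hypothetical handler: supply a bucket if the caller omitted one.
        params.setdefault('Bucket', 'my-default-bucket')

    session = boto3.session.Session()
    session.events.register('provide-client-params.s3.ListObjects',
                            add_default_bucket)
    s3 = session.client('s3')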

.changes/1.1.3.json Normal file

@@ -0,0 +1,12 @@
[
{
"category": "``aws storagegateway``",
"description": "Add support for resource tagging.",
"type": "feature"
},
{
"category": "timeouts",
"description": "Add support for customizable timeouts.",
"type": "feature"
}
]

.changes/1.1.4.json Normal file

@@ -0,0 +1,12 @@
[
{
"category": "Identifier",
"description": "Make resource identifiers immutable. (`issue 246 <https://github.com/boto/boto3/pull/246>`__)",
"type": "bugfix"
},
{
"category": "S3",
"description": "Both S3 Bucket and Object obtain upload_file() and download_file() (`issue 243 <https://github.com/boto/boto3/pull/243>`__)",
"type": "feature"
}
]

.changes/1.2.0.json Normal file

@@ -0,0 +1,22 @@
[
{
"category": "Docstrings",
"description": "Add docstrings for resource identifiers, attributes, references, and subresources. (`issue 239 <https://github.com/boto/boto3/pull/239>`__)",
"type": "feature"
},
{
"category": "``S3``",
"description": "Add ability to configure host addressing style when making requests to Amazon S3. (`botocore issue 673 <https://github.com/boto/botocore/pull/673>`__)",
"type": "feature"
},
{
"category": "``IAM``",
"description": "Fix model issue with attached groups, roles, and policies. (`issue 304 <https://github.com/boto/boto3/pull/304>`__)",
"type": "bugfix"
},
{
"category": "``EC2.ServiceResource.create_key_pair``",
"description": "Fix model issue where creating key pair does not have a ``key_material`` on ``KeyPair`` resource. (`issue 290 <https://github.com/boto/boto3/pull/290>`__)",
"type": "bugfix"
}
]
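
A sketch of the addressing-style option above; ``'path'`` is one documented value, alongside ``'virtual'`` and ``'auto'``::

    import boto3
    from botocore.client import Config

    # Path-style addressing puts the bucket in the URL path rather than
    # in the host name.
    s3 = boto3.client('s3', config=Config(s3={'addressing_style': 'path'}))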

.changes/1.2.1.json Normal file

@@ -0,0 +1,7 @@
[
{
"category": "setup.cfg",
"description": "Fix issue in formatting that broke PyPI distributable",
"type": "bugfix"
}
]

.changes/1.2.2.json Normal file

@@ -0,0 +1,17 @@
[
{
"category": "Dependencies",
"description": "Relax version constraint of ``futures`` to support version 3.x.",
"type": "feature"
},
{
"category": "Resources",
"description": "Allow ``config`` object to be provided when creating resources (`issue 325 <https://github.com/boto/boto3/pull/325>`__)",
"type": "feature"
},
{
"category": "Documentation",
"description": "Add docstrings for resource collections and waiters (`issue 267 <https://github.com/boto/boto3/pull/267>`__, `issue 261 <https://github.com/boto/boto3/pull/261>`__)",
"type": "feature"
}
]
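
A sketch of passing a ``config`` object when creating a resource; the signature-version setting is an arbitrary example, not from this commit::

    import boto3
    from botocore.client import Config

    # The Config is forwarded to the client that backs the resource.
    s3 = boto3.resource('s3', config=Config(signature_version='s3v4'))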

.changes/1.2.3.json Normal file

@@ -0,0 +1,27 @@
[
{
"category": "``CloudWatch``",
"description": "Add resource model. (`issue 412 <https://github.com/boto/boto3/pull/412>`__)",
"type": "feature"
},
{
"category": "``S3``",
"description": "Add a start_restore() on Object and ObjectSummary resources. (`issue 408 <https://github.com/boto/boto3/pull/408>`__)",
"type": "feature"
},
{
"category": "Documentation",
"description": "Add examples for S3. (`issue 402 <https://github.com/boto/boto3/pull/402>`__)",
"type": "feature"
},
{
"category": "Collections",
"description": "Fix regression where filters could not be chained. (`issue 401 <https://github.com/boto/boto3/pull/401>`__)",
"type": "bugfix"
},
{
"category": "``S3``",
"description": "Progress callback will be triggered when rewinding stream. (`issue 395 <https://github.com/boto/boto3/pull/395>`__)",
"type": "bugfix"
}
]

.changes/1.2.4.json Normal file

@@ -0,0 +1,12 @@
[
{
"category": "``Session``",
"description": "Add ``region_name`` property on session. (`issue 414 <https://github.com/boto/boto3/pull/414>`__)",
"type": "feature"
},
{
"category": "``S3``",
"description": "Fix issue with hanging downloads. (`issue 471 <https://github.com/boto/boto3/pull/471>`__)",
"type": "bugfix"
}
]

.changes/1.2.5.json Normal file

@@ -0,0 +1,7 @@
[
{
"category": "``S3``",
"description": "Forward ``extra_args`` when using multipart downloads. (`issue 503 <https://github.com/boto/boto3/pull/503>`__)",
"type": "bugfix"
}
]

.changes/1.3.0.json Normal file

@@ -0,0 +1,7 @@
[
{
"category": "``EC2``",
"description": "Update resource model to include ``Route`` resources. (`issue 532 <https://github.com/boto/boto3/pull/532>`__)",
"type": "feature"
}
]

.changes/1.3.1.json Normal file

@@ -0,0 +1,22 @@
[
{
"category": "S3",
"description": "Add custom load to ObjectSummary",
"type": "feature"
},
{
"category": "Session",
"description": "Add method to get session credentials",
"type": "feature"
},
{
"category": "DynamoDB",
"description": "Ensure batch writer never sends more than flush_amount (`#483 <https://github.com/boto/boto3/issues/483>`__)",
"type": "bugfix"
},
{
"category": "Resources",
"description": "Add get_available_subresources to Resources (`#113 <https://github.com/boto/boto3/issues/113>`__)",
"type": "feature"
}
]
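
A sketch of the new session-credentials and ``get_available_subresources`` calls above; the bucket name is a placeholder::

    import boto3

    session = boto3.session.Session()
    credentials = session.get_credentials()   # botocore credentials object

    s3 = session.resource('s3')
    print(s3.get_available_subresources())    # e.g. ['Bucket', 'Object', ...]
    print(s3.Bucket('my-bucket').get_available_subresources())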

.travis.yml

@@ -4,7 +4,13 @@ python:
- "2.7"
- "3.3"
- "3.4"
- "3.5"
sudo: false
before_install:
- if [ "$TRAVIS_PULL_REQUEST" != "false" ] && [ "$TRAVIS_BRANCH" == "master" ]; then
echo "No pull requests can be sent to the master branch" 1>&2;
exit 1;
fi
install:
- python scripts/ci/install
script: python scripts/ci/run-tests

CHANGELOG.rst

@@ -1,429 +1,265 @@
Changelog
=========
CHANGELOG
=========
1.2.2 - (2015-11-19)
--------------------
1.3.1
=====
* feature:Dependencies: Relax version constraint of ``futures`` to support
version 3.x.
* feature:Resources: Allow ``config`` object to be provided when creating
resources
(`issue 325 <https://github.com/boto/boto3/pull/325>`__)
* feature:Documentation: Add docstrings for resource collections and waiters
(`issue 267 <https://github.com/boto/boto3/pull/267>`__,
`issue 261 <https://github.com/boto/boto3/pull/261>`__)
* feature:S3: Add custom load to ObjectSummary
* feature:Session: Add method to get session credentials
* bugfix:DynamoDB: Ensure batch writer never sends more than flush_amount (`#483 <https://github.com/boto/boto3/issues/483>`__)
* feature:Resources: Add get_available_subresources to Resources (`#113 <https://github.com/boto/boto3/issues/113>`__)
1.2.1 - (2015-10-22)
--------------------
1.3.0
=====
* feature:``EC2``: Update resource model to include ``Route`` resources. (`issue 532 <https://github.com/boto/boto3/pull/532>`__)
1.2.5
=====
* bugfix:``S3``: Forward ``extra_args`` when using multipart downloads. (`issue 503 <https://github.com/boto/boto3/pull/503>`__)
1.2.4
=====
* feature:``Session``: Add ``region_name`` property on session. (`issue 414 <https://github.com/boto/boto3/pull/414>`__)
* bugfix:``S3``: Fix issue with hanging downloads. (`issue 471 <https://github.com/boto/boto3/pull/471>`__)
1.2.3
=====
* feature:``CloudWatch``: Add resource model. (`issue 412 <https://github.com/boto/boto3/pull/412>`__)
* feature:``S3``: Add a start_restore() on Object and ObjectSummary resources. (`issue 408 <https://github.com/boto/boto3/pull/408>`__)
* feature:Documentation: Add examples for S3. (`issue 402 <https://github.com/boto/boto3/pull/402>`__)
* bugfix:Collections: Fix regression where filters could not be chained. (`issue 401 <https://github.com/boto/boto3/pull/401>`__)
* bugfix:``S3``: Progress callback will be triggered when rewinding stream. (`issue 395 <https://github.com/boto/boto3/pull/395>`__)
1.2.2
=====
* feature:Dependencies: Relax version constraint of ``futures`` to support version 3.x.
* feature:Resources: Allow ``config`` object to be provided when creating resources (`issue 325 <https://github.com/boto/boto3/pull/325>`__)
* feature:Documentation: Add docstrings for resource collections and waiters (`issue 267 <https://github.com/boto/boto3/pull/267>`__, `issue 261 <https://github.com/boto/boto3/pull/261>`__)
1.2.1
=====
* bugfix:setup.cfg: Fix issue in formatting that broke PyPI distributable
1.2.0 - (2015-10-22)
--------------------
1.2.0
=====
* feature:Docstrings: Add docstrings for resource identifiers, attributes,
references, and subresources.
(`issue 239 <https://github.com/boto/boto3/pull/239>`__)
* feature:``S3``: Add ability to configure host addressing style when making
requests to Amazon S3.
(`botocore issue 673 <https://github.com/boto/botocore/pull/673>`__)
* bugfix:``IAM``: Fix model issue with attached groups, roles, and policies.
(`issue 304 <https://github.com/boto/boto3/pull/304>`__)
* bugfix:``EC2.ServiceResource.create_key_pair``: Fix model issue where
creating key pair does not have a ``key_material`` on ``KeyPair`` resource.
(`issue 290 <https://github.com/boto/boto3/pull/290>`__)
* feature:Docstrings: Add docstrings for resource identifiers, attributes, references, and subresources. (`issue 239 <https://github.com/boto/boto3/pull/239>`__)
* feature:``S3``: Add ability to configure host addressing style when making requests to Amazon S3. (`botocore issue 673 <https://github.com/boto/botocore/pull/673>`__)
* bugfix:``IAM``: Fix model issue with attached groups, roles, and policies. (`issue 304 <https://github.com/boto/boto3/pull/304>`__)
* bugfix:``EC2.ServiceResource.create_key_pair``: Fix model issue where creating key pair does not have a ``key_material`` on ``KeyPair`` resource. (`issue 290 <https://github.com/boto/boto3/pull/290>`__)
1.1.4 - (2015-09-24)
--------------------
1.1.4
=====
* bugfix:Identifier: Make resource identifiers immutable.
(`issue 246 <https://github.com/boto/boto3/pull/246>`__)
* feature: Both S3 Bucket and Object obtain upload_file() and download_file()
(`issue 243 <https://github.com/boto/boto3/pull/243>`__)
* bugfix:Identifier: Make resource identifiers immutable. (`issue 246 <https://github.com/boto/boto3/pull/246>`__)
* feature:S3: Both S3 Bucket and Object obtain upload_file() and download_file() (`issue 243 <https://github.com/boto/boto3/pull/243>`__)
1.1.3 - 2015-09-03
------------------
1.1.3
=====
* feature:``aws storagegateway``: Add support for resource tagging.
* feature: Add support for customizable timeouts.
* feature:timeouts: Add support for customizable timeouts.
1.1.2 - 2015-08-25
------------------
1.1.2
=====
* feature:``session.Session``: Add ``events`` property to access session's
event emitter.
(`issue 204 <https://github.com/boto/boto3/pull/204>`__)
* bugfix:``Glacier.Account``: Fix issue with resource model.
(`issue 196 <https://github.com/boto/boto3/pull/196>`__)
* bugfix:``DynamoDB``: Fix misspelling of error class to
``DynamoDBOperationNotSupportedError``.
(`issue 218 <https://github.com/boto/boto3/pull/218>`__)
* feature:``session.Session``: Add ``events`` property to access session's event emitter. (`issue 204 <https://github.com/boto/boto3/pull/204>`__)
* bugfix:``Glacier.Account``: Fix issue with resource model. (`issue 196 <https://github.com/boto/boto3/pull/196>`__)
* bugfix:``DynamoDB``: Fix misspelling of error class to ``DynamoDBOperationNotSupportedError``. (`issue 218 <https://github.com/boto/boto3/pull/218>`__)
1.1.1 - 2015-07-23
------------------
1.1.1
=====
* bugfix:``EC2.ServiceResource.create_tags``: Fix issue when creating
multiple tags.
(`issue 160 <https://github.com/boto/boto3/pull/160>`__)
* bugfix:``EC2.ServiceResource.create_tags``: Fix issue when creating multiple tags. (`issue 160 <https://github.com/boto/boto3/pull/160>`__)
1.1.0 - 2015-07-07
------------------
* bugfix:``EC2.Vpc.filter``: Fix issue with clobbering of ``Filtering``
parameter.
(`issue 154 <https://github.com/boto/boto3/pull/154>`__)
1.1.0
=====
* bugfix:``EC2.Vpc.filter``: Fix issue with clobbering of ``Filtering`` parameter. (`issue 154 <https://github.com/boto/boto3/pull/154>`__)
1.0.1 - 2015-06-24
------------------
* feature: Update documentation
0.0.22
======
* bugfix:``s3.client.upload_file``: Fix double invocation of callbacks when using signature version 4. (`issue 133 <https://github.com/boto/boto3/pull/133>`__)
* bugfix:``s3.Bucket.load``: Add custom load method for Bucket resource. (`issue 128 <https://github.com/boto/boto3/pull/128>`__)
1.0.0 - 2015-06-22
------------------
* feature: Announced GA
0.0.21
======
* bugfix:Installation: Fix regression when installing via older versions of pip on python 2.6. (`issue 132 <https://github.com/boto/boto3/pull/132>`__)
0.0.22 - 2015-06-12
-------------------
0.0.20
======
* bugfix:``s3.client.upload_file``: Fix double invocation of callbacks when
using signature version 4.
(`issue 133 <https://github.com/boto/boto3/pull/133>`__)
* bugfix:``s3.Bucket.load``: Add custom load method for Bucket resource.
(`issue 128 <https://github.com/boto/boto3/pull/128>`__)
* feature:ec2: Update resource model. (`issue 129 <https://github.com/boto/boto3/pull/129>`__)
0.0.21 - 2015-06-12
-------------------
0.0.19
======
* bugfix:Installation: Fix regression when installing via older versions of
pip on python 2.6.
(`issue 132 <https://github.com/boto/boto3/pull/132>`__)
* breakingchange:Collections: Remove the ``page_count`` and ``limit`` arguments from ``all()``. Undocument support for the two arguments in the ``filter()`` method. (`issue 119 <https://github.com/boto/boto3/pull/119>`__)
* feature:DynamoDB: Add batch writer. (`issue 118 <https://github.com/boto/boto3/pull/118>`__)
0.0.20 - 2015-06-11
-------------------
0.0.18
======
* feature:ec2: Update resource model.
(`issue 129 <https://github.com/boto/boto3/pull/129>`__)
* feature:DynamoDB: Add document level interface for Table resource (`issue 103 <https://github.com/boto/boto3/pull/103>`__)
* feature:DynamoDB: Add ConditionExpression interface for querying and filtering Table resource. (`issue 103 <https://github.com/boto/boto3/pull/103>`__)
* feature:Clients: Add support for passing of ``botocore.client.Config`` object to instantiation of clients.
0.0.19 - 2015-06-04
-------------------
* breakingchange:Collections: Remove the ``page_count`` and ``limit``
arguments from ``all()``. Undocument support for the two arguments in the
``filter()`` method.
(`issue 119 <https://github.com/boto/boto3/pull/119>`__)
* feature:DynamoDB: Add batch writer.
(`issue 118 <https://github.com/boto/boto3/pull/118>`__)
0.0.18 - 2015-06-01
-------------------
* feature:DynamoDB: Add document level interface for Table resource
(`issue 103 <https://github.com/boto/boto3/pull/103>`__)
* feature:DynamoDB: Add ConditionExpression interface for querying and
filtering Table resource.
(`issue 103 <https://github.com/boto/boto3/pull/103>`__)
* feature:Clients: Add support for passing of ``botocore.client.Config`` object
to instantiation of clients.
0.0.17 - 2015-05-07
-------------------
0.0.17
======
* feature:Botocore: Update to Botocore 0.107.0.
* Adopt new data structure model.
0.0.16 - 2015-04-20
-------------------
0.0.16
======
* bugfix:Packaging: Fix release sdist and whl files from 0.0.15.
* feature:Amazon DynamoDB: Add resource model for Amazon DynamoDB.
0.0.15 - 2015-04-13
-------------------
* bugfix:Packaging: Fix an issue with the Amazon S3 ``upload_file`` and
``download_file`` customization.
(`issue 85 <https://github.com/boto/boto3/pull/85>`__)
* bugfix:Resource: Fix an issue with the Amazon S3 ``BucketNotification``
resource.
0.0.15
======
* bugfix:Packaging: Fix an issue with the Amazon S3 ``upload_file`` and ``download_file`` customization. (`issue 85 <https://github.com/boto/boto3/pull/85>`__)
* bugfix:Resource: Fix an issue with the Amazon S3 ``BucketNotification`` resource.
* feature:Botocore: Update to Botocore 0.103.0.
* Documentation updates for Amazon EC2 Container Service.
0.0.14 - 2015-04-02
-------------------
0.0.14
======
* feature:Resources: Update to the latest resource models for:
* AWS CloudFormation
* Amazon EC2
* AWS IAM
* feature:Amazon S3: Add an ``upload_file`` and ``download_file``
to S3 clients that transparently handle parallel multipart transfers.
* feature:Resources: Update to the latest resource models for AWS CloudFormation, Amazon EC2, and AWS IAM.
* feature:Amazon S3: Add an ``upload_file`` and ``download_file`` to S3 clients that transparently handle parallel multipart transfers.
* feature:Botocore: Update to Botocore 0.102.0.
* Add support for Amazon Machine Learning.
* Add support for Amazon Workspaces.
* Update ``requests`` to 2.6.0.
* Update AWS Lambda to the latest API.
* Update Amazon EC2 Container Service to the latest API.
* Update Amazon S3 to the latest API.
* Add ``DBSnapshotCompleted`` support to Amazon RDS waiters.
* Fixes for the REST-JSON protocol.
0.0.13 - 2015-04-02
-------------------
0.0.13
======
* feature:Botocore: Update to Botocore 0.100.0.
* Update AWS CodeDeploy to the latest service API.
* Update Amazon RDS to support the ``describe_certificates``
service operation.
* Update Amazon Elastic Transcoder to support PlayReady DRM.
* Update Amazon EC2 to support D2 instance types.
0.0.12 - 2015-03-26
-------------------
0.0.12
======
* feature:Resources: Add the ability to load resource data from a
``has`` relationship. This saves a call to ``load`` when available,
and otherwise fixes a problem where there was no way to get at
certain resource data.
(`issue 74 <https://github.com/boto/boto3/pull/74>`__)
* feature:Resources: Add the ability to load resource data from a ``has`` relationship. This saves a call to ``load`` when available, and otherwise fixes a problem where there was no way to get at certain resource data. (`issue 74 <https://github.com/boto/boto3/pull/74>`__)
* feature:Botocore: Update to Botocore 0.99.0
* Update service models for amazon Elastic Transcoder, AWS IAM
and AWS OpsWorks to the latest versions.
* Add deprecation warnings for old interface.
0.0.11 - 2015-03-24
-------------------
0.0.11
======
* feature:Resources: Add Amazon EC2 support for ClassicLink actions
and add a delete action to EC2 ``Volume`` resources.
* feature:Resources: Add a ``load`` operation and ``user`` reference
to AWS IAM's ``CurrentUser`` resource.
(`issue 72 <https://github.com/boto/boto3/pull/72>`__)
* feature:Resources: Add resources for AWS IAM managed policies.
(`issue 71 <https://github.com/boto/boto3/pull/71>`__)
* feature:Resources: Add Amazon EC2 support for ClassicLink actions and add a delete action to EC2 ``Volume`` resources.
* feature:Resources: Add a ``load`` operation and ``user`` reference to AWS IAM's ``CurrentUser`` resource. (`issue 72 <https://github.com/boto/boto3/pull/72>`__)
* feature:Resources: Add resources for AWS IAM managed policies. (`issue 71 <https://github.com/boto/boto3/pull/71>`__)
* feature:Botocore: Update to Botocore 0.97.0
* Add new Amazon EC2 waiters.
* Add support for Amazon S3 cross region replication.
* Fix an issue where empty config values could not be specified for
Amazon S3's bucket notifications.
(`botocore issue 495 <https://github.com/boto/botocore/pull/495>`__)
* Update Amazon CloudWatch Logs to the latest API.
* Update Amazon Elastic Transcoder to the latest API.
* Update AWS CloudTrail to the latest API.
* Fix bug where explicitly passed ``profile_name`` will now override
any access and secret keys set in environment variables.
(`botocore issue 486 <https://github.com/boto/botocore/pull/486>`__)
* Add ``endpoint_url`` to ``client.meta``.
* Better error messages for invalid regions.
* Fix creating clients with unicode service name.
0.0.10 - 2015-03-05
-------------------
0.0.10
======
* bugfix:Documentation: Name collisions are now handled at the resource
model layer instead of the factory, meaning that the documentation
now uses the correct names.
(`issue 67 <https://github.com/boto/boto3/pull/67>`__)
* feature:Session: Add a ``region_name`` option when creating a session.
(`issue 69 <https://github.com/boto/boto3/pull/69>`__,
`issue 21 <https://github.com/boto/boto3/issues/21>`__)
* bugfix:Documentation: Name collisions are now handled at the resource model layer instead of the factory, meaning that the documentation now uses the correct names. (`issue 67 <https://github.com/boto/boto3/pull/67>`__)
* feature:Session: Add a ``region_name`` option when creating a session. (`issue 69 <https://github.com/boto/boto3/pull/69>`__, `issue 21 <https://github.com/boto/boto3/issues/21>`__)
* feature:Botocore: Update to Botocore 0.94.0
* Update to the latest Amazon CloudSearch API.
* Add support for near-realtime data updates and exporting historical
data from Amazon Cognito Sync.
* **Removed** the ability to clone a low-level client. Instead, create
a new client with the same parameters.
* Add support for URL paths in an endpoint URL.
* Multithreading signature fixes.
* Add support for listing hosted zones by name and getting hosted zone
counts from Amazon Route53.
* Add support for tagging to AWS Data Pipeline.
0.0.9 - 2015-02-19
------------------
0.0.9
=====
* feature:Botocore: Update to Botocore 0.92.0
* Add support for the latest Amazon EC2 Container Service API.
* Allow calling AWS STS ``assume_role_with_saml`` without credentials.
* Update to latest Amazon CloudFront API
* Add support for AWS STS regionalized calls by passing both a region
name and an endpoint URL.
(`botocore issue 464 <https://github.com/boto/botocore/pull/464>`__)
* Add support for Amazon Simple Systems Management Service (SSM)
* Fix Amazon S3 auth errors when uploading large files
to the ``eu-central-1`` and ``cn-north-1`` regions.
(`botocore issue 462 <https://github.com/boto/botocore/pull/462>`__)
* Add support for AWS IAM managed policies
* Add support for Amazon ElastiCache tagging
* Add support for Amazon Route53 Domains tagging of domains
0.0.8 - 2015-02-10
------------------
0.0.8
=====
* bugfix:Resources: Fix Amazon S3 resource identifier order.
(`issue 62 <https://github.com/boto/boto3/pull/62>`__)
* bugfix:Resources: Fix collection resource hydration path.
(`issue 61 <https://github.com/boto/boto3/pull/61>`__)
* bugfix:Resources: Re-enable service-level access to all resources,
allowing e.g. ``obj = s3.Object('bucket', 'key')``.
(`issue 60 <https://github.com/boto/boto3/pull/60>`__)
* bugfix:Resources: Fix Amazon S3 resource identifier order. (`issue 62 <https://github.com/boto/boto3/pull/62>`__)
* bugfix:Resources: Fix collection resource hydration path. (`issue 61 <https://github.com/boto/boto3/pull/61>`__)
* bugfix:Resources: Re-enable service-level access to all resources, allowing e.g. ``obj = s3.Object('bucket', 'key')``. (`issue 60 <https://github.com/boto/boto3/pull/60>`__)
* feature:Botocore: Update to Botocore 0.87.0
* Add support for Amazon DynamoDB secondary index scanning.
* Upgrade to ``requests`` 2.5.1.
* Add support for anonymous (unsigned) clients.
(`botocore issue 448 <https://github.com/boto/botocore/pull/448>`__)
0.0.7 - 2015-02-05
------------------
0.0.7
=====
* feature:Resources: Enable support for Amazon Glacier.
* feature:Resources: Support plural references and nested JMESPath
queries for data members when building parameters and identifiers.
(`issue 52 <https://github.com/boto/boto3/pull/52>`__)
* feature:Resources: Update to the latest resource JSON format. This is
a **backward-incompatible** change as not all resources are exposed
at the service level anymore. For example, ``s3.Object('bucket', 'key')``
is now ``s3.Bucket('bucket').Object('key')``.
(`issue 51 <https://github.com/boto/boto3/pull/51>`__)
* feature:Resources: Make ``resource.meta`` a proper object. This allows
you to do things like ``resource.meta.client``. This is a **backward-
incompatible** change.
(`issue 45 <https://github.com/boto/boto3/pull/45>`__)
* feature:Resources: Support plural references and nested JMESPath queries for data members when building parameters and identifiers. (`issue 52 <https://github.com/boto/boto3/pull/52>`__)
* feature:Resources: Update to the latest resource JSON format. This is a **backward-incompatible** change as not all resources are exposed at the service level anymore. For example, ``s3.Object('bucket', 'key')`` is now ``s3.Bucket('bucket').Object('key')``. (`issue 51 <https://github.com/boto/boto3/pull/51>`__)
* feature:Resources: Make ``resource.meta`` a proper object. This allows you to do things like ``resource.meta.client``. This is a **backward-incompatible** change. (`issue 45 <https://github.com/boto/boto3/pull/45>`__)
* feature:Dependency: Update to JMESPath 0.6.1
* feature:Botocore: Update to Botocore 0.86.0
* Add support for AWS CloudHSM
* Add support for Amazon EC2 and Autoscaling ClassicLink
* Add support for Amazon EC2 Container Service (ECS)
* Add support for encryption at rest and CloudHSM to Amazon RDS
* Add support for Amazon DynamoDB online indexing.
* Add support for AWS ImportExport ``get_shipping_label``.
* Add support for Amazon Glacier.
* Add waiters for AWS ElastiCache.
(`botocore issue 443 <https://github.com/boto/botocore/pull/443>`__)
* Fix an issue with Amazon CloudFront waiters.
(`botocore issue 426 <https://github.com/boto/botocore/pull/426>`_)
* Allow binary data to be passed to ``UserData``.
(`botocore issue 416 <https://github.com/boto/botocore/pull/416>`_)
* Fix Amazon EMR endpoints for ``eu-central-1`` and ``cn-north-1``.
(`botocore issue 423 <https://github.com/boto/botocore/pull/423>`__)
* Fix issue with base64 encoding of blob types for Amazon EMR.
(`botocore issue 413 <https://github.com/boto/botocore/pull/413>`__)
0.0.6 - 2014-12-18
------------------
0.0.6
=====
* feature:Amazon SQS: Add ``purge`` action to queue resources
* feature:Waiters: Add documentation for client and resource waiters
(`issue 44 <https://github.com/boto/boto3/pull/44>`__)
* feature:Waiters: Add support for resource waiters
(`issue 43 <https://github.com/boto/boto3/pull/43>`__)
* bugfix:Installation: Remove dependency on the unused ``six`` module
(`issue 42 <https://github.com/boto/boto3/pull/42>`__)
* feature:Waiters: Add documentation for client and resource waiters (`issue 44 <https://github.com/boto/boto3/pull/44>`__)
* feature:Waiters: Add support for resource waiters (`issue 43 <https://github.com/boto/boto3/pull/43>`__)
* bugfix:Installation: Remove dependency on the unused ``six`` module (`issue 42 <https://github.com/boto/boto3/pull/42>`__)
* feature:Botocore: Update to Botocore 0.80.0
* Update Amazon Simple Workflow Service (SWF) to the latest version
* Update AWS Storage Gateway to the latest version
* Update Amazon Elastic MapReduce (EMR) to the latest version
* Update AWS Elastic Transcoder to the latest version
* Enable use of ``page_size`` for clients
(`botocore issue 408 <https://github.com/boto/botocore/pull/408>`__)
0.0.5 - 2014-12-09
------------------
0.0.5
=====
* feature: Add support for batch actions on collections.
(`issue 32 <https://github.com/boto/boto3/pull/32>`__)
* feature: Update to Botocore 0.78.0
* feature:Resources: Add support for batch actions on collections. (`issue 32 <https://github.com/boto/boto3/pull/32>`__)
* feature:Botocore: Update to Botocore 0.78.0
* Add support for Amazon Simple Queue Service purge queue which allows
users to delete the messages in their queue.
* Add AWS OpsWorks support for registering and assigning existing Amazon
EC2 instances and on-premises servers.
* Fix issue with expired signatures when retrying failed requests
(`botocore issue 399 <https://github.com/boto/botocore/pull/399>`__)
* Port Route53 resource ID customizations from AWS CLI to Botocore.
(`botocore issue 398 <https://github.com/boto/botocore/pull/398>`__)
* Fix handling of blob type serialization for JSON services.
(`botocore issue 397 <https://github.com/boto/botocore/pull/397>`__)
0.0.4 - 2014-12-04
------------------
0.0.4
=====
* feature: Update to Botocore 0.77.0
* feature:Botocore: Update to Botocore 0.77.0
* feature:EC2: Update `Amazon EC2 <http://aws.amazon.com/ec2/>`_ resource model.
* feature:Resources: Support `belongsTo` resource reference as well as `path` specified in an action's resource definition.
* bugfix:SQS: Fix an issue accessing SQS message bodies (`issue 33 <https://github.com/boto/boto3/issues/33>`__)
* Add support for Kinesis PutRecords operation. It writes multiple
data records from a producer into an Amazon Kinesis stream in a
single call.
* Add support for IAM GetAccountAuthorizationDetails operation. It
retrieves information about all IAM users, groups, and roles in
your account, including their relationships to one another and
their attached policies.
* Add support for updating the comment of a Route53 hosted zone.
* Fix base64 serialization for JSON protocol services.
* Fix issue where certain timestamps were not being accepted as valid input
(`botocore issue 389 <https://github.com/boto/botocore/pull/389>`__)
* feature: Update `Amazon EC2 <http://aws.amazon.com/ec2/>`_ resource model.
* feature: Support `belongsTo` resource reference as well as `path`
specified in an action's resource definition.
* bugfix: Fix an issue accessing SQS message bodies
(`issue 33 <https://github.com/boto/boto3/issues/33>`__)
0.0.3
=====
0.0.3 - 2014-11-26
------------------
* feature:Botocore: Update to Botocore 0.76.0.
* feature: Update to Botocore 0.76.0.
* Add support for using AWS Data Pipeline templates to create
pipelines and bind values to parameters in the pipeline
* Add support to Amazon Elastic Transcoder client for encryption of files
in Amazon S3.
* Fix issue where Amazon S3 requests were not being
resigned correctly when using Signature Version 4.
(`botocore issue 388 <https://github.com/boto/botocore/pull/388>`__)
* Add support for custom response parsing in Botocore clients.
(`botocore issue 387 <https://github.com/boto/botocore/pull/387>`__)
0.0.2
=====
0.0.2 - 2014-11-20
------------------
* feature:Resources: Adds resources for `AWS CloudFormation <http://aws.amazon.com/cloudformation/>`_ and `AWS OpsWorks <http://aws.amazon.com/opsworks/>`_.
* feature:Botocore: Update to Botocore 0.73.0 and JMESPath 0.5.0
* feature:Clients: Adds support for `AWS CodeDeploy <http://aws.amazon.com/codedeploy/>`_, `AWS Config <http://aws.amazon.com/config/>`_, `AWS KMS <http://aws.amazon.com/kms/>`_, `AWS Lambda <http://aws.amazon.com/lambda/>`_.
* feature:UserAgent: Make requests with a customized HTTP user-agent
* Adds resources for
`AWS CloudFormation <http://aws.amazon.com/cloudformation/>`_ and
`AWS OpsWorks <http://aws.amazon.com/opsworks/>`_.
* Update to Botocore 0.73.0 and JMESPath 0.5.0
* Adds support for
`AWS CodeDeploy <http://aws.amazon.com/codedeploy/>`_,
`AWS Config <http://aws.amazon.com/config/>`_,
`AWS KMS <http://aws.amazon.com/kms/>`_,
`AWS Lambda <http://aws.amazon.com/lambda/>`_.
* Make requests with a customized HTTP user-agent
0.0.1 - 2014-11-11
------------------
0.0.1
=====
* feature:Resources: Supports S3, EC2, SQS, SNS, and IAM resources
* feature:Clients: Supports low-level clients for most services
* Initial developer preview refresh of Boto 3
* Supports S3, EC2, SQS, SNS, and IAM resources
* Supports low-level clients for most services

LICENSE

@@ -1,4 +1,4 @@
Copyright 2013-2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Copyright 2013-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License"). You
may not use this file except in compliance with the License. A copy of

boto3/__init__.py

@@ -17,7 +17,7 @@ from boto3.session import Session
__author__ = 'Amazon Web Services'
__version__ = '1.2.2'
__version__ = '1.3.1'
# The default Boto3 session; autoloaded when needed.

boto3/compat.py

@@ -13,6 +13,18 @@
import sys
import os
import errno
import socket
from botocore.vendored import six
if six.PY3:
# In python3, socket.error is OSError, which is too general
# for what we want (i.e FileNotFoundError is a subclass of OSError).
# In py3 all the socket related errors are in a newly created
# ConnectionError
SOCKET_ERROR = ConnectionError
else:
SOCKET_ERROR = socket.error
if sys.platform.startswith('win'):

@@ -0,0 +1,334 @@
{
"service": {
"has": {
"Alarm": {
"resource": {
"type": "Alarm",
"identifiers": [
{
"target": "Name",
"source": "input"
}
]
}
},
"Metric": {
"resource": {
"type": "Metric",
"identifiers": [
{
"target": "Namespace",
"source": "input"
},
{
"target": "Name",
"source": "input"
}
]
}
}
},
"hasMany": {
"Alarms": {
"request": { "operation": "DescribeAlarms" },
"resource": {
"type": "Alarm",
"identifiers": [
{
"target": "Name",
"source": "response",
"path": "MetricAlarms[].AlarmName"
}
],
"path": "MetricAlarms[]"
}
},
"Metrics": {
"request": { "operation": "ListMetrics" },
"resource": {
"type": "Metric",
"identifiers": [
{
"target": "Namespace",
"source": "response",
"path": "Metrics[].Namespace"
},
{
"target": "Name",
"source": "response",
"path": "Metrics[].MetricName"
}
],
"path": "Metrics[]"
}
}
}
},
"resources": {
"Alarm": {
"identifiers": [
{
"name": "Name",
"memberName": "AlarmName"
}
],
"shape": "MetricAlarm",
"load": {
"request": {
"operation": "DescribeAlarms",
"params": [
{
"target": "AlarmNames[0]",
"source": "identifier",
"name": "Name"
}
]
},
"path": "MetricAlarms[0]"
},
"actions": {
"Delete": {
"request": {
"operation": "DeleteAlarms",
"params": [
{
"target": "AlarmNames[0]",
"source": "identifier",
"name": "Name"
}
]
}
},
"DescribeHistory": {
"request": {
"operation": "DescribeAlarmHistory",
"params": [
{
"target": "AlarmName",
"source": "identifier",
"name": "Name"
}
]
}
},
"DisableActions": {
"request": {
"operation": "DisableAlarmActions",
"params": [
{
"target": "AlarmNames[0]",
"source": "identifier",
"name": "Name"
}
]
}
},
"EnableActions": {
"request": {
"operation": "EnableAlarmActions",
"params": [
{
"target": "AlarmNames[0]",
"source": "identifier",
"name": "Name"
}
]
}
},
"SetState": {
"request": {
"operation": "SetAlarmState",
"params": [
{
"target": "AlarmName",
"source": "identifier",
"name": "Name"
}
]
}
}
},
"batchActions": {
"Delete": {
"request": {
"operation": "DeleteAlarms",
"params": [
{
"target": "AlarmNames[]",
"source": "identifier",
"name": "Name"
}
]
}
},
"DisableActions": {
"request": {
"operation": "DisableAlarmActions",
"params": [
{
"target": "AlarmNames[]",
"source": "identifier",
"name": "Name"
}
]
}
},
"EnableActions": {
"request": {
"operation": "EnableAlarmActions",
"params": [
{
"target": "AlarmNames[]",
"source": "identifier",
"name": "Name"
}
]
}
}
},
"has": {
"Metric": {
"resource": {
"type": "Metric",
"identifiers": [
{
"target": "Namespace",
"source": "data",
"path": "Namespace"
},
{
"target": "Name",
"source": "data",
"path": "MetricName"
}
]
}
}
}
},
"Metric": {
"identifiers": [
{
"name": "Namespace",
"memberName": "Namespace"
},
{
"name": "Name",
"memberName": "MetricName"
}
],
"shape": "Metric",
"load": {
"request": {
"operation": "ListMetrics",
"params": [
{
"target": "MetricName",
"source": "identifier",
"name": "Name"
},
{
"target": "Namespace",
"source": "identifier",
"name": "Namespace"
}
]
},
"path": "Metrics[0]"
},
"actions": {
"GetStatistics": {
"request": {
"operation": "GetMetricStatistics",
"params": [
{
"target": "Namespace",
"source": "identifier",
"name": "Namespace"
},
{
"target": "MetricName",
"source": "identifier",
"name": "Name"
}
]
}
},
"PutAlarm": {
"request": {
"operation": "PutMetricAlarm",
"params": [
{
"target": "Namespace",
"source": "identifier",
"name": "Namespace"
},
{
"target": "MetricName",
"source": "identifier",
"name": "Name"
}
]
},
"resource": {
"type": "Alarm",
"identifiers": [
{
"target": "Name",
"source": "requestParameter",
"path": "AlarmName"
}
]
}
},
"PutData": {
"request": {
"operation": "PutMetricData",
"params": [
{
"target": "Namespace",
"source": "identifier",
"name": "Namespace"
},
{
"target": "MetricData[].MetricName",
"source": "identifier",
"name": "Name"
}
]
}
}
},
"hasMany": {
"Alarms": {
"request": {
"operation": "DescribeAlarmsForMetric",
"params": [
{
"target": "Namespace",
"source": "identifier",
"name": "Namespace"
},
{
"target": "MetricName",
"source": "identifier",
"name": "Name"
}
]
},
"resource": {
"type": "Alarm",
"identifiers": [
{
"target": "Name",
"source": "response",
"path": "MetricAlarms[].AlarmName"
}
],
"path": "MetricAlarms[]"
}
}
}
}
}
}

@@ -130,6 +130,20 @@
]
}
}
},
"waiters":{
"Exists": {
"waiterName": "TableExists",
"params": [
{ "target": "TableName", "source": "identifier", "name": "Name" }
]
},
"NotExists": {
"waiterName": "TableNotExists",
"params": [
{ "target": "TableName", "source": "identifier", "name": "Name" }
]
}
}
}
}
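
These waiter entries surface on ``Table`` resources as generated ``wait_until_*`` methods; a sketch with a placeholder table name::

    import boto3

    dynamodb = boto3.resource('dynamodb')
    table = dynamodb.Table('users')
    table.wait_until_exists()        # backed by the TableExists waiter
    table.delete()
    table.wait_until_not_exists()    # backed by the TableNotExists waiter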

@@ -491,7 +491,7 @@
"request": {
"operation": "DescribeAddresses",
"params": [
{ "target": "PublicIp", "source": "identifier", "name": "PublicIp" }
{ "target": "PublicIps[]", "source": "identifier", "name": "PublicIp" }
]
},
"path": "Addresses[0]"
@@ -1356,6 +1356,16 @@
}
],
"shape": "InstanceNetworkInterfaceAssociation",
"load": {
"request": {
"operation": "DescribeNetworkInterfaces",
"params": [
{ "target": "Filters[0].Name", "source": "string", "value": "association.association-id" },
{ "target": "Filters[0].Values[0]", "source": "identifier", "name": "Id" }
]
},
"path": "NetworkInterfaces[0].Association"
},
"actions": {
"Delete": {
"request": {
@@ -1423,6 +1433,46 @@
}
}
},
"Route": {
"identifiers": [
{ "name": "RouteTableId" },
{
"name": "DestinationCidrBlock",
"memberName": "DestinationCidrBlock"
}
],
"shape": "Route",
"actions": {
"Delete": {
"request": {
"operation": "DeleteRoute",
"params": [
{ "target": "RouteTableId", "source": "identifier", "name": "RouteTableId" },
{ "target": "DestinationCidrBlock", "source": "identifier", "name": "DestinationCidrBlock" }
]
}
},
"Replace": {
"request": {
"operation": "ReplaceRoute",
"params": [
{ "target": "RouteTableId", "source": "identifier", "name": "RouteTableId" },
{ "target": "DestinationCidrBlock", "source": "identifier", "name": "DestinationCidrBlock" }
]
}
}
},
"has": {
"RouteTable": {
"resource": {
"type": "RouteTable",
"identifiers": [
{ "target": "Id", "source": "identifier", "name": "RouteTableId" }
]
}
}
}
},
"RouteTable": {
"identifiers": [
{
@@ -1461,6 +1511,13 @@
"params": [
{ "target": "RouteTableId", "source": "identifier", "name": "Id" }
]
},
"resource": {
"type": "Route",
"identifiers": [
{ "target": "RouteTableId", "source": "identifier", "name": "Id" },
{ "target": "DestinationCidrBlock", "source": "requestParameter", "path": "DestinationCidrBlock" }
]
}
},
"CreateTags": {
@@ -1489,6 +1546,16 @@
}
},
"has": {
"Routes": {
"resource": {
"type": "Route",
"identifiers": [
{ "target": "RouteTableId", "source": "identifier", "name": "Id" },
{ "target": "DestinationCidrBlock", "source": "data", "path": "Routes[].DestinationCidrBlock" }
],
"path": "Routes[]"
}
},
"Vpc": {
"resource": {
"type": "Vpc",
@@ -2423,6 +2490,15 @@
}
}
},
"waiters": {
"Exists": {
"waiterName": "VpcPeeringConnectionExists",
"params": [
{ "target": "VpcPeeringConnectionIds[]", "source": "identifier", "name": "Id" }
],
"path": "VpcPeeringConnections[0]"
}
},
"has": {
"AccepterVpc": {
"resource": {

@@ -846,6 +846,15 @@
{ "target": "Key", "source": "identifier", "name": "Key" }
]
}
},
"RestoreObject": {
"request": {
"operation": "RestoreObject",
"params": [
{ "target": "Bucket", "source": "identifier", "name": "BucketName" },
{ "target": "Key", "source": "identifier", "name": "Key" }
]
}
}
},
"batchActions": {
@@ -1013,6 +1022,15 @@
{ "target": "Key", "source": "identifier", "name": "Key" }
]
}
},
"RestoreObject": {
"request": {
"operation": "RestoreObject",
"params": [
{ "target": "Bucket", "source": "identifier", "name": "BucketName" },
{ "target": "Key", "source": "identifier", "name": "Key" }
]
}
}
},
"batchActions": {

boto3/docs/action.py

@@ -40,6 +40,7 @@ class ActionDocumenter(BaseDocumenter):
'automatically handle the passing in of arguments set '
'from identifiers and some attributes.'),
intro_link='actions_intro')
for action_name in sorted(resource_actions):
action_section = section.add_new_section(action_name)
if action_name in ['load', 'reload'] and self._resource_model.load:
@@ -61,7 +62,7 @@
)
else:
document_custom_method(
section, action_name, resource_actions[action_name])
action_section, action_name, resource_actions[action_name])
def document_action(section, resource_name, event_emitter, action_model,

boto3/docs/attr.py

@@ -11,16 +11,28 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.docs.utils import py_type_name
from botocore.docs.params import ResponseParamsDocumenter
from boto3.docs.utils import get_identifier_description
def document_attribute(section, attr_name, attr_model, include_signature=True):
class ResourceShapeDocumenter(ResponseParamsDocumenter):
EVENT_NAME = 'resource-shape'
def document_attribute(section, service_name, resource_name, attr_name,
event_emitter, attr_model, include_signature=True):
if include_signature:
section.style.start_sphinx_py_attr(attr_name)
attr_type = '*(%s)* ' % py_type_name(attr_model.type_name)
section.write(attr_type)
section.include_doc_string(attr_model.documentation)
# Note that an attribute may have one, may have many, or may have no
# operations that back the resource's shape. So we just set the
operation_name to the resource name if we ever want to hook in and modify
# a particular attribute.
ResourceShapeDocumenter(
service_name=service_name, operation_name=resource_name,
event_emitter=event_emitter).document_params(
section=section,
shape=attr_model)
def document_identifier(section, resource_name, identifier_model,

boto3/docs/resource.py

@@ -169,7 +169,10 @@ class ResourceDocumenter(BaseDocumenter):
attribute_list.append(attr_name)
document_attribute(
section=attribute_section,
service_name=self._service_name,
resource_name=self._resource_name,
attr_name=attr_name,
event_emitter=self._resource.meta.client.meta.events,
attr_model=attr_shape
)

boto3/docs/service.py

@@ -10,9 +10,11 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import boto3
from botocore.exceptions import DataNotFoundError
from botocore.docs.paginator import PaginatorDocumenter
from botocore.docs.waiter import WaiterDocumenter
from botocore.docs.service import ServiceDocumenter as BaseServiceDocumenter
from botocore.docs.bcdoc.restdoc import DocumentStructure
from boto3.utils import ServiceContext
@@ -21,17 +23,20 @@ from boto3.docs.resource import ResourceDocumenter
from boto3.docs.resource import ServiceResourceDocumenter
class ServiceDocumenter(object):
class ServiceDocumenter(BaseServiceDocumenter):
# The path used to find examples
EXAMPLE_PATH = os.path.join(os.path.dirname(boto3.__file__), 'examples')
def __init__(self, service_name, session):
self._service_name = service_name
self._session = session
self._boto3_session = session
# I know that this is an internal attribute, but the botocore session
# is needed to load the paginator and waiter models.
self._botocore_session = session._session
self._client = self._session.client(service_name)
self._session = session._session
self._client = self._boto3_session.client(service_name)
self._service_resource = None
if self._service_name in self._session.get_available_resources():
self._service_resource = self._session.resource(service_name)
if self._service_name in self._boto3_session.get_available_resources():
self._service_resource = self._boto3_session.resource(service_name)
self.sections = [
'title',
'table-of-contents',
@@ -39,7 +44,8 @@ class ServiceDocumenter(object):
'paginators',
'waiters',
'service-resource',
'resources'
'resources',
'examples'
]
def document_service(self):
@@ -50,73 +56,74 @@
doc_structure = DocumentStructure(
self._service_name, section_names=self.sections,
target='html')
self._document_title(doc_structure.get_section('title'))
self._document_table_of_contents(
doc_structure.get_section('table-of-contents'))
self._document_client(doc_structure.get_section('client'))
self._document_paginators(doc_structure.get_section('paginators'))
self._document_waiters(doc_structure.get_section('waiters'))
self.title(doc_structure.get_section('title'))
self.table_of_contents(doc_structure.get_section('table-of-contents'))
self.client_api(doc_structure.get_section('client'))
self.paginator_api(doc_structure.get_section('paginators'))
self.waiter_api(doc_structure.get_section('waiters'))
if self._service_resource:
self._document_service_resource(
doc_structure.get_section('service-resource'))
self._document_resources(doc_structure.get_section('resources'))
self._document_examples(doc_structure.get_section('examples'))
return doc_structure.flush_structure()
def _document_title(self, section):
section.style.h1(self._client.__class__.__name__)
def _document_table_of_contents(self, section):
section.style.table_of_contents(title='Table of Contents', depth=2)
def _document_client(self, section):
Boto3ClientDocumenter(self._client).document_client(section)
def _document_paginators(self, section):
def client_api(self, section):
examples = None
try:
paginator_model = self._botocore_session.get_paginator_model(
self._service_name)
examples = self.get_examples(self._service_name)
except DataNotFoundError:
return
paginator_documenter = PaginatorDocumenter(
self._client, paginator_model)
paginator_documenter.document_paginators(section)
pass
def _document_waiters(self, section):
if self._client.waiter_names:
service_waiter_model = self._botocore_session.get_waiter_model(
self._service_name)
waiter_documenter = WaiterDocumenter(
self._client, service_waiter_model)
waiter_documenter.document_waiters(section)
Boto3ClientDocumenter(self._client, examples).document_client(section)
def _document_service_resource(self, section):
ServiceResourceDocumenter(
self._service_resource, self._botocore_session).document_resource(
self._service_resource, self._session).document_resource(
section)
def _document_resources(self, section):
temp_identifier_value = 'foo'
loader = self._botocore_session.get_component('data_loader')
loader = self._session.get_component('data_loader')
json_resource_model = loader.load_service_model(
self._service_name, 'resources-1')
service_model = self._service_resource.meta.client.meta.service_model
for resource_name in json_resource_model['resources']:
resource_model = json_resource_model['resources'][resource_name]
resource_cls = self._session.resource_factory.load_from_definition(
resource_name=resource_name,
single_resource_json_definition=resource_model,
service_context=ServiceContext(
service_name=self._service_name,
resource_json_definitions=json_resource_model['resources'],
service_model=service_model,
service_waiter_model=None
resource_cls = self._boto3_session.resource_factory.\
load_from_definition(
resource_name=resource_name,
single_resource_json_definition=resource_model,
service_context=ServiceContext(
service_name=self._service_name,
resource_json_definitions=json_resource_model[
'resources'],
service_model=service_model,
service_waiter_model=None
)
)
)
identifiers = resource_cls.meta.resource_model.identifiers
args = []
for _ in identifiers:
args.append(temp_identifier_value)
resource = resource_cls(*args, client=self._client)
ResourceDocumenter(
resource, self._botocore_session).document_resource(
resource, self._session).document_resource(
section.add_new_section(resource.meta.resource_model.name))
def _get_example_file(self):
return os.path.realpath(
os.path.join(self.EXAMPLE_PATH,
self._service_name + '.rst'))
def _document_examples(self, section):
examples_file = self._get_example_file()
if os.path.isfile(examples_file):
section.style.h2('Examples')
section.style.new_line()
section.write(".. contents::\n :local:\n :depth: 1")
section.style.new_line()
section.style.new_line()
with open(examples_file, 'r') as f:
section.write(f.read())

View file

@ -98,18 +98,20 @@ class BatchWriter(object):
self._flush()
def _flush(self):
items_to_send = self._items_buffer[:self._flush_amount]
self._items_buffer = self._items_buffer[self._flush_amount:]
response = self._client.batch_write_item(
RequestItems={self._table_name: self._items_buffer})
RequestItems={self._table_name: items_to_send})
unprocessed_items = response['UnprocessedItems']
if unprocessed_items and unprocessed_items[self._table_name]:
# Any unprocessed_items are immediately added to the
# next batch we send.
self._items_buffer = unprocessed_items[self._table_name]
self._items_buffer.extend(unprocessed_items[self._table_name])
else:
self._items_buffer = []
logger.debug("Batch write sent %s, unprocessed: %s",
self._flush_amount, len(self._items_buffer))
len(items_to_send), len(self._items_buffer))
def __enter__(self):
return self
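# A minimal usage sketch of the buffering behavior above, via the
# public ``batch_writer`` API; the table name is hypothetical. Items
# are sent ``flush_amount`` at a time, and any UnprocessedItems are
# appended back onto the buffer for the next flush.
import boto3

dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table('my-table')

with table.batch_writer() as batch:
    for i in range(100):
        batch.put_item(Item={'id': str(i), 'value': 'x'})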

34
boto3/ec2/deletetags.py Normal file
View file

@ -0,0 +1,34 @@
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from boto3.resources.action import CustomModeledAction
def inject_delete_tags(event_emitter, **kwargs):
action_model = {
'request': {
'operation': 'DeleteTags',
'params': [{
'target': 'Resources[0]',
'source': 'identifier',
'name': 'Id'
}]
}
}
action = CustomModeledAction(
'delete_tags', action_model, delete_tags, event_emitter)
action.inject(**kwargs)
def delete_tags(self, **kwargs):
kwargs['Resources'] = [self.id]
return self.meta.client.delete_tags(**kwargs)
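# A minimal usage sketch, assuming the handler above has been
# registered for 'creating-resource-class.ec2.Instance' (as done in
# boto3/session.py later in this diff); the instance id is
# hypothetical.
import boto3

ec2 = boto3.resource('ec2')
instance = ec2.Instance('i-1234567890abcdef0')

# Maps to client.delete_tags(Resources=[instance.id], Tags=[...]).
instance.delete_tags(Tags=[{'Key': 'Stage'}])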

View file

@ -0,0 +1,37 @@
Generate a signed URL for Amazon CloudFront
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following example shows how to generate a signed URL for Amazon CloudFront.
Note that you will need the ``cryptography`` `library <https://cryptography.io/en/latest/>`__ to follow this example::
import datetime
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import padding
from botocore.signers import CloudFrontSigner
def rsa_signer(message):
with open('path/to/key.pem', 'rb') as key_file:
private_key = serialization.load_pem_private_key(
key_file.read(),
password=None,
backend=default_backend()
)
signer = private_key.signer(padding.PKCS1v15(), hashes.SHA1())
signer.update(message)
return signer.finalize()
key_id = 'AKIAIOSFODNN7EXAMPLE'
url = 'http://d2949o5mkkp72v.cloudfront.net/hello.txt'
expire_date = datetime.datetime(2017, 1, 1)
cloudfront_signer = CloudFrontSigner(key_id, rsa_signer)
# Create a signed URL that will be valid until the specific expiry date
# provided using a canned policy.
signed_url = cloudfront_signer.generate_presigned_url(
url, date_less_than=expire_date)
print(signed_url)

61
boto3/examples/s3.rst Normal file
View file

@ -0,0 +1,61 @@
List objects in an Amazon S3 bucket
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following example shows how to use an Amazon S3 bucket resource to list
the objects in the bucket.
.. code-block:: python
import boto3
s3 = boto3.resource('s3')
bucket = s3.Bucket('my-bucket')
for obj in bucket.objects.all():
print(obj.key)
List top-level common prefixes in Amazon S3 bucket
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
This example shows how to list all of the top-level common prefixes in an
Amazon S3 bucket:
.. code-block:: python
import boto3
client = boto3.client('s3')
paginator = client.get_paginator('list_objects')
result = paginator.paginate(Bucket='my-bucket', Delimiter='/')
for prefix in result.search('CommonPrefixes'):
print(prefix.get('Prefix'))
Restore Glacier objects in an Amazon S3 bucket
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following example shows how to initiate restoration of glacier objects in
an Amazon S3 bucket, determine if a restoration is on-going, and determine if a
restoration is finished.
.. code-block:: python
import boto3
s3 = boto3.resource('s3')
bucket = s3.Bucket('glacier-bucket')
for obj_sum in bucket.objects.all():
obj = s3.Object(obj_sum.bucket_name, obj_sum.key)
if obj.storage_class == 'GLACIER':
# Try to restore the object if the storage class is glacier and
# the object does not have a completed or ongoing restoration
# request.
if obj.restore is None:
print('Submitting restoration request: %s' % obj.key)
obj.restore_object()
# Print out objects whose restoration is on-going
elif 'ongoing-request="true"' in obj.restore:
print('Restoration in-progress: %s' % obj.key)
# Print out objects whose restoration is complete
elif 'ongoing-request="false"' in obj.restore:
print('Restoration complete: %s' % obj.key)

View file

@ -11,29 +11,77 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
class ResourceLoadException(Exception):
# All exceptions in this class should subclass from Boto3Error.
import botocore.exceptions
# All exceptions should subclass from Boto3Error in this module.
class Boto3Error(Exception):
"""Base class for all Boto3 errors."""
class ResourceLoadException(Boto3Error):
pass
class NoVersionFound(Exception):
# NOTE: This doesn't appear to be used anywhere.
# It's probably safe to remove this.
class NoVersionFound(Boto3Error):
pass
class RetriesExceededError(Exception):
# We're subclassing from botocore.exceptions.DataNotFoundError
# to keep backwards compatibility with anyone that was catching
# this low level Botocore error before this exception was
# introduced in boto3.
# Same thing for ResourceNotExistsError below.
class UnknownAPIVersionError(Boto3Error,
botocore.exceptions.DataNotFoundError):
def __init__(self, service_name, bad_api_version,
available_api_versions):
msg = (
"The '%s' resource does not an API version of: %s\n"
"Valid API versions are: %s"
% (service_name, bad_api_version, available_api_versions)
)
# Not using super because we don't want the DataNotFoundError
# to be called, it has a different __init__ signature.
Boto3Error.__init__(self, msg)
class ResourceNotExistsError(Boto3Error,
botocore.exceptions.DataNotFoundError):
"""Raised when you attempt to create a resource that does not exist."""
def __init__(self, service_name, available_services, has_low_level_client):
msg = (
"The '%s' resource does not exist.\n"
"The available resources are:\n"
" - %s\n" % (service_name, '\n - '.join(available_services))
)
if has_low_level_client:
msg += (
"\nConsider using a boto3.client('%s') instead "
"of a resource for '%s'" % (service_name, service_name))
# Not using super because we don't want the DataNotFoundError
# to be called, it has a different __init__ signature.
Boto3Error.__init__(self, msg)
class RetriesExceededError(Boto3Error):
def __init__(self, last_exception, msg='Max Retries Exceeded'):
super(RetriesExceededError, self).__init__(msg)
self.last_exception = last_exception
class S3TransferFailedError(Exception):
class S3TransferFailedError(Boto3Error):
pass
class S3UploadFailedError(Exception):
class S3UploadFailedError(Boto3Error):
pass
class DynamoDBOperationNotSupportedError(Exception):
class DynamoDBOperationNotSupportedError(Boto3Error):
"""Raised for operantions that are not supported for an operand"""
def __init__(self, operation, value):
msg = (
@ -46,7 +94,7 @@ class DynamoDBOperationNotSupportedError(Exception):
# FIXME: Backward compatibility
DynanmoDBOperationNotSupportedError = DynamoDBOperationNotSupportedError
class DynamoDBNeedsConditionError(Exception):
class DynamoDBNeedsConditionError(Boto3Error):
"""Raised when input is not a condition"""
def __init__(self, value):
msg = (
@ -56,5 +104,5 @@ class DynamoDBNeedsConditionError(Exception):
Exception.__init__(self, msg)
class DynamoDBNeedsKeyConditionError(Exception):
class DynamoDBNeedsKeyConditionError(Boto3Error):
pass
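# A minimal sketch of the backwards compatibility noted above: code
# that caught the low-level botocore error continues to work, because
# ResourceNotExistsError also subclasses DataNotFoundError. The
# service name here is deliberately invalid.
import boto3
import botocore.exceptions

try:
    boto3.resource('no-such-service')
except botocore.exceptions.DataNotFoundError as e:
    print(e)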

View file

@ -17,6 +17,10 @@ from botocore import xform_name
from .params import create_request_parameters
from .response import RawHandler, ResourceHandler
from .model import Action
from boto3.docs.docstring import ActionDocstring
from boto3.utils import inject_attribute
logger = logging.getLogger(__name__)
@ -122,7 +126,7 @@ class BatchAction(ServiceAction):
# the necessary parameters and call the batch operation.
for page in parent.pages():
params = {}
for resource in page:
for index, resource in enumerate(page):
# There is no public interface to get a service name
# or low-level client from a collection, so we get
# these from the first resource in the collection.
@ -132,7 +136,8 @@ class BatchAction(ServiceAction):
client = resource.meta.client
create_request_parameters(
resource, self._action_model.request, params=params)
resource, self._action_model.request,
params=params, index=index)
if not params:
# There are no items, no need to make a call.
@ -196,3 +201,42 @@ class WaiterAction(object):
response = waiter.wait(**params)
logger.debug('Response: %r', response)
class CustomModeledAction(object):
"""A custom, modeled action to inject into a resource."""
def __init__(self, action_name, action_model,
function, event_emitter):
"""
:type action_name: str
:param action_name: The name of the action to inject, e.g. 'delete_tags'
:type action_model: dict
:param action_model: A JSON definition of the action, as if it were
part of the resource model.
:type function: function
:param function: The function to perform when the action is called.
The first argument should be 'self', which will be the resource
the function is to be called on.
:type event_emitter: :py:class:`botocore.hooks.BaseEventHooks`
:param event_emitter: The session event emitter.
"""
self.name = action_name
self.model = action_model
self.function = function
self.emitter = event_emitter
def inject(self, class_attributes, service_context, event_name, **kwargs):
resource_name = event_name.rsplit(".")[-1]
action = Action(self.name, self.model, {})
self.function.__name__ = self.name
self.function.__doc__ = ActionDocstring(
resource_name=resource_name,
event_emitter=self.emitter,
action_model=action,
service_model=service_context.service_model,
include_signature=False
)
inject_attribute(class_attributes, self.name, self.function)
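# A minimal sketch of wiring a CustomModeledAction into resource-class
# creation via the session's event emitter. The action name, model,
# and function below are hypothetical; boto3/ec2/deletetags.py in this
# diff is the in-tree usage.
import boto3
from boto3.resources.action import CustomModeledAction

def force_reboot(self, **kwargs):
    kwargs['InstanceIds'] = [self.id]
    return self.meta.client.reboot_instances(**kwargs)

action_model = {
    'request': {
        'operation': 'RebootInstances',
        'params': [{'target': 'InstanceIds[0]',
                    'source': 'identifier',
                    'name': 'Id'}]
    }
}

session = boto3.Session()
action = CustomModeledAction(
    'force_reboot', action_model, force_reboot, session.events)
session.events.register(
    'creating-resource-class.ec2.Instance', action.inject)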

View file

@ -395,11 +395,13 @@ class CollectionFactory(object):
self._load_batch_actions(
attrs, resource_name, collection_model,
service_context.service_model, event_emitter)
# Add the documentation to the collection class's methods
self._load_documented_collection_methods(
attrs=attrs, resource_name=resource_name,
collection_model=collection_model,
service_model=service_context.service_model,
event_emitter=event_emitter)
event_emitter=event_emitter,
base_class=ResourceCollection)
if service_context.service_name == resource_name:
cls_name = '{0}.{1}Collection'.format(
@ -411,12 +413,13 @@ class CollectionFactory(object):
collection_cls = type(str(cls_name), (ResourceCollection,),
attrs)
# Add the documentation to the collection methods
# Add the documentation to the collection manager's methods
self._load_documented_collection_methods(
attrs=attrs, resource_name=resource_name,
collection_model=collection_model,
service_model=service_context.service_model,
event_emitter=event_emitter)
event_emitter=event_emitter,
base_class=CollectionManager)
attrs['_collection_cls'] = collection_cls
cls_name += 'Manager'
@ -434,18 +437,18 @@ class CollectionFactory(object):
resource_name, snake_cased, action_model, collection_model,
service_model, event_emitter)
def _load_documented_collection_methods(factory_self, attrs, resource_name,
collection_model, service_model,
event_emitter):
# The CollectionManager already has these methods defined. However
def _load_documented_collection_methods(
factory_self, attrs, resource_name, collection_model,
service_model, event_emitter, base_class):
# The base class already has these methods defined. However
# the docstrings are generic and not tailored to a particular service
# or resource. So we override these methods by proxying to the
# CollectionManager's builtin method and adding a docstring
# base class's builtin method and adding a docstring
# that pertains to the resource.
# A collection's all() method.
def all(self):
return CollectionManager.all(self)
return base_class.all(self)
all.__doc__ = docstring.CollectionMethodDocstring(
resource_name=resource_name,
@ -459,7 +462,7 @@ class CollectionFactory(object):
# The collection's filter() method.
def filter(self, **kwargs):
return CollectionManager.filter(self, **kwargs)
return base_class.filter(self, **kwargs)
filter.__doc__ = docstring.CollectionMethodDocstring(
resource_name=resource_name,
@ -473,7 +476,7 @@ class CollectionFactory(object):
# The collection's limit method.
def limit(self, count):
return CollectionManager.limit(self, count)
return base_class.limit(self, count)
limit.__doc__ = docstring.CollectionMethodDocstring(
resource_name=resource_name,
@ -487,7 +490,7 @@ class CollectionFactory(object):
# The collection's page_size method.
def page_size(self, count):
return CollectionManager.page_size(self, count)
return base_class.page_size(self, count)
page_size.__doc__ = docstring.CollectionMethodDocstring(
resource_name=resource_name,

View file

@ -105,7 +105,8 @@ class ResourceFactory(object):
# Attributes that get auto-loaded
self._load_attributes(
attrs=attrs, meta=meta, resource_model=resource_model,
attrs=attrs, meta=meta, resource_name=resource_name,
resource_model=resource_model,
service_context=service_context)
# Collections and their corresponding methods
@ -133,9 +134,10 @@ class ResourceFactory(object):
base_classes = [ServiceResource]
if self._emitter is not None:
self._emitter.emit('creating-resource-class.%s' % cls_name,
class_attributes=attrs,
base_classes=base_classes)
self._emitter.emit(
'creating-resource-class.%s' % cls_name,
class_attributes=attrs, base_classes=base_classes,
service_context=service_context)
return type(str(cls_name), tuple(base_classes), attrs)
def _load_identifiers(self, attrs, meta, resource_model, resource_name):
@ -167,21 +169,39 @@ class ResourceFactory(object):
action_model=action, resource_name=resource_name,
service_context=service_context)
def _load_attributes(self, attrs, meta, resource_model, service_context):
def _load_attributes(self, attrs, meta, resource_name, resource_model,
service_context):
"""
Load resource attributes based on the resource shape. The shape
name is referenced in the resource JSON, but the shape itself
is defined in the Botocore service JSON, hence the need for
access to the ``service_model``.
"""
if resource_model.shape:
shape = service_context.service_model.shape_for(
resource_model.shape)
if not resource_model.shape:
return
attributes = resource_model.get_attributes(shape)
for name, (orig_name, member) in attributes.items():
attrs[name] = self._create_autoload_property(
name=orig_name, snake_cased=name, member_model=member)
shape = service_context.service_model.shape_for(
resource_model.shape)
identifiers = dict((i.member_name, i)
for i in resource_model.identifiers if i.member_name)
attributes = resource_model.get_attributes(shape)
for name, (orig_name, member) in attributes.items():
if name in identifiers:
prop = self._create_identifier_alias(
resource_name=resource_name,
identifier=identifiers[name],
member_model=member,
service_context=service_context
)
else:
prop = self._create_autoload_property(
resource_name=resource_name,
name=orig_name, snake_cased=name,
member_model=member,
service_context=service_context
)
attrs[name] = prop
def _load_collections(self, attrs, resource_model, service_context):
"""
@ -228,6 +248,26 @@ class ResourceFactory(object):
service_context=service_context
)
self._create_available_subresources_command(
attrs, resource_model.subresources)
def _create_available_subresources_command(self, attrs, subresources):
_subresources = [subresource.name for subresource in subresources]
_subresources = sorted(_subresources)
def get_available_subresources(factory_self):
"""
Returns a list of all the available sub-resources for this
Resource.
:returns: A list containing the name of each sub-resource for this
resource
:rtype: list of str
"""
return _subresources
attrs['get_available_subresources'] = get_available_subresources
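# A minimal usage sketch of the injected method (bucket name
# hypothetical): lists the names of sub-resources that can be created
# from this resource.
import boto3

s3 = boto3.resource('s3')
print(s3.Bucket('my-bucket').get_available_subresources())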
def _load_waiters(self, attrs, resource_name, resource_model,
service_context):
"""
@ -264,8 +304,28 @@ class ResourceFactory(object):
return property(get_identifier)
def _create_autoload_property(factory_self, name, snake_cased,
member_model):
def _create_identifier_alias(factory_self, resource_name, identifier,
member_model, service_context):
"""
Creates a read-only property that aliases an identifier.
"""
def get_identifier(self):
return getattr(self, '_' + identifier.name, None)
get_identifier.__name__ = str(identifier.member_name)
get_identifier.__doc__ = docstring.AttributeDocstring(
service_name=service_context.service_name,
resource_name=resource_name,
attr_name=identifier.member_name,
event_emitter=factory_self._emitter,
attr_model=member_model,
include_signature=False
)
return property(get_identifier)
def _create_autoload_property(factory_self, resource_name, name,
snake_cased, member_model, service_context):
"""
Creates a new property on the resource to lazy-load its value
via the resource's ``load`` method (if it exists).
@ -286,7 +346,10 @@ class ResourceFactory(object):
property_loader.__name__ = str(snake_cased)
property_loader.__doc__ = docstring.AttributeDocstring(
service_name=service_context.service_name,
resource_name=resource_name,
attr_name=snake_cased,
event_emitter=factory_self._emitter,
attr_model=member_model,
include_signature=False
)

View file

@ -38,9 +38,10 @@ class Identifier(object):
:type name: string
:param name: The name of the identifier
"""
def __init__(self, name):
def __init__(self, name, member_name=None):
#: (``string``) The name of the identifier
self.name = name
self.member_name = member_name
class Action(object):
@ -428,7 +429,10 @@ class ResourceModel(object):
for item in self._definition.get('identifiers', []):
name = self._get_name('identifier', item['name'])
identifiers.append(Identifier(name))
member_name = item.get('memberName', None)
if member_name:
member_name = self._get_name('attribute', member_name)
identifiers.append(Identifier(name, member_name))
return identifiers
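# A minimal sketch of what memberName enables, assuming EC2's
# ``Instance`` resource models its ``Id`` identifier with a
# memberName of ``InstanceId`` (instance id hypothetical).
import boto3

ec2 = boto3.resource('ec2')
instance = ec2.Instance('i-1234567890abcdef0')

# 'instance_id' becomes a read-only alias for the 'id' identifier,
# so reading it never triggers a load() call.
print(instance.instance_id == instance.id)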

View file

@ -48,7 +48,7 @@ def get_data_member(parent, path):
return jmespath.search(path, parent.meta.data)
def create_request_parameters(parent, request_model, params=None):
def create_request_parameters(parent, request_model, params=None, index=None):
"""
Handle request parameters that can be filled in from identifiers,
resource data members or constants.
@ -64,6 +64,8 @@ def create_request_parameters(parent, request_model, params=None):
:type params: dict
:param params: If set, then add to this existing dict. It is both
edited in-place and returned.
:type index: int
:param index: The position of an item within a list
:rtype: dict
:return: Pre-filled parameters to be sent to the request operation.
"""
@ -91,11 +93,12 @@ def create_request_parameters(parent, request_model, params=None):
raise NotImplementedError(
'Unsupported source type: {0}'.format(source))
build_param_structure(params, target, value)
build_param_structure(params, target, value, index)
return params
def build_param_structure(params, target, value):
def build_param_structure(params, target, value, index=None):
"""
This method provides a basic reverse JMESPath implementation that
lets you go from a JMESPath-like string to a possibly deeply nested
@ -125,11 +128,12 @@ def build_param_structure(params, target, value):
result = INDEX_RE.search(part)
if result:
if result.group(1):
# We have an explicit index
index = int(result.group(1))
# Strip index off part name
part = part[:-len(str(index) + '[]')]
if result.group(1) == '*':
part = part[:-3]
else:
# We have an explicit index
index = int(result.group(1))
part = part[:-len(str(index) + '[]')]
else:
# Index will be set after we know the proper part
# name and that it's a list instance.
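# A minimal sketch of the explicit-index path above, using the
# 'Resources[0]' target from the DeleteTags action model elsewhere in
# this diff; build_param_structure is internal, so treat this as
# illustration only.
from boto3.resources.params import build_param_structure

params = {}
build_param_structure(params, 'Resources[0]', 'i-1234567890abcdef0')
print(params)  # {'Resources': ['i-1234567890abcdef0']}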

View file

@ -34,6 +34,10 @@ def inject_object_methods(class_attributes, **kwargs):
class_attributes, 'download_file', object_download_file)
def inject_object_summary_methods(class_attributes, **kwargs):
utils.inject_attribute(class_attributes, 'load', object_summary_load)
def bucket_load(self, *args, **kwargs):
"""Calls s3.Client.list_buckets() to update the attributes of the Bucket resource."""
# The docstring above is phrased this way to match what the autogenerated
@ -51,6 +55,15 @@ def bucket_load(self, *args, **kwargs):
'ListBuckets')
def object_summary_load(self, *args, **kwargs):
"""Calls s3.Client.head_object to update the attributes of the ObjectSummary resource."""
response = self.meta.client.head_object(
Bucket=self.bucket_name, Key=self.key)
if 'ContentLength' in response:
response['Size'] = response.pop('ContentLength')
self.meta.data = response
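# A minimal usage sketch of the injected load, with a hypothetical
# bucket and key: after load(), the summary is backed by a
# head_object response with 'Size' aliased from 'ContentLength'.
import boto3

s3 = boto3.resource('s3')
summary = s3.ObjectSummary('my-bucket', 'my-key')
summary.load()
print(summary.size)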
def upload_file(self, Filename, Bucket, Key, ExtraArgs=None,
Callback=None, Config=None):
"""Upload a file to an S3 object.

View file

@ -72,7 +72,7 @@ client operation. Here are a few examples using ``upload_file``::
extra_args={'ContentType': "application/json"})
The ``S3Transfer`` clas also supports progress callbacks so you can
The ``S3Transfer`` class also supports progress callbacks so you can
provide transfer progress to users. Both the ``upload_file`` and
``download_file`` methods take an optional ``callback`` parameter.
Here's an example of how to print a simple progress percentage
@ -94,7 +94,7 @@ to the user:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\r%s %s / %s (%.2f%%)" % (self._filename, self._seen_so_far,
"\\r%s %s / %s (%.2f%%)" % (self._filename, self._seen_so_far,
self._size, percentage))
sys.stdout.flush()
@ -148,6 +148,10 @@ queue = six.moves.queue
MB = 1024 * 1024
SHUTDOWN_SENTINEL = object()
S3_RETRYABLE_ERRORS = (
socket.timeout, boto3.compat.SOCKET_ERROR,
ReadTimeoutError, IncompleteReadError
)
def random_file_extension(num_digits=8):
@ -175,7 +179,7 @@ class ReadFileChunk(object):
callback=None, enable_callback=True):
"""
Given a file object shown below:
Given a file object shown below::
|___________________________________________________|
0 | | full_file_size
@ -268,6 +272,9 @@ class ReadFileChunk(object):
def seek(self, where):
self._fileobj.seek(self._start_byte + where)
if self._callback is not None and self._callback_enabled:
# To also rewind the callback() for an accurate progress report
self._callback(where - self._amount_read)
self._amount_read = where
def close(self):
@ -460,7 +467,7 @@ class MultipartDownloader(object):
# 1 thread for the future that manages IO writes.
download_parts_handler = functools.partial(
self._download_file_as_future,
bucket, key, filename, object_size, callback)
bucket, key, filename, object_size, extra_args, callback)
parts_future = controller.submit(download_parts_handler)
io_writes_handler = functools.partial(
@ -476,13 +483,13 @@ class MultipartDownloader(object):
future.result()
def _download_file_as_future(self, bucket, key, filename, object_size,
callback):
extra_args, callback):
part_size = self._config.multipart_chunksize
num_parts = int(math.ceil(object_size / float(part_size)))
max_workers = self._config.max_concurrency
download_partial = functools.partial(
self._download_range, bucket, key, filename,
part_size, num_parts, callback)
part_size, num_parts, extra_args, callback)
try:
with self._executor_cls(max_workers=max_workers) as executor:
list(executor.map(download_partial, range(num_parts)))
@ -499,7 +506,8 @@ class MultipartDownloader(object):
return range_param
def _download_range(self, bucket, key, filename,
part_size, num_parts, callback, part_index):
part_size, num_parts,
extra_args, callback, part_index):
try:
range_param = self._calculate_range_param(
part_size, part_index, num_parts)
@ -510,7 +518,8 @@ class MultipartDownloader(object):
try:
logger.debug("Making get_object call.")
response = self._client.get_object(
Bucket=bucket, Key=key, Range=range_param)
Bucket=bucket, Key=key, Range=range_param,
**extra_args)
streaming_body = StreamReaderProgress(
response['Body'], callback)
buffer_size = 1024 * 16
@ -520,8 +529,7 @@ class MultipartDownloader(object):
self._ioqueue.put((current_index, chunk))
current_index += len(chunk)
return
except (socket.timeout, socket.error,
ReadTimeoutError, IncompleteReadError) as e:
except S3_RETRYABLE_ERRORS as e:
logger.debug("Retrying exception caught (%s), "
"retrying request, (attempt %s / %s)", e, i,
max_attempts, exc_info=True)
@ -532,6 +540,15 @@ class MultipartDownloader(object):
logger.debug("EXITING _download_range for part: %s", part_index)
def _perform_io_writes(self, filename):
try:
self._loop_on_io_writes(filename)
except Exception as e:
logger.debug("Caught exception in IO thread: %s",
e, exc_info=True)
self._ioqueue.trigger_shutdown()
raise
def _loop_on_io_writes(self, filename):
with self._os.open(filename, 'wb') as f:
while True:
task = self._ioqueue.get()
@ -540,15 +557,9 @@ class MultipartDownloader(object):
"shutting down IO handler.")
return
else:
try:
offset, data = task
f.seek(offset)
f.write(data)
except Exception as e:
logger.debug("Caught exception in IO thread: %s",
e, exc_info=True)
self._ioqueue.trigger_shutdown()
raise
offset, data = task
f.seek(offset)
f.write(data)
class TransferConfig(object):
@ -696,10 +707,7 @@ class S3Transfer(object):
try:
return self._do_get_object(bucket, key, filename,
extra_args, callback)
except (socket.timeout, socket.error,
ReadTimeoutError, IncompleteReadError) as e:
# TODO: we need a way to reset the callback if the
# download failed.
except S3_RETRYABLE_ERRORS as e:
logger.debug("Retrying exception caught (%s), "
"retrying request, (attempt %s / %s)", e, i,
max_attempts, exc_info=True)

View file

@ -16,9 +16,11 @@ import os
import botocore.session
from botocore.client import Config
from botocore.exceptions import DataNotFoundError, UnknownServiceError
import boto3
import boto3.utils
from boto3.exceptions import ResourceNotExistsError, UnknownAPIVersionError
from .resources.factory import ResourceFactory
@ -89,6 +91,13 @@ class Session(object):
"""
return self._session.profile or 'default'
@property
def region_name(self):
"""
The **read-only** region name.
"""
return self._session.get_config_variable('region')
@property
def events(self):
"""
@ -102,7 +111,7 @@ class Session(object):
"""
self._loader = self._session.get_component('data_loader')
self._loader.search_paths.append(
os.path.join(os.path.dirname(__file__), 'data'))
os.path.join(os.path.dirname(__file__), 'data'))
def get_available_services(self):
"""
@ -124,6 +133,47 @@ class Session(object):
"""
return self._loader.list_available_services(type_name='resources-1')
def get_available_partitions(self):
"""Lists the available partitions
:rtype: list
:return: Returns a list of partition names (e.g., ["aws", "aws-cn"])
"""
return self._session.get_available_partitions()
def get_available_regions(self, service_name, partition_name='aws',
allow_non_regional=False):
"""Lists the region and endpoint names of a particular partition.
:type service_name: string
:param service_name: Name of a service to list endpoints for (e.g., s3).
:type partition_name: string
:param partition_name: Name of the partition to limit endpoints to.
(e.g., aws for the public AWS endpoints, aws-cn for AWS China
endpoints, aws-us-gov for AWS GovCloud (US) Endpoints, etc.)
:type allow_non_regional: bool
:param allow_non_regional: Set to True to include endpoints that are
not regional endpoints (e.g., s3-external-1,
fips-us-gov-west-1, etc).
:return: Returns a list of endpoint names (e.g., ["us-east-1"]).
"""
return self._session.get_available_regions(
service_name=service_name, partition_name=partition_name,
allow_non_regional=allow_non_regional)
def get_credentials(self):
"""
Return the :class:`botocore.credentials.Credentials` object
associated with this session. If the credentials have not
yet been loaded, this will attempt to load them. If they
have already been loaded, this will return the cached
credentials.
"""
return self._session.get_credentials()
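# A minimal sketch of the session helpers added above; the values
# printed depend on your local configuration.
import boto3

session = boto3.Session()
print(session.get_available_partitions())    # e.g. ['aws', 'aws-cn', ...]
print(session.get_available_regions('s3'))   # regional endpoint names
print(session.region_name)                   # the read-only region property

credentials = session.get_credentials()
if credentials is not None:
    print(credentials.access_key)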
def client(self, service_name, region_name=None, api_version=None,
use_ssl=True, verify=None, endpoint_url=None,
aws_access_key_id=None, aws_secret_access_key=None,
@ -200,9 +250,9 @@ class Session(object):
aws_session_token=aws_session_token, config=config)
def resource(self, service_name, region_name=None, api_version=None,
use_ssl=True, verify=None, endpoint_url=None,
aws_access_key_id=None, aws_secret_access_key=None,
aws_session_token=None, config=None):
use_ssl=True, verify=None, endpoint_url=None,
aws_access_key_id=None, aws_secret_access_key=None,
aws_session_token=None, config=None):
"""
Create a resource service client by name.
@ -268,11 +318,40 @@ class Session(object):
:return: Subclass of :py:class:`~boto3.resources.base.ServiceResource`
"""
try:
resource_model = self._loader.load_service_model(
service_name, 'resources-1', api_version)
except UnknownServiceError as e:
available = self.get_available_resources()
has_low_level_client = (
service_name in self.get_available_services())
raise ResourceNotExistsError(service_name, available,
has_low_level_client)
except DataNotFoundError as e:
# This is because we've provided an invalid API version.
available_api_versions = self._loader.list_api_versions(
service_name, 'resources-1')
raise UnknownAPIVersionError(
service_name, api_version, ', '.join(available_api_versions))
if api_version is None:
# Even though botocore's load_service_model() can handle
# using the latest api_version if not provided, we need
# to track this api_version in boto3 in order to ensure
# we're pairing a resource model with a client model
# of the same API version. It's possible for the latest
# API version of a resource model in boto3 to not be
# the same API version as a service model in botocore.
# So we need to look up the api_version if one is not
# provided to ensure we load the same API version of the
# client.
#
# Note: This is relying on the fact that
# loader.load_service_model(..., api_version=None)
# and loader.determine_latest_version(..., 'resources-1')
# both load the same api version of the file.
api_version = self._loader.determine_latest_version(
service_name, 'resources-1')
resource_model = self._loader.load_service_model(
service_name, 'resources-1', api_version)
# Creating a new resource instance requires the low-level client
# and service model, the resource version and resource JSON data.
@ -325,6 +404,10 @@ class Session(object):
'creating-resource-class.s3.Object',
boto3.utils.lazy_call(
'boto3.s3.inject.inject_object_methods'))
self._session.register(
'creating-resource-class.s3.ObjectSummary',
boto3.utils.lazy_call(
'boto3.s3.inject.inject_object_summary_methods'))
# DynamoDb customizations
self._session.register(
@ -343,3 +426,9 @@ class Session(object):
'creating-resource-class.ec2.ServiceResource',
boto3.utils.lazy_call(
'boto3.ec2.createtags.inject_create_tags'))
self._session.register(
'creating-resource-class.ec2.Instance',
boto3.utils.lazy_call(
'boto3.ec2.deletetags.inject_delete_tags',
event_emitter=self.events))

View file

@ -53,11 +53,15 @@ def import_module(name):
return sys.modules[name]
def lazy_call(full_name):
def lazy_call(full_name, **kwargs):
parent_kwargs = kwargs
def _handler(**kwargs):
module, function_name = full_name.rsplit('.', 1)
module = import_module(module)
kwargs.update(parent_kwargs)
return getattr(module, function_name)(**kwargs)
return _handler

View file

@ -0,0 +1,6 @@
{%- extends "!layout.html" %}
{%- block breadcrumbs %}
{{ super() }}
<!--REGION_DISCLAIMER_DO_NOT_REMOVE-->
{%- endblock %}

View file

@ -1,13 +1,16 @@
.. _guide_configuration:
Configuration
=============
Credentials
===========
Boto can be configured in multiple ways. Regardless of the source or sources
that you choose, you **must** have AWS credentials and a region set in
order to make requests.
Interactive Configuration
-------------------------
If you have the `AWS CLI <http://aws.amazon.com/cli/>`_, then you can use
its interactive ``configure`` command to set up your credentials and
default region::
@ -17,101 +20,430 @@ default region::
Follow the prompts and it will generate configuration files in the
correct locations for you.
Configuration Sources
---------------------
There are multiple sources from which configuration data can be loaded.
The general order in which they are checked is as follows:
Configuring Credentials
-----------------------
1. Method parameters
2. Environment variables
3. Configuration files
4. EC2 Instance metadata
There are two types of configuration data in boto3: credentials and
non-credentials. Credentials include items such as ``aws_access_key_id``,
``aws_secret_access_key``, and ``aws_session_token``. Non-credential
configuration includes items such as which ``region`` to use or which
addressing style to use for Amazon S3. The distinction between
credentials and non-credential configuration is important because
the lookup process is slightly different. When searching for credentials,
Boto3 checks several additional locations that do not apply when
searching for non-credential configuration.
If a configuration value is set in multiple places, then the first
will be used according to the order above. For example, if you have
set a default region in both your environment variables and configuration
file, then the environment variable is used.
The mechanism by which boto3 looks for credentials is to search through
a list of possible locations and stop as soon as it finds credentials.
The order in which Boto3 searches for credentials is:
#. Passing credentials as parameters in the ``boto3.client()`` method
#. Passing credentials as parameters when creating a ``Session`` object
#. Environment variables
#. Shared credential file (``~/.aws/credentials``)
#. AWS config file (``~/.aws/config``)
#. Assume Role provider
#. Boto2 config file (``/etc/boto.cfg`` and ``~/.boto``)
#. Instance metadata service on an Amazon EC2 instance that has an
IAM role configured.
Each of those locations is discussed in more detail below.
Available Options
-----------------
The available options for various configuration sources are listed below.
Method Parameters
~~~~~~~~~~~~~~~~~
When creating a session, client, or resource you can pass in credential
and configuration options::
from boto3.session import Session
The first option for providing credentials to boto3 is passing them
as parameters when creating clients or when creating a ``Session``.
For example::
session = Session(aws_access_key_id='<YOUR ACCESS KEY ID>',
aws_secret_access_key='<YOUR SECRET KEY>',
region_name='<REGION NAME>')
import boto3
client = boto3.client(
's3',
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
aws_session_token=SESSION_TOKEN,
)
ec2 = session.resource('ec2')
ec2_us_west_2 = session.resource('ec2', region_name='us-west-2')
# Or via the Session
session = boto3.Session(
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
aws_session_token=SESSION_TOKEN,
)
# List all of my EC2 instances in my default region.
print('Default region:')
for instance in ec2.instances.all():
print(instance.id)
where ``ACCESS_KEY``, ``SECRET_KEY`` and ``SESSION_TOKEN`` are variables
that contain your access key, secret key, and optional session token.
Note that the examples above do not have hard coded credentials. We
do not recommend hard coding credentials in your source code. For example::
# List all of my EC2 instances in us-west-2.
print('US West 2 region:')
for instance in ec2_us_west_2.instances.all():
print(instance.id)
# Do not hard code credentials
client = boto3.client(
's3',
# Hard coded strings as credentials, not recommended.
aws_access_key_id='AKIAIO5FODNN7EXAMPLE',
aws_secret_access_key='ABCDEF+c2L7yXeGvUyrPgYsDnWRRC1AYEXAMPLE'
)
Valid use cases for providing credentials to the ``client()`` method
and ``Session`` objects include:
* Retrieving temporary credentials using AWS STS (such as
``sts.get_session_token()``).
* Loading credentials from some external location, e.g. the OS keychain.
For a list of all options, look at the :py:class:`~boto3.session.Session`
documentation.
Environment Variables
~~~~~~~~~~~~~~~~~~~~~
Boto3 will check these environment variables for credentials:
``AWS_ACCESS_KEY_ID``
The access key for your AWS account.
``AWS_SECRET_ACCESS_KEY``
The secret key for your AWS account.
``AWS_SESSION_TOKEN``
The session key for your AWS account. This is only needed when
you are using temporary credentials. The ``AWS_SECURITY_TOKEN``
environment variable can also be used, but is only supported
for backwards compatibility purposes. ``AWS_SESSION_TOKEN`` is
supported by multiple AWS SDKs besides python.
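As a minimal sketch, these variables can also be set from Python before
credentials are resolved (the values below are placeholders; in practice
you would export these in your shell rather than hard code them)::

import os
import boto3

os.environ['AWS_ACCESS_KEY_ID'] = 'foo'
os.environ['AWS_SECRET_ACCESS_KEY'] = 'bar'

# The environment is consulted when credentials are resolved.
client = boto3.client('s3')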
Shared Credentials File
~~~~~~~~~~~~~~~~~~~~~~~
The shared credentials file has a default location of
``~/.aws/credentials``. You can change the location of the shared
credentials file by setting the ``AWS_SHARED_CREDENTIALS_FILE``
environment variable.
This file is an INI formatted file with section names
corresponding to profiles. Within each section, the three configuration
variables shown above can be specified: ``aws_access_key_id``,
``aws_secret_access_key``, ``aws_session_token``. **These are the only
supported values in the shared credential file.**
Below is a minimal example of the shared credentials file::
[default]
aws_access_key_id=foo
aws_secret_access_key=bar
aws_session_token=baz
The shared credentials file also supports the concept of ``profiles``.
Profiles represent logical groups of configuration. The shared
credential file can have multiple profiles defined::
[default]
aws_access_key_id=foo
aws_secret_access_key=bar
[dev]
aws_access_key_id=foo2
aws_secret_access_key=bar2
[prod]
aws_access_key_id=foo3
aws_secret_access_key=bar3
You can then specify a profile name via the ``AWS_PROFILE`` environment
variable or the ``profile_name`` argument when creating a Session::
session = boto3.Session(profile_name='dev')
# Any clients created from this session will use credentials
# from the [dev] section of ~/.aws/credentials.
dev_s3_client = session.client('s3')
AWS Config File
~~~~~~~~~~~~~~~
Boto3 can also load credentials from ``~/.aws/config``. You can change
this default location by setting the ``AWS_CONFIG_FILE`` environment variable.
The config file is an INI format, with the same keys supported by the
shared credentials file. The only difference is that profile sections
**must** have the format of ``[profile profile-name]``, except for
the default profile. For example::
# Example ~/.aws/config file.
[default]
aws_access_key_id=foo
aws_secret_access_key=bar
[profile dev]
aws_access_key_id=foo2
aws_secret_access_key=bar2
[profile prod]
aws_access_key_id=foo3
aws_secret_access_key=bar3
The reason that section names must start with ``profile`` in the
``~/.aws/config`` file is that this file permits other kinds of
sections besides profile configurations.
Assume Role Provider
~~~~~~~~~~~~~~~~~~~~
.. note::
This is a different set of credentials configuration than using
IAM roles for EC2 instances, which is discussed in a section
below.
Within the ``~/.aws/config`` file, you can also configure a profile
to indicate that boto3 should assume a role. When you do this,
boto3 will automatically make the corresponding ``AssumeRole`` calls
to AWS STS on your behalf. It will handle in memory caching as well as
refreshing credentials as needed.
You can specify the following configuration values for configuring an
IAM role in boto3:
* ``role_arn`` - The ARN of the role you want to assume.
* ``source_profile`` - The boto3 profile that contains credentials we should
use for the initial ``AssumeRole`` call.
* ``external_id`` - A unique identifier that is used by third parties to assume
a role in their customers' accounts. This maps to the ``ExternalId``
parameter in the ``AssumeRole`` operation. This is an optional parameter.
* ``mfa_serial`` - The identification number of the MFA device to use when
assuming a role. This is an optional parameter. Specify this value if the
trust policy of the role being assumed includes a condition that requires MFA
authentication. The value is either the serial number for a hardware device
(such as GAHT12345678) or an Amazon Resource Name (ARN) for a virtual device
(such as arn:aws:iam::123456789012:mfa/user).
* ``role_session_name`` - The name applied to this assume-role session. This
value affects the assumed role user ARN (such as
arn:aws:sts::123456789012:assumed-role/role_name/role_session_name). This
maps to the ``RoleSessionName`` parameter in the ``AssumeRole`` operation.
This is an optional parameter. If you do not provide this value, a
session name will be automatically generated.
If you do not have MFA authentication required, then you only need to specify a
``role_arn`` and a ``source_profile``.
When you specify a profile that has IAM role configuration, boto3 will make an
``AssumeRole`` call to retrieve temporary credentials. Subsequent boto3 API
calls will use the cached temporary credentials until they expire, in which
case boto3 will automatically refresh credentials. boto3 does not write these
temporary credentials to disk. This means that temporary credentials from the
``AssumeRole`` calls are only cached in memory within a single ``Session``.
All clients created from that session will share the same temporary
credentials.
If you specify an ``mfa_serial``, then the first time an ``AssumeRole`` call is
made, you will be prompted to enter the MFA code. **Your code will block until
you enter your MFA code.** You'll need to keep this in mind if you have an
``mfa_serial`` configured but would like to use boto3 in some automated script.
Below is an example configuration for the minimal amount of configuration
needed to configure an assume role profile::
# In ~/.aws/credentials:
[development]
aws_access_key_id=foo
aws_secret_access_key=bar
# In ~/.aws/config
[profile crossaccount]
role_arn=arn:aws:iam:...
source_profile=development
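A minimal sketch of using this profile from Python; boto3 makes the
``AssumeRole`` call and refreshes the temporary credentials as needed::

import boto3

session = boto3.Session(profile_name='crossaccount')

# All clients from this session share the temporary credentials.
s3 = session.client('s3')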
See `Using IAM Roles`_ for general information on IAM roles.
Boto2 Config
~~~~~~~~~~~~
Boto3 will attempt to load credentials from the Boto2 config file.
It will check ``/etc/boto.cfg`` and ``~/.boto``. Note that
*only* the ``[Credentials]`` section of the boto config file is used.
All other configuration data in the boto config file is ignored.
Example::
# Example ~/.boto file
[Credentials]
aws_access_key_id = foo
aws_secret_access_key = bar
This credential provider is primarily for backwards compatibility purposes
with boto2.
IAM Role
~~~~~~~~
If you are running on Amazon EC2 and no credentials have been found
by any of the providers above, boto3 will try to load credentials
from the instance metadata service. In order to take advantage of this
feature, you must have specified an IAM role to use when you launched
your EC2 instance. For more information on how to configure IAM roles
on EC2 instances, see the `IAM Roles for Amazon EC2`_ guide.
Note that if you've launched an EC2 instance with an IAM role configured,
there's no explicit configuration you need to set in boto3 to use these
credentials. Boto3 will automatically use IAM role credentials if it does
not find credentials in any of the other places listed above.
Best Practices for Configuring Credentials
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you're running on an EC2 instance, use AWS IAM roles. See the
`IAM Roles for Amazon EC2`_ guide for more information on how to set this
up.
If you want to interoperate with multiple AWS SDKs (e.g. Java, JavaScript,
Ruby, PHP, .NET, AWS CLI, Go, C++), use the shared credentials file
(``~/.aws/credentials``). By using the shared credentials file, you can use a
single file for credentials that will work in all the AWS SDKs.
Configuration
=============
In addition to credentials, you can also configure non-credential values. In
general, boto3 follows the same approach used in credential lookup: try various
locations until a value is found. Boto3 uses these sources for configuration:
* Explicitly passed as the ``config`` parameter when creating a client.
* Environment variables
* The ``~/.aws/config`` file.
Environment Variable Configuration
----------------------------------
``AWS_ACCESS_KEY_ID``
The access key for your AWS account.
``AWS_SECRET_ACCESS_KEY``
The secret key for your AWS account.
``AWS_SESSION_TOKEN``
The session key for your AWS account. This is only needed when
you are using temporary credentials. The ``AWS_SECURITY_TOKEN``
environment variable can also be used, but is only supported
for backwards compatibility purposes. ``AWS_SESSION_TOKEN`` is
supported by multiple AWS SDKs besides python.
``AWS_DEFAULT_REGION``
The default region to use, e.g. `us-east-1`.
The default region to use, e.g. ``us-east-1``, ``us-west-2``, etc.
``AWS_PROFILE``
The default credential and configuration profile to use, if any.
The default profile to use, if any. If no value is specified, boto3
will attempt to search the shared credentials file and the config file
for the ``default`` profile.
``AWS_CONFIG_FILE``
The location of the config file used by boto3. By default this
value is ``~/.aws/config``. You only need to set this variable if
you want to change this location.
``AWS_SHARED_CREDENTIALS_FILE``
The location of the shared credentials file. By default this value
is ``~/.aws/credentials``. You only need to set this variable if
you want to change this location.
``AWS_CA_BUNDLE``
The path to a custom certificate bundle to use when establishing
SSL/TLS connections. Boto3 includes a bundled CA bundle it will
use by default, but you can set this environment variable to use
a different CA bundle.
``AWS_METADATA_SERVICE_TIMEOUT``
The number of seconds before a connection to the instance metadata
service should time out. When attempting to retrieve credentials
on an EC2 instance that has been configured with an IAM role,
a connection to the instance metadata service will time out after
1 second by default. If you know you are running on an EC2 instance
with an IAM role configured, you can increase this value if needed.
``AWS_METADATA_SERVICE_NUM_ATTEMPTS``
When attempting to retrieve credentials on an EC2 instance that has
been configured with an IAM role, boto3 will only make one attempt
to retrieve credentials from the instance metadata service before
giving up. If you know your code will be running on an EC2 instance,
you can increase this value to make boto3 retry multiple times
before giving up.
``AWS_DATA_PATH``
A list of **additional** directories to check when loading botocore data.
You typically do not need to set this value. There are two built-in search
paths: ``<botocoreroot>/data/`` and ``~/.aws/models``. Setting this
environment variable indicates additional directories to check first before
falling back to the built-in search paths. Multiple entries should be
separated with the ``os.pathsep`` character, which is ``:`` on Linux and
``;`` on Windows.
Configuration Files
~~~~~~~~~~~~~~~~~~~
There are two configuration files that Boto checks. The first is the
shared credential file, which holds only credentials and is shared between
various SDKs and tools like Boto and the AWS CLI. By default, this
file is located at ``~/.aws/credentials``::
Configuration File
~~~~~~~~~~~~~~~~~~
[default]
# The access key for your AWS account
aws_access_key_id=<YOUR ACCESS KEY ID>
Boto3 will also search the ``~/.aws/config`` file when looking for
configuration values. You can change the location of this file by
setting the ``AWS_CONFIG_FILE`` environment variable.
# The secret key for your AWS account
aws_secret_access_key=<YOUR SECRET KEY>
This file is an INI formatted file that contains at least one
section: ``[default]``. You can create multiple profiles (logical
groups of configuration) by creating sections named ``[profile profile-name]``.
If your profile name has spaces, you'll need to surround this value in quotes:
``[profile "my profile name"]``. Below are all the config variables supported
in the ``~/.aws/config`` file:
Credentials can also be set for individual profiles::
``region``
The default region to use, e.g. ``us-east-1``, ``us-west-2``, etc.
``aws_access_key_id``
The access key to use.
``aws_secret_access_key``
The secret access key to use.
``aws_session_token``
The session token to use. This is typically only needed when using
temporary credentials. Note ``aws_security_token`` is supported for
backwards compatibility.
``ca_bundle``
The CA bundle to use. See the docs above on ``AWS_CA_BUNDLE`` for
more information.
``metadata_service_timeout``
The number of seconds before timing out when retrieving data from the
instance metadata service. See the docs above on
``AWS_METADATA_SERVICE_TIMEOUT`` for more information.
``metadata_service_num_attempts``
The number of attempts to make before giving up when retrieving data from
the instance metadata service. See the docs above on
``AWS_METADATA_SERVICE_NUM_ATTEMPTS`` for more information.
``role_arn``
The ARN of the role you want to assume.
``source_profile``
The profile name that contains credentials we should use for the
initial ``AssumeRole`` call.
``external_id``
Unique identifier to pass when making ``AssumeRole`` calls.
``mfa_serial``
Serial number or ARN of an MFA device to use when assuming a role.
``role_session_name``
The role session name to use when assuming a role. If this value is not
provided, a session name will be automatically generated.
``s3``
Set S3 specific configuration data. You typically will not need to
set these values. Boto3 will automatically switch signature versions
and addressing styles if necessary.
This is a nested configuration value. See the Nested Configuration section
for more information on the format. The sub config keys supported for
``s3`` are:
[dev-profile]
# The access key for your dev-profile account
aws_access_key_id=<YOUR ACCESS KEY ID>
* ``addressing_style``: Specifies which addressing style to use.
This controls whether the bucket name is in the hostname or part of
the URL. Valid values are: ``path``, ``virtual``,
and ``auto``.
* ``signature_version``: Which AWS signature version to use when
signing requests. Valid values are: ``s3`` and ``s3v4``.
# The secret key for your dev-profile account
aws_secret_access_key=<YOUR SECRET KEY>
The second configuration file stores all settings which are not
credentials. Its default location is ``~/.aws/config``::
[default]
# The default region when making requests
region=<REGION NAME>
It also supports profiles, but these are prefixed with the word
``profile`` because this file supports sections other than profiles::
[profile dev-profile]
# The default region when using the dev-profile account
region=<REGION NAME>
.. _IAM Roles for Amazon EC2: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html
.. _Using IAM Roles: http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use.html

View file

@ -138,10 +138,18 @@ Expected Output::
Updating Item
-------------
Using the retrieved item, you can update attributes of the item in the table::
You can then update attributes of the item in the table::
item['age'] = 26
table.put_item(Item=item)
table.update_item(
Key={
'username': 'janedoe',
'last_name': 'Doe'
},
UpdateExpression='SET age = :val1',
ExpressionAttributeValues={
':val1': 26
}
)
Then if you retrieve the item again, it will be updated appropriately::

View file

@ -20,6 +20,7 @@ General Feature Guides
resources
collections
clients
paginators
session
configuration
events
@ -31,3 +32,4 @@ Service Feature Guides
.. toctree::
dynamodb
s3

View file

@ -101,7 +101,7 @@ Getting and setting canned access control values in Boto 3 operates on an ``ACL`
# Boto 3
bucket.Acl().put(ACL='public-read')
obj.put(ACL='public-read')
obj.Acl().put(ACL='public-read')
It's also possible to retrieve the policy grant information::
@ -113,7 +113,7 @@ It's also possible to retrieve the policy grant information::
# Boto 3
acl = bucket.Acl()
for grant in acl.grants:
print(grant['DisplayName'], grant['Permission'])
print(grant['Grantee']['DisplayName'], grant['Permission'])
Boto 3 lacks the grant shortcut methods present in Boto 2.x, but it is still fairly simple to add grantees::

View file

@ -0,0 +1,115 @@
Paginators
==========
Some AWS operations return results that are incomplete and require subsequent
requests in order to obtain the entire result set. The process of sending
subsequent requests to continue where a previous request left off is called
*pagination*. For example, the ``list_objects`` operation of Amazon S3
returns up to 1000 objects at a time, and you must send subsequent requests
with the appropriate ``Marker`` in order to retrieve the next *page* of
results.
*Paginators* are a feature of boto3 that act as an abstraction over the
process of iterating over an entire result set of a truncated API operation.
Creating Paginators
-------------------
Paginators are created via the ``get_paginator()`` method of a boto3
client. The ``get_paginator()`` method accepts an operation name and returns
a reusable ``Paginator`` object. You then call the ``paginate`` method of the
Paginator, passing in any relevant operation parameters to apply to the
underlying API operation. The ``paginate`` method then returns an iterable
``PageIterator``::
import boto3
# Create a client
client = boto3.client('s3', region_name='us-west-2')
# Create a reusable Paginator
paginator = client.get_paginator('list_objects')
# Create a PageIterator from the Paginator
page_iterator = paginator.paginate(Bucket='my-bucket')
for page in page_iterator:
print(page['Contents'])
Customizing Page Iterators
~~~~~~~~~~~~~~~~~~~~~~~~~~
You must call the ``paginate`` method of a Paginator in order to iterate over
the pages of API operation results. The ``paginate`` method accepts a
``PaginationConfig`` named argument that can be used to customize the
pagination::
paginator = client.get_paginator('list_objects')
page_iterator = paginator.paginate(Bucket='my-bucket',
PaginationConfig={'MaxItems': 10})
``MaxItems``
Limits the total number of items returned while paginating.
``StartingToken``
Can be used to modify the starting marker or token of a paginator. This
argument is useful for resuming pagination from a previous token or
starting pagination at a known position (see the example below).
``PageSize``
Controls the number of items returned per page of each result.
.. note::
Services may choose to return more or fewer items than specified in the
``PageSize`` argument depending on the service, the operation, or the
resource you are paginating.
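For example, a minimal sketch of resuming pagination from a token
captured on a previous run (the token value here is hypothetical)::

paginator = client.get_paginator('list_objects')
page_iterator = paginator.paginate(
    Bucket='my-bucket',
    PaginationConfig={
        'PageSize': 100,
        'StartingToken': 'saved-token-from-previous-run',
    })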
Filtering results
-----------------
Many Paginators can be filtered server-side with options that are passed
through to each underlying API call. For example,
:py:meth:`S3.Paginator.list_objects.paginate` accepts a ``Prefix`` parameter
used to filter the paginated results by prefix server-side before sending them
to the client::
import boto3
client = boto3.client('s3', region_name='us-west-2')
paginator = client.get_paginator('list_objects')
operation_parameters = {'Bucket': 'my-bucket',
'Prefix': 'foo/baz'}
page_iterator = paginator.paginate(**operation_parameters)
for page in page_iterator:
print(page['Contents'])
Filtering Results with JMESPath
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
`JMESPath <http://jmespath.org>`_ is a query language for JSON that can be used
directly on paginated results. You can filter results client-side using
JMESPath expressions that are applied to each page of results through the
``search`` method of a ``PageIterator``.
.. code-block:: python
paginator = client.get_paginator('list_objects')
page_iterator = paginator.paginate(Bucket='my-bucket')
filtered_iterator = page_iterator.search("Contents[?Size > `100`][]")
for key_data in filtered_iterator:
print(key_data)
When filtering with JMESPath expressions, each page of results that is yielded
by the paginator is mapped through the JMESPath expression. If a JMESPath
expression returns a single value that is not an array, that value is yielded
directly. If the result of applying the JMESPath expression to a page of
results is a list, then each value of the list is yielded individually
(essentially implementing a flat map). For example, in the above expression,
each key that has a ``Size`` greater than ``100`` is yielded by the
``filtered_iterator``.
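To make the flat-map behavior concrete, here is a small sketch that applies
the same expression to a single page using the standalone ``jmespath``
package (the sample data is made up):
.. code-block:: python
    import jmespath
    page = {'Contents': [{'Key': 'small', 'Size': 50},
                         {'Key': 'large', 'Size': 150}]}
    # The expression yields a list, so a PageIterator's search() would
    # yield each matching element individually.
    print(jmespath.search("Contents[?Size > `100`][]", page))
    # [{'Key': 'large', 'Size': 150}]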

View file

@ -88,7 +88,7 @@ the ``load`` or ``reload`` action. Examples of attributes::
# S3 Object
obj.last_modified
obj.md5
obj.e_tag
.. warning::

186
docs/source/guide/s3.rst Normal file
View file

@ -0,0 +1,186 @@
.. _s3_guide:
S3
==
By following this guide, you will learn how to use features of the S3 client
that are unique to the SDK, specifically the generation and use of pre-signed
URLs, pre-signed POSTs, and the use of the transfer manager. You will also
learn how to use a few common, but important, settings specific to S3.
Changing the Addressing Style
-----------------------------
S3 supports two different ways to address a bucket: Virtual Host Style and Path
Style. This guide won't cover all the details of `virtual host addressing`_, but
you can read up on that in S3's docs. In general, the SDK will handle the
decision of what style to use for you, but there are some cases where you may
want to set it yourself. For instance, if you have a CORS-configured bucket
that is only a few hours old, you may need to use path style addressing for
generating pre-signed POSTs and URLs until the necessary DNS changes have time
to propagate.
Note: if you set the addressing style to path style, you HAVE to set the correct
region.
The preferred way to set the addressing style is to use the ``addressing_style``
config parameter when you create your client or resource::
import boto3
from botocore.client import Config
# Other valid options here are 'auto' (default) and 'virtual'
s3 = boto3.client('s3', 'us-west-2', config=Config(s3={'addressing_style': 'path'}))
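The same ``Config`` applies when creating a resource (a sketch under the same
assumptions)::
    s3_resource = boto3.resource(
        's3', 'us-west-2', config=Config(s3={'addressing_style': 'path'}))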
Using the Transfer Manager
--------------------------
The `s3 transfer manager`_ provides you with less painful multipart uploads and
downloads. Its functions are automatically added to the client when you create
it, so there is no need to create your own transfer manager. Below you will see
several examples of how to use it.
The methods on the base client are :py:meth:`S3.Client.upload_file` and
:py:meth:`S3.Client.download_file`::
import boto3
# Get the service client
s3 = boto3.client('s3')
# Upload tmp.txt to bucket-name
s3.upload_file("tmp.txt", "bucket-name", "tmp.txt")
# Download tmp.txt as tmp2.txt
s3.download_file("bucket-name", "tmp.txt", "tmp2.txt")
If you happen to be using the resource model, the same functions are accessed
through :py:meth:`S3.Object.upload_file` and
:py:meth:`S3.Object.download_file`::
import boto3
# Get the service resource
s3 = boto3.resource('s3')
# Get bucket-name
bucket = s3.Bucket('bucket-name')
# Get the object representation
obj = bucket.Object('tmp.txt')
# Upload tmp.txt
obj.upload_file('tmp.txt')
# Download tmp.txt as tmp2.txt
obj.download_file('tmp2.txt')
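The transfer methods also accept optional ``ExtraArgs``, ``Callback``, and
``Config`` arguments (exercised by this release's tests). A hedged sketch of
progress reporting follows; the bucket name and callback are illustrative::
    import boto3
    s3 = boto3.client('s3')
    def progress(bytes_transferred):
        # Invoked with the number of bytes moved in each chunk.
        print("transferred another %d bytes" % bytes_transferred)
    s3.upload_file("tmp.txt", "bucket-name", "tmp.txt", Callback=progress)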
Generating Presigned URLs
-------------------------
Pre-signed URLs allow you to give your users access to a specific object in your
bucket without requiring them to have AWS security credentials or permissions.
To generate a pre-signed URL, use the
:py:meth:`S3.Client.generate_presigned_url` method::
import boto3
import requests
# Get the service client.
s3 = boto3.client('s3')
# Generate the URL to get 'key-name' from 'bucket-name'
url = s3.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': 'bucket-name',
'Key': 'key-name'
}
)
# Use the URL to perform the GET operation. You can use any method you like
# to send the GET, but we will use requests here to keep things simple.
response = requests.get(url)
If your bucket requires the use of signature version 4, you can elect to use it
to sign your URL. This does not fundamentally change how you use the generator;
you only need to make sure that the client used has signature version 4
configured::
import boto3
from botocore.client import Config
# Get the service client with sigv4 configured
s3 = boto3.client('s3', config=Config(signature_version='s3v4'))
# Generate the URL to get 'key-name' from 'bucket-name'
url = s3.generate_presigned_url(
ClientMethod='get_object',
Params={
'Bucket': 'bucket-name',
'Key': 'key-name'
}
)
Note: if your bucket is new and you require CORS, it is advised that
you use path style addressing (which is set by default in signature version 4).
Generating Presigned POSTs
--------------------------
Much like pre-signed URLs, pre-signed POSTs allow you to give write access to a
user without giving them AWS credentials. The information you need to make the
POST is returned by the :py:meth:`S3.Client.generate_presigned_post` method::
import boto3
import requests
# Get the service client
s3 = boto3.client('s3')
# Generate the POST attributes
post = s3.generate_presigned_post(
Bucket='bucket-name',
Key='key-name'
)
# Use the returned values to POST an object. Note that you need to use ALL
# of the returned fields in your post. You can use any method you like to
# send the POST, but we will use requests here to keep things simple.
files = {"file": "file_content"}
response = requests.post(post["url"], data=post["fields"], files=files)
When generating these POSTs, you may wish to auto-fill certain fields or
constrain what your users submit. You can do this by providing those fields and
conditions when you generate the POST data::
import boto3
# Get the service client
s3 = boto3.client('s3')
# Make sure everything posted is publicly readable
fields = {"acl": "public-read"}
# Ensure that the ACL isn't changed and restrict the content length
# to between 10 and 100 bytes.
conditions = [
{"acl": "public-read"},
["content-length-range", 10, 100]
]
# Generate the POST attributes, passing in the auto-filled fields and
# the policy conditions defined above.
post = s3.generate_presigned_post(
    Bucket='bucket-name',
    Key='key-name',
    Fields=fields,
    Conditions=conditions
)
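A usage sketch (same hypothetical bucket and key): the returned fields now
include the auto-filled ``acl``, and the policy rejects posts that alter it
or fall outside the declared length range::
    import requests
    files = {"file": "0123456789"}  # 10 bytes, satisfying the range condition
    response = requests.post(post["url"], data=post["fields"], files=files)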
Note: if your bucket is new and you require CORS, it is advised that
you use path style addressing (which is set by default in signature version 4).
.. _s3 transfer manager: http://boto3.readthedocs.org/en/latest/reference/customizations/s3.html#module-boto3.s3.transfer
.. _virtual host addressing: http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html

4
readthedocs.yml Normal file
View file

@ -0,0 +1,4 @@
formats:
- none
python:
setup_py_install: true

View file

@ -1,5 +1,5 @@
-e git://github.com/boto/botocore.git@develop#egg=botocore
-e git://github.com/boto/jmespath.git@develop#egg=jmespath
nose==1.3.3
mock==1.0.1
mock==1.3.0
wheel==0.24.0

208
scripts/new-change Executable file
View file

@ -0,0 +1,208 @@
#!/usr/bin/env python
"""Generate a new changelog entry.
Usage
=====
To generate a new changelog entry::
scripts/new-change
This will open up a file in your editor (via the ``EDITOR`` env var).
You'll see this template::
# Type should be one of: feature, bugfix
type:
# Category is the high level feature area.
# This can be a service identifier (e.g ``s3``),
# or something like: Paginator.
category:
# A brief description of the change. You can
# use github style references to issues such as
# "fixes #489", "boto/boto3#100", etc. These
# will get automatically replaced with the correct
# link.
description:
Fill in the appropriate values, save and exit the editor.
Make sure to commit these changes as part of your pull request.
If, when your editor is open, you decide you don't want to add a changelog
entry, save an empty file and no entry will be generated.
You can then use the ``scripts/gen-changelog`` to generate the
CHANGELOG.rst file.
"""
import os
import re
import sys
import json
import string
import random
import tempfile
import subprocess
import argparse
VALID_CHARS = set(string.ascii_letters + string.digits)
CHANGES_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))),
'.changes'
)
TEMPLATE = """\
# Type should be one of: feature, bugfix
type: {change_type}
# Category is the high level feature area.
# This can be a service identifier (e.g ``s3``),
# or something like: Paginator.
category: {category}
# A brief description of the change. You can
# use github style references to issues such as
# "fixes #489", "boto/boto3#100", etc. These
# will get automatically replaced with the correct
# link.
description: {description}
"""
def new_changelog_entry(args):
# Changelog values come from one of two places.
# Either all values are provided on the command line,
# or we open a text editor and let the user enter
# their values.
if all_values_provided(args):
parsed_values = {
'type': args.change_type,
'category': args.category,
'description': args.description,
}
else:
parsed_values = get_values_from_editor(args)
if has_empty_values(parsed_values):
sys.stderr.write(
"Empty changelog values received, skipping entry creation.\n")
return 1
replace_issue_references(parsed_values, args.repo)
write_new_change(parsed_values)
return 0
def has_empty_values(parsed_values):
return not (parsed_values.get('type') and
parsed_values.get('category') and
parsed_values.get('description'))
def all_values_provided(args):
return args.change_type and args.category and args.description
def get_values_from_editor(args):
with tempfile.NamedTemporaryFile('w') as f:
contents = TEMPLATE.format(
change_type=args.change_type,
category=args.category,
description=args.description,
)
f.write(contents)
f.flush()
env = os.environ
editor = env.get('VISUAL', env.get('EDITOR', 'vim'))
p = subprocess.Popen('%s %s' % (editor, f.name), shell=True)
p.communicate()
with open(f.name) as f:
filled_in_contents = f.read()
parsed_values = parse_filled_in_contents(filled_in_contents)
return parsed_values
def replace_issue_references(parsed, repo_name):
description = parsed['description']
def linkify(match):
number = match.group()[1:]
return (
'`%s <https://github.com/%s/issues/%s>`__' % (
match.group(), repo_name, number))
new_description = re.sub(r'#\d+', linkify, description)
parsed['description'] = new_description
def write_new_change(parsed_values):
if not os.path.isdir(CHANGES_DIR):
os.makedirs(CHANGES_DIR)
# Assume that new changes go into the next release.
dirname = os.path.join(CHANGES_DIR, 'next-release')
if not os.path.isdir(dirname):
os.makedirs(dirname)
# Need to generate a unique filename for this change.
# We'll try a couple of things until we get a unique match.
category = parsed_values['category']
short_summary = ''.join(filter(lambda x: x in VALID_CHARS, category))
filename = '{type_name}-{summary}'.format(
type_name=parsed_values['type'],
summary=short_summary)
possible_filename = os.path.join(dirname, filename) + '.json'
while os.path.isfile(possible_filename):
possible_filename = os.path.join(
dirname, '%s-%s.json' % (filename, str(random.randint(1, 100000))))
with open(possible_filename, 'w') as f:
f.write(json.dumps(parsed_values, indent=2))
def parse_filled_in_contents(contents):
"""Parse filled in file contents and returns parsed dict.
Return value will be::
{
"type": "bugfix",
"category": "category",
"description": "This is a description"
}
"""
if not contents.strip():
return {}
parsed = {}
lines = iter(contents.splitlines())
for line in lines:
line = line.strip()
if line.startswith('#'):
continue
if 'type' not in parsed and line.startswith('type:'):
parsed['type'] = line.split(':')[1].strip()
elif 'category' not in parsed and line.startswith('category:'):
parsed['category'] = line.split(':')[1].strip()
elif 'description' not in parsed and line.startswith('description:'):
# Assume that everything until the end of the file is part
# of the description, so we can break once we pull in the
# remaining lines.
first_line = line.split(':')[1].strip()
full_description = '\n'.join([first_line] + list(lines))
parsed['description'] = full_description.strip()
break
return parsed
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--type', dest='change_type',
default='', choices=('bugfix', 'feature'))
parser.add_argument('-c', '--category', dest='category',
default='')
parser.add_argument('-d', '--description', dest='description',
default='')
parser.add_argument('-r', '--repo', default='boto/boto3',
help='Optional repo name, e.g: boto/boto3')
args = parser.parse_args()
sys.exit(new_changelog_entry(args))
if __name__ == '__main__':
main()

View file

@ -3,6 +3,6 @@ universal = 1
[metadata]
requires-dist =
botocore>=1.3.0,<1.4.0
botocore>=1.4.1,<1.5.0
jmespath>=0.7.1,<1.0.0
futures==2.2.0; python_version=="2.6" or python_version=="2.7"
futures>=2.2.0,<4.0.0; python_version=="2.6" or python_version=="2.7"

View file

@ -15,7 +15,7 @@ VERSION_RE = re.compile(r'''__version__ = ['"]([0-9.]+)['"]''')
requires = [
'botocore>=1.3.0,<1.4.0',
'botocore>=1.4.1,<1.5.0',
'jmespath>=0.7.1,<1.0.0',
]
@ -43,12 +43,14 @@ setup(
package_data={
'boto3': [
'data/aws/resources/*.json',
'examples/*.rst'
]
},
include_package_data=True,
install_requires=requires,
extras_require={
':python_version=="2.6" or python_version=="2.7"': ['futures==2.2.0']
':python_version=="2.6" or python_version=="2.7"': [
'futures>=2.2.0,<4.0.0']
},
license="Apache License 2.0",
classifiers=[
@ -62,5 +64,6 @@ setup(
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)

View file

@ -59,21 +59,21 @@ class BaseDocsFunctionalTests(unittest.TestCase):
return contents[:end_index]
def get_request_parameter_document_block(self, param_name, contents):
start_param_document = ' :type %s:' % param_name
start_param_document = ':type %s:' % param_name
start_index = contents.find(start_param_document)
self.assertNotEqual(start_index, -1, 'Param is not found in contents')
contents = contents[start_index:]
end_index = contents.find(' :type', len(start_param_document))
end_index = contents.find(':type', len(start_param_document))
return contents[:end_index]
def get_response_parameter_document_block(self, param_name, contents):
start_param_document = ' **Response Structure**'
start_param_document = '**Response Structure**'
start_index = contents.find(start_param_document)
self.assertNotEqual(start_index, -1, 'There is no response structure')
start_param_document = ' - **%s**' % param_name
start_param_document = '- **%s**' % param_name
start_index = contents.find(start_param_document)
self.assertNotEqual(start_index, -1, 'Param is not found in contents')
contents = contents[start_index:]
end_index = contents.find(' - **', len(start_param_document))
end_index = contents.find('- **', len(start_param_document))
return contents[:end_index]

View file

@ -42,19 +42,19 @@ class TestDynamoDBCustomizations(BaseDocsFunctionalTests):
request_syntax_contents = self.get_request_syntax_document_block(
method_contents)
self.assert_contains_lines_in_order([
' response = table.put_item(',
' Item={',
(' \'string\': \'string\'|123|Binary(b\'bytes\')'
'response = table.put_item(',
'Item={',
('\'string\': \'string\'|123|Binary(b\'bytes\')'
'|True|None|set([\'string\'])|set([123])|'
'set([Binary(b\'bytes\')])|[]|{}'),
' },',
' Expected={',
' \'string\': {',
(' \'Value\': \'string\'|123'
'},',
'Expected={',
'\'string\': {',
('\'Value\': \'string\'|123'
'|Binary(b\'bytes\')|True|None|set([\'string\'])'
'|set([123])|set([Binary(b\'bytes\')])|[]|{},'),
' \'AttributeValueList\': [',
(' \'string\'|123|Binary(b\'bytes\')'
'\'AttributeValueList\': [',
('\'string\'|123|Binary(b\'bytes\')'
'|True|None|set([\'string\'])|set([123])|'
'set([Binary(b\'bytes\')])|[]|{},')],
request_syntax_contents)
@ -63,22 +63,22 @@ class TestDynamoDBCustomizations(BaseDocsFunctionalTests):
response_syntax_contents = self.get_response_syntax_document_block(
method_contents)
self.assert_contains_lines_in_order([
' {',
' \'Attributes\': {',
(' \'string\': \'string\'|123|'
'{',
'\'Attributes\': {',
('\'string\': \'string\'|123|'
'Binary(b\'bytes\')|True|None|set([\'string\'])|'
'set([123])|set([Binary(b\'bytes\')])|[]|{}'),
' },'],
'},'],
response_syntax_contents)
# Make sure the request parameter is documented correctly.
request_param_contents = self.get_request_parameter_document_block(
'Item', method_contents)
self.assert_contains_lines_in_order([
' :type Item: dict',
' :param Item: **[REQUIRED]**',
' - *(string) --*',
(' - *(valid DynamoDB type) --* - The value of the '
':type Item: dict',
':param Item: **[REQUIRED]**',
'- *(string) --*',
('- *(valid DynamoDB type) --* - The value of the '
'attribute. The valid value types are listed in the '
':ref:`DynamoDB Reference Guide<ref_valid_dynamodb_types>`.')],
request_param_contents
@ -88,9 +88,9 @@ class TestDynamoDBCustomizations(BaseDocsFunctionalTests):
response_param_contents = self.get_response_parameter_document_block(
'Attributes', method_contents)
self.assert_contains_lines_in_order([
' - **Attributes** *(dict) --*',
' - *(string) --*',
(' - *(valid DynamoDB type) --* - The value of '
'- **Attributes** *(dict) --*',
'- *(string) --*',
('- *(valid DynamoDB type) --* - The value of '
'the attribute. The valid value types are listed in the '
':ref:`DynamoDB Reference Guide<ref_valid_dynamodb_types>`.')],
response_param_contents)
@ -106,23 +106,23 @@ class TestDynamoDBCustomizations(BaseDocsFunctionalTests):
request_syntax_contents = self.get_request_syntax_document_block(
method_contents)
self.assert_contains_lines_in_order([
' response = table.query(',
(' FilterExpression=Attr(\'myattribute\').'
'response = table.query(',
('FilterExpression=Attr(\'myattribute\').'
'eq(\'myvalue\'),'),
(' KeyConditionExpression=Key(\'mykey\')'
('KeyConditionExpression=Key(\'mykey\')'
'.eq(\'myvalue\'),')],
request_syntax_contents)
# Make sure the request parameter is documented correctly.
self.assert_contains_lines_in_order([
(' :type FilterExpression: condition from '
(':type FilterExpression: condition from '
':py:class:`boto3.dynamodb.conditions.Attr` method'),
(' :param FilterExpression: The condition(s) an '
(':param FilterExpression: The condition(s) an '
'attribute(s) must meet. Valid conditions are listed in '
'the :ref:`DynamoDB Reference Guide<ref_dynamodb_conditions>`.'),
(' :type KeyConditionExpression: condition from '
(':type KeyConditionExpression: condition from '
':py:class:`boto3.dynamodb.conditions.Key` method'),
(' :param KeyConditionExpression: The condition(s) a '
(':param KeyConditionExpression: The condition(s) a '
'key(s) must meet. Valid conditions are listed in the '
':ref:`DynamoDB Reference Guide<ref_dynamodb_conditions>`.')],
method_contents)

View file

@ -0,0 +1,35 @@
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests.functional.docs import BaseDocsFunctionalTests
from boto3.session import Session
from boto3.docs.service import ServiceDocumenter
class TestInstanceDeleteTags(BaseDocsFunctionalTests):
def setUp(self):
self.documenter = ServiceDocumenter(
'ec2', session=Session(region_name='us-east-1'))
self.generated_contents = self.documenter.document_service()
self.generated_contents = self.generated_contents.decode('utf-8')
def test_delete_tags_method_is_documented(self):
contents = self.get_class_document_block(
'EC2.Instance', self.generated_contents)
method_contents = self.get_method_document_block(
'delete_tags', contents)
self.assert_contains_lines_in_order([
'response = instance.delete_tags(',
'DryRun=True|False,',
'Tags=[',
], method_contents)

View file

@ -0,0 +1,33 @@
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from tests import unittest
from boto3.session import Session
from boto3.resources.collection import ResourceCollection
class TestCollection(unittest.TestCase):
def setUp(self):
self.session = Session(
aws_access_key_id='dummy', aws_secret_access_key='dummy',
region_name='us-east-1')
# Pick an arbitrary resource.
self.ec2_resource = self.session.resource('ec2')
def test_can_use_collection_methods(self):
self.assertIsInstance(
self.ec2_resource.instances.all(), ResourceCollection)
def test_can_chain_methods(self):
self.assertIsInstance(
self.ec2_resource.instances.all().page_size(5), ResourceCollection)

View file

@ -0,0 +1,36 @@
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import unittest
import boto3.session
from botocore.stub import Stubber
class TestInstanceDeleteTags(unittest.TestCase):
def setUp(self):
self.session = boto3.session.Session(region_name='us-west-2')
self.service_resource = self.session.resource('ec2')
self.instance_resource = self.service_resource.Instance('i-abc123')
def test_delete_tags_injected(self):
self.assertTrue(hasattr(self.instance_resource, 'delete_tags'),
'delete_tags was not injected onto Instance resource.')
def test_delete_tags(self):
stubber = Stubber(self.instance_resource.meta.client)
stubber.add_response('delete_tags', {})
stubber.activate()
response = self.instance_resource.delete_tags(Tags=[{'Key': 'foo'}])
stubber.assert_no_pending_responses()
self.assertEqual(response, {})
stubber.deactivate()

View file

@ -11,6 +11,7 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import boto3
from boto3.exceptions import ResourceNotExistsError
import botocore.session
from tests import unittest
@ -36,3 +37,19 @@ class TestResourceCustomization(unittest.TestCase):
resource = session.resource('s3')
self.assertTrue(hasattr(resource, 'my_method'))
self.assertEqual(resource.my_method('anything'), 'anything')
class TestSessionErrorMessages(unittest.TestCase):
def test_has_good_error_message_when_no_resource(self):
bad_resource_name = 'doesnotexist'
err_regex = (
'%s.*resource does not exist.' % bad_resource_name
)
with self.assertRaisesRegexp(ResourceNotExistsError, err_regex):
boto3.resource(bad_resource_name)
class TestGetAvailableSubresources(unittest.TestCase):
def test_s3_available_subresources_exists(self):
s3 = boto3.resource('s3')
self.assertTrue(hasattr(s3, 'get_available_subresources'))

View file

@ -12,6 +12,8 @@
# language governing permissions and limitations under the License.
from tests import unittest
from botocore.stub import Stubber
import boto3.session
@ -43,3 +45,44 @@ class TestS3MethodInjection(unittest.TestCase):
'upload_file was not injected onto S3 object')
self.assertTrue(hasattr(obj, 'download_file'),
'download_file was not injected onto S3 object')
class TestS3ObjectSummary(unittest.TestCase):
def setUp(self):
self.session = boto3.session.Session(
aws_access_key_id='foo', aws_secret_access_key='bar',
region_name='us-west-2')
self.s3 = self.session.resource('s3')
self.obj_summary = self.s3.ObjectSummary('my_bucket', 'my_key')
self.obj_summary_size = 12
self.stubber = Stubber(self.s3.meta.client)
self.stubber.activate()
self.stubber.add_response(
method='head_object',
service_response={
'ContentLength': self.obj_summary_size, 'ETag': 'my-etag',
'ContentType': 'binary'
},
expected_params={
'Bucket': 'my_bucket',
'Key': 'my_key'
}
)
def tearDown(self):
self.stubber.deactivate()
def test_has_load(self):
self.assertTrue(hasattr(self.obj_summary, 'load'),
'load() was not injected onto ObjectSummary resource.')
def test_autoloads_correctly(self):
# In HeadObject the parameter returned is ContentLength; this
# should get mapped to Size of ListObjects since the resource uses
# the shape returned by ListObjects.
self.assertEqual(self.obj_summary.size, self.obj_summary_size)
def test_cannot_access_other_non_related_parameters(self):
# Even though a HeadObject was used to load this, it should
# only expose the attributes from its shape defined in ListObjects.
self.assertFalse(hasattr(self.obj_summary, 'content_length'))

View file

@ -32,3 +32,17 @@ class TestSession(unittest.TestCase):
self.session.events.emit('myevent', my_list=initial_list)
# Ensure that the registered handler was called.
self.assertEqual(initial_list, ['my_handler called'])
def test_can_access_region_property(self):
session = boto3.session.Session(region_name='us-west-1')
self.assertEqual(session.region_name, 'us-west-1')
def test_get_available_partitions(self):
partitions = self.session.get_available_partitions()
self.assertIsInstance(partitions, list)
self.assertTrue(partitions)
def test_get_available_regions(self):
regions = self.session.get_available_regions('s3')
self.assertIsInstance(regions, list)
self.assertTrue(regions)

View file

@ -219,6 +219,24 @@ class TestS3Resource(unittest.TestCase):
contents = bucket.Object('mp-test.txt').get()['Body'].read()
self.assertEqual(contents, b'hello, world!')
def test_s3_batch_delete(self):
# Create the bucket
bucket = self.create_bucket_resource(self.bucket_name)
bucket.wait_until_exists()
bucket.Versioning().enable()
# Create several versions of an object
obj = bucket.Object('test.txt')
for i in range(10):
obj.put(Body="Version %s" % i)
# Delete all the versions of the object
bucket.object_versions.all().delete()
versions = list(bucket.object_versions.all())
self.assertEqual(len(versions), 0)
class TestS3Transfers(unittest.TestCase):
"""Tests for the high level boto3.s3.transfer module."""
@ -466,6 +484,32 @@ class TestS3Transfers(unittest.TestCase):
download_path)
assert_files_equal(filename, download_path)
def test_download_file_with_directory_not_exist(self):
transfer = self.create_s3_transfer()
self.client.put_object(Bucket=self.bucket_name,
Key='foo.txt',
Body=b'foo')
self.addCleanup(self.delete_object, 'foo.txt')
download_path = os.path.join(self.files.rootdir, 'a', 'b', 'c',
'downloaded.txt')
with self.assertRaises(IOError):
transfer.download_file(self.bucket_name, 'foo.txt', download_path)
def test_download_large_file_directory_not_exist(self):
transfer = self.create_s3_transfer()
filename = self.files.create_file_with_size(
'foo.txt', filesize=20 * 1024 * 1024)
with open(filename, 'rb') as f:
self.client.put_object(Bucket=self.bucket_name,
Key='foo.txt',
Body=f)
self.addCleanup(self.delete_object, 'foo.txt')
download_path = os.path.join(self.files.rootdir, 'a', 'b', 'c',
'downloaded.txt')
with self.assertRaises(IOError):
transfer.download_file(self.bucket_name, 'foo.txt', download_path)
def test_transfer_methods_through_client(self):
# This is really just a sanity check to ensure that the interface
# from the clients work. We're not exhaustively testing through

View file

@ -0,0 +1,3 @@
** This is an example **
This is the contents!

View file

@ -0,0 +1,3 @@
**Other example**
This is for another service

View file

@ -38,17 +38,25 @@ class BaseDocsTest(unittest.TestCase):
self.version_dirs, 'paginators-1.json')
self.resource_model_file = os.path.join(
self.version_dirs, 'resources-1.json')
self.example_model_file = os.path.join(
self.version_dirs, 'examples-1.json')
self.json_model = {}
self.waiter_json_model = {}
self.paginator_json_model = {}
self.resource_json_model = {}
self._setup_models()
self._write_models()
self.doc_name = 'MyDoc'
self.doc_structure = DocumentStructure(self.doc_name)
self.setup_client_and_resource()
def tearDown(self):
shutil.rmtree(self.root_dir)
def setup_client_and_resource(self):
self._write_models()
self.loader = Loader(extra_search_paths=[self.root_dir])
self.botocore_session = botocore.session.get_session()
self.botocore_session.register_component('data_loader', self.loader)
@ -57,9 +65,6 @@ class BaseDocsTest(unittest.TestCase):
self.client = self.session.client('myservice', 'us-east-1')
self.resource = self.session.resource('myservice', 'us-east-1')
def tearDown(self):
shutil.rmtree(self.root_dir)
def _setup_models(self):
self.json_model = {
'metadata': {
@ -94,6 +99,25 @@ class BaseDocsTest(unittest.TestCase):
}
}
self.example_json_model = {
"version": 1,
"examples": {
"SampleOperation": [{
"id": "sample-id",
"title": "sample-title",
"description": "Sample Description.",
"input": OrderedDict([
("Foo", "bar"),
]),
"comments": {
"input": {
"Foo": "biz"
},
}
}]
}
}
self.waiter_json_model = {
"version": 2,
"waiters": {
@ -138,7 +162,7 @@ class BaseDocsTest(unittest.TestCase):
"type": "Sample",
"identifiers": [
{"target": "Name", "source": "response",
"path": "Samples[].Name"}
"path": "Samples[].Name"}
],
"path": "Samples[]"
}
@ -241,6 +265,9 @@ class BaseDocsTest(unittest.TestCase):
with open(self.model_file, 'w') as f:
json.dump(self.json_model, f)
with open(self.example_model_file, 'w') as f:
json.dump(self.example_json_model, f)
def add_shape(self, shape):
shape_name = list(shape.keys())[0]
self.json_model['shapes'][shape_name] = shape[shape_name]

View file

@ -0,0 +1,77 @@
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.hooks import HierarchicalEmitter
from tests.unit.docs import BaseDocsTest
from boto3.docs.attr import document_attribute
class TestDocumentAttribute(BaseDocsTest):
def setUp(self):
super(TestDocumentAttribute, self).setUp()
self.add_shape({
'NestedStruct': {
'type': 'structure',
'members': {
'NestedStrAttr': {
'shape': 'String',
'documentation': 'Documents a nested string attribute'
}
}
}
})
self.add_shape({
'ResourceShape': {
'type': 'structure',
'members': {
'StringAttr': {
'shape': 'String',
'documentation': 'Documents a string attribute'
},
'NestedAttr': {
'shape': 'NestedStruct',
'documentation': 'Documents a nested attribute'
}
}
}
})
self.setup_client_and_resource()
self.event_emitter = HierarchicalEmitter()
self.service_name = 'myservice'
self.resource_name = 'myresource'
self.service_model = self.client.meta.service_model
def test_document_attr_scalar(self):
shape_model = self.service_model.shape_for('ResourceShape')
attr_name = 'StringAttr'
document_attribute(
self.doc_structure, self.service_name, self.resource_name,
attr_name, self.event_emitter, shape_model.members[attr_name])
self.assert_contains_lines_in_order([
'.. py:attribute:: StringAttr',
' - *(string) --* Documents a string attribute'
])
def test_document_attr_structure(self):
shape_model = self.service_model.shape_for('ResourceShape')
attr_name = 'NestedAttr'
document_attribute(
self.doc_structure, self.service_name, self.resource_name,
attr_name, self.event_emitter, shape_model.members[attr_name])
self.assert_contains_lines_in_order([
'.. py:attribute:: NestedAttr',
' - *(dict) --* Documents a nested attribute',
(' - **NestedStrAttr** *(string) --* Documents a nested '
'string attribute')
])

View file

@ -78,7 +78,7 @@ class TestResourceDocstrings(BaseDocsTest):
help(self.resource.Sample('id').__class__.foo)
attribute_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order([
' *(string)* Documents Foo'
' - *(string) --* Documents Foo'
], attribute_docstring)
def test_identifier_help(self):
@ -175,6 +175,21 @@ class TestResourceDocstrings(BaseDocsTest):
' :returns: A list of Sample resources',
], collection_method_docstring)
def test_collection_chaining_help(self):
collection = self.resource.samples.all()
with mock.patch('sys.stdout', six.StringIO()) as mock_stdout:
help(collection.all)
collection_method_docstring = mock_stdout.getvalue()
self.assert_contains_lines_in_order([
(' Creates an iterable of all Sample resources in the '
'collection.'),
' **Request Syntax** ',
' ::',
' sample_iterator = myservice.samples.all()',
' :rtype: list(:py:class:`myservice.Sample`)',
' :returns: A list of Sample resources',
], collection_method_docstring)
def test_batch_action_help(self):
with mock.patch('sys.stdout', six.StringIO()) as mock_stdout:
help(self.resource.samples.operate)

View file

@ -48,9 +48,9 @@ class TestResourceDocumenter(BaseDocsTest):
' .. rst-class:: admonition-title',
' Attributes',
' .. py:attribute:: bar',
' *(string)* Documents Bar',
' - *(string) --* Documents Bar',
' .. py:attribute:: foo',
' *(string)* Documents Foo',
' - *(string) --* Documents Foo',
' .. rst-class:: admonition-title',
' Actions',
' .. py:method:: load()',

View file

@ -11,7 +11,9 @@
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import mock
import boto3
from tests.unit.docs import BaseDocsTest
from boto3.docs.service import ServiceDocumenter
@ -32,6 +34,10 @@ class TestServiceDocumenter(BaseDocsTest):
'.. py:class:: MyService.Client',
' These are the available methods:',
' * :py:meth:`sample_operation`',
' **Examples** ',
' Sample Description.',
' ::',
' response = client.sample_operation(',
'==========',
'Paginators',
'==========',
@ -111,3 +117,25 @@ class TestServiceDocumenter(BaseDocsTest):
service_documenter = ServiceDocumenter('myservice', self.session)
contents = service_documenter.document_service().decode('utf-8')
self.assertNotIn('Waiters', contents)
def test_creates_correct_path_to_examples_based_on_service_name(self):
path = os.sep.join([os.path.dirname(boto3.__file__),
'examples', 'myservice.rst'])
path = os.path.realpath(path)
with mock.patch('os.path.isfile') as isfile:
isfile.return_value = False
s = ServiceDocumenter('myservice', self.session)
s.document_service()
self.assertEqual(
isfile.call_args_list[-1],
mock.call(path))
def test_injects_examples_when_found(self):
examples_path = os.sep.join([os.path.dirname(__file__), '..', 'data',
'examples'])
service_documenter = ServiceDocumenter(
'myservice', self.session)
service_documenter.EXAMPLE_PATH = examples_path
contents = service_documenter.document_service().decode('utf-8')
self.assertIn('This is an example', contents)
self.assertNotIn('This is for another service', contents)

View file

@ -166,6 +166,67 @@ class BaseTransformationTest(unittest.TestCase):
},
])
def test_never_send_more_than_max_batch_size(self):
# Suppose the server sends back a response that indicates that
# all the items were unprocessed.
self.client.batch_write_item.side_effect = [
{
'UnprocessedItems': {
self.table_name: [
{'PutRequest': {'Item': {'Hash': 'foo1'}}},
{'PutRequest': {'Item': {'Hash': 'foo2'}}},
],
},
},
{
'UnprocessedItems': {
self.table_name: [
{'PutRequest': {'Item': {'Hash': 'foo2'}}},
],
},
},
{
'UnprocessedItems': {}
},
]
with BatchWriter(self.table_name, self.client, flush_amount=2) as b:
b.put_item(Item={'Hash': 'foo1'})
b.put_item(Item={'Hash': 'foo2'})
b.put_item(Item={'Hash': 'foo3'})
# Note how we're never sending more than flush_amount=2.
first_batch = {
'RequestItems': {
self.table_name: [
{'PutRequest': {'Item': {'Hash': 'foo1'}}},
{'PutRequest': {'Item': {'Hash': 'foo2'}}},
]
}
}
# Even when the server sends back 2 unprocessed items,
# we'll still only send 2 at a time, in order.
second_batch = {
'RequestItems': {
self.table_name: [
{'PutRequest': {'Item': {'Hash': 'foo1'}}},
{'PutRequest': {'Item': {'Hash': 'foo2'}}},
]
}
}
# And then we still see one more unprocessed item so
# we need to send another batch.
third_batch = {
'RequestItems': {
self.table_name: [
{'PutRequest': {'Item': {'Hash': 'foo3'}}},
{'PutRequest': {'Item': {'Hash': 'foo2'}}},
]
}
}
self.assert_batch_write_calls_are([first_batch, second_batch,
third_batch])
def test_repeated_flushing_on_exit(self):
# We're going to simulate unprocessed_items
# returning multiple unprocessed items across calls.

View file

@ -0,0 +1,40 @@
# Copyright 2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import unittest
import mock
from boto3.ec2.deletetags import delete_tags
class TestDeleteTags(unittest.TestCase):
def setUp(self):
self.client = mock.Mock()
self.resource = mock.Mock()
self.resource.meta.client = self.client
self.instance_id = 'instance_id'
self.resource.id = self.instance_id
def test_delete_tags(self):
tags = {
'Tags': [
{'Key': 'key1', 'Value': 'value1'},
{'Key': 'key2', 'Value': 'value2'},
{'Key': 'key3', 'Value': 'value3'}
]
}
delete_tags(self.resource, **tags)
kwargs = tags
kwargs['Resources'] = [self.instance_id]
self.client.delete_tags.assert_called_with(**kwargs)

View file

@ -245,14 +245,14 @@ class TestBatchActionCall(BaseTestCase):
action = BatchAction(model)
action(collection)
crp_mock.assert_called_with(item, model.request, params={})
crp_mock.assert_called_with(item, model.request, params={}, index=0)
client.get_frobs.assert_not_called()
@mock.patch('boto3.resources.action.create_request_parameters')
def test_batch_action_calls_operation(self, crp_mock):
# In this test we have an item and parameters, so the call
# to the batch operation should be made.
def side_effect(resource, model, params=None):
def side_effect(resource, model, params=None, index=None):
params['foo'] = 'bar'
crp_mock.side_effect = side_effect
@ -272,5 +272,5 @@ class TestBatchActionCall(BaseTestCase):
# Here the call is made with params={}, but they are edited
# in-place so we need to compare to the final edited value.
crp_mock.assert_called_with(item, model.request,
params={'foo': 'bar'})
params={'foo': 'bar'}, index=0)
client.get_frobs.assert_called_with(foo='bar')

View file

@ -80,8 +80,14 @@ class TestCollectionFactory(BaseTestCase):
'test.Chain.FrobsCollectionManager')
self.assertIsInstance(collection, CollectionManager)
# Make sure that collection manager created from the factory
# returns a ResourceCollection.
self.assertIsInstance(collection.all(), ResourceCollection)
# Make sure that the collection returned from the collection
# manager can be chained and return a ResourceCollection as well.
self.assertIsInstance(collection.all().all(), ResourceCollection)
@mock.patch('boto3.resources.collection.BatchAction')
def test_create_batch_actions(self, action_mock):
resource_defs = {

View file

@ -411,7 +411,7 @@ class TestResourceFactory(BaseTestResourceFactory):
# Accessing a property should call load
self.assertEqual(resource.e_tag, 'tag',
'ETag property returned wrong value')
action.assert_called_once()
self.assertEqual(action.call_count, 1)
# Both params should have been loaded into the data bag
self.assertIn('ETag', resource.meta.data)
@ -421,7 +421,7 @@ class TestResourceFactory(BaseTestResourceFactory):
# instead of making a second call.
self.assertEqual(resource.last_modified, 'never',
'LastModified property returned wrong value')
action.assert_called_once()
self.assertEqual(action.call_count, 1)
@mock.patch('boto3.resources.factory.ServiceAction')
def test_resource_lazy_properties_missing_load(self, action_cls):
@ -456,6 +456,35 @@ class TestResourceFactory(BaseTestResourceFactory):
with self.assertRaises(ResourceLoadException):
resource.last_modified
@mock.patch('boto3.resources.factory.ServiceAction')
def test_resource_aliases_identifiers(self, action_cls):
model = {
'shape': 'TestShape',
'identifiers': [
{'name': 'id', 'memberName': 'foo_id'}
]
}
shape = DenormalizedStructureBuilder().with_members({
'foo_id': {
'type': 'string',
},
'bar': {
'type': 'string'
},
}).build_model()
service_model = mock.Mock()
service_model.shape_for.return_value = shape
shape_id = 'baz'
resource = self.load(
'test', model, service_model=service_model)(shape_id)
try:
self.assertEqual(resource.id, shape_id)
self.assertEqual(resource.foo_id, shape_id)
except ResourceLoadException:
self.fail("Load attempted on identifier alias.")
def test_resource_loads_references(self):
model = {
'shape': 'InstanceShape',
@ -827,6 +856,13 @@ class TestServiceResourceSubresources(BaseTestResourceFactory):
self.assertIn('PriorityQueue', dir(resource))
self.assertIn('Message', dir(resource))
def test_get_available_subresources(self):
resource = self.load('test', self.model, self.defs)()
self.assertTrue(hasattr(resource, 'get_available_subresources'))
subresources = sorted(resource.get_available_subresources())
expected = sorted(['PriorityQueue', 'Message', 'QueueObject'])
self.assertEqual(subresources, expected)
def test_subresource_missing_all_subresources(self):
resource = self.load('test', self.model, self.defs)()
message = resource.Message('url', 'handle')
@ -846,8 +882,9 @@ class TestServiceResourceSubresources(BaseTestResourceFactory):
# Verify we send out the class attributes dict.
actual_class_attrs = sorted(call_args[1]['class_attributes'])
self.assertEqual(actual_class_attrs,
['Message', 'PriorityQueue', 'QueueObject', 'meta'])
self.assertEqual(actual_class_attrs, [
'Message', 'PriorityQueue', 'QueueObject',
'get_available_subresources', 'meta'])
base_classes = sorted(call_args[1]['base_classes'])
self.assertEqual(base_classes, [ServiceResource])

View file

@ -34,12 +34,13 @@ class TestModels(BaseTestCase):
model = ResourceModel('test', {
'identifiers': [
{'name': 'one'},
{'name': 'two'}
{'name': 'two', 'memberName': 'three'}
]
}, {})
self.assertEqual(model.identifiers[0].name, 'one')
self.assertEqual(model.identifiers[1].name, 'two')
self.assertEqual(model.identifiers[1].member_name, 'three')
def test_resource_action_raw(self):
model = ResourceModel('test', {

View file

@ -269,3 +269,18 @@ class TestStructBuilder(BaseTestCase):
build_param_structure(params, 'foo[]', 456)
self.assertEqual(params['foo'], [123, 456])
def test_provided_index_with_wildcard(self):
params = {}
index = 0
build_param_structure(params, 'foo[*].bar', 123, index)
build_param_structure(params, 'foo[*].baz', 456, index)
self.assertEqual(params['foo'][index], {'bar': 123, 'baz': 456})
index = 1
build_param_structure(params, 'foo[*].bar', 789, index)
build_param_structure(params, 'foo[*].baz', 123, index)
self.assertEqual(params['foo'], [
{'bar': 123, 'baz': 456},
{'bar': 789, 'baz': 123}
])

View file

@ -111,3 +111,24 @@ class TestObjectTransferMethods(unittest.TestCase):
self.obj.meta.client.download_file.assert_called_with(
Bucket=self.obj.bucket_name, Key=self.obj.key, Filename='foo',
ExtraArgs=None, Callback=None, Config=None)
class TestObjectSummaryLoad(unittest.TestCase):
def setUp(self):
self.client = mock.Mock()
self.resource = mock.Mock()
self.resource.meta.client = self.client
self.head_object_response = {
'ContentLength': 5, 'ETag': 'my-etag'
}
self.client.head_object.return_value = self.head_object_response
def test_object_summary_load(self):
inject.object_summary_load(self.resource)
self.assertEqual(
self.resource.meta.data, {'Size': 5, 'ETag': 'my-etag'})
def test_can_handle_missing_content_length(self):
self.head_object_response.pop('ContentLength')
inject.object_summary_load(self.resource)
self.assertEqual(self.resource.meta.data, {'ETag': 'my-etag'})

View file

@ -18,6 +18,8 @@ from tests import unittest
from contextlib import closing
import mock
from botocore.stub import Stubber
from botocore.session import Session
from botocore.vendored import six
from concurrent import futures
@ -184,40 +186,6 @@ class TestReadFileChunk(unittest.TestCase):
chunk.seek(0)
self.assertEqual(chunk.tell(), 0)
def test_callback_is_invoked_on_read(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'abc')
amounts_seen = []
def callback(amount):
amounts_seen.append(amount)
chunk = ReadFileChunk.from_filename(
filename, start_byte=0, chunk_size=3, callback=callback)
chunk.read(1)
chunk.read(1)
chunk.read(1)
self.assertEqual(amounts_seen, [1, 1, 1])
def test_callback_can_be_disabled(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
f.write(b'abc')
callback_calls = []
def callback(amount):
callback_calls.append(amount)
chunk = ReadFileChunk.from_filename(
filename, start_byte=0, chunk_size=3, callback=callback)
chunk.disable_callback()
# Now reading from the ReadFileChunk should not invoke
# the callback.
chunk.read()
self.assertEqual(callback_calls, [])
def test_file_chunk_supports_context_manager(self):
filename = os.path.join(self.tempdir, 'foo')
with open(filename, 'wb') as f:
@ -238,6 +206,45 @@ class TestReadFileChunk(unittest.TestCase):
self.assertEqual(list(chunk), [])
class TestReadFileChunkWithCallback(TestReadFileChunk):
def setUp(self):
super(TestReadFileChunkWithCallback, self).setUp()
self.filename = os.path.join(self.tempdir, 'foo')
with open(self.filename, 'wb') as f:
f.write(b'abc')
self.amounts_seen = []
def callback(self, amount):
self.amounts_seen.append(amount)
def test_callback_is_invoked_on_read(self):
chunk = ReadFileChunk.from_filename(
self.filename, start_byte=0, chunk_size=3, callback=self.callback)
chunk.read(1)
chunk.read(1)
chunk.read(1)
self.assertEqual(self.amounts_seen, [1, 1, 1])
def test_callback_can_be_disabled(self):
chunk = ReadFileChunk.from_filename(
self.filename, start_byte=0, chunk_size=3, callback=self.callback)
chunk.disable_callback()
# Now reading from the ReadFileChunk should not invoke
# the callback.
chunk.read()
self.assertEqual(self.amounts_seen, [])
def test_callback_will_also_be_triggered_by_seek(self):
chunk = ReadFileChunk.from_filename(
self.filename, start_byte=0, chunk_size=3, callback=self.callback)
chunk.read(2)
chunk.seek(0)
chunk.read(2)
chunk.seek(1)
chunk.read(2)
self.assertEqual(self.amounts_seen, [2, -2, 2, -1, 2])
class TestStreamReaderProgress(unittest.TestCase):
def test_proxies_to_wrapped_stream(self):
@ -386,6 +393,23 @@ class TestMultipartDownloader(unittest.TestCase):
mock.call(Range='bytes=4-7', **extra),
mock.call(Range='bytes=8-', **extra)])
def test_multipart_download_with_multiple_parts_and_extra_args(self):
client = Session().create_client('s3')
stubber = Stubber(client)
response_body = b'foobarbaz'
response = {'Body': six.BytesIO(response_body)}
expected_params = {
'Range': mock.ANY, 'Bucket': mock.ANY, 'Key': mock.ANY,
'RequestPayer': 'requester'}
stubber.add_response('get_object', response, expected_params)
stubber.activate()
downloader = MultipartDownloader(
client, TransferConfig(), InMemoryOSLayer({}), SequentialExecutor)
downloader.download_file(
'bucket', 'key', 'filename', len(response_body),
{'RequestPayer': 'requester'})
stubber.assert_no_pending_responses()
def test_retry_on_failures_from_stream_reads(self):
# If we get an exception during a call to the response body's .read()
# method, we should retry the request.
@ -393,7 +417,7 @@ class TestMultipartDownloader(unittest.TestCase):
response_body = b'foobarbaz'
stream_with_errors = mock.Mock()
stream_with_errors.read.side_effect = [
socket.error("fake error"),
socket.timeout("fake error"),
response_body
]
client.get_object.return_value = {'Body': stream_with_errors}
@ -424,7 +448,7 @@ class TestMultipartDownloader(unittest.TestCase):
client = mock.Mock()
response_body = b'foobarbaz'
stream_with_errors = mock.Mock()
stream_with_errors.read.side_effect = socket.error("fake error")
stream_with_errors.read.side_effect = socket.timeout("fake error")
client.get_object.return_value = {'Body': stream_with_errors}
config = TransferConfig(multipart_threshold=4,
multipart_chunksize=4)
@ -454,6 +478,22 @@ class TestMultipartDownloader(unittest.TestCase):
downloader.download_file('bucket', 'key', 'filename',
len(response_body), {})
def test_io_thread_fails_to_open_triggers_shutdown_error(self):
client = mock.Mock()
client.get_object.return_value = {
'Body': six.BytesIO(b'asdf')
}
os_layer = mock.Mock(spec=OSUtils)
os_layer.open.side_effect = IOError("Can't open file")
downloader = MultipartDownloader(
client, TransferConfig(),
os_layer, SequentialExecutor)
# We're verifying that the exception raised from the IO future
# propagates back up via download_file().
with self.assertRaisesRegexp(IOError, "Can't open file"):
downloader.download_file('bucket', 'key', 'filename',
len(b'asdf'), {})
def test_download_futures_fail_triggers_shutdown(self):
class FailedDownloadParts(SequentialExecutor):
def __init__(self, max_workers):
@ -614,7 +654,7 @@ class TestS3Transfer(unittest.TestCase):
'ContentLength': below_threshold}
self.client.get_object.side_effect = [
# First request fails.
socket.error("fake error"),
socket.timeout("fake error"),
# Second succeeds.
{'Body': six.BytesIO(b'foobar')}
]
@ -631,7 +671,7 @@ class TestS3Transfer(unittest.TestCase):
# Here we're raising an exception every single time, which
# will exhaust our retry count and propagate a
# RetriesExceededError.
self.client.get_object.side_effect = socket.error("fake error")
self.client.get_object.side_effect = socket.timeout("fake error")
with self.assertRaises(RetriesExceededError):
transfer.download_file('bucket', 'key', 'smallfile')

Some files were not shown because too many files have changed in this diff Show more