diff --git a/PKG-INFO b/PKG-INFO index 9f6e757e..ca8a9bf4 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.5.78 +Version: 1.5.80 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/PKG-INFO b/botocore.egg-info/PKG-INFO index 9f6e757e..ca8a9bf4 100644 --- a/botocore.egg-info/PKG-INFO +++ b/botocore.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.5.78 +Version: 1.5.80 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/SOURCES.txt b/botocore.egg-info/SOURCES.txt index 42a620d6..b6d8d523 100644 --- a/botocore.egg-info/SOURCES.txt +++ b/botocore.egg-info/SOURCES.txt @@ -172,6 +172,7 @@ botocore/data/dms/2016-01-01/examples-1.json botocore/data/dms/2016-01-01/paginators-1.json botocore/data/dms/2016-01-01/service-2.json botocore/data/ds/2015-04-16/examples-1.json +botocore/data/ds/2015-04-16/paginators-1.json botocore/data/ds/2015-04-16/service-2.json botocore/data/dynamodb/2012-08-10/examples-1.json botocore/data/dynamodb/2012-08-10/paginators-1.json @@ -604,6 +605,7 @@ tests/functional/test_regions.py tests/functional/test_s3.py tests/functional/test_session.py tests/functional/test_six_imports.py +tests/functional/test_six_threading.py tests/functional/test_stub.py tests/functional/test_waiter_config.py tests/functional/docs/__init__.py diff --git a/botocore/__init__.py b/botocore/__init__.py index 3d741122..d07d01c9 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.5.78' +__version__ = '1.5.80' class NullHandler(logging.Handler): diff --git a/botocore/args.py b/botocore/args.py index 9e69a538..49af1c7b 100644 --- a/botocore/args.py +++ b/botocore/args.py @@ -75,6 +75,7 @@ class ClientArgsCreator(object): endpoint_url=endpoint_config['endpoint_url'], verify=verify, response_parser_factory=self._response_parser_factory, max_pool_connections=new_config.max_pool_connections, + proxies=new_config.proxies, timeout=(new_config.connect_timeout, new_config.read_timeout)) serializer = botocore.serialize.create_serializer( @@ -129,6 +130,7 @@ class ClientArgsCreator(object): connect_timeout=client_config.connect_timeout, read_timeout=client_config.read_timeout, max_pool_connections=client_config.max_pool_connections, + proxies=client_config.proxies, ) s3_config = self.compute_s3_config(scoped_config, client_config) diff --git a/botocore/auth.py b/botocore/auth.py index 74785804..8cc73378 100644 --- a/botocore/auth.py +++ b/botocore/auth.py @@ -110,7 +110,7 @@ class SigV2Auth(BaseSigner): params = request.data else: # GET - params = request.param + params = request.params params['AWSAccessKeyId'] = self.credentials.access_key params['SignatureVersion'] = '2' params['SignatureMethod'] = 'HmacSHA256' diff --git a/botocore/compat.py b/botocore/compat.py index d1d45eeb..0180df4e 100644 --- a/botocore/compat.py +++ b/botocore/compat.py @@ -165,7 +165,7 @@ if sys.version_info[:2] == (2, 6): 'ignore', message="Certificate has no.*subjectAltName.*", category=exceptions.SecurityWarning, - module=".*urllib3\.connection") + module=r".*urllib3\.connection") else: import xml.etree.cElementTree XMLParseError = xml.etree.cElementTree.ParseError @@ -183,7 +183,7 @@ def filter_ssl_warnings(): 'ignore', message="A true SSLContext object is not available.*", 
category=exceptions.InsecurePlatformWarning, - module=".*urllib3\.util\.ssl_") + module=r".*urllib3\.util\.ssl_") filter_ssl_san_warnings() diff --git a/botocore/config.py b/botocore/config.py index 8c136aa7..b318941e 100644 --- a/botocore/config.py +++ b/botocore/config.py @@ -54,6 +54,12 @@ class Config(object): keep in a connection pool. If this value is not set, the default value of 10 is used. + :type proxies: dict + :param proxies: A dictionary of proxy servers to use by protocol or + endpoint, e.g.: + {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. + The proxies are used on each request. + :type s3: dict :param s3: A dictionary of s3 specific configurations. Valid keys are: @@ -92,6 +98,7 @@ class Config(object): ('read_timeout', DEFAULT_TIMEOUT), ('parameter_validation', True), ('max_pool_connections', MAX_POOL_CONNECTIONS), + ('proxies', None), ('s3', None) ]) diff --git a/botocore/data/cloudwatch/2010-08-01/service-2.json b/botocore/data/cloudwatch/2010-08-01/service-2.json index 822981db..3bf15427 100644 --- a/botocore/data/cloudwatch/2010-08-01/service-2.json +++ b/botocore/data/cloudwatch/2010-08-01/service-2.json @@ -23,6 +23,24 @@ ], "documentation":"
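The new `proxies` option is threaded from `Config` through `ClientArgsCreator` into the endpoint. A minimal sketch of how it can be used; the proxy hosts, service, and region below are illustrative placeholders:

```python
import botocore.session
from botocore.config import Config

# Proxy endpoints here are illustrative; substitute your own.
config = Config(proxies={'http': 'proxy.example.com:3128',
                         'https': 'proxy.example.com:3129'})

session = botocore.session.get_session()
client = session.create_client('s3', region_name='us-east-1', config=config)
```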

Deletes the specified alarms. In the event of an error, no alarms are deleted.

" }, + "DeleteDashboards":{ + "name":"DeleteDashboards", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteDashboardsInput"}, + "output":{ + "shape":"DeleteDashboardsOutput", + "resultWrapper":"DeleteDashboardsResult" + }, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"DashboardNotFoundError"}, + {"shape":"InternalServiceFault"} + ], + "documentation":"

Deletes all dashboards that you specify. You may specify up to 100 dashboards to delete. If there is an error during this call, no dashboards are deleted.

" + }, "DescribeAlarmHistory":{ "name":"DescribeAlarmHistory", "http":{ @@ -37,7 +55,7 @@ "errors":[ {"shape":"InvalidNextToken"} ], - "documentation":"

Retrieves the history for the specified alarm. You can filter the results by date range or item type. If an alarm name is not specified, the histories for all alarms are returned.

Note that Amazon CloudWatch retains the history of an alarm even if you delete the alarm.

" + "documentation":"

Retrieves the history for the specified alarm. You can filter the results by date range or item type. If an alarm name is not specified, the histories for all alarms are returned.

CloudWatch retains the history of an alarm even if you delete the alarm.

" }, "DescribeAlarms":{ "name":"DescribeAlarms", @@ -66,7 +84,7 @@ "shape":"DescribeAlarmsForMetricOutput", "resultWrapper":"DescribeAlarmsForMetricResult" }, - "documentation":"

Retrieves the alarms for the specified metric. Specify a statistic, period, or unit to filter the results.

" + "documentation":"

Retrieves the alarms for the specified metric. To filter the results, specify a statistic, period, or unit.

" }, "DisableAlarmActions":{ "name":"DisableAlarmActions", @@ -86,6 +104,24 @@ "input":{"shape":"EnableAlarmActionsInput"}, "documentation":"

Enables the actions for the specified alarms.

" }, + "GetDashboard":{ + "name":"GetDashboard", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetDashboardInput"}, + "output":{ + "shape":"GetDashboardOutput", + "resultWrapper":"GetDashboardResult" + }, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"DashboardNotFoundError"}, + {"shape":"InternalServiceFault"} + ], + "documentation":"

Displays the details of the dashboard that you specify.

To copy an existing dashboard, use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard to create the copy.

" + }, "GetMetricStatistics":{ "name":"GetMetricStatistics", "http":{ @@ -103,7 +139,24 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Gets statistics for the specified metric.

Amazon CloudWatch retains metric data as follows:

Note that CloudWatch started retaining 5-minute and 1-hour metric data as of 9 July 2016.

The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, Amazon CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. A period can be as short as one minute (60 seconds). Note that data points are not returned in chronological order.

Amazon CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, Amazon CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you cannot retrieve percentile statistics for this data unless one of the following conditions is true:

For a list of metrics and dimensions supported by AWS services, see the Amazon CloudWatch Metrics and Dimensions Reference in the Amazon CloudWatch User Guide.

" + "documentation":"

Gets statistics for the specified metric.

Amazon CloudWatch retains metric data as follows:

CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, 2016.

The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. A period can be as short as one minute (60 seconds). Data points are not returned in chronological order.

CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

For a list of metrics and dimensions supported by AWS services, see the Amazon CloudWatch Metrics and Dimensions Reference in the Amazon CloudWatch User Guide.

" + }, + "ListDashboards":{ + "name":"ListDashboards", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListDashboardsInput"}, + "output":{ + "shape":"ListDashboardsOutput", + "resultWrapper":"ListDashboardsResult" + }, + "errors":[ + {"shape":"InvalidParameterValueException"}, + {"shape":"InternalServiceFault"} + ], + "documentation":"

Returns a list of the dashboards for your account. If you include DashboardNamePrefix, only those dashboards with names starting with the prefix are listed. Otherwise, all dashboards in your account are listed.

" }, "ListMetrics":{ "name":"ListMetrics", @@ -122,6 +175,23 @@ ], "documentation":"

List the specified metrics. You can use the returned metrics with GetMetricStatistics to obtain statistical data.

Up to 500 results are returned for any one call. To retrieve additional results, use the returned token with subsequent calls.

After you create a metric, allow up to fifteen minutes before the metric appears. Statistics about the metric, however, are available sooner using GetMetricStatistics.

" }, + "PutDashboard":{ + "name":"PutDashboard", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutDashboardInput"}, + "output":{ + "shape":"PutDashboardOutput", + "resultWrapper":"PutDashboardResult" + }, + "errors":[ + {"shape":"DashboardInvalidInputError"}, + {"shape":"InternalServiceFault"} + ], + "documentation":"

Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.

You can have up to 500 dashboards per account. All dashboards in your account are global, not region-specific.

To copy an existing dashboard, use GetDashboard, and then use the data returned within DashboardBody as the template for the new dashboard when you call PutDashboard to create the copy.

" + }, "PutMetricAlarm":{ "name":"PutMetricAlarm", "http":{ @@ -132,7 +202,7 @@ "errors":[ {"shape":"LimitExceededFault"} ], - "documentation":"

Creates or updates an alarm and associates it with the specified metric. Optionally, this operation can associate one or more Amazon SNS resources with the alarm.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is evaluated and its state is set appropriately. Any actions associated with the state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an AWS Identity and Access Management (IAM) user, you must have Amazon EC2 permissions for some operations:

If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions won't be performed. However, if you are later granted the required permissions, the alarm actions that you created earlier will be performed.

If you are using an IAM role (for example, an Amazon EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.

If you are using temporary security credentials granted using the AWS Security Token Service (AWS STS), you cannot stop or terminate an Amazon EC2 instance using alarm actions.

Note that you must create at least one stop, terminate, or reboot alarm using the Amazon EC2 or CloudWatch console to create the EC2ActionsAccess IAM role. After this IAM role is created, you can create stop, terminate, or reboot alarms using a command-line interface or an API.

" + "documentation":"

Creates or updates an alarm and associates it with the specified metric. Optionally, this operation can associate one or more Amazon SNS resources with the alarm.

When this operation creates an alarm, the alarm state is immediately set to INSUFFICIENT_DATA. The alarm is evaluated and its state is set appropriately. Any actions associated with the state are then executed.

When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.

If you are an IAM user, you must have Amazon EC2 permissions for some operations:

If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions are not performed. However, if you are later granted the required permissions, the alarm actions that you created earlier are performed.

If you are using an IAM role (for example, an EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.

If you are using temporary security credentials granted using AWS STS, you cannot stop or terminate an EC2 instance using alarm actions.

You must create at least one stop, terminate, or reboot alarm using either the Amazon EC2 or CloudWatch consoles to create the EC2ActionsAccess IAM role. After this IAM role is created, you can create stop, terminate, or reboot alarms using a command-line interface or API.

" }, "PutMetricData":{ "name":"PutMetricData", @@ -147,7 +217,7 @@ {"shape":"InvalidParameterCombinationException"}, {"shape":"InternalServiceFault"} ], - "documentation":"

Publishes metric data points to Amazon CloudWatch. Amazon CloudWatch associates the data points with the specified metric. If the specified metric does not exist, Amazon CloudWatch creates the metric. When Amazon CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

Each PutMetricData request is limited to 40 KB in size for HTTP POST requests.

Although the Value parameter accepts numbers of type Double, Amazon CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (e.g., NaN, +Infinity, -Infinity) are not supported.

You can use up to 10 dimensions per metric to further clarify what data the metric collects. For more information on specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricStatistics from the time they are submitted.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you cannot retrieve percentile statistics for this data unless one of the following conditions is true:

" + "documentation":"

Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to ListMetrics.

Each PutMetricData request is limited to 40 KB in size for HTTP POST requests.

Although the Value parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

You can use up to 10 dimensions per metric to further clarify what data the metric collects. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for GetMetricStatistics from the time they are submitted.

CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:

" }, "SetAlarmState":{ "name":"SetAlarmState", @@ -160,7 +230,7 @@ {"shape":"ResourceNotFound"}, {"shape":"InvalidFormatFault"} ], - "documentation":"

Temporarily sets the state of an alarm for testing purposes. When the updated state differs from the previous value, the action configured for the appropriate state is invoked. For example, if your alarm is configured to send an Amazon SNS message when an alarm is triggered, temporarily changing the alarm state to ALARM sends an Amazon SNS message. The alarm returns to its actual state (often within seconds). Because the alarm state change happens very quickly, it is typically only visible in the alarm's History tab in the Amazon CloudWatch console or through DescribeAlarmHistory.

" + "documentation":"

Temporarily sets the state of an alarm for testing purposes. When the updated state differs from the previous value, the action configured for the appropriate state is invoked. For example, if your alarm is configured to send an Amazon SNS message when an alarm is triggered, temporarily changing the alarm state to ALARM sends an SNS message. The alarm returns to its actual state (often within seconds). Because the alarm state change happens quickly, it is typically only visible in the alarm's History tab in the Amazon CloudWatch console or through DescribeAlarmHistory.

" } }, "shapes":{ @@ -235,6 +305,87 @@ "LessThanOrEqualToThreshold" ] }, + "DashboardArn":{"type":"string"}, + "DashboardBody":{"type":"string"}, + "DashboardEntries":{ + "type":"list", + "member":{"shape":"DashboardEntry"} + }, + "DashboardEntry":{ + "type":"structure", + "members":{ + "DashboardName":{ + "shape":"DashboardName", + "documentation":"

The name of the dashboard.

" + }, + "DashboardArn":{ + "shape":"DashboardArn", + "documentation":"

The Amazon Resource Name (ARN) of the dashboard.

" + }, + "LastModified":{ + "shape":"LastModified", + "documentation":"

The time stamp of when the dashboard was last modified, either by an API call or through the console. This number is expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.

" + }, + "Size":{ + "shape":"Size", + "documentation":"

The size of the dashboard, in bytes.

" + } + }, + "documentation":"

Represents a specific dashboard.

" + }, + "DashboardErrorMessage":{"type":"string"}, + "DashboardInvalidInputError":{ + "type":"structure", + "members":{ + "message":{"shape":"DashboardErrorMessage"}, + "dashboardValidationMessages":{"shape":"DashboardValidationMessages"} + }, + "documentation":"

Some part of the dashboard data is invalid.

", + "error":{ + "code":"InvalidParameterInput", + "httpStatusCode":400, + "senderFault":true + }, + "exception":true + }, + "DashboardName":{"type":"string"}, + "DashboardNamePrefix":{"type":"string"}, + "DashboardNames":{ + "type":"list", + "member":{"shape":"DashboardName"} + }, + "DashboardNotFoundError":{ + "type":"structure", + "members":{ + "message":{"shape":"DashboardErrorMessage"} + }, + "documentation":"

The specified dashboard does not exist.

", + "error":{ + "code":"ResourceNotFound", + "httpStatusCode":404, + "senderFault":true + }, + "exception":true + }, + "DashboardValidationMessage":{ + "type":"structure", + "members":{ + "DataPath":{ + "shape":"DataPath", + "documentation":"

The data path related to the message.

" + }, + "Message":{ + "shape":"Message", + "documentation":"

A message describing the error or warning.

" + } + }, + "documentation":"

An error or warning for the operation.

" + }, + "DashboardValidationMessages":{ + "type":"list", + "member":{"shape":"DashboardValidationMessage"} + }, + "DataPath":{"type":"string"}, "Datapoint":{ "type":"structure", "members":{ @@ -271,7 +422,7 @@ "documentation":"

The percentile statistic for the data point.

" } }, - "documentation":"

Encapsulates the statistical data that Amazon CloudWatch computes from metric data.

", + "documentation":"

Encapsulates the statistical data that CloudWatch computes from metric data.

", "xmlOrder":[ "Timestamp", "SampleCount", @@ -303,6 +454,20 @@ } } }, + "DeleteDashboardsInput":{ + "type":"structure", + "members":{ + "DashboardNames":{ + "shape":"DashboardNames", + "documentation":"

The dashboards to be deleted.

" + } + } + }, + "DeleteDashboardsOutput":{ + "type":"structure", + "members":{ + } + }, "DescribeAlarmHistoryInput":{ "type":"structure", "members":{ @@ -400,7 +565,7 @@ }, "AlarmNamePrefix":{ "shape":"AlarmNamePrefix", - "documentation":"

The alarm name prefix. You cannot specify AlarmNames if this parameter is specified.

" + "documentation":"

The alarm name prefix. If this parameter is specified, you cannot specify AlarmNames.

" }, "StateValue":{ "shape":"StateValue", @@ -535,6 +700,32 @@ "min":1 }, "FaultDescription":{"type":"string"}, + "GetDashboardInput":{ + "type":"structure", + "members":{ + "DashboardName":{ + "shape":"DashboardName", + "documentation":"

The name of the dashboard to be described.

" + } + } + }, + "GetDashboardOutput":{ + "type":"structure", + "members":{ + "DashboardArn":{ + "shape":"DashboardArn", + "documentation":"

The Amazon Resource Name (ARN) of the dashboard.

" + }, + "DashboardBody":{ + "shape":"DashboardBody", + "documentation":"

The detailed information about the dashboard, including what widgets are included and their location on the dashboard. For more information about the DashboardBody syntax, see CloudWatch-Dashboard-Body-Structure.

" + }, + "DashboardName":{ + "shape":"DashboardName", + "documentation":"

The name of the dashboard.

" + } + } + }, "GetMetricStatisticsInput":{ "type":"structure", "required":[ @@ -555,27 +746,27 @@ }, "Dimensions":{ "shape":"Dimensions", - "documentation":"

The dimensions. If the metric contains multiple dimensions, you must include a value for each dimension. CloudWatch treats each unique combination of dimensions as a separate metric. You can't retrieve statistics using combinations of dimensions that were not specially published. You must specify the same dimensions that were used when the metrics were created. For an example, see Dimension Combinations in the Amazon CloudWatch User Guide. For more information on specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

" + "documentation":"

The dimensions. If the metric contains multiple dimensions, you must include a value for each dimension. CloudWatch treats each unique combination of dimensions as a separate metric. If a specific combination of dimensions was not published, you can't retrieve statistics for it. You must specify the same dimensions that were used when the metrics were created. For an example, see Dimension Combinations in the Amazon CloudWatch User Guide. For more information about specifying dimensions, see Publishing Metrics in the Amazon CloudWatch User Guide.

" }, "StartTime":{ "shape":"Timestamp", - "documentation":"

The time stamp that determines the first data point to return. Note that start times are evaluated relative to the time that CloudWatch receives the request.

The value specified is inclusive; results include data points with the specified time stamp. The time stamp must be in ISO 8601 UTC format (for example, 2016-10-03T23:00:00Z).

CloudWatch rounds the specified time stamp as follows:

" + "documentation":"

The time stamp that determines the first data point to return. Start times are evaluated relative to the time that CloudWatch receives the request.

The value specified is inclusive; results include data points with the specified time stamp. The time stamp must be in ISO 8601 UTC format (for example, 2016-10-03T23:00:00Z).

CloudWatch rounds the specified time stamp as follows:

" }, "EndTime":{ "shape":"Timestamp", - "documentation":"

The time stamp that determines the last data point to return.

The value specified is exclusive; results will include data points up to the specified time stamp. The time stamp must be in ISO 8601 UTC format (for example, 2016-10-10T23:00:00Z).

" + "documentation":"

The time stamp that determines the last data point to return.

The value specified is exclusive; results include data points up to the specified time stamp. The time stamp must be in ISO 8601 UTC format (for example, 2016-10-10T23:00:00Z).

" }, "Period":{ "shape":"Period", - "documentation":"

The granularity, in seconds, of the returned data points. A period can be as short as one minute (60 seconds) and must be a multiple of 60. The default value is 60.

If the StartTime parameter specifies a time stamp that is greater than 15 days ago, you must specify the period as follows or no data points in that time range is returned:

" + "documentation":"

The granularity, in seconds, of the returned data points. A period can be as short as one minute (60 seconds) and must be a multiple of 60.

If the StartTime parameter specifies a time stamp more than 15 days in the past, you must specify the period as follows or no data points in that time range are returned:

" }, "Statistics":{ "shape":"Statistics", - "documentation":"

The metric statistics, other than percentile. For percentile statistics, use ExtendedStatistic.

" + "documentation":"

The metric statistics, other than percentile. For percentile statistics, use ExtendedStatistics. When calling GetMetricStatistics, you must specify either Statistics or ExtendedStatistics, but not both.

" }, "ExtendedStatistics":{ "shape":"ExtendedStatistics", - "documentation":"

The percentile statistics. Specify values between p0.0 and p100.

" + "documentation":"

The percentile statistics. Specify values between p0.0 and p100. When calling GetMetricStatistics, you must specify either Statistics or ExtendedStatistics, but not both.

" }, "Unit":{ "shape":"StandardUnit", @@ -670,7 +861,7 @@ "documentation":"

" } }, - "documentation":"

Parameters that cannot be used together were used together.

", + "documentation":"

Parameters were used together that cannot be used together.

", "error":{ "code":"InvalidParameterCombination", "httpStatusCode":400, @@ -694,6 +885,7 @@ }, "exception":true }, + "LastModified":{"type":"timestamp"}, "LimitExceededFault":{ "type":"structure", "members":{ @@ -710,6 +902,32 @@ }, "exception":true }, + "ListDashboardsInput":{ + "type":"structure", + "members":{ + "DashboardNamePrefix":{ + "shape":"DashboardNamePrefix", + "documentation":"

If you specify this parameter, only the dashboards with names starting with the specified string are listed. The maximum length is 255, and valid characters are A-Z, a-z, 0-9, \".\", \"-\", and \"_\".

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token returned by a previous call to indicate that there is more data available.

" + } + } + }, + "ListDashboardsOutput":{ + "type":"structure", + "members":{ + "DashboardEntries":{ + "shape":"DashboardEntries", + "documentation":"

The list of matching dashboards.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token that marks the start of the next batch of returned results.

" + } + } + }, "ListMetricsInput":{ "type":"structure", "members":{ @@ -753,6 +971,7 @@ "max":100, "min":1 }, + "Message":{"type":"string"}, "Metric":{ "type":"structure", "members":{ @@ -867,8 +1086,14 @@ "shape":"ComparisonOperator", "documentation":"

The arithmetic operation to use when comparing the specified statistic and threshold. The specified statistic value is used as the first operand.

" }, - "TreatMissingData":{"shape":"TreatMissingData"}, - "EvaluateLowSampleCountPercentile":{"shape":"EvaluateLowSampleCountPercentile"} + "TreatMissingData":{ + "shape":"TreatMissingData", + "documentation":"

Sets how this alarm is to handle missing data points. If this parameter is omitted, the default behavior of missing is used.

" + }, + "EvaluateLowSampleCountPercentile":{ + "shape":"EvaluateLowSampleCountPercentile", + "documentation":"

Used only for alarms based on percentiles. If ignore, the alarm state does not change during periods with too few data points to be statistically significant. If evaluate is specified or this parameter is omitted, the alarm is always evaluated and possibly changes state no matter how many data points are available.

" + } }, "documentation":"

Represents an alarm.

", "xmlOrder":[ @@ -924,7 +1149,7 @@ }, "Value":{ "shape":"DatapointValue", - "documentation":"

The value for the metric.

Although the parameter accepts numbers of type Double, Amazon CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

" + "documentation":"

The value for the metric.

Although the parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.

" }, "StatisticValues":{ "shape":"StatisticSet", @@ -933,7 +1158,8 @@ "Unit":{ "shape":"StandardUnit", "documentation":"

The unit of the metric.

" - } + }, + "StorageResolution":{"shape":"StorageResolution"} }, "documentation":"

Encapsulates the information sent to either create a metric or add new values to be aggregated into an existing metric.

" }, @@ -976,7 +1202,29 @@ }, "Period":{ "type":"integer", - "min":60 + "min":1 + }, + "PutDashboardInput":{ + "type":"structure", + "members":{ + "DashboardName":{ + "shape":"DashboardName", + "documentation":"

The name of the dashboard. If a dashboard with this name already exists, this call modifies that dashboard, replacing its current contents. Otherwise, a new dashboard is created. The maximum length is 255, and valid characters are A-Z, a-z, 0-9, \".\", \"-\", and \"_\".

" + }, + "DashboardBody":{ + "shape":"DashboardBody", + "documentation":"

The detailed information about the dashboard in JSON format, including the widgets to include and their location on the dashboard.

For more information about the syntax, see CloudWatch-Dashboard-Body-Structure.

" + } + } + }, + "PutDashboardOutput":{ + "type":"structure", + "members":{ + "DashboardValidationMessages":{ + "shape":"DashboardValidationMessages", + "documentation":"

If the input for PutDashboard was correct and the dashboard was successfully created or modified, this result is empty.

If this result includes only warning messages, then the input was valid enough for the dashboard to be created or modified, but some elements of the dashboard may not render.

If this result includes error messages, the input was not valid and the operation failed.

" + } + } }, "PutMetricAlarmInput":{ "type":"structure", @@ -1036,15 +1284,15 @@ }, "Period":{ "shape":"Period", - "documentation":"

The period, in seconds, over which the specified statistic is applied.

" + "documentation":"

The period, in seconds, over which the specified statistic is applied. An alarm's total current evaluation period can be no longer than one day, so this number multiplied by EvaluationPeriods must be 86,400 or less.

" }, "Unit":{ "shape":"StandardUnit", - "documentation":"

The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately.

If you specify a unit, you must use a unit that is appropriate for the metric. Otherwise, the Amazon CloudWatch alarm can get stuck in the INSUFFICIENT DATA state.

" + "documentation":"

The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately.

If you specify a unit, you must use a unit that is appropriate for the metric. Otherwise, the CloudWatch alarm can get stuck in the INSUFFICIENT DATA state.

" }, "EvaluationPeriods":{ "shape":"EvaluationPeriods", - "documentation":"

The number of periods over which data is compared to the specified threshold.

" + "documentation":"

The number of periods over which data is compared to the specified threshold. An alarm's total current evaluation period can be no longer than one day, so this number multiplied by Period must be 86,400 or less.

" }, "Threshold":{ "shape":"Threshold", @@ -1060,7 +1308,7 @@ }, "EvaluateLowSampleCountPercentile":{ "shape":"EvaluateLowSampleCountPercentile", - "documentation":"

Used only for alarms based on percentiles. If you specify ignore, the alarm state will not change during periods with too few data points to be statistically significant. If you specify evaluate or omit this parameter, the alarm will always be evaluated and possibly change state no matter how many data points are available. For more information, see Percentile-Based CloudWatch Alarms and Low Data Samples.

Valid Values: evaluate | ignore

" + "documentation":"

Used only for alarms based on percentiles. If you specify ignore, the alarm state does not change during periods with too few data points to be statistically significant. If you specify evaluate or omit this parameter, the alarm is always evaluated and possibly changes state no matter how many data points are available. For more information, see Percentile-Based CloudWatch Alarms and Low Data Samples.

Valid Values: evaluate | ignore

" } } }, @@ -1133,6 +1381,7 @@ } } }, + "Size":{"type":"long"}, "StandardUnit":{ "type":"string", "enum":[ @@ -1227,6 +1476,10 @@ "max":5, "min":1 }, + "StorageResolution":{ + "type":"integer", + "min":1 + }, "Threshold":{"type":"double"}, "Timestamp":{"type":"timestamp"}, "TreatMissingData":{ @@ -1235,5 +1488,5 @@ "min":1 } }, - "documentation":"

Amazon CloudWatch monitors your Amazon Web Services (AWS) resources and the applications you run on AWS in real-time. You can use CloudWatch to collect and track metrics, which are the variables you want to measure for your resources and applications.

CloudWatch alarms send notifications or automatically make changes to the resources you are monitoring based on rules that you define. For example, you can monitor the CPU usage and disk reads and writes of your Amazon Elastic Compute Cloud (Amazon EC2) instances and then use this data to determine whether you should launch additional instances to handle increased load. You can also use this data to stop under-used instances to save money.

In addition to monitoring the built-in metrics that come with AWS, you can monitor your own custom metrics. With CloudWatch, you gain system-wide visibility into resource utilization, application performance, and operational health.

" + "documentation":"

Amazon CloudWatch monitors your Amazon Web Services (AWS) resources and the applications you run on AWS in real time. You can use CloudWatch to collect and track metrics, which are the variables you want to measure for your resources and applications.

CloudWatch alarms send notifications or automatically change the resources you are monitoring based on rules that you define. For example, you can monitor the CPU usage and disk reads and writes of your Amazon EC2 instances. Then, use this data to determine whether you should launch additional instances to handle increased load. You can also use this data to stop under-used instances to save money.

In addition to monitoring the built-in metrics that come with AWS, you can monitor your own custom metrics. With CloudWatch, you gain system-wide visibility into resource utilization, application performance, and operational health.

" } diff --git a/botocore/data/ds/2015-04-16/paginators-1.json b/botocore/data/ds/2015-04-16/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/ds/2015-04-16/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/ds/2015-04-16/service-2.json b/botocore/data/ds/2015-04-16/service-2.json index 0a364af5..6446bcde 100644 --- a/botocore/data/ds/2015-04-16/service-2.json +++ b/botocore/data/ds/2015-04-16/service-2.json @@ -320,6 +320,24 @@ ], "documentation":"

Obtains information about the directories that belong to this account.

You can retrieve information about specific directories by passing the directory identifiers in the DirectoryIds parameter. Otherwise, all directories that belong to the current account are returned.

This operation supports pagination with the use of the NextToken request and response parameters. If more results are available, the DescribeDirectoriesResult.NextToken member contains a token that you pass in the next call to DescribeDirectories to retrieve the next set of items.

You can also specify a maximum number of return results with the Limit parameter.

" }, + "DescribeDomainControllers":{ + "name":"DescribeDomainControllers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeDomainControllersRequest"}, + "output":{"shape":"DescribeDomainControllersResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"InvalidParameterException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"}, + {"shape":"UnsupportedOperationException"} + ], + "documentation":"

Provides information about any domain controllers in your directory.

" + }, "DescribeEventTopics":{ "name":"DescribeEventTopics", "http":{ @@ -618,6 +636,25 @@ ], "documentation":"

Updates a conditional forwarder that has been set up for your AWS directory.

" }, + "UpdateNumberOfDomainControllers":{ + "name":"UpdateNumberOfDomainControllers", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"UpdateNumberOfDomainControllersRequest"}, + "output":{"shape":"UpdateNumberOfDomainControllersResult"}, + "errors":[ + {"shape":"EntityDoesNotExistException"}, + {"shape":"DirectoryUnavailableException"}, + {"shape":"DomainControllerLimitExceededException"}, + {"shape":"InvalidParameterException"}, + {"shape":"UnsupportedOperationException"}, + {"shape":"ClientException"}, + {"shape":"ServiceException"} + ], + "documentation":"

Adds or removes domain controllers to or from the directory. Based on the difference between the current number of domain controllers and the new value provided through this API call, domain controllers are added or removed. It can take up to 45 minutes for any new domain controllers to become fully active after the requested number of domain controllers is updated. During this time, you cannot make another update request.

" + }, "UpdateRadius":{ "name":"UpdateRadius", "http":{ @@ -1072,7 +1109,10 @@ "shape":"Description", "documentation":"

A textual description for the directory. This label will appear on the AWS console Directory Details page after the directory is created.

" }, - "VpcSettings":{"shape":"DirectoryVpcSettings"} + "VpcSettings":{ + "shape":"DirectoryVpcSettings", + "documentation":"

Contains VPC information for the CreateDirectory or CreateMicrosoftAD operation.

" + } }, "documentation":"

Creates a Microsoft AD in the AWS cloud.

" }, @@ -1332,6 +1372,41 @@ }, "documentation":"

Contains the results of the DescribeDirectories operation.

" }, + "DescribeDomainControllersRequest":{ + "type":"structure", + "required":["DirectoryId"], + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier of the directory for which to retrieve the domain controller information.

" + }, + "DomainControllerIds":{ + "shape":"DomainControllerIds", + "documentation":"

A list of identifiers for the domain controllers whose information will be provided.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The DescribeDomainControllers.NextToken value from a previous call to DescribeDomainControllers. Pass null if this is the first call.

" + }, + "Limit":{ + "shape":"Limit", + "documentation":"

The maximum number of items to return.

" + } + } + }, + "DescribeDomainControllersResult":{ + "type":"structure", + "members":{ + "DomainControllers":{ + "shape":"DomainControllers", + "documentation":"

List of the DomainController objects that were retrieved.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If not null, more results are available. Pass this value for the NextToken parameter in a subsequent call to DescribeDomainControllers to retrieve the next set of items.

" + } + } + }, "DescribeEventTopicsRequest":{ "type":"structure", "members":{ @@ -1434,6 +1509,10 @@ "min":0, "pattern":"^([a-zA-Z0-9_])[\\\\a-zA-Z0-9_@#%*+=:?./!\\s-]*$" }, + "DesiredNumberOfDomainControllers":{ + "type":"integer", + "min":2 + }, "DirectoryConnectSettings":{ "type":"structure", "required":[ @@ -1566,6 +1645,10 @@ "SsoEnabled":{ "shape":"SsoEnabled", "documentation":"

Indicates if single-sign on is enabled for the directory. For more information, see EnableSso and DisableSso.

" + }, + "DesiredNumberOfDomainControllers":{ + "shape":"DesiredNumberOfDomainControllers", + "documentation":"

The desired number of domain controllers in the directory if the directory is Microsoft AD.

" } }, "documentation":"

Contains information about an AWS Directory Service directory.

" @@ -1769,6 +1852,86 @@ "type":"list", "member":{"shape":"IpAddr"} }, + "DomainController":{ + "type":"structure", + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier of the directory where the domain controller resides.

" + }, + "DomainControllerId":{ + "shape":"DomainControllerId", + "documentation":"

Identifies a specific domain controller in the directory.

" + }, + "DnsIpAddr":{ + "shape":"IpAddr", + "documentation":"

The IP address of the domain controller.

" + }, + "VpcId":{ + "shape":"VpcId", + "documentation":"

The identifier of the VPC that contains the domain controller.

" + }, + "SubnetId":{ + "shape":"SubnetId", + "documentation":"

Identifier of the subnet in the VPC that contains the domain controller.

" + }, + "AvailabilityZone":{ + "shape":"AvailabilityZone", + "documentation":"

The Availability Zone where the domain controller is located.

" + }, + "Status":{ + "shape":"DomainControllerStatus", + "documentation":"

The status of the domain controller.

" + }, + "StatusReason":{ + "shape":"DomainControllerStatusReason", + "documentation":"

A description of the domain controller state.

" + }, + "LaunchTime":{ + "shape":"LaunchTime", + "documentation":"

Specifies when the domain controller was created.

" + }, + "StatusLastUpdatedDateTime":{ + "shape":"LastUpdatedDateTime", + "documentation":"

The date and time that the status was last updated.

" + } + }, + "documentation":"

Contains information about the domain controllers for a specified directory.

" + }, + "DomainControllerId":{ + "type":"string", + "pattern":"^dc-[0-9a-f]{10}$" + }, + "DomainControllerIds":{ + "type":"list", + "member":{"shape":"DomainControllerId"} + }, + "DomainControllerLimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ExceptionMessage"}, + "RequestId":{"shape":"RequestId"} + }, + "documentation":"

The maximum allowed number of domain controllers per directory was exceeded. The default limit per directory is 20 domain controllers.

", + "exception":true + }, + "DomainControllerStatus":{ + "type":"string", + "enum":[ + "Creating", + "Active", + "Impaired", + "Restoring", + "Deleting", + "Deleted", + "Failed" + ] + }, + "DomainControllerStatusReason":{"type":"string"}, + "DomainControllers":{ + "type":"list", + "member":{"shape":"DomainController"} + }, "EnableRadiusRequest":{ "type":"structure", "required":[ @@ -2741,6 +2904,28 @@ }, "documentation":"

The result of an UpdateConditionalForwarder request.

" }, + "UpdateNumberOfDomainControllersRequest":{ + "type":"structure", + "required":[ + "DirectoryId", + "DesiredNumber" + ], + "members":{ + "DirectoryId":{ + "shape":"DirectoryId", + "documentation":"

Identifier of the directory to which the domain controllers will be added or removed.

" + }, + "DesiredNumber":{ + "shape":"DesiredNumberOfDomainControllers", + "documentation":"

The number of domain controllers desired in the directory.

" + } + } + }, + "UpdateNumberOfDomainControllersResult":{ + "type":"structure", + "members":{ + } + }, "UpdateRadiusRequest":{ "type":"structure", "required":[ diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 90700c1f..204f5b1d 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -131,6 +131,9 @@ }, "athena" : { "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, "us-west-2" : { } @@ -274,10 +277,13 @@ "ap-northeast-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -285,8 +291,10 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-northeast-2" : { }, + "ap-south-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, @@ -320,11 +328,14 @@ "ap-northeast-1" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -1604,6 +1615,16 @@ "cn-north-1" : { } } }, + "ecr" : { + "endpoints" : { + "cn-north-1" : { } + } + }, + "ecs" : { + "endpoints" : { + "cn-north-1" : { } + } + }, "elasticache" : { "endpoints" : { "cn-north-1" : { } @@ -1709,6 +1730,11 @@ "cn-north-1" : { } } }, + "ssm" : { + "endpoints" : { + "cn-north-1" : { } + } + }, "storagegateway" : { "endpoints" : { "cn-north-1" : { } @@ -1929,6 +1955,11 @@ } } }, + "ssm" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "streams.dynamodb" : { "defaults" : { "credentialScope" : { diff --git a/botocore/data/kinesis/2013-12-02/service-2.json b/botocore/data/kinesis/2013-12-02/service-2.json index ec0d26ed..c73d33fc 100644 --- a/botocore/data/kinesis/2013-12-02/service-2.json +++ b/botocore/data/kinesis/2013-12-02/service-2.json @@ -139,7 +139,13 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArgumentException"}, {"shape":"ProvisionedThroughputExceededException"}, - {"shape":"ExpiredIteratorException"} + {"shape":"ExpiredIteratorException"}, + {"shape":"KMSDisabledException"}, + {"shape":"KMSInvalidStateException"}, + {"shape":"KMSAccessDeniedException"}, + {"shape":"KMSNotFoundException"}, + {"shape":"KMSOptInRequired"}, + {"shape":"KMSThrottlingException"} ], "documentation":"

Gets data records from an Amazon Kinesis stream's shard.

Specify a shard iterator using the ShardIterator parameter. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially. If there are no records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. Note that it might take multiple calls to get to a portion of the shard that contains records.

You can scale by provisioning multiple shards per stream while considering service limits (for more information, see Streams Limits in the Amazon Kinesis Streams Developer Guide). Your application should have one thread per shard, each reading continuously from its stream. To read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in NextShardIterator. Specify the shard iterator returned in NextShardIterator in subsequent calls to GetRecords. Note that if the shard has been closed, the shard iterator can't return more data and GetRecords returns null in NextShardIterator. You can terminate the loop when the shard is closed, or when the shard iterator reaches the record with the sequence number or other attribute that marks it as the last record to process.

Each data record can be up to 1 MB in size, and each shard can read up to 2 MB per second. You can ensure that your calls don't exceed the maximum supported size or throughput by using the Limit parameter to specify the maximum number of records that GetRecords can return. Consider your average record size when determining this limit.

The size of the data returned by GetRecords varies depending on the utilization of the shard. The maximum size of data that GetRecords can return is 10 MB. If a call returns this amount of data, subsequent calls made within the next 5 seconds throw ProvisionedThroughputExceededException. If there is insufficient provisioned throughput on the shard, subsequent calls made within the next 1 second throw ProvisionedThroughputExceededException. Note that GetRecords won't return any data when it throws an exception. For this reason, we recommend that you wait one second between calls to GetRecords; however, it's possible that the application will get exceptions for longer than 1 second.

To detect whether the application is falling behind in processing, you can use the MillisBehindLatest response attribute. You can also monitor the stream using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon Kinesis Streams Developer Guide).

Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, that is set when a stream successfully receives and stores a record. This is commonly referred to as a server-side timestamp, whereas a client-side timestamp is set when a data producer creates or sends the record to a stream (a data producer is any data source putting data records into a stream, for example with PutRecords). The timestamp has millisecond precision. There are no guarantees about the timestamp accuracy, or that the timestamp is always increasing. For example, records in a shard or across a stream might have timestamps that are out of order.

" }, @@ -226,9 +232,15 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArgumentException"}, - {"shape":"ProvisionedThroughputExceededException"} + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"KMSDisabledException"}, + {"shape":"KMSInvalidStateException"}, + {"shape":"KMSAccessDeniedException"}, + {"shape":"KMSNotFoundException"}, + {"shape":"KMSOptInRequired"}, + {"shape":"KMSThrottlingException"} ], - "documentation":"

Writes a single data record into an Amazon Kinesis stream. Call PutRecord to send data into the stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

You must specify the name of the stream that captures, stores, and transports the data; a partition key; and the data blob itself.

The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

The partition key is used by Amazon Kinesis to distribute data across shards. Amazon Kinesis segregates the data records that belong to a stream into multiple shards, using the partition key associated with each data record to determine which shard a given data record belongs to.

Partition keys are Unicode strings, with a maximum length limit of 256 characters for each key. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards using the hash key ranges of the shards. You can override hashing the partition key to determine the shard by explicitly specifying a hash value using the ExplicitHashKey parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

PutRecord returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record.

Sequence numbers increase over time and are specific to a shard within a stream, not across all shards within a stream. To guarantee strictly increasing ordering, write serially to a shard and use the SequenceNumberForOrdering parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

If a PutRecord request cannot be processed because of insufficient provisioned throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException.

Data records are accessible for only 24 hours from the time that they are added to a stream.

" + "documentation":"

Writes a single data record into an Amazon Kinesis stream. Call PutRecord to send data into the stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

You must specify the name of the stream that captures, stores, and transports the data; a partition key; and the data blob itself.

The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

The partition key is used by Amazon Kinesis to distribute data across shards. Amazon Kinesis segregates the data records that belong to a stream into multiple shards, using the partition key associated with each data record to determine which shard a given data record belongs to.

Partition keys are Unicode strings, with a maximum length limit of 256 characters for each key. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards using the hash key ranges of the shards. You can override hashing the partition key to determine the shard by explicitly specifying a hash value using the ExplicitHashKey parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

PutRecord returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record.

Sequence numbers increase over time and are specific to a shard within a stream, not across all shards within a stream. To guarantee strictly increasing ordering, write serially to a shard and use the SequenceNumberForOrdering parameter. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

If a PutRecord request cannot be processed because of insufficient provisioned throughput on the shard involved in the request, PutRecord throws ProvisionedThroughputExceededException.

By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.
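Reviewer note: a minimal botocore sketch of the single-record put described above. The stream name, payload, and partition key are hypothetical, and credentials plus a default region are assumed to be configured.

    import botocore.session

    session = botocore.session.get_session()
    kinesis = session.create_client('kinesis', region_name='us-east-1')

    # Data must be bytes; the partition key determines the target shard.
    response = kinesis.put_record(
        StreamName='my-stream',        # hypothetical stream name
        Data=b'{"event": "click"}',
        PartitionKey='user-42',
    )
    print(response['ShardId'], response['SequenceNumber'])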

" }, "PutRecords":{ "name":"PutRecords", @@ -241,9 +253,15 @@ "errors":[ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidArgumentException"}, - {"shape":"ProvisionedThroughputExceededException"} + {"shape":"ProvisionedThroughputExceededException"}, + {"shape":"KMSDisabledException"}, + {"shape":"KMSInvalidStateException"}, + {"shape":"KMSAccessDeniedException"}, + {"shape":"KMSNotFoundException"}, + {"shape":"KMSOptInRequired"}, + {"shape":"KMSThrottlingException"} ], - "documentation":"

Writes multiple data records into an Amazon Kinesis stream in a single call (also referred to as a PutRecords request). Use this operation to send data into the stream for data ingestion and processing.

Each PutRecords request can support up to 500 records. Each record in the request can be as large as 1 MB, up to a limit of 5 MB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

You must specify the name of the stream that captures, stores, and transports the data; and an array of request Records, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob.

The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

The partition key is used by Amazon Kinesis as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

Each record in the Records array may include an optional parameter, ExplicitHashKey, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide.

The PutRecords response includes an array of response Records. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response Records array always includes the same number of records as the request array.

The response Records array includes both successfully and unsuccessfully processed records. Amazon Kinesis attempts to process all records in each PutRecords request. A single record failure does not stop the processing of subsequent records.

A successfully-processed record includes ShardId and SequenceNumber values. The ShardId parameter identifies the shard in the stream where the record is stored. The SequenceNumber parameter is an identifier assigned to the put record, unique to all records in the stream.

An unsuccessfully-processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and can be one of the following values: ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides more detailed information about the ProvisionedThroughputExceededException exception including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide.

By default, data records are accessible for only 24 hours from the time that they are added to an Amazon Kinesis stream. This retention period can be modified using the DecreaseStreamRetentionPeriod and IncreaseStreamRetentionPeriod operations.

" + "documentation":"

Writes multiple data records into an Amazon Kinesis stream in a single call (also referred to as a PutRecords request). Use this operation to send data into the stream for data ingestion and processing.

Each PutRecords request can support up to 500 records. Each record in the request can be as large as 1 MB, up to a limit of 5 MB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.

You must specify the name of the stream that captures, stores, and transports the data; and an array of request Records, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob.

The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.

The partition key is used by Amazon Kinesis as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Streams Developer Guide.

Each record in the Records array may include an optional parameter, ExplicitHashKey, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide.

The PutRecords response includes an array of response Records. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response Records array always includes the same number of records as the request array.

The response Records array includes both successfully and unsuccessfully processed records. Amazon Kinesis attempts to process all records in each PutRecords request. A single record failure does not stop the processing of subsequent records.

A successfully-processed record includes ShardId and SequenceNumber values. The ShardId parameter identifies the shard in the stream where the record is stored. The SequenceNumber parameter is an identifier assigned to the put record, unique to all records in the stream.

An unsuccessfully-processed record includes ErrorCode and ErrorMessage values. ErrorCode reflects the type of error and can be one of the following values: ProvisionedThroughputExceededException or InternalFailure. ErrorMessage provides more detailed information about the ProvisionedThroughputExceededException exception, including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis Streams Developer Guide.

By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.
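Reviewer note: a sketch of a batched put with the partial-failure handling described above; the stream name and payloads are hypothetical. Because request and response arrays correlate by position, failed entries can be picked out by the presence of ErrorCode.

    import botocore.session

    session = botocore.session.get_session()
    kinesis = session.create_client('kinesis', region_name='us-east-1')

    records = [{'Data': ('payload-%d' % i).encode(), 'PartitionKey': 'key-%d' % i}
               for i in range(3)]
    response = kinesis.put_records(StreamName='my-stream', Records=records)

    # Re-send only the entries that were throttled or failed internally.
    if response['FailedRecordCount'] > 0:
        retries = [rec for rec, result in zip(records, response['Records'])
                   if 'ErrorCode' in result]
        response = kinesis.put_records(StreamName='my-stream', Records=retries)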

" }, "RemoveTagsFromStream":{ "name":"RemoveTagsFromStream", @@ -275,6 +293,42 @@ ], "documentation":"

Splits a shard into two new shards in the Amazon Kinesis stream to increase the stream's capacity to ingest and transport data. SplitShard is called when there is a need to increase the overall capacity of a stream because of an expected increase in the volume of data records being ingested.

You can also use SplitShard when a shard appears to be approaching its maximum utilization; for example, the producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call SplitShard to increase stream capacity, so that more Amazon Kinesis applications can simultaneously read data from the stream for real-time processing.

You must specify the shard to be split and the new hash key, which is the position in the shard where the shard gets split in two. In many cases, the new hash key might simply be the average of the beginning and ending hash key, but it can be any hash key value in the range being mapped into the shard. For more information about splitting shards, see Split a Shard in the Amazon Kinesis Streams Developer Guide.

You can use DescribeStream to determine the shard ID and hash key values for the ShardToSplit and NewStartingHashKey parameters that are specified in the SplitShard request.

SplitShard is an asynchronous operation. Upon receiving a SplitShard request, Amazon Kinesis immediately returns a response and sets the stream status to UPDATING. After the operation is completed, Amazon Kinesis sets the stream status to ACTIVE. Read and write operations continue to work while the stream is in the UPDATING state.

You can use DescribeStream to check the status of the stream, which is returned in StreamStatus. If the stream is in the ACTIVE state, you can call SplitShard. If a stream is in CREATING or UPDATING or DELETING states, DescribeStream returns a ResourceInUseException.

If the specified stream does not exist, DescribeStream returns a ResourceNotFoundException. If you try to create more shards than are authorized for your account, you receive a LimitExceededException.

For the default shard limit for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. If you need to increase this limit, contact AWS Support.

If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a LimitExceededException.

SplitShard has a limit of 5 transactions per second per account.
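Reviewer note: a sketch of the midpoint split described above, assuming a hypothetical stream whose first shard is the one to split; DescribeStream supplies the hash key range used to compute NewStartingHashKey.

    import botocore.session

    session = botocore.session.get_session()
    kinesis = session.create_client('kinesis', region_name='us-east-1')

    stream = kinesis.describe_stream(StreamName='my-stream')
    shard = stream['StreamDescription']['Shards'][0]
    start = int(shard['HashKeyRange']['StartingHashKey'])
    end = int(shard['HashKeyRange']['EndingHashKey'])

    # Split at the average of the beginning and ending hash key.
    kinesis.split_shard(
        StreamName='my-stream',
        ShardToSplit=shard['ShardId'],
        NewStartingHashKey=str((start + end) // 2),
    )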

" }, + "StartStreamEncryption":{ + "name":"StartStreamEncryption", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartStreamEncryptionInput"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"}, + {"shape":"KMSDisabledException"}, + {"shape":"KMSInvalidStateException"}, + {"shape":"KMSAccessDeniedException"}, + {"shape":"KMSNotFoundException"}, + {"shape":"KMSOptInRequired"}, + {"shape":"KMSThrottlingException"} + ], + "documentation":"

Enables or updates server-side encryption using an AWS KMS key for a specified stream.

Starting encryption is an asynchronous operation. Upon receiving the request, Amazon Kinesis returns immediately and sets the status of the stream to UPDATING. After the update is complete, Amazon Kinesis sets the status of the stream back to ACTIVE. Updating or applying encryption normally takes a few seconds to complete, but it can take minutes. You can continue to read and write data to your stream while its status is UPDATING. Once the status of the stream is ACTIVE, records written to the stream will begin to be encrypted.

API Limits: You can successfully apply a new AWS KMS key for server-side encryption 25 times in a rolling 24 hour period.

Note: It can take up to 5 seconds after the stream is in an ACTIVE status before all records written to the stream are encrypted. After you’ve enabled encryption, you can verify encryption was applied by inspecting the API response from PutRecord or PutRecords.
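Reviewer note: a sketch of enabling encryption and then inspecting the stream description. The stream name is hypothetical, and the alias form 'alias/aws/kinesis' for the Kinesis-owned master key mentioned above is an assumption.

    import botocore.session

    session = botocore.session.get_session()
    kinesis = session.create_client('kinesis', region_name='us-east-1')

    kinesis.start_stream_encryption(
        StreamName='my-stream',
        EncryptionType='KMS',
        KeyId='alias/aws/kinesis',   # or a customer-managed key GUID
    )

    # Once the stream returns to ACTIVE, DescribeStream reports the settings.
    desc = kinesis.describe_stream(StreamName='my-stream')['StreamDescription']
    print(desc.get('EncryptionType'), desc.get('KeyId'))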

" + }, + "StopStreamEncryption":{ + "name":"StopStreamEncryption", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StopStreamEncryptionInput"}, + "errors":[ + {"shape":"InvalidArgumentException"}, + {"shape":"LimitExceededException"}, + {"shape":"ResourceInUseException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"

Disables server-side encryption for a specified stream.

Stopping encryption is an asynchronous operation. Upon receiving the request, Amazon Kinesis returns immediately and sets the status of the stream to UPDATING. After the update is complete, Amazon Kinesis sets the status of the stream back to ACTIVE. Stopping encryption normally takes a few seconds to complete, but it can take minutes. You can continue to read and write data to your stream while its status is UPDATING. Once the status of the stream is ACTIVE, records written to the stream will no longer be encrypted by the Amazon Kinesis Streams service.

API Limits: You can successfully disable server-side encryption 25 times in a rolling 24 hour period.

Note: It can take up to 5 seconds after the stream is in an ACTIVE status before all records written to the stream are no longer subject to encryption. After you’ve disabled encryption, you can verify encryption was not applied by inspecting the API response from PutRecord or PutRecords.
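Reviewer note: the corresponding disable call, sketched under the same hypothetical names; per the StopStreamEncryptionInput shape added below, all three fields are required, including the key that encryption was started with.

    import botocore.session

    session = botocore.session.get_session()
    kinesis = session.create_client('kinesis', region_name='us-east-1')

    kinesis.stop_stream_encryption(
        StreamName='my-stream',
        EncryptionType='KMS',
        KeyId='alias/aws/kinesis',   # the key used when encryption was started
    )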

" + }, "UpdateShardCount":{ "name":"UpdateShardCount", "http":{ @@ -289,7 +343,7 @@ {"shape":"ResourceInUseException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

Updates the shard count of the specified stream to the specified number of shards.

Updating the shard count is an asynchronous operation. Upon receiving the request, Amazon Kinesis returns immediately and sets the status of the stream to UPDATING. After the update is complete, Amazon Kinesis sets the status of the stream back to ACTIVE. Depending on the size of the stream, the scaling action could take a few minutes to complete. You can continue to read and write data to your stream while its status is UPDATING.

To update the shard count, Amazon Kinesis performs splits and merges and individual shards. This can cause short-lived shards to be created, in addition to the final shards. We recommend that you double or halve the shard count, as this results in the fewest number of splits or merges.

This operation has a rate limit of twice per rolling 24 hour period. You cannot scale above double your current shard count, scale below half your current shard count, or exceed the shard limits for your account.

For the default limits for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. If you need to increase a limit, contact AWS Support.

" + "documentation":"

Updates the shard count of the specified stream to the specified number of shards.

Updating the shard count is an asynchronous operation. Upon receiving the request, Amazon Kinesis returns immediately and sets the status of the stream to UPDATING. After the update is complete, Amazon Kinesis sets the status of the stream back to ACTIVE. Depending on the size of the stream, the scaling action could take a few minutes to complete. You can continue to read and write data to your stream while its status is UPDATING.

To update the shard count, Amazon Kinesis performs splits or merges on individual shards. This can cause short-lived shards to be created, in addition to the final shards. We recommend that you double or halve the shard count, as this results in the fewest number of splits or merges.

This operation has the following limits, which are per region per account unless otherwise noted:

For the default limits for an AWS account, see Streams Limits in the Amazon Kinesis Streams Developer Guide. If you need to increase a limit, contact AWS Support.
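Reviewer note: a sketch of a uniform scale-up; the stream name and target count are hypothetical, and the TargetShardCount/ScalingType parameter names come from the UpdateShardCount input shape, which this hunk does not show. Doubling the count follows the recommendation above to minimize splits and merges.

    import botocore.session

    session = botocore.session.get_session()
    kinesis = session.create_client('kinesis', region_name='us-east-1')

    kinesis.update_shard_count(
        StreamName='my-stream',
        TargetShardCount=4,              # assumes the stream has 2 shards today
        ScalingType='UNIFORM_SCALING',
    )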

" } }, "shapes":{ @@ -457,6 +511,13 @@ }, "documentation":"

Represents the input for EnableEnhancedMonitoring.

" }, + "EncryptionType":{ + "type":"string", + "enum":[ + "NONE", + "KMS" + ] + }, "EnhancedMetrics":{ "type":"structure", "members":{ @@ -633,6 +694,77 @@ "documentation":"

A specified parameter exceeds its restrictions, is not supported, or can't be used. For more information, see the returned message.

", "exception":true }, + "KMSAccessDeniedException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"ErrorMessage", + "documentation":"

A message that provides information about the error.

" + } + }, + "documentation":"

The ciphertext references a key that doesn't exist or that you don't have access to.

", + "exception":true + }, + "KMSDisabledException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"ErrorMessage", + "documentation":"

A message that provides information about the error.

" + } + }, + "documentation":"

The request was rejected because the specified CMK isn't enabled.

", + "exception":true + }, + "KMSInvalidStateException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"ErrorMessage", + "documentation":"

A message that provides information about the error.

" + } + }, + "documentation":"

The request was rejected because the state of the specified resource isn't valid for this request. For more information, see How Key State Affects Use of a Customer Master Key in the AWS Key Management Service Developer Guide.

", + "exception":true + }, + "KMSNotFoundException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"ErrorMessage", + "documentation":"

A message that provides information about the error.

" + } + }, + "documentation":"

The request was rejected because the specified entity or resource couldn't be found.

", + "exception":true + }, + "KMSOptInRequired":{ + "type":"structure", + "members":{ + "message":{ + "shape":"ErrorMessage", + "documentation":"

A message that provides information about the error.

" + } + }, + "documentation":"

The AWS access key ID needs a subscription for the service.

", + "exception":true + }, + "KMSThrottlingException":{ + "type":"structure", + "members":{ + "message":{ + "shape":"ErrorMessage", + "documentation":"

A message that provides information about the error.

" + } + }, + "documentation":"

The request was denied due to request throttling. For more information about throttling, see Limits in the AWS Key Management Service Developer Guide.

", + "exception":true + }, + "KeyId":{ + "type":"string", + "max":2048, + "min":1 + }, "LimitExceededException":{ "type":"structure", "members":{ @@ -835,6 +967,10 @@ "SequenceNumber":{ "shape":"SequenceNumber", "documentation":"

The sequence number identifier that was assigned to the put data record. The sequence number for the record is unique across all records in the stream. A sequence number is the identifier associated with every record put into the stream.

" + }, + "EncryptionType":{ + "shape":"EncryptionType", + "documentation":"

The encryption type to use on the record. This parameter can be one of the following values:

" } }, "documentation":"

Represents the output for PutRecord.

" @@ -868,6 +1004,10 @@ "Records":{ "shape":"PutRecordsResultEntryList", "documentation":"

An array of successfully and unsuccessfully processed record results, correlated with the request by natural ordering. A record that is successfully added to a stream includes SequenceNumber and ShardId in the result. A record that fails to be added to a stream includes ErrorCode and ErrorMessage in the result.

" + }, + "EncryptionType":{ + "shape":"EncryptionType", + "documentation":"

The encryption type used on the records. This parameter can be one of the following values:

" } }, "documentation":"

PutRecords results.

" @@ -938,7 +1078,7 @@ "members":{ "SequenceNumber":{ "shape":"SequenceNumber", - "documentation":"

The unique identifier of the record in the stream.

" + "documentation":"

The unique identifier of the record within its shard.

" }, "ApproximateArrivalTimestamp":{ "shape":"Timestamp", @@ -951,6 +1091,10 @@ "PartitionKey":{ "shape":"PartitionKey", "documentation":"

Identifies which shard in the stream the data record is assigned to.

" + }, + "EncryptionType":{ + "shape":"EncryptionType", + "documentation":"

The encryption type used on the record. This parameter can be one of the following values:

" } }, "documentation":"

The unit of data of the Amazon Kinesis stream, which is composed of a sequence number, a partition key, and a data blob.

" @@ -1106,6 +1250,50 @@ }, "documentation":"

Represents the input for SplitShard.

" }, + "StartStreamEncryptionInput":{ + "type":"structure", + "required":[ + "StreamName", + "EncryptionType", + "KeyId" + ], + "members":{ + "StreamName":{ + "shape":"StreamName", + "documentation":"

The name of the stream for which to start encrypting records.

" + }, + "EncryptionType":{ + "shape":"EncryptionType", + "documentation":"

The encryption type to use. This parameter can be one of the following values:

" + }, + "KeyId":{ + "shape":"KeyId", + "documentation":"

The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias aws/kinesis.

" + } + } + }, + "StopStreamEncryptionInput":{ + "type":"structure", + "required":[ + "StreamName", + "EncryptionType", + "KeyId" + ], + "members":{ + "StreamName":{ + "shape":"StreamName", + "documentation":"

The name of the stream on which to stop encrypting records.

" + }, + "EncryptionType":{ + "shape":"EncryptionType", + "documentation":"

The encryption type. This parameter can be one of the following values:

" + }, + "KeyId":{ + "shape":"KeyId", + "documentation":"

The GUID for the customer-managed key that was used for encryption.

" + } + } + }, "StreamARN":{"type":"string"}, "StreamDescription":{ "type":"structure", @@ -1151,6 +1339,14 @@ "EnhancedMonitoring":{ "shape":"EnhancedMonitoringList", "documentation":"

Represents the current enhanced monitoring settings of the stream.

" + }, + "EncryptionType":{ + "shape":"EncryptionType", + "documentation":"

The server-side encryption type used on the stream. This parameter can be one of the following values:

" + }, + "KeyId":{ + "shape":"KeyId", + "documentation":"

The GUID for the customer-managed KMS key used for encryption on the stream.

" } }, "documentation":"

Represents the output for DescribeStream.

" diff --git a/botocore/data/kms/2014-11-01/service-2.json b/botocore/data/kms/2014-11-01/service-2.json index f6d99057..f8bb423d 100644 --- a/botocore/data/kms/2014-11-01/service-2.json +++ b/botocore/data/kms/2014-11-01/service-2.json @@ -1446,6 +1446,13 @@ }, "documentation":"

Contains information about each entry in the key list.

" }, + "KeyManagerType":{ + "type":"string", + "enum":[ + "AWS", + "CUSTOMER" + ] + }, "KeyMetadata":{ "type":"structure", "required":["KeyId"], @@ -1497,6 +1504,10 @@ "ExpirationModel":{ "shape":"ExpirationModelType", "documentation":"

Specifies whether the CMK's key material expires. This value is present only when Origin is EXTERNAL, otherwise this value is omitted.

" + }, + "KeyManager":{ + "shape":"KeyManagerType", + "documentation":"

The CMK's manager. CMKs are either customer-managed or AWS-managed. For more information about the difference, see Customer Master Keys in the AWS Key Management Service Developer Guide.

" } }, "documentation":"

Contains metadata about a customer master key (CMK).

This data type is used as a response element for the CreateKey and DescribeKey operations.
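Reviewer note: a sketch of reading the new KeyManager field off a DescribeKey response; the key alias is hypothetical.

    import botocore.session

    session = botocore.session.get_session()
    kms = session.create_client('kms', region_name='us-east-1')

    # DescribeKey accepts a key ID, ARN, or alias.
    metadata = kms.describe_key(KeyId='alias/my-key')['KeyMetadata']
    print(metadata.get('KeyManager'))    # 'AWS' or 'CUSTOMER'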

" diff --git a/botocore/data/route53/2013-04-01/service-2.json b/botocore/data/route53/2013-04-01/service-2.json index e46b712b..e277beaf 100644 --- a/botocore/data/route53/2013-04-01/service-2.json +++ b/botocore/data/route53/2013-04-01/service-2.json @@ -2551,7 +2551,8 @@ "messages":{ "shape":"ErrorMessages", "documentation":"

Descriptive message for the error response.

" - } + }, + "message":{"shape":"ErrorMessage"} }, "documentation":"

This exception contains a list of messages that might contain one or more error messages. Each error message indicates one error in the change batch.

", "exception":true diff --git a/botocore/data/ssm/2014-11-06/service-2.json b/botocore/data/ssm/2014-11-06/service-2.json index a3a8e511..5055e338 100644 --- a/botocore/data/ssm/2014-11-06/service-2.json +++ b/botocore/data/ssm/2014-11-06/service-2.json @@ -165,7 +165,7 @@ {"shape":"ResourceDataSyncAlreadyExistsException"}, {"shape":"ResourceDataSyncInvalidConfigurationException"} ], - "documentation":"

Creates a resource data sync configuration to a single bucket in Amazon S3. This is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data to the Amazon S3 bucket. To check the status of the sync, use the ListResourceDataSync operation.

By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. To view an example of a restrictive Amazon S3 bucket policy for Resource Data Sync, see Creating a Resource Data Sync.

" + "documentation":"

Creates a resource data sync configuration to a single bucket in Amazon S3. This is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data to the Amazon S3 bucket. To check the status of the sync, use the ListResourceDataSync operation.

By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. To view an example of a restrictive Amazon S3 bucket policy for Resource Data Sync, see Configuring Resource Data Sync for Inventory.

" }, "DeleteActivation":{ "name":"DeleteActivation", @@ -453,9 +453,10 @@ "errors":[ {"shape":"InvalidResourceId"}, {"shape":"DoesNotExistException"}, + {"shape":"UnsupportedOperatingSystem"}, {"shape":"InternalServerError"} ], - "documentation":"

Retrieves the current effective patches (the patch and the approval state) for the specified patch baseline.

" + "documentation":"

Retrieves the current effective patches (the patch and the approval state) for the specified patch baseline. Note that this API applies only to Windows patch baselines.

" }, "DescribeInstanceAssociationsStatus":{ "name":"DescribeInstanceAssociationsStatus", @@ -715,7 +716,7 @@ "errors":[ {"shape":"InternalServerError"} ], - "documentation":"

Retrieves the default patch baseline.

" + "documentation":"

Retrieves the default patch baseline. Note that Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.

" }, "GetDeployablePatchSnapshotForInstance":{ "name":"GetDeployablePatchSnapshotForInstance", @@ -726,9 +727,10 @@ "input":{"shape":"GetDeployablePatchSnapshotForInstanceRequest"}, "output":{"shape":"GetDeployablePatchSnapshotForInstanceResult"}, "errors":[ - {"shape":"InternalServerError"} + {"shape":"InternalServerError"}, + {"shape":"UnsupportedOperatingSystem"} ], - "documentation":"

Retrieves the current snapshot for the patch baseline the instance uses. This API is primarily used by the AWS-ApplyPatchBaseline Systems Manager document.

" + "documentation":"

Retrieves the current snapshot for the patch baseline the instance uses. This API is primarily used by the AWS-RunPatchBaseline Systems Manager document.

" }, "GetDocument":{ "name":"GetDocument", @@ -2519,6 +2521,10 @@ "type":"structure", "required":["Name"], "members":{ + "OperatingSystem":{ + "shape":"OperatingSystem", + "documentation":"

Defines the operating system the patch baseline applies to. Supported operating systems include WINDOWS, AMAZON_LINUX, UBUNTU, and REDHAT_ENTERPRISE_LINUX. The default value is WINDOWS.

" + }, "Name":{ "shape":"BaselineName", "documentation":"

The name of the patch baseline.

" @@ -2535,6 +2541,10 @@ "shape":"PatchIdList", "documentation":"

A list of explicitly approved patches for the baseline.

" }, + "ApprovedPatchesComplianceLevel":{ + "shape":"PatchComplianceLevel", + "documentation":"

Defines the compliance level for approved patches. This means that if an approved patch is reported as missing, this is the severity of the compliance violation. Valid compliance severity levels include the following: CRITICAL, HIGH, MEDIUM, LOW, INFORMATIONAL, UNSPECIFIED. The default value is UNSPECIFIED.
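Reviewer note: a sketch combining the two request fields added in this hunk; the baseline name is hypothetical, and only Name is required by the request shape.

    import botocore.session

    session = botocore.session.get_session()
    ssm = session.create_client('ssm', region_name='us-east-1')

    baseline = ssm.create_patch_baseline(
        Name='amazon-linux-baseline',    # hypothetical
        OperatingSystem='AMAZON_LINUX',
        ApprovedPatchesComplianceLevel='CRITICAL',
    )
    print(baseline['BaselineId'])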

" + }, "RejectedPatches":{ "shape":"PatchIdList", "documentation":"

A list of explicitly rejected patches for the baseline.

" @@ -3606,6 +3616,10 @@ "documentation":"

The maximum number of patch groups to return (per page).

", "box":true }, + "Filters":{ + "shape":"PatchOrchestratorFilterList", + "documentation":"

One or more filters. Use a filter to return a more specific list of results.

" + }, "NextToken":{ "shape":"NextToken", "documentation":"

The token for the next set of items to return. (You received this token from a previous call.)

" @@ -4145,6 +4159,10 @@ "GetDefaultPatchBaselineRequest":{ "type":"structure", "members":{ + "OperatingSystem":{ + "shape":"OperatingSystem", + "documentation":"

Returns the default patch baseline for the specified operating system.

" + } } }, "GetDefaultPatchBaselineResult":{ @@ -4153,6 +4171,10 @@ "BaselineId":{ "shape":"BaselineId", "documentation":"

The ID of the default patch baseline.

" + }, + "OperatingSystem":{ + "shape":"OperatingSystem", + "documentation":"

The operating system for the returned patch baseline.
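Reviewer note: a sketch of the per-operating-system lookup that the new request field enables.

    import botocore.session

    session = botocore.session.get_session()
    ssm = session.create_client('ssm', region_name='us-east-1')

    result = ssm.get_default_patch_baseline(OperatingSystem='UBUNTU')
    print(result['BaselineId'], result['OperatingSystem'])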

" } } }, @@ -4187,6 +4209,10 @@ "SnapshotDownloadUrl":{ "shape":"SnapshotDownloadUrl", "documentation":"

A pre-signed Amazon S3 URL that can be used to download the patch snapshot.

" + }, + "Product":{ + "shape":"Product", + "documentation":"

Returns the specific operating system (for example Windows Server 2012 or Amazon Linux 2015.09) on the instance for the specified patch snapshot.

" } } }, @@ -4607,6 +4633,10 @@ "PatchGroup":{ "shape":"PatchGroup", "documentation":"

The name of the patch group whose patch baseline should be retrieved.

" + }, + "OperatingSystem":{ + "shape":"OperatingSystem", + "documentation":"

Returns the operating system rule specified for patch groups using the patch baseline.

" } } }, @@ -4620,6 +4650,10 @@ "PatchGroup":{ "shape":"PatchGroup", "documentation":"

The name of the patch group.

" + }, + "OperatingSystem":{ + "shape":"OperatingSystem", + "documentation":"

The operating system rule specified for patch groups using the patch baseline.

" } } }, @@ -4644,6 +4678,10 @@ "shape":"BaselineName", "documentation":"

The name of the patch baseline.

" }, + "OperatingSystem":{ + "shape":"OperatingSystem", + "documentation":"

Returns the operating system specified for the patch baseline.

" + }, "GlobalFilters":{ "shape":"PatchFilterGroup", "documentation":"

A set of global filters used to exclude patches from the baseline.

" @@ -4656,6 +4694,10 @@ "shape":"PatchIdList", "documentation":"

A list of explicitly approved patches for the baseline.

" }, + "ApprovedPatchesComplianceLevel":{ + "shape":"PatchComplianceLevel", + "documentation":"

Returns the specified compliance severity level for approved patches in the patch baseline.

" + }, "RejectedPatches":{ "shape":"PatchIdList", "documentation":"

A list of explicitly rejected patches for the baseline.

" @@ -5070,11 +5112,11 @@ "documentation":"

The number of patches from the patch baseline that aren't applicable for the instance and hence aren't installed on the instance.

" }, "OperationStartTime":{ - "shape":"PatchOperationStartTime", + "shape":"DateTime", "documentation":"

The time the most recent patching operation was started on the instance.

" }, "OperationEndTime":{ - "shape":"PatchOperationEndTime", + "shape":"DateTime", "documentation":"

The time the most recent patching operation completed on the instance.

" }, "Operation":{ @@ -6570,6 +6612,15 @@ "Invocation" ] }, + "OperatingSystem":{ + "type":"string", + "enum":[ + "WINDOWS", + "AMAZON_LINUX", + "UBUNTU", + "REDHAT_ENTERPRISE_LINUX" + ] + }, "OwnerInformation":{ "type":"string", "max":128, @@ -6914,13 +6965,17 @@ "shape":"BaselineName", "documentation":"

The name of the patch baseline.

" }, + "OperatingSystem":{ + "shape":"OperatingSystem", + "documentation":"

Defines the operating system the patch baseline applies to. Supported operating systems include WINDOWS, AMAZON_LINUX, UBUNTU, and REDHAT_ENTERPRISE_LINUX. The default value is WINDOWS.

" + }, "BaselineDescription":{ "shape":"BaselineDescription", "documentation":"

The description of the patch baseline.

" }, "DefaultBaseline":{ "shape":"DefaultBaseline", - "documentation":"

Whether this is the default baseline.

" + "documentation":"

Whether this is the default baseline. Note that Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.

" } }, "documentation":"

Defines the basic information about a patch baseline.

" @@ -6952,7 +7007,7 @@ }, "KBId":{ "shape":"PatchKbNumber", - "documentation":"

The Microsoft Knowledge Base ID of the patch.

" + "documentation":"

The operating system-specific ID of the patch.

" }, "Classification":{ "shape":"PatchClassification", @@ -6967,8 +7022,8 @@ "documentation":"

The state of the patch on the instance (INSTALLED, INSTALLED_OTHER, MISSING, NOT_APPLICABLE or FAILED).

" }, "InstalledTime":{ - "shape":"PatchInstalledTime", - "documentation":"

The date/time the patch was installed on the instance.

" + "shape":"DateTime", + "documentation":"

The date/time the patch was installed on the instance. Note that not all operating systems provide this level of information.

" } }, "documentation":"

Information about the state of a patch on a particular instance as it relates to the patch baseline used to patch the instance.

" @@ -6987,6 +7042,17 @@ "FAILED" ] }, + "PatchComplianceLevel":{ + "type":"string", + "enum":[ + "CRITICAL", + "HIGH", + "MEDIUM", + "LOW", + "INFORMATIONAL", + "UNSPECIFIED" + ] + }, "PatchComplianceMaxResults":{ "type":"integer", "max":100, @@ -7039,7 +7105,10 @@ "PRODUCT", "CLASSIFICATION", "MSRC_SEVERITY", - "PATCH_ID" + "PATCH_ID", + "SECTION", + "PRIORITY", + "SEVERITY" ] }, "PatchFilterList":{ @@ -7089,7 +7158,8 @@ }, "PatchId":{ "type":"string", - "pattern":"(^KB[0-9]{1,7}$)|(^MS[0-9]{2}\\-[0-9]{3}$)" + "max":100, + "min":1 }, "PatchIdList":{ "type":"list", @@ -7099,7 +7169,6 @@ }, "PatchInstalledCount":{"type":"integer"}, "PatchInstalledOtherCount":{"type":"integer"}, - "PatchInstalledTime":{"type":"timestamp"}, "PatchKbNumber":{"type":"string"}, "PatchLanguage":{"type":"string"}, "PatchList":{ @@ -7110,8 +7179,6 @@ "PatchMsrcNumber":{"type":"string"}, "PatchMsrcSeverity":{"type":"string"}, "PatchNotApplicableCount":{"type":"integer"}, - "PatchOperationEndTime":{"type":"timestamp"}, - "PatchOperationStartTime":{"type":"timestamp"}, "PatchOperationType":{ "type":"string", "enum":[ @@ -7166,6 +7233,10 @@ "shape":"PatchFilterGroup", "documentation":"

The patch filter group that defines the criteria for the rule.

" }, + "ComplianceLevel":{ + "shape":"PatchComplianceLevel", + "documentation":"

A compliance severity level for all approved patches in a patch baseline. Valid compliance severity levels include the following: Unspecified, Critical, High, Medium, Low, and Informational.

" + }, "ApproveAfterDays":{ "shape":"ApproveAfterDays", "documentation":"

The number of days after the release date of each patch matched by the rule that the patch is marked as approved in the patch baseline.

", @@ -7199,6 +7270,10 @@ "shape":"PatchDeploymentStatus", "documentation":"

The approval status of a patch (APPROVED, PENDING_APPROVAL, EXPLICIT_APPROVED, EXPLICIT_REJECTED).

" }, + "ComplianceLevel":{ + "shape":"PatchComplianceLevel", + "documentation":"

The compliance severity level for a patch.

" + }, "ApprovalDate":{ "shape":"DateTime", "documentation":"

The date the patch was approved (or will be approved if the status is PENDING_APPROVAL).

" @@ -7230,6 +7305,7 @@ "locationName":"PlatformType" } }, + "Product":{"type":"string"}, "PutInventoryRequest":{ "type":"structure", "required":[ @@ -8036,6 +8112,14 @@ "documentation":"

Inventory item type schema version has to match supported versions in the service. Check output of GetInventorySchema to see the available schema version for each type.

", "exception":true }, + "UnsupportedOperatingSystem":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The operating system you specified is not supported, or the operation is not supported for the operating system. Valid operating systems include: Windows, AmazonLinux, RedhatEnterpriseLinux, and Ubuntu.

", + "exception":true + }, "UnsupportedParameterType":{ "type":"structure", "members":{ @@ -8299,6 +8383,10 @@ "shape":"PatchIdList", "documentation":"

A list of explicitly approved patches for the baseline.

" }, + "ApprovedPatchesComplianceLevel":{ + "shape":"PatchComplianceLevel", + "documentation":"

Assigns a new compliance severity level to an existing patch baseline.

" + }, "RejectedPatches":{ "shape":"PatchIdList", "documentation":"

A list of explicitly rejected patches for the baseline.

" @@ -8320,6 +8408,10 @@ "shape":"BaselineName", "documentation":"

The name of the patch baseline.

" }, + "OperatingSystem":{ + "shape":"OperatingSystem", + "documentation":"

The operating system rule used by the updated patch baseline.

" + }, "GlobalFilters":{ "shape":"PatchFilterGroup", "documentation":"

A set of global filters used to exclude patches from the baseline.

" @@ -8332,6 +8424,10 @@ "shape":"PatchIdList", "documentation":"

A list of explicitly approved patches for the baseline.

" }, + "ApprovedPatchesComplianceLevel":{ + "shape":"PatchComplianceLevel", + "documentation":"

The compliance severity level assigned to the patch baseline after the update completed.

" + }, "RejectedPatches":{ "shape":"PatchIdList", "documentation":"

A list of explicitly rejected patches for the baseline.

" diff --git a/botocore/docs/sharedexample.py b/botocore/docs/sharedexample.py index 117028d9..cb08d1f4 100644 --- a/botocore/docs/sharedexample.py +++ b/botocore/docs/sharedexample.py @@ -178,7 +178,7 @@ class SharedExampleDocumenter(object): section.write("datetime(%s)," % datetime_str) def _get_comment(self, path, comments): - key = re.sub('^\.', '', ''.join(path)) + key = re.sub(r'^\.', '', ''.join(path)) if comments and key in comments: return '# ' + comments[key] else: diff --git a/botocore/endpoint.py b/botocore/endpoint.py index 228d43d4..64ccfbd2 100644 --- a/botocore/endpoint.py +++ b/botocore/endpoint.py @@ -266,15 +266,18 @@ class EndpointCreator(object): def create_endpoint(self, service_model, region_name, endpoint_url, verify=None, response_parser_factory=None, timeout=DEFAULT_TIMEOUT, - max_pool_connections=MAX_POOL_CONNECTIONS): + max_pool_connections=MAX_POOL_CONNECTIONS, + proxies=None): if not is_valid_endpoint_url(endpoint_url): raise ValueError("Invalid endpoint: %s" % endpoint_url) + if proxies is None: + proxies = self._get_proxies(endpoint_url) return Endpoint( endpoint_url, endpoint_prefix=service_model.endpoint_prefix, event_emitter=self._event_emitter, - proxies=self._get_proxies(endpoint_url), + proxies=proxies, verify=self._get_verify_value(verify), timeout=timeout, max_pool_connections=max_pool_connections, diff --git a/botocore/handlers.py b/botocore/handlers.py index e914543e..bced6df2 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -54,7 +54,7 @@ REGISTER_LAST = object() # to be as long as 255 characters, and bucket names can contain any # combination of uppercase letters, lowercase letters, numbers, periods # (.), hyphens (-), and underscores (_). -VALID_BUCKET = re.compile('^[a-zA-Z0-9.\-_]{1,255}$') +VALID_BUCKET = re.compile(r'^[a-zA-Z0-9.\-_]{1,255}$') VERSION_ID_SUFFIX = re.compile(r'\?versionId=[^\s]+$') @@ -671,7 +671,7 @@ def check_openssl_supports_tls_version_1_2(**kwargs): import ssl try: openssl_version_tuple = ssl.OPENSSL_VERSION_INFO - if openssl_version_tuple[0] < 1 or openssl_version_tuple[2] < 1: + if openssl_version_tuple < (1, 0, 1): warnings.warn( 'Currently installed openssl version: %s does not ' 'support TLS 1.2, which is required for use of iot-data. ' diff --git a/botocore/utils.py b/botocore/utils.py index ab2a86fa..1d9185c3 100644 --- a/botocore/utils.py +++ b/botocore/utils.py @@ -39,7 +39,7 @@ METADATA_SECURITY_CREDENTIALS_URL = ( # These are chars that do not need to be urlencoded. # Based on rfc2986, section 2.3 SAFE_CHARS = '-._~' -LABEL_RE = re.compile('[a-z0-9][a-z0-9\-]*[a-z0-9]') +LABEL_RE = re.compile(r'[a-z0-9][a-z0-9\-]*[a-z0-9]') RESTRICTED_REGIONS = [ 'us-gov-west-1', 'fips-us-gov-west-1', @@ -79,7 +79,7 @@ def get_service_module_name(service_model): 'serviceFullName', service_model.service_name)) name = name.replace('Amazon', '') name = name.replace('AWS', '') - name = re.sub('\W+', '', name) + name = re.sub(r'\W+', '', name) return name @@ -637,7 +637,7 @@ def is_valid_endpoint_url(endpoint_url): if hostname[-1] == ".": hostname = hostname[:-1] allowed = re.compile( - "^((?!-)[A-Z\d-]{1,63}(?