New upstream version 1.5.80

TANIGUCHI Takaki 2017-07-10 16:39:11 +09:00
parent 9d41c57c3f
commit d01fc80659
33 changed files with 1027 additions and 124 deletions

View file

@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: botocore
Version: 1.5.78
Version: 1.5.80
Summary: Low-level, data-driven core of boto 3.
Home-page: https://github.com/boto/botocore
Author: Amazon Web Services

View file

@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: botocore
Version: 1.5.78
Version: 1.5.80
Summary: Low-level, data-driven core of boto 3.
Home-page: https://github.com/boto/botocore
Author: Amazon Web Services

View file

@ -172,6 +172,7 @@ botocore/data/dms/2016-01-01/examples-1.json
botocore/data/dms/2016-01-01/paginators-1.json
botocore/data/dms/2016-01-01/service-2.json
botocore/data/ds/2015-04-16/examples-1.json
botocore/data/ds/2015-04-16/paginators-1.json
botocore/data/ds/2015-04-16/service-2.json
botocore/data/dynamodb/2012-08-10/examples-1.json
botocore/data/dynamodb/2012-08-10/paginators-1.json
@ -604,6 +605,7 @@ tests/functional/test_regions.py
tests/functional/test_s3.py
tests/functional/test_session.py
tests/functional/test_six_imports.py
tests/functional/test_six_threading.py
tests/functional/test_stub.py
tests/functional/test_waiter_config.py
tests/functional/docs/__init__.py

View file

@ -16,7 +16,7 @@ import os
import re
import logging
__version__ = '1.5.78'
__version__ = '1.5.80'
class NullHandler(logging.Handler):

View file

@ -75,6 +75,7 @@ class ClientArgsCreator(object):
endpoint_url=endpoint_config['endpoint_url'], verify=verify,
response_parser_factory=self._response_parser_factory,
max_pool_connections=new_config.max_pool_connections,
proxies=new_config.proxies,
timeout=(new_config.connect_timeout, new_config.read_timeout))
serializer = botocore.serialize.create_serializer(
@ -129,6 +130,7 @@ class ClientArgsCreator(object):
connect_timeout=client_config.connect_timeout,
read_timeout=client_config.read_timeout,
max_pool_connections=client_config.max_pool_connections,
proxies=client_config.proxies,
)
s3_config = self.compute_s3_config(scoped_config,
client_config)

View file

@ -110,7 +110,7 @@ class SigV2Auth(BaseSigner):
params = request.data
else:
# GET
params = request.param
params = request.params
params['AWSAccessKeyId'] = self.credentials.access_key
params['SignatureVersion'] = '2'
params['SignatureMethod'] = 'HmacSHA256'

View file

@ -165,7 +165,7 @@ if sys.version_info[:2] == (2, 6):
'ignore',
message="Certificate has no.*subjectAltName.*",
category=exceptions.SecurityWarning,
module=".*urllib3\.connection")
module=r".*urllib3\.connection")
else:
import xml.etree.cElementTree
XMLParseError = xml.etree.cElementTree.ParseError
@ -183,7 +183,7 @@ def filter_ssl_warnings():
'ignore',
message="A true SSLContext object is not available.*",
category=exceptions.InsecurePlatformWarning,
module=".*urllib3\.util\.ssl_")
module=r".*urllib3\.util\.ssl_")
filter_ssl_san_warnings()

View file

@ -54,6 +54,12 @@ class Config(object):
keep in a connection pool. If this value is not set, the default
value of 10 is used.
:type proxies: dict
:param proxies: A dictionary of proxy servers to use by protocol or
endpoint, e.g.:
{'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.
The proxies are used on each request.
:type s3: dict
:param s3: A dictionary of s3 specific configurations.
Valid keys are:
@ -92,6 +98,7 @@ class Config(object):
('read_timeout', DEFAULT_TIMEOUT),
('parameter_validation', True),
('max_pool_connections', MAX_POOL_CONNECTIONS),
('proxies', None),
('s3', None)
])

View file

@ -23,6 +23,24 @@
],
"documentation":"<p>Deletes the specified alarms. In the event of an error, no alarms are deleted.</p>"
},
"DeleteDashboards":{
"name":"DeleteDashboards",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"DeleteDashboardsInput"},
"output":{
"shape":"DeleteDashboardsOutput",
"resultWrapper":"DeleteDashboardsResult"
},
"errors":[
{"shape":"InvalidParameterValueException"},
{"shape":"DashboardNotFoundError"},
{"shape":"InternalServiceFault"}
],
"documentation":"<p>Deletes all dashboards that you specify. You may specify up to 100 dashboards to delete. If there is an error during this call, no dashboards are deleted.</p>"
},
"DescribeAlarmHistory":{
"name":"DescribeAlarmHistory",
"http":{
@ -37,7 +55,7 @@
"errors":[
{"shape":"InvalidNextToken"}
],
"documentation":"<p>Retrieves the history for the specified alarm. You can filter the results by date range or item type. If an alarm name is not specified, the histories for all alarms are returned.</p> <p>Note that Amazon CloudWatch retains the history of an alarm even if you delete the alarm.</p>"
"documentation":"<p>Retrieves the history for the specified alarm. You can filter the results by date range or item type. If an alarm name is not specified, the histories for all alarms are returned.</p> <p>CloudWatch retains the history of an alarm even if you delete the alarm.</p>"
},
"DescribeAlarms":{
"name":"DescribeAlarms",
@ -66,7 +84,7 @@
"shape":"DescribeAlarmsForMetricOutput",
"resultWrapper":"DescribeAlarmsForMetricResult"
},
"documentation":"<p>Retrieves the alarms for the specified metric. Specify a statistic, period, or unit to filter the results.</p>"
"documentation":"<p>Retrieves the alarms for the specified metric. To filter the results, specify a statistic, period, or unit.</p>"
},
"DisableAlarmActions":{
"name":"DisableAlarmActions",
@ -86,6 +104,24 @@
"input":{"shape":"EnableAlarmActionsInput"},
"documentation":"<p>Enables the actions for the specified alarms.</p>"
},
"GetDashboard":{
"name":"GetDashboard",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"GetDashboardInput"},
"output":{
"shape":"GetDashboardOutput",
"resultWrapper":"GetDashboardResult"
},
"errors":[
{"shape":"InvalidParameterValueException"},
{"shape":"DashboardNotFoundError"},
{"shape":"InternalServiceFault"}
],
"documentation":"<p>Displays the details of the dashboard that you specify.</p> <p>To copy an existing dashboard, use <code>GetDashboard</code>, and then use the data returned within <code>DashboardBody</code> as the template for the new dashboard when you call <code>PutDashboard</code> to create the copy.</p>"
},
"GetMetricStatistics":{
"name":"GetMetricStatistics",
"http":{
@ -103,7 +139,24 @@
{"shape":"InvalidParameterCombinationException"},
{"shape":"InternalServiceFault"}
],
"documentation":"<p>Gets statistics for the specified metric.</p> <p>Amazon CloudWatch retains metric data as follows:</p> <ul> <li> <p>Data points with a period of 60 seconds (1 minute) are available for 15 days</p> </li> <li> <p>Data points with a period of 300 seconds (5 minute) are available for 63 days</p> </li> <li> <p>Data points with a period of 3600 seconds (1 hour) are available for 455 days (15 months)</p> </li> </ul> <p>Note that CloudWatch started retaining 5-minute and 1-hour metric data as of 9 July 2016.</p> <p>The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, Amazon CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. A period can be as short as one minute (60 seconds). Note that data points are not returned in chronological order.</p> <p>Amazon CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, Amazon CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.</p> <p>CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you cannot retrieve percentile statistics for this data unless one of the following conditions is true:</p> <ul> <li> <p>The SampleCount of the statistic set is 1</p> </li> <li> <p>The Min and the Max of the statistic set are equal</p> </li> </ul> <p>For a list of metrics and dimensions supported by AWS services, see the <a href=\"http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CW_Support_For_AWS.html\">Amazon CloudWatch Metrics and Dimensions Reference</a> in the <i>Amazon CloudWatch User Guide</i>.</p>"
"documentation":"<p>Gets statistics for the specified metric.</p> <p>Amazon CloudWatch retains metric data as follows:</p> <ul> <li> <p>Data points with a period of 60 seconds (1-minute) are available for 15 days</p> </li> <li> <p>Data points with a period of 300 seconds (5-minute) are available for 63 days</p> </li> <li> <p>Data points with a period of 3600 seconds (1 hour) are available for 455 days (15 months)</p> </li> </ul> <p>CloudWatch started retaining 5-minute and 1-hour metric data as of July 9, 2016.</p> <p>The maximum number of data points returned from a single call is 1,440. If you request more than 1,440 data points, CloudWatch returns an error. To reduce the number of data points, you can narrow the specified time range and make multiple requests across adjacent time ranges, or you can increase the specified period. A period can be as short as one minute (60 seconds). Data points are not returned in chronological order.</p> <p>CloudWatch aggregates data points based on the length of the period that you specify. For example, if you request statistics with a one-hour period, CloudWatch aggregates all data points with time stamps that fall within each one-hour period. Therefore, the number of values aggregated by CloudWatch is larger than the number of data points returned.</p> <p>CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:</p> <ul> <li> <p>The SampleCount value of the statistic set is 1.</p> </li> <li> <p>The Min and the Max values of the statistic set are equal.</p> </li> </ul> <p>For a list of metrics and dimensions supported by AWS services, see the <a href=\"http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/CW_Support_For_AWS.html\">Amazon CloudWatch Metrics and Dimensions Reference</a> in the <i>Amazon CloudWatch User Guide</i>.</p>"
},
"ListDashboards":{
"name":"ListDashboards",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"ListDashboardsInput"},
"output":{
"shape":"ListDashboardsOutput",
"resultWrapper":"ListDashboardsResult"
},
"errors":[
{"shape":"InvalidParameterValueException"},
{"shape":"InternalServiceFault"}
],
"documentation":"<p>Returns a list of the dashboards for your account. If you include <code>DashboardNamePrefix</code>, only those dashboards with names starting with the prefix are listed. Otherwise, all dashboards in your account are listed. </p>"
},
"ListMetrics":{
"name":"ListMetrics",
@ -122,6 +175,23 @@
],
"documentation":"<p>List the specified metrics. You can use the returned metrics with <a>GetMetricStatistics</a> to obtain statistical data.</p> <p>Up to 500 results are returned for any one call. To retrieve additional results, use the returned token with subsequent calls.</p> <p>After you create a metric, allow up to fifteen minutes before the metric appears. Statistics about the metric, however, are available sooner using <a>GetMetricStatistics</a>.</p>"
},
"PutDashboard":{
"name":"PutDashboard",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"PutDashboardInput"},
"output":{
"shape":"PutDashboardOutput",
"resultWrapper":"PutDashboardResult"
},
"errors":[
{"shape":"DashboardInvalidInputError"},
{"shape":"InternalServiceFault"}
],
"documentation":"<p>Creates a dashboard if it does not already exist, or updates an existing dashboard. If you update a dashboard, the entire contents are replaced with what you specify here.</p> <p>You can have up to 500 dashboards per account. All dashboards in your account are global, not region-specific.</p> <p>To copy an existing dashboard, use <code>GetDashboard</code>, and then use the data returned within <code>DashboardBody</code> as the template for the new dashboard when you call <code>PutDashboard</code> to create the copy.</p>"
},
"PutMetricAlarm":{
"name":"PutMetricAlarm",
"http":{
@ -132,7 +202,7 @@
"errors":[
{"shape":"LimitExceededFault"}
],
"documentation":"<p>Creates or updates an alarm and associates it with the specified metric. Optionally, this operation can associate one or more Amazon SNS resources with the alarm.</p> <p>When this operation creates an alarm, the alarm state is immediately set to <code>INSUFFICIENT_DATA</code>. The alarm is evaluated and its state is set appropriately. Any actions associated with the state are then executed.</p> <p>When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.</p> <p>If you are an AWS Identity and Access Management (IAM) user, you must have Amazon EC2 permissions for some operations:</p> <ul> <li> <p> <code>ec2:DescribeInstanceStatus</code> and <code>ec2:DescribeInstances</code> for all alarms on EC2 instance status metrics</p> </li> <li> <p> <code>ec2:StopInstances</code> for alarms with stop actions</p> </li> <li> <p> <code>ec2:TerminateInstances</code> for alarms with terminate actions</p> </li> <li> <p> <code>ec2:DescribeInstanceRecoveryAttribute</code> and <code>ec2:RecoverInstances</code> for alarms with recover actions</p> </li> </ul> <p>If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions won't be performed. However, if you are later granted the required permissions, the alarm actions that you created earlier will be performed.</p> <p>If you are using an IAM role (for example, an Amazon EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.</p> <p>If you are using temporary security credentials granted using the AWS Security Token Service (AWS STS), you cannot stop or terminate an Amazon EC2 instance using alarm actions.</p> <p>Note that you must create at least one stop, terminate, or reboot alarm using the Amazon EC2 or CloudWatch console to create the <b>EC2ActionsAccess</b> IAM role. After this IAM role is created, you can create stop, terminate, or reboot alarms using a command-line interface or an API.</p>"
"documentation":"<p>Creates or updates an alarm and associates it with the specified metric. Optionally, this operation can associate one or more Amazon SNS resources with the alarm.</p> <p>When this operation creates an alarm, the alarm state is immediately set to <code>INSUFFICIENT_DATA</code>. The alarm is evaluated and its state is set appropriately. Any actions associated with the state are then executed.</p> <p>When you update an existing alarm, its state is left unchanged, but the update completely overwrites the previous configuration of the alarm.</p> <p>If you are an IAM user, you must have Amazon EC2 permissions for some operations:</p> <ul> <li> <p> <code>ec2:DescribeInstanceStatus</code> and <code>ec2:DescribeInstances</code> for all alarms on EC2 instance status metrics</p> </li> <li> <p> <code>ec2:StopInstances</code> for alarms with stop actions</p> </li> <li> <p> <code>ec2:TerminateInstances</code> for alarms with terminate actions</p> </li> <li> <p> <code>ec2:DescribeInstanceRecoveryAttribute</code> and <code>ec2:RecoverInstances</code> for alarms with recover actions</p> </li> </ul> <p>If you have read/write permissions for Amazon CloudWatch but not for Amazon EC2, you can still create an alarm, but the stop or terminate actions are not performed. However, if you are later granted the required permissions, the alarm actions that you created earlier are performed.</p> <p>If you are using an IAM role (for example, an EC2 instance profile), you cannot stop or terminate the instance using alarm actions. However, you can still see the alarm state and perform any other actions such as Amazon SNS notifications or Auto Scaling policies.</p> <p>If you are using temporary security credentials granted using AWS STS, you cannot stop or terminate an EC2 instance using alarm actions.</p> <p>You must create at least one stop, terminate, or reboot alarm using either the Amazon EC2 or CloudWatch consoles to create the <b>EC2ActionsAccess</b> IAM role. After this IAM role is created, you can create stop, terminate, or reboot alarms using a command-line interface or API.</p>"
},
"PutMetricData":{
"name":"PutMetricData",
@ -147,7 +217,7 @@
{"shape":"InvalidParameterCombinationException"},
{"shape":"InternalServiceFault"}
],
"documentation":"<p>Publishes metric data points to Amazon CloudWatch. Amazon CloudWatch associates the data points with the specified metric. If the specified metric does not exist, Amazon CloudWatch creates the metric. When Amazon CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to <a>ListMetrics</a>.</p> <p>Each <code>PutMetricData</code> request is limited to 40 KB in size for HTTP POST requests.</p> <p>Although the <code>Value</code> parameter accepts numbers of type <code>Double</code>, Amazon CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (e.g., NaN, +Infinity, -Infinity) are not supported.</p> <p>You can use up to 10 dimensions per metric to further clarify what data the metric collects. For more information on specifying dimensions, see <a href=\"http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html\">Publishing Metrics</a> in the <i>Amazon CloudWatch User Guide</i>.</p> <p>Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for <a>GetMetricStatistics</a> from the time they are submitted.</p> <p>CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you cannot retrieve percentile statistics for this data unless one of the following conditions is true:</p> <ul> <li> <p>The SampleCount of the statistic set is 1</p> </li> <li> <p>The Min and the Max of the statistic set are equal</p> </li> </ul>"
"documentation":"<p>Publishes metric data points to Amazon CloudWatch. CloudWatch associates the data points with the specified metric. If the specified metric does not exist, CloudWatch creates the metric. When CloudWatch creates a metric, it can take up to fifteen minutes for the metric to appear in calls to <a>ListMetrics</a>.</p> <p>Each <code>PutMetricData</code> request is limited to 40 KB in size for HTTP POST requests.</p> <p>Although the <code>Value</code> parameter accepts numbers of type <code>Double</code>, CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.</p> <p>You can use up to 10 dimensions per metric to further clarify what data the metric collects. For more information about specifying dimensions, see <a href=\"http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html\">Publishing Metrics</a> in the <i>Amazon CloudWatch User Guide</i>.</p> <p>Data points with time stamps from 24 hours ago or longer can take at least 48 hours to become available for <a>GetMetricStatistics</a> from the time they are submitted.</p> <p>CloudWatch needs raw data points to calculate percentile statistics. If you publish data using a statistic set instead, you can only retrieve percentile statistics for this data if one of the following conditions is true:</p> <ul> <li> <p>The SampleCount value of the statistic set is 1</p> </li> <li> <p>The Min and the Max values of the statistic set are equal</p> </li> </ul>"
},
"SetAlarmState":{
"name":"SetAlarmState",
@ -160,7 +230,7 @@
{"shape":"ResourceNotFound"},
{"shape":"InvalidFormatFault"}
],
"documentation":"<p>Temporarily sets the state of an alarm for testing purposes. When the updated state differs from the previous value, the action configured for the appropriate state is invoked. For example, if your alarm is configured to send an Amazon SNS message when an alarm is triggered, temporarily changing the alarm state to <code>ALARM</code> sends an Amazon SNS message. The alarm returns to its actual state (often within seconds). Because the alarm state change happens very quickly, it is typically only visible in the alarm's <b>History</b> tab in the Amazon CloudWatch console or through <a>DescribeAlarmHistory</a>.</p>"
"documentation":"<p>Temporarily sets the state of an alarm for testing purposes. When the updated state differs from the previous value, the action configured for the appropriate state is invoked. For example, if your alarm is configured to send an Amazon SNS message when an alarm is triggered, temporarily changing the alarm state to <code>ALARM</code> sends an SNS message. The alarm returns to its actual state (often within seconds). Because the alarm state change happens quickly, it is typically only visible in the alarm's <b>History</b> tab in the Amazon CloudWatch console or through <a>DescribeAlarmHistory</a>.</p>"
}
},
"shapes":{
@ -235,6 +305,87 @@
"LessThanOrEqualToThreshold"
]
},
"DashboardArn":{"type":"string"},
"DashboardBody":{"type":"string"},
"DashboardEntries":{
"type":"list",
"member":{"shape":"DashboardEntry"}
},
"DashboardEntry":{
"type":"structure",
"members":{
"DashboardName":{
"shape":"DashboardName",
"documentation":"<p>The name of the dashboard.</p>"
},
"DashboardArn":{
"shape":"DashboardArn",
"documentation":"<p>The Amazon Resource Name (ARN) of the dashboard.</p>"
},
"LastModified":{
"shape":"LastModified",
"documentation":"<p>The time stamp of when the dashboard was last modified, either by an API call or through the console. This number is expressed as the number of milliseconds since Jan 1, 1970 00:00:00 UTC.</p>"
},
"Size":{
"shape":"Size",
"documentation":"<p>The size of the dashboard, in bytes.</p>"
}
},
"documentation":"<p>Represents a specific dashboard.</p>"
},
"DashboardErrorMessage":{"type":"string"},
"DashboardInvalidInputError":{
"type":"structure",
"members":{
"message":{"shape":"DashboardErrorMessage"},
"dashboardValidationMessages":{"shape":"DashboardValidationMessages"}
},
"documentation":"<p>Some part of the dashboard data is invalid.</p>",
"error":{
"code":"InvalidParameterInput",
"httpStatusCode":400,
"senderFault":true
},
"exception":true
},
"DashboardName":{"type":"string"},
"DashboardNamePrefix":{"type":"string"},
"DashboardNames":{
"type":"list",
"member":{"shape":"DashboardName"}
},
"DashboardNotFoundError":{
"type":"structure",
"members":{
"message":{"shape":"DashboardErrorMessage"}
},
"documentation":"<p>The specified dashboard does not exist.</p>",
"error":{
"code":"ResourceNotFound",
"httpStatusCode":404,
"senderFault":true
},
"exception":true
},
"DashboardValidationMessage":{
"type":"structure",
"members":{
"DataPath":{
"shape":"DataPath",
"documentation":"<p>The data path related to the message.</p>"
},
"Message":{
"shape":"Message",
"documentation":"<p>A message describing the error or warning.</p>"
}
},
"documentation":"<p>An error or warning for the operation.</p>"
},
"DashboardValidationMessages":{
"type":"list",
"member":{"shape":"DashboardValidationMessage"}
},
"DataPath":{"type":"string"},
"Datapoint":{
"type":"structure",
"members":{
@ -271,7 +422,7 @@
"documentation":"<p>The percentile statistic for the data point.</p>"
}
},
"documentation":"<p>Encapsulates the statistical data that Amazon CloudWatch computes from metric data.</p>",
"documentation":"<p>Encapsulates the statistical data that CloudWatch computes from metric data.</p>",
"xmlOrder":[
"Timestamp",
"SampleCount",
@ -303,6 +454,20 @@
}
}
},
"DeleteDashboardsInput":{
"type":"structure",
"members":{
"DashboardNames":{
"shape":"DashboardNames",
"documentation":"<p>The dashboards to be deleted.</p>"
}
}
},
"DeleteDashboardsOutput":{
"type":"structure",
"members":{
}
},
"DescribeAlarmHistoryInput":{
"type":"structure",
"members":{
@ -400,7 +565,7 @@
},
"AlarmNamePrefix":{
"shape":"AlarmNamePrefix",
"documentation":"<p>The alarm name prefix. You cannot specify <code>AlarmNames</code> if this parameter is specified.</p>"
"documentation":"<p>The alarm name prefix. If this parameter is specified, you cannot specify <code>AlarmNames</code>.</p>"
},
"StateValue":{
"shape":"StateValue",
@ -535,6 +700,32 @@
"min":1
},
"FaultDescription":{"type":"string"},
"GetDashboardInput":{
"type":"structure",
"members":{
"DashboardName":{
"shape":"DashboardName",
"documentation":"<p>The name of the dashboard to be described.</p>"
}
}
},
"GetDashboardOutput":{
"type":"structure",
"members":{
"DashboardArn":{
"shape":"DashboardArn",
"documentation":"<p>The Amazon Resource Name (ARN) of the dashboard.</p>"
},
"DashboardBody":{
"shape":"DashboardBody",
"documentation":"<p>The detailed information about the dashboard, including what widgets are included and their location on the dashboard. For more information about the <code>DashboardBody</code> syntax, see <a>CloudWatch-Dashboard-Body-Structure</a>. </p>"
},
"DashboardName":{
"shape":"DashboardName",
"documentation":"<p>The name of the dashboard.</p>"
}
}
},
"GetMetricStatisticsInput":{
"type":"structure",
"required":[
@ -555,27 +746,27 @@
},
"Dimensions":{
"shape":"Dimensions",
"documentation":"<p>The dimensions. If the metric contains multiple dimensions, you must include a value for each dimension. CloudWatch treats each unique combination of dimensions as a separate metric. You can't retrieve statistics using combinations of dimensions that were not specially published. You must specify the same dimensions that were used when the metrics were created. For an example, see <a href=\"http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#dimension-combinations\">Dimension Combinations</a> in the <i>Amazon CloudWatch User Guide</i>. For more information on specifying dimensions, see <a href=\"http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html\">Publishing Metrics</a> in the <i>Amazon CloudWatch User Guide</i>.</p>"
"documentation":"<p>The dimensions. If the metric contains multiple dimensions, you must include a value for each dimension. CloudWatch treats each unique combination of dimensions as a separate metric. If a specific combination of dimensions was not published, you can't retrieve statistics for it. You must specify the same dimensions that were used when the metrics were created. For an example, see <a href=\"http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html#dimension-combinations\">Dimension Combinations</a> in the <i>Amazon CloudWatch User Guide</i>. For more information about specifying dimensions, see <a href=\"http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html\">Publishing Metrics</a> in the <i>Amazon CloudWatch User Guide</i>.</p>"
},
"StartTime":{
"shape":"Timestamp",
"documentation":"<p>The time stamp that determines the first data point to return. Note that start times are evaluated relative to the time that CloudWatch receives the request.</p> <p>The value specified is inclusive; results include data points with the specified time stamp. The time stamp must be in ISO 8601 UTC format (for example, 2016-10-03T23:00:00Z).</p> <p>CloudWatch rounds the specified time stamp as follows:</p> <ul> <li> <p>Start time less than 15 days ago - Round down to the nearest whole minute. For example, 12:32:34 is rounded down to 12:32:00.</p> </li> <li> <p>Start time between 15 and 63 days ago - Round down to the nearest 5-minute clock interval. For example, 12:32:34 is rounded down to 12:30:00.</p> </li> <li> <p>Start time greater than 63 days ago - Round down to the nearest 1-hour clock interval. For example, 12:32:34 is rounded down to 12:00:00.</p> </li> </ul>"
"documentation":"<p>The time stamp that determines the first data point to return. Start times are evaluated relative to the time that CloudWatch receives the request.</p> <p>The value specified is inclusive; results include data points with the specified time stamp. The time stamp must be in ISO 8601 UTC format (for example, 2016-10-03T23:00:00Z).</p> <p>CloudWatch rounds the specified time stamp as follows:</p> <ul> <li> <p>Start time less than 15 days ago - Round down to the nearest whole minute. For example, 12:32:34 is rounded down to 12:32:00.</p> </li> <li> <p>Start time between 15 and 63 days ago - Round down to the nearest 5-minute clock interval. For example, 12:32:34 is rounded down to 12:30:00.</p> </li> <li> <p>Start time greater than 63 days ago - Round down to the nearest 1-hour clock interval. For example, 12:32:34 is rounded down to 12:00:00.</p> </li> </ul>"
},
"EndTime":{
"shape":"Timestamp",
"documentation":"<p>The time stamp that determines the last data point to return.</p> <p>The value specified is exclusive; results will include data points up to the specified time stamp. The time stamp must be in ISO 8601 UTC format (for example, 2016-10-10T23:00:00Z).</p>"
"documentation":"<p>The time stamp that determines the last data point to return.</p> <p>The value specified is exclusive; results include data points up to the specified time stamp. The time stamp must be in ISO 8601 UTC format (for example, 2016-10-10T23:00:00Z).</p>"
},
"Period":{
"shape":"Period",
"documentation":"<p>The granularity, in seconds, of the returned data points. A period can be as short as one minute (60 seconds) and must be a multiple of 60. The default value is 60.</p> <p>If the <code>StartTime</code> parameter specifies a time stamp that is greater than 15 days ago, you must specify the period as follows or no data points in that time range is returned:</p> <ul> <li> <p>Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 minutes).</p> </li> <li> <p>Start time greater than 63 days ago - Use a multiple of 3600 seconds (1 hour).</p> </li> </ul>"
"documentation":"<p>The granularity, in seconds, of the returned data points. A period can be as short as one minute (60 seconds) and must be a multiple of 60. </p> <p>If the <code>StartTime</code> parameter specifies a time stamp that is greater than 15 days ago, you must specify the period as follows or no data points in that time range is returned:</p> <ul> <li> <p>Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 minutes).</p> </li> <li> <p>Start time greater than 63 days ago - Use a multiple of 3600 seconds (1 hour).</p> </li> </ul>"
},
"Statistics":{
"shape":"Statistics",
"documentation":"<p>The metric statistics, other than percentile. For percentile statistics, use <code>ExtendedStatistic</code>.</p>"
"documentation":"<p>The metric statistics, other than percentile. For percentile statistics, use <code>ExtendedStatistics</code>. When calling <code>GetMetricStatistics</code>, you must specify either <code>Statistics</code> or <code>ExtendedStatistics</code>, but not both.</p>"
},
"ExtendedStatistics":{
"shape":"ExtendedStatistics",
"documentation":"<p>The percentile statistics. Specify values between p0.0 and p100.</p>"
"documentation":"<p>The percentile statistics. Specify values between p0.0 and p100. When calling <code>GetMetricStatistics</code>, you must specify either <code>Statistics</code> or <code>ExtendedStatistics</code>, but not both.</p>"
},
"Unit":{
"shape":"StandardUnit",
@ -670,7 +861,7 @@
"documentation":"<p/>"
}
},
"documentation":"<p>Parameters that cannot be used together were used together.</p>",
"documentation":"<p>Parameters were used together that cannot be used together.</p>",
"error":{
"code":"InvalidParameterCombination",
"httpStatusCode":400,
@ -694,6 +885,7 @@
},
"exception":true
},
"LastModified":{"type":"timestamp"},
"LimitExceededFault":{
"type":"structure",
"members":{
@ -710,6 +902,32 @@
},
"exception":true
},
"ListDashboardsInput":{
"type":"structure",
"members":{
"DashboardNamePrefix":{
"shape":"DashboardNamePrefix",
"documentation":"<p>If you specify this parameter, only the dashboards with names starting with the specified string are listed. The maximum length is 255, and valid characters are A-Z, a-z, 0-9, \".\", \"-\", and \"_\". </p>"
},
"NextToken":{
"shape":"NextToken",
"documentation":"<p>The token returned by a previous call to indicate that there is more data available.</p>"
}
}
},
"ListDashboardsOutput":{
"type":"structure",
"members":{
"DashboardEntries":{
"shape":"DashboardEntries",
"documentation":"<p>The list of matching dashboards.</p>"
},
"NextToken":{
"shape":"NextToken",
"documentation":"<p>The token that marks the start of the next batch of returned results.</p>"
}
}
},
"ListMetricsInput":{
"type":"structure",
"members":{
@ -753,6 +971,7 @@
"max":100,
"min":1
},
"Message":{"type":"string"},
"Metric":{
"type":"structure",
"members":{
@ -867,8 +1086,14 @@
"shape":"ComparisonOperator",
"documentation":"<p>The arithmetic operation to use when comparing the specified statistic and threshold. The specified statistic value is used as the first operand.</p>"
},
"TreatMissingData":{"shape":"TreatMissingData"},
"EvaluateLowSampleCountPercentile":{"shape":"EvaluateLowSampleCountPercentile"}
"TreatMissingData":{
"shape":"TreatMissingData",
"documentation":"<p>Sets how this alarm is to handle missing data points. If this parameter is omitted, the default behavior of <code>missing</code> is used.</p>"
},
"EvaluateLowSampleCountPercentile":{
"shape":"EvaluateLowSampleCountPercentile",
"documentation":"<p>Used only for alarms based on percentiles. If <code>ignore</code>, the alarm state does not change during periods with too few data points to be statistically significant. If <code>evaluate</code> or this parameter is not used, the alarm will always be evaluated and possibly change state no matter how many data points are available.</p>"
}
},
"documentation":"<p>Represents an alarm.</p>",
"xmlOrder":[
@ -924,7 +1149,7 @@
},
"Value":{
"shape":"DatapointValue",
"documentation":"<p>The value for the metric.</p> <p>Although the parameter accepts numbers of type Double, Amazon CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.</p>"
"documentation":"<p>The value for the metric.</p> <p>Although the parameter accepts numbers of type Double, CloudWatch rejects values that are either too small or too large. Values must be in the range of 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2). In addition, special values (for example, NaN, +Infinity, -Infinity) are not supported.</p>"
},
"StatisticValues":{
"shape":"StatisticSet",
@ -933,7 +1158,8 @@
"Unit":{
"shape":"StandardUnit",
"documentation":"<p>The unit of the metric.</p>"
}
},
"StorageResolution":{"shape":"StorageResolution"}
},
"documentation":"<p>Encapsulates the information sent to either create a metric or add new values to be aggregated into an existing metric.</p>"
},
@ -976,7 +1202,29 @@
},
"Period":{
"type":"integer",
"min":60
"min":1
},
"PutDashboardInput":{
"type":"structure",
"members":{
"DashboardName":{
"shape":"DashboardName",
"documentation":"<p>The name of the dashboard. If a dashboard with this name already exists, this call modifies that dashboard, replacing its current contents. Otherwise, a new dashboard is created. The maximum length is 255, and valid characters are A-Z, a-z, 0-9, \".\", \"-\", and \"_\".</p>"
},
"DashboardBody":{
"shape":"DashboardBody",
"documentation":"<p>The detailed information about the dashboard in JSON format, including the widgets to include and their location on the dashboard.</p> <p>For more information about the syntax, see <a>CloudWatch-Dashboard-Body-Structure</a>.</p>"
}
}
},
"PutDashboardOutput":{
"type":"structure",
"members":{
"DashboardValidationMessages":{
"shape":"DashboardValidationMessages",
"documentation":"<p>If the input for <code>PutDashboard</code> was correct and the dashboard was successfully created or modified, this result is empty.</p> <p>If this result includes only warning messages, then the input was valid enough for the dashboard to be created or modified, but some elements of the dashboard may not render.</p> <p>If this result includes error messages, the input was not valid and the operation failed.</p>"
}
}
},
"PutMetricAlarmInput":{
"type":"structure",
@ -1036,15 +1284,15 @@
},
"Period":{
"shape":"Period",
"documentation":"<p>The period, in seconds, over which the specified statistic is applied.</p>"
"documentation":"<p>The period, in seconds, over which the specified statistic is applied. An alarm's total current evaluation period can be no longer than one day, so this number multiplied by <code>EvaluationPeriods</code> must be 86,400 or less.</p>"
},
"Unit":{
"shape":"StandardUnit",
"documentation":"<p>The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately.</p> <p>If you specify a unit, you must use a unit that is appropriate for the metric. Otherwise, the Amazon CloudWatch alarm can get stuck in the <code>INSUFFICIENT DATA</code> state. </p>"
"documentation":"<p>The unit of measure for the statistic. For example, the units for the Amazon EC2 NetworkIn metric are Bytes because NetworkIn tracks the number of bytes that an instance receives on all network interfaces. You can also specify a unit when you create a custom metric. Units help provide conceptual meaning to your data. Metric data points that specify a unit of measure, such as Percent, are aggregated separately.</p> <p>If you specify a unit, you must use a unit that is appropriate for the metric. Otherwise, the CloudWatch alarm can get stuck in the <code>INSUFFICIENT DATA</code> state. </p>"
},
"EvaluationPeriods":{
"shape":"EvaluationPeriods",
"documentation":"<p>The number of periods over which data is compared to the specified threshold.</p>"
"documentation":"<p>The number of periods over which data is compared to the specified threshold. An alarm's total current evaluation period can be no longer than one day, so this number multiplied by <code>Period</code> must be 86,400 or less.</p>"
},
"Threshold":{
"shape":"Threshold",
@ -1060,7 +1308,7 @@
},
"EvaluateLowSampleCountPercentile":{
"shape":"EvaluateLowSampleCountPercentile",
"documentation":"<p> Used only for alarms based on percentiles. If you specify <code>ignore</code>, the alarm state will not change during periods with too few data points to be statistically significant. If you specify <code>evaluate</code> or omit this parameter, the alarm will always be evaluated and possibly change state no matter how many data points are available. For more information, see <a href=\"http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#percentiles-with-low-samples\">Percentile-Based CloudWatch Alarms and Low Data Samples</a>.</p> <p>Valid Values: <code>evaluate | ignore</code> </p>"
"documentation":"<p> Used only for alarms based on percentiles. If you specify <code>ignore</code>, the alarm state does not change during periods with too few data points to be statistically significant. If you specify <code>evaluate</code> or omit this parameter, the alarm is always evaluated and possibly changes state no matter how many data points are available. For more information, see <a href=\"http://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/AlarmThatSendsEmail.html#percentiles-with-low-samples\">Percentile-Based CloudWatch Alarms and Low Data Samples</a>.</p> <p>Valid Values: <code>evaluate | ignore</code> </p>"
}
}
},
@ -1133,6 +1381,7 @@
}
}
},
"Size":{"type":"long"},
"StandardUnit":{
"type":"string",
"enum":[
@ -1227,6 +1476,10 @@
"max":5,
"min":1
},
"StorageResolution":{
"type":"integer",
"min":1
},
"Threshold":{"type":"double"},
"Timestamp":{"type":"timestamp"},
"TreatMissingData":{
@ -1235,5 +1488,5 @@
"min":1
}
},
"documentation":"<p>Amazon CloudWatch monitors your Amazon Web Services (AWS) resources and the applications you run on AWS in real-time. You can use CloudWatch to collect and track metrics, which are the variables you want to measure for your resources and applications.</p> <p>CloudWatch alarms send notifications or automatically make changes to the resources you are monitoring based on rules that you define. For example, you can monitor the CPU usage and disk reads and writes of your Amazon Elastic Compute Cloud (Amazon EC2) instances and then use this data to determine whether you should launch additional instances to handle increased load. You can also use this data to stop under-used instances to save money.</p> <p>In addition to monitoring the built-in metrics that come with AWS, you can monitor your own custom metrics. With CloudWatch, you gain system-wide visibility into resource utilization, application performance, and operational health.</p>"
"documentation":"<p>Amazon CloudWatch monitors your Amazon Web Services (AWS) resources and the applications you run on AWS in real time. You can use CloudWatch to collect and track metrics, which are the variables you want to measure for your resources and applications.</p> <p>CloudWatch alarms send notifications or automatically change the resources you are monitoring based on rules that you define. For example, you can monitor the CPU usage and disk reads and writes of your Amazon EC2 instances. Then, use this data to determine whether you should launch additional instances to handle increased load. You can also use this data to stop under-used instances to save money.</p> <p>In addition to monitoring the built-in metrics that come with AWS, you can monitor your own custom metrics. With CloudWatch, you gain system-wide visibility into resource utilization, application performance, and operational health.</p>"
}
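The dashboard operations added to this model (GetDashboard, ListDashboards, PutDashboard, DeleteDashboards) surface on the CloudWatch client as snake_case methods. A hedged sketch of the copy workflow that the GetDashboard/PutDashboard documentation above describes, assuming a boto3 client built on this botocore release and an existing dashboard named 'source' (both assumptions):

    # Sketch only: copy a CloudWatch dashboard by re-posting its body.
    # Assumes a dashboard named 'source' already exists in the account.
    import boto3

    cloudwatch = boto3.client('cloudwatch', region_name='us-east-1')

    source = cloudwatch.get_dashboard(DashboardName='source')
    result = cloudwatch.put_dashboard(
        DashboardName='source-copy',
        DashboardBody=source['DashboardBody'],  # JSON widget layout returned by GetDashboard
    )

    # An empty DashboardValidationMessages list means the input was accepted as-is.
    print(result.get('DashboardValidationMessages', []))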

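Similarly, the PutMetricData documentation above, together with the new StorageResolution member on MetricDatum, suggests the following hedged sketch of publishing a high-resolution custom metric; the namespace, metric name, and dimension are placeholders:

    # Sketch: publish one high-resolution data point to a custom namespace.
    # 'MyApp' and 'RequestLatency' are placeholder names.
    import boto3

    cloudwatch = boto3.client('cloudwatch', region_name='us-east-1')

    cloudwatch.put_metric_data(
        Namespace='MyApp',
        MetricData=[{
            'MetricName': 'RequestLatency',
            'Dimensions': [{'Name': 'Stage', 'Value': 'prod'}],
            'Value': 42.0,
            'Unit': 'Milliseconds',
            'StorageResolution': 1,  # new member in this model; 1 marks a high-resolution metric
        }],
    )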
View file

@ -0,0 +1,3 @@
{
"pagination": {}
}

View file

@ -320,6 +320,24 @@
],
"documentation":"<p>Obtains information about the directories that belong to this account.</p> <p>You can retrieve information about specific directories by passing the directory identifiers in the <i>DirectoryIds</i> parameter. Otherwise, all directories that belong to the current account are returned.</p> <p>This operation supports pagination with the use of the <i>NextToken</i> request and response parameters. If more results are available, the <i>DescribeDirectoriesResult.NextToken</i> member contains a token that you pass in the next call to <a>DescribeDirectories</a> to retrieve the next set of items.</p> <p>You can also specify a maximum number of return results with the <i>Limit</i> parameter.</p>"
},
"DescribeDomainControllers":{
"name":"DescribeDomainControllers",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"DescribeDomainControllersRequest"},
"output":{"shape":"DescribeDomainControllersResult"},
"errors":[
{"shape":"EntityDoesNotExistException"},
{"shape":"InvalidNextTokenException"},
{"shape":"InvalidParameterException"},
{"shape":"ClientException"},
{"shape":"ServiceException"},
{"shape":"UnsupportedOperationException"}
],
"documentation":"<p>Provides information about any domain controllers in your directory.</p>"
},
"DescribeEventTopics":{
"name":"DescribeEventTopics",
"http":{
@ -618,6 +636,25 @@
],
"documentation":"<p>Updates a conditional forwarder that has been set up for your AWS directory.</p>"
},
"UpdateNumberOfDomainControllers":{
"name":"UpdateNumberOfDomainControllers",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"UpdateNumberOfDomainControllersRequest"},
"output":{"shape":"UpdateNumberOfDomainControllersResult"},
"errors":[
{"shape":"EntityDoesNotExistException"},
{"shape":"DirectoryUnavailableException"},
{"shape":"DomainControllerLimitExceededException"},
{"shape":"InvalidParameterException"},
{"shape":"UnsupportedOperationException"},
{"shape":"ClientException"},
{"shape":"ServiceException"}
],
"documentation":"<p>Adds or removes domain controllers to or from the directory. Based on the difference between current value and new value (provided through this API call), domain controllers will be added or removed. It may take up to 45 minutes for any new domain controllers to become fully active once the requested number of domain controllers is updated. During this time, you cannot make another update request.</p>"
},
"UpdateRadius":{
"name":"UpdateRadius",
"http":{
@ -1072,7 +1109,10 @@
"shape":"Description",
"documentation":"<p>A textual description for the directory. This label will appear on the AWS console <code>Directory Details</code> page after the directory is created.</p>"
},
"VpcSettings":{"shape":"DirectoryVpcSettings"}
"VpcSettings":{
"shape":"DirectoryVpcSettings",
"documentation":"<p>Contains VPC information for the <a>CreateDirectory</a> or <a>CreateMicrosoftAD</a> operation.</p>"
}
},
"documentation":"<p>Creates a Microsoft AD in the AWS cloud.</p>"
},
@ -1332,6 +1372,41 @@
},
"documentation":"<p>Contains the results of the <a>DescribeDirectories</a> operation.</p>"
},
"DescribeDomainControllersRequest":{
"type":"structure",
"required":["DirectoryId"],
"members":{
"DirectoryId":{
"shape":"DirectoryId",
"documentation":"<p>Identifier of the directory for which to retrieve the domain controller information.</p>"
},
"DomainControllerIds":{
"shape":"DomainControllerIds",
"documentation":"<p>A list of identifiers for the domain controllers whose information will be provided.</p>"
},
"NextToken":{
"shape":"NextToken",
"documentation":"<p>The <i>DescribeDomainControllers.NextToken</i> value from a previous call to <a>DescribeDomainControllers</a>. Pass null if this is the first call. </p>"
},
"Limit":{
"shape":"Limit",
"documentation":"<p>The maximum number of items to return.</p>"
}
}
},
"DescribeDomainControllersResult":{
"type":"structure",
"members":{
"DomainControllers":{
"shape":"DomainControllers",
"documentation":"<p>List of the <a>DomainController</a> objects that were retrieved.</p>"
},
"NextToken":{
"shape":"NextToken",
"documentation":"<p>If not null, more results are available. Pass this value for the <code>NextToken</code> parameter in a subsequent call to <a>DescribeDomainControllers</a> retrieve the next set of items.</p>"
}
}
},
"DescribeEventTopicsRequest":{
"type":"structure",
"members":{
@ -1434,6 +1509,10 @@
"min":0,
"pattern":"^([a-zA-Z0-9_])[\\\\a-zA-Z0-9_@#%*+=:?./!\\s-]*$"
},
"DesiredNumberOfDomainControllers":{
"type":"integer",
"min":2
},
"DirectoryConnectSettings":{
"type":"structure",
"required":[
@ -1566,6 +1645,10 @@
"SsoEnabled":{
"shape":"SsoEnabled",
"documentation":"<p>Indicates if single-sign on is enabled for the directory. For more information, see <a>EnableSso</a> and <a>DisableSso</a>.</p>"
},
"DesiredNumberOfDomainControllers":{
"shape":"DesiredNumberOfDomainControllers",
"documentation":"<p>The desired number of domain controllers in the directory if the directory is Microsoft AD.</p>"
}
},
"documentation":"<p>Contains information about an AWS Directory Service directory.</p>"
@ -1769,6 +1852,86 @@
"type":"list",
"member":{"shape":"IpAddr"}
},
"DomainController":{
"type":"structure",
"members":{
"DirectoryId":{
"shape":"DirectoryId",
"documentation":"<p>Identifier of the directory where the domain controller resides.</p>"
},
"DomainControllerId":{
"shape":"DomainControllerId",
"documentation":"<p>Identifies a specific domain controller in the directory.</p>"
},
"DnsIpAddr":{
"shape":"IpAddr",
"documentation":"<p>The IP address of the domain controller.</p>"
},
"VpcId":{
"shape":"VpcId",
"documentation":"<p>The identifier of the VPC that contains the domain controller.</p>"
},
"SubnetId":{
"shape":"SubnetId",
"documentation":"<p>Identifier of the subnet in the VPC that contains the domain controller.</p>"
},
"AvailabilityZone":{
"shape":"AvailabilityZone",
"documentation":"<p>The Availability Zone where the domain controller is located.</p>"
},
"Status":{
"shape":"DomainControllerStatus",
"documentation":"<p>The status of the domain controller.</p>"
},
"StatusReason":{
"shape":"DomainControllerStatusReason",
"documentation":"<p>A description of the domain controller state.</p>"
},
"LaunchTime":{
"shape":"LaunchTime",
"documentation":"<p>Specifies when the domain controller was created.</p>"
},
"StatusLastUpdatedDateTime":{
"shape":"LastUpdatedDateTime",
"documentation":"<p>The date and time that the status was last updated.</p>"
}
},
"documentation":"<p>Contains information about the domain controllers for a specified directory.</p>"
},
"DomainControllerId":{
"type":"string",
"pattern":"^dc-[0-9a-f]{10}$"
},
"DomainControllerIds":{
"type":"list",
"member":{"shape":"DomainControllerId"}
},
"DomainControllerLimitExceededException":{
"type":"structure",
"members":{
"Message":{"shape":"ExceptionMessage"},
"RequestId":{"shape":"RequestId"}
},
"documentation":"<p>The maximum allowed number of domain controllers per directory was exceeded. The default limit per directory is 20 domain controllers.</p>",
"exception":true
},
"DomainControllerStatus":{
"type":"string",
"enum":[
"Creating",
"Active",
"Impaired",
"Restoring",
"Deleting",
"Deleted",
"Failed"
]
},
"DomainControllerStatusReason":{"type":"string"},
"DomainControllers":{
"type":"list",
"member":{"shape":"DomainController"}
},
"EnableRadiusRequest":{
"type":"structure",
"required":[
@ -2741,6 +2904,28 @@
},
"documentation":"<p>The result of an UpdateConditionalForwarder request.</p>"
},
"UpdateNumberOfDomainControllersRequest":{
"type":"structure",
"required":[
"DirectoryId",
"DesiredNumber"
],
"members":{
"DirectoryId":{
"shape":"DirectoryId",
"documentation":"<p>Identifier of the directory to which the domain controllers will be added or removed.</p>"
},
"DesiredNumber":{
"shape":"DesiredNumberOfDomainControllers",
"documentation":"<p>The number of domain controllers desired in the directory.</p>"
}
}
},
"UpdateNumberOfDomainControllersResult":{
"type":"structure",
"members":{
}
},
"UpdateRadiusRequest":{
"type":"structure",
"required":[

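The DescribeDomainControllers and UpdateNumberOfDomainControllers operations added above likewise map to snake_case methods on the Directory Service client. A rough sketch of scaling a Microsoft AD directory and then listing its domain controllers; the directory identifier is a placeholder:

    # Sketch: request 3 domain controllers for a directory, then list them.
    # 'd-1234567890' is a placeholder directory ID.
    import boto3

    ds = boto3.client('ds', region_name='us-east-1')

    ds.update_number_of_domain_controllers(
        DirectoryId='d-1234567890',
        DesiredNumber=3,  # the DesiredNumberOfDomainControllers shape requires at least 2
    )

    next_token = None
    while True:
        kwargs = {'DirectoryId': 'd-1234567890'}
        if next_token:
            kwargs['NextToken'] = next_token
        page = ds.describe_domain_controllers(**kwargs)
        for dc in page.get('DomainControllers', []):
            print(dc['DomainControllerId'], dc['Status'], dc['DnsIpAddr'])
        next_token = page.get('NextToken')
        if not next_token:
            break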
View file

@ -131,6 +131,9 @@
},
"athena" : {
"endpoints" : {
"ap-northeast-1" : { },
"ap-southeast-1" : { },
"eu-west-1" : { },
"us-east-1" : { },
"us-east-2" : { },
"us-west-2" : { }
@ -274,10 +277,13 @@
"ap-northeast-1" : { },
"ap-southeast-1" : { },
"ap-southeast-2" : { },
"ca-central-1" : { },
"eu-central-1" : { },
"eu-west-1" : { },
"eu-west-2" : { },
"us-east-1" : { },
"us-east-2" : { },
"us-west-1" : { },
"us-west-2" : { }
}
},
@ -285,8 +291,10 @@
"endpoints" : {
"ap-northeast-1" : { },
"ap-northeast-2" : { },
"ap-south-1" : { },
"ap-southeast-1" : { },
"ap-southeast-2" : { },
"ca-central-1" : { },
"eu-central-1" : { },
"eu-west-1" : { },
"eu-west-2" : { },
@ -320,11 +328,14 @@
"ap-northeast-1" : { },
"ap-southeast-1" : { },
"ap-southeast-2" : { },
"ca-central-1" : { },
"eu-central-1" : { },
"eu-west-1" : { },
"eu-west-2" : { },
"sa-east-1" : { },
"us-east-1" : { },
"us-east-2" : { },
"us-west-1" : { },
"us-west-2" : { }
}
},
@ -1604,6 +1615,16 @@
"cn-north-1" : { }
}
},
"ecr" : {
"endpoints" : {
"cn-north-1" : { }
}
},
"ecs" : {
"endpoints" : {
"cn-north-1" : { }
}
},
"elasticache" : {
"endpoints" : {
"cn-north-1" : { }
@ -1709,6 +1730,11 @@
"cn-north-1" : { }
}
},
"ssm" : {
"endpoints" : {
"cn-north-1" : { }
}
},
"storagegateway" : {
"endpoints" : {
"cn-north-1" : { }
@ -1929,6 +1955,11 @@
}
}
},
"ssm" : {
"endpoints" : {
"us-gov-west-1" : { }
}
},
"streams.dynamodb" : {
"defaults" : {
"credentialScope" : {

View file

@ -139,7 +139,13 @@
{"shape":"ResourceNotFoundException"},
{"shape":"InvalidArgumentException"},
{"shape":"ProvisionedThroughputExceededException"},
{"shape":"ExpiredIteratorException"}
{"shape":"ExpiredIteratorException"},
{"shape":"KMSDisabledException"},
{"shape":"KMSInvalidStateException"},
{"shape":"KMSAccessDeniedException"},
{"shape":"KMSNotFoundException"},
{"shape":"KMSOptInRequired"},
{"shape":"KMSThrottlingException"}
],
"documentation":"<p>Gets data records from an Amazon Kinesis stream's shard.</p> <p>Specify a shard iterator using the <code>ShardIterator</code> parameter. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially. If there are no records available in the portion of the shard that the iterator points to, <a>GetRecords</a> returns an empty list. Note that it might take multiple calls to get to a portion of the shard that contains records.</p> <p>You can scale by provisioning multiple shards per stream while considering service limits (for more information, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html\">Streams Limits</a> in the <i>Amazon Kinesis Streams Developer Guide</i>). Your application should have one thread per shard, each reading continuously from its stream. To read from a stream continually, call <a>GetRecords</a> in a loop. Use <a>GetShardIterator</a> to get the shard iterator to specify in the first <a>GetRecords</a> call. <a>GetRecords</a> returns a new shard iterator in <code>NextShardIterator</code>. Specify the shard iterator returned in <code>NextShardIterator</code> in subsequent calls to <a>GetRecords</a>. Note that if the shard has been closed, the shard iterator can't return more data and <a>GetRecords</a> returns <code>null</code> in <code>NextShardIterator</code>. You can terminate the loop when the shard is closed, or when the shard iterator reaches the record with the sequence number or other attribute that marks it as the last record to process.</p> <p>Each data record can be up to 1 MB in size, and each shard can read up to 2 MB per second. You can ensure that your calls don't exceed the maximum supported size or throughput by using the <code>Limit</code> parameter to specify the maximum number of records that <a>GetRecords</a> can return. Consider your average record size when determining this limit.</p> <p>The size of the data returned by <a>GetRecords</a> varies depending on the utilization of the shard. The maximum size of data that <a>GetRecords</a> can return is 10 MB. If a call returns this amount of data, subsequent calls made within the next 5 seconds throw <code>ProvisionedThroughputExceededException</code>. If there is insufficient provisioned throughput on the shard, subsequent calls made within the next 1 second throw <code>ProvisionedThroughputExceededException</code>. Note that <a>GetRecords</a> won't return any data when it throws an exception. For this reason, we recommend that you wait one second between calls to <a>GetRecords</a>; however, it's possible that the application will get exceptions for longer than 1 second.</p> <p>To detect whether the application is falling behind in processing, you can use the <code>MillisBehindLatest</code> response attribute. You can also monitor the stream using CloudWatch metrics and other mechanisms (see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/monitoring.html\">Monitoring</a> in the <i>Amazon Kinesis Streams Developer Guide</i>).</p> <p>Each Amazon Kinesis record includes a value, <code>ApproximateArrivalTimestamp</code>, that is set when a stream successfully receives and stores a record. This is commonly referred to as a server-side timestamp, whereas a client-side timestamp is set when a data producer creates or sends the record to a stream (a data producer is any data source putting data records into a stream, for example with <a>PutRecords</a>). The timestamp has millisecond precision. There are no guarantees about the timestamp accuracy, or that the timestamp is always increasing. For example, records in a shard or across a stream might have timestamps that are out of order.</p>"
},
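The GetRecords documentation above describes the shard-iterator read loop; here is a minimal sketch of that pattern. The stream name and shard ID are placeholders, and per the guidance above the loop waits roughly one second between calls:

    # Sketch of the GetShardIterator / GetRecords loop described above.
    # 'example-stream' and 'shardId-000000000000' are placeholders.
    import time
    import boto3

    kinesis = boto3.client('kinesis', region_name='us-east-1')

    shard_iterator = kinesis.get_shard_iterator(
        StreamName='example-stream',
        ShardId='shardId-000000000000',
        ShardIteratorType='TRIM_HORIZON',
    )['ShardIterator']

    while shard_iterator:
        response = kinesis.get_records(ShardIterator=shard_iterator, Limit=100)
        for record in response['Records']:
            print(record['SequenceNumber'], record['Data'])
        shard_iterator = response.get('NextShardIterator')  # becomes null once the shard is closed
        time.sleep(1)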
@ -226,9 +232,15 @@
"errors":[
{"shape":"ResourceNotFoundException"},
{"shape":"InvalidArgumentException"},
{"shape":"ProvisionedThroughputExceededException"}
{"shape":"ProvisionedThroughputExceededException"},
{"shape":"KMSDisabledException"},
{"shape":"KMSInvalidStateException"},
{"shape":"KMSAccessDeniedException"},
{"shape":"KMSNotFoundException"},
{"shape":"KMSOptInRequired"},
{"shape":"KMSThrottlingException"}
],
"documentation":"<p>Writes a single data record into an Amazon Kinesis stream. Call <code>PutRecord</code> to send data into the stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.</p> <p>You must specify the name of the stream that captures, stores, and transports the data; a partition key; and the data blob itself.</p> <p>The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.</p> <p>The partition key is used by Amazon Kinesis to distribute data across shards. Amazon Kinesis segregates the data records that belong to a stream into multiple shards, using the partition key associated with each data record to determine which shard a given data record belongs to.</p> <p>Partition keys are Unicode strings, with a maximum length limit of 256 characters for each key. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards using the hash key ranges of the shards. You can override hashing the partition key to determine the shard by explicitly specifying a hash value using the <code>ExplicitHashKey</code> parameter. For more information, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream\">Adding Data to a Stream</a> in the <i>Amazon Kinesis Streams Developer Guide</i>.</p> <p> <code>PutRecord</code> returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record.</p> <p>Sequence numbers increase over time and are specific to a shard within a stream, not across all shards within a stream. To guarantee strictly increasing ordering, write serially to a shard and use the <code>SequenceNumberForOrdering</code> parameter. For more information, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream\">Adding Data to a Stream</a> in the <i>Amazon Kinesis Streams Developer Guide</i>.</p> <p>If a <code>PutRecord</code> request cannot be processed because of insufficient provisioned throughput on the shard involved in the request, <code>PutRecord</code> throws <code>ProvisionedThroughputExceededException</code>. </p> <p>Data records are accessible for only 24 hours from the time that they are added to a stream.</p>"
"documentation":"<p>Writes a single data record into an Amazon Kinesis stream. Call <code>PutRecord</code> to send data into the stream for real-time ingestion and subsequent processing, one record at a time. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.</p> <p>You must specify the name of the stream that captures, stores, and transports the data; a partition key; and the data blob itself.</p> <p>The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.</p> <p>The partition key is used by Amazon Kinesis to distribute data across shards. Amazon Kinesis segregates the data records that belong to a stream into multiple shards, using the partition key associated with each data record to determine which shard a given data record belongs to.</p> <p>Partition keys are Unicode strings, with a maximum length limit of 256 characters for each key. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards using the hash key ranges of the shards. You can override hashing the partition key to determine the shard by explicitly specifying a hash value using the <code>ExplicitHashKey</code> parameter. For more information, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream\">Adding Data to a Stream</a> in the <i>Amazon Kinesis Streams Developer Guide</i>.</p> <p> <code>PutRecord</code> returns the shard ID of where the data record was placed and the sequence number that was assigned to the data record.</p> <p>Sequence numbers increase over time and are specific to a shard within a stream, not across all shards within a stream. To guarantee strictly increasing ordering, write serially to a shard and use the <code>SequenceNumberForOrdering</code> parameter. For more information, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream\">Adding Data to a Stream</a> in the <i>Amazon Kinesis Streams Developer Guide</i>.</p> <p>If a <code>PutRecord</code> request cannot be processed because of insufficient provisioned throughput on the shard involved in the request, <code>PutRecord</code> throws <code>ProvisionedThroughputExceededException</code>. </p> <p>By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use <a>IncreaseStreamRetentionPeriod</a> or <a>DecreaseStreamRetentionPeriod</a> to modify this retention period.</p>"
},
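As a hedged sketch of the single-record write described above, assuming a botocore Kinesis client and a placeholder stream name and payload:

    import botocore.session

    kinesis = botocore.session.get_session().create_client(
        'kinesis', region_name='us-east-1')

    # The partition key selects the shard; the response carries the
    # shard id and the sequence number assigned to the record.
    response = kinesis.put_record(
        StreamName='my-stream',
        Data=b'{"event": "example"}',
        PartitionKey='user-1234')
    print(response['ShardId'], response['SequenceNumber'])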
"PutRecords":{
"name":"PutRecords",
@ -241,9 +253,15 @@
"errors":[
{"shape":"ResourceNotFoundException"},
{"shape":"InvalidArgumentException"},
{"shape":"ProvisionedThroughputExceededException"}
{"shape":"ProvisionedThroughputExceededException"},
{"shape":"KMSDisabledException"},
{"shape":"KMSInvalidStateException"},
{"shape":"KMSAccessDeniedException"},
{"shape":"KMSNotFoundException"},
{"shape":"KMSOptInRequired"},
{"shape":"KMSThrottlingException"}
],
"documentation":"<p>Writes multiple data records into an Amazon Kinesis stream in a single call (also referred to as a <code>PutRecords</code> request). Use this operation to send data into the stream for data ingestion and processing. </p> <p>Each <code>PutRecords</code> request can support up to 500 records. Each record in the request can be as large as 1 MB, up to a limit of 5 MB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.</p> <p>You must specify the name of the stream that captures, stores, and transports the data; and an array of request <code>Records</code>, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob.</p> <p>The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.</p> <p>The partition key is used by Amazon Kinesis as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream\">Adding Data to a Stream</a> in the <i>Amazon Kinesis Streams Developer Guide</i>.</p> <p>Each record in the <code>Records</code> array may include an optional parameter, <code>ExplicitHashKey</code>, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords\">Adding Multiple Records with PutRecords</a> in the <i>Amazon Kinesis Streams Developer Guide</i>.</p> <p>The <code>PutRecords</code> response includes an array of response <code>Records</code>. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response <code>Records</code> array always includes the same number of records as the request array.</p> <p>The response <code>Records</code> array includes both successfully and unsuccessfully processed records. Amazon Kinesis attempts to process all records in each <code>PutRecords</code> request. A single record failure does not stop the processing of subsequent records.</p> <p>A successfully-processed record includes <code>ShardId</code> and <code>SequenceNumber</code> values. The <code>ShardId</code> parameter identifies the shard in the stream where the record is stored. The <code>SequenceNumber</code> parameter is an identifier assigned to the put record, unique to all records in the stream.</p> <p>An unsuccessfully-processed record includes <code>ErrorCode</code> and <code>ErrorMessage</code> values. <code>ErrorCode</code> reflects the type of error and can be one of the following values: <code>ProvisionedThroughputExceededException</code> or <code>InternalFailure</code>. 
<code>ErrorMessage</code> provides more detailed information about the <code>ProvisionedThroughputExceededException</code> exception including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords\">Adding Multiple Records with PutRecords</a> in the <i>Amazon Kinesis Streams Developer Guide</i>.</p> <p>By default, data records are accessible for only 24 hours from the time that they are added to an Amazon Kinesis stream. This retention period can be modified using the <a>DecreaseStreamRetentionPeriod</a> and <a>IncreaseStreamRetentionPeriod</a> operations.</p>"
"documentation":"<p>Writes multiple data records into an Amazon Kinesis stream in a single call (also referred to as a <code>PutRecords</code> request). Use this operation to send data into the stream for data ingestion and processing. </p> <p>Each <code>PutRecords</code> request can support up to 500 records. Each record in the request can be as large as 1 MB, up to a limit of 5 MB for the entire request, including partition keys. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MB per second.</p> <p>You must specify the name of the stream that captures, stores, and transports the data; and an array of request <code>Records</code>, with each record in the array requiring a partition key and data blob. The record size limit applies to the total size of the partition key and data blob.</p> <p>The data blob can be any type of data; for example, a segment from a log file, geographic/location data, website clickstream data, and so on.</p> <p>The partition key is used by Amazon Kinesis as input to a hash function that maps the partition key and associated data to a specific shard. An MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this hashing mechanism, all data records with the same partition key map to the same shard within the stream. For more information, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-add-data-to-stream\">Adding Data to a Stream</a> in the <i>Amazon Kinesis Streams Developer Guide</i>.</p> <p>Each record in the <code>Records</code> array may include an optional parameter, <code>ExplicitHashKey</code>, which overrides the partition key to shard mapping. This parameter allows a data producer to determine explicitly the shard where the record is stored. For more information, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/developing-producers-with-sdk.html#kinesis-using-sdk-java-putrecords\">Adding Multiple Records with PutRecords</a> in the <i>Amazon Kinesis Streams Developer Guide</i>.</p> <p>The <code>PutRecords</code> response includes an array of response <code>Records</code>. Each record in the response array directly correlates with a record in the request array using natural ordering, from the top to the bottom of the request and response. The response <code>Records</code> array always includes the same number of records as the request array.</p> <p>The response <code>Records</code> array includes both successfully and unsuccessfully processed records. Amazon Kinesis attempts to process all records in each <code>PutRecords</code> request. A single record failure does not stop the processing of subsequent records.</p> <p>A successfully-processed record includes <code>ShardId</code> and <code>SequenceNumber</code> values. The <code>ShardId</code> parameter identifies the shard in the stream where the record is stored. The <code>SequenceNumber</code> parameter is an identifier assigned to the put record, unique to all records in the stream.</p> <p>An unsuccessfully-processed record includes <code>ErrorCode</code> and <code>ErrorMessage</code> values. <code>ErrorCode</code> reflects the type of error and can be one of the following values: <code>ProvisionedThroughputExceededException</code> or <code>InternalFailure</code>. 
<code>ErrorMessage</code> provides more detailed information about the <code>ProvisionedThroughputExceededException</code> exception including the account ID, stream name, and shard ID of the record that was throttled. For more information about partially successful responses, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-add-data-to-stream.html#kinesis-using-sdk-java-putrecords\">Adding Multiple Records with PutRecords</a> in the <i>Amazon Kinesis Streams Developer Guide</i>.</p> <p>By default, data records are accessible for 24 hours from the time that they are added to a stream. You can use <a>IncreaseStreamRetentionPeriod</a> or <a>DecreaseStreamRetentionPeriod</a> to modify this retention period.</p>"
},
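A small sketch of a batched write with the partial-failure check described above; the stream name and payloads are placeholders, and failed entries are simply collected rather than retried:

    import botocore.session

    kinesis = botocore.session.get_session().create_client(
        'kinesis', region_name='us-east-1')

    records = [{'Data': ('payload-%d' % i).encode('utf-8'),
                'PartitionKey': 'key-%d' % i} for i in range(10)]
    response = kinesis.put_records(StreamName='my-stream', Records=records)

    # Entries with ErrorCode/ErrorMessage failed; the others carry
    # ShardId and SequenceNumber. A real producer would retry failures.
    failed = [original for original, result
              in zip(records, response['Records'])
              if 'ErrorCode' in result]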
"RemoveTagsFromStream":{
"name":"RemoveTagsFromStream",
@ -275,6 +293,42 @@
],
"documentation":"<p>Splits a shard into two new shards in the Amazon Kinesis stream to increase the stream's capacity to ingest and transport data. <code>SplitShard</code> is called when there is a need to increase the overall capacity of a stream because of an expected increase in the volume of data records being ingested. </p> <p>You can also use <code>SplitShard</code> when a shard appears to be approaching its maximum utilization; for example, the producers sending data into the specific shard are suddenly sending more than previously anticipated. You can also call <code>SplitShard</code> to increase stream capacity, so that more Amazon Kinesis applications can simultaneously read data from the stream for real-time processing. </p> <p>You must specify the shard to be split and the new hash key, which is the position in the shard where the shard gets split in two. In many cases, the new hash key might simply be the average of the beginning and ending hash key, but it can be any hash key value in the range being mapped into the shard. For more information about splitting shards, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/kinesis-using-sdk-java-resharding-split.html\">Split a Shard</a> in the <i>Amazon Kinesis Streams Developer Guide</i>.</p> <p>You can use <a>DescribeStream</a> to determine the shard ID and hash key values for the <code>ShardToSplit</code> and <code>NewStartingHashKey</code> parameters that are specified in the <code>SplitShard</code> request.</p> <p> <code>SplitShard</code> is an asynchronous operation. Upon receiving a <code>SplitShard</code> request, Amazon Kinesis immediately returns a response and sets the stream status to <code>UPDATING</code>. After the operation is completed, Amazon Kinesis sets the stream status to <code>ACTIVE</code>. Read and write operations continue to work while the stream is in the <code>UPDATING</code> state. </p> <p>You can use <code>DescribeStream</code> to check the status of the stream, which is returned in <code>StreamStatus</code>. If the stream is in the <code>ACTIVE</code> state, you can call <code>SplitShard</code>. If a stream is in <code>CREATING</code> or <code>UPDATING</code> or <code>DELETING</code> states, <code>DescribeStream</code> returns a <code>ResourceInUseException</code>.</p> <p>If the specified stream does not exist, <code>DescribeStream</code> returns a <code>ResourceNotFoundException</code>. If you try to create more shards than are authorized for your account, you receive a <code>LimitExceededException</code>. </p> <p>For the default shard limit for an AWS account, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html\">Streams Limits</a> in the <i>Amazon Kinesis Streams Developer Guide</i>. If you need to increase this limit, <a href=\"http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html\">contact AWS Support</a>.</p> <p>If you try to operate on too many streams simultaneously using <a>CreateStream</a>, <a>DeleteStream</a>, <a>MergeShards</a>, and/or <a>SplitShard</a>, you receive a <code>LimitExceededException</code>. </p> <p> <code>SplitShard</code> has limit of 5 transactions per second per account.</p>"
},
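To make the hash-key arithmetic concrete, a sketch that splits the first shard of a stream at the midpoint of its hash key range, assuming a botocore Kinesis client and a placeholder stream name:

    import botocore.session

    kinesis = botocore.session.get_session().create_client(
        'kinesis', region_name='us-east-1')

    shard = kinesis.describe_stream(
        StreamName='my-stream')['StreamDescription']['Shards'][0]
    lo = int(shard['HashKeyRange']['StartingHashKey'])
    hi = int(shard['HashKeyRange']['EndingHashKey'])

    # Splitting at the midpoint of the range, as suggested above; any
    # hash key inside the range would also be accepted.
    kinesis.split_shard(
        StreamName='my-stream',
        ShardToSplit=shard['ShardId'],
        NewStartingHashKey=str((lo + hi) // 2))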
"StartStreamEncryption":{
"name":"StartStreamEncryption",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"StartStreamEncryptionInput"},
"errors":[
{"shape":"InvalidArgumentException"},
{"shape":"LimitExceededException"},
{"shape":"ResourceInUseException"},
{"shape":"ResourceNotFoundException"},
{"shape":"KMSDisabledException"},
{"shape":"KMSInvalidStateException"},
{"shape":"KMSAccessDeniedException"},
{"shape":"KMSNotFoundException"},
{"shape":"KMSOptInRequired"},
{"shape":"KMSThrottlingException"}
],
"documentation":"<p>Enables or updates server-side encryption using an AWS KMS key for a specified stream. </p> <p>Starting encryption is an asynchronous operation. Upon receiving the request, Amazon Kinesis returns immediately and sets the status of the stream to <code>UPDATING</code>. After the update is complete, Amazon Kinesis sets the status of the stream back to <code>ACTIVE</code>. Updating or applying encryption normally takes a few seconds to complete but it can take minutes. You can continue to read and write data to your stream while its status is <code>UPDATING</code>. Once the status of the stream is <code>ACTIVE</code>, records written to the stream will begin to be encrypted. </p> <p>API Limits: You can successfully apply a new AWS KMS key for server-side encryption 25 times in a rolling 24 hour period.</p> <p>Note: It can take up to 5 seconds after the stream is in an <code>ACTIVE</code> status before all records written to the stream are encrypted. After youve enabled encryption, you can verify encryption was applied by inspecting the API response from <code>PutRecord</code> or <code>PutRecords</code>.</p>"
},
"StopStreamEncryption":{
"name":"StopStreamEncryption",
"http":{
"method":"POST",
"requestUri":"/"
},
"input":{"shape":"StopStreamEncryptionInput"},
"errors":[
{"shape":"InvalidArgumentException"},
{"shape":"LimitExceededException"},
{"shape":"ResourceInUseException"},
{"shape":"ResourceNotFoundException"}
],
"documentation":"<p>Disables server-side encryption for a specified stream. </p> <p>Stopping encryption is an asynchronous operation. Upon receiving the request, Amazon Kinesis returns immediately and sets the status of the stream to <code>UPDATING</code>. After the update is complete, Amazon Kinesis sets the status of the stream back to <code>ACTIVE</code>. Stopping encryption normally takes a few seconds to complete but it can take minutes. You can continue to read and write data to your stream while its status is <code>UPDATING</code>. Once the status of the stream is <code>ACTIVE</code> records written to the stream will no longer be encrypted by the Amazon Kinesis Streams service. </p> <p>API Limits: You can successfully disable server-side encryption 25 times in a rolling 24 hour period. </p> <p>Note: It can take up to 5 seconds after the stream is in an <code>ACTIVE</code> status before all records written to the stream are no longer subject to encryption. After youve disabled encryption, you can verify encryption was not applied by inspecting the API response from <code>PutRecord</code> or <code>PutRecords</code>.</p>"
},
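A hedged sketch of enabling and later disabling server-side encryption as described by these two operations; the stream name and KMS key alias are placeholders:

    import botocore.session

    kinesis = botocore.session.get_session().create_client(
        'kinesis', region_name='us-east-1')

    # Enable encryption; the stream status moves to UPDATING and then
    # back to ACTIVE once the change is applied.
    kinesis.start_stream_encryption(
        StreamName='my-stream',
        EncryptionType='KMS',
        KeyId='alias/my-stream-key')

    # Later, disable it again, naming the same key.
    kinesis.stop_stream_encryption(
        StreamName='my-stream',
        EncryptionType='KMS',
        KeyId='alias/my-stream-key')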
"UpdateShardCount":{
"name":"UpdateShardCount",
"http":{
@ -289,7 +343,7 @@
{"shape":"ResourceInUseException"},
{"shape":"ResourceNotFoundException"}
],
"documentation":"<p>Updates the shard count of the specified stream to the specified number of shards.</p> <p>Updating the shard count is an asynchronous operation. Upon receiving the request, Amazon Kinesis returns immediately and sets the status of the stream to <code>UPDATING</code>. After the update is complete, Amazon Kinesis sets the status of the stream back to <code>ACTIVE</code>. Depending on the size of the stream, the scaling action could take a few minutes to complete. You can continue to read and write data to your stream while its status is <code>UPDATING</code>.</p> <p>To update the shard count, Amazon Kinesis performs splits and merges and individual shards. This can cause short-lived shards to be created, in addition to the final shards. We recommend that you double or halve the shard count, as this results in the fewest number of splits or merges.</p> <p>This operation has a rate limit of twice per rolling 24 hour period. You cannot scale above double your current shard count, scale below half your current shard count, or exceed the shard limits for your account.</p> <p>For the default limits for an AWS account, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html\">Streams Limits</a> in the <i>Amazon Kinesis Streams Developer Guide</i>. If you need to increase a limit, <a href=\"http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html\">contact AWS Support</a>.</p>"
"documentation":"<p>Updates the shard count of the specified stream to the specified number of shards.</p> <p>Updating the shard count is an asynchronous operation. Upon receiving the request, Amazon Kinesis returns immediately and sets the status of the stream to <code>UPDATING</code>. After the update is complete, Amazon Kinesis sets the status of the stream back to <code>ACTIVE</code>. Depending on the size of the stream, the scaling action could take a few minutes to complete. You can continue to read and write data to your stream while its status is <code>UPDATING</code>.</p> <p>To update the shard count, Amazon Kinesis performs splits or merges on individual shards. This can cause short-lived shards to be created, in addition to the final shards. We recommend that you double or halve the shard count, as this results in the fewest number of splits or merges.</p> <p>This operation has the following limits, which are per region per account unless otherwise noted:</p> <ul> <li> <p>scale more than twice per rolling 24 hour period</p> </li> <li> <p>scale up above double your current shard count</p> </li> <li> <p>scale down below half your current shard count</p> </li> <li> <p>scale up above 200 shards in a stream</p> </li> <li> <p>scale a stream with more than 200 shards down unless the result is less than 200 shards</p> </li> <li> <p>scale up above the shard limits for your account</p> </li> <li> <p/> </li> </ul> <p>For the default limits for an AWS account, see <a href=\"http://docs.aws.amazon.com/kinesis/latest/dev/service-sizes-and-limits.html\">Streams Limits</a> in the <i>Amazon Kinesis Streams Developer Guide</i>. If you need to increase a limit, <a href=\"http://docs.aws.amazon.com/general/latest/gr/aws_service_limits.html\">contact AWS Support</a>.</p>"
}
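For reference, a minimal sketch of a resharding call consistent with the limits listed above, assuming a botocore Kinesis client, a placeholder stream name, and the UNIFORM_SCALING scaling type:

    import botocore.session

    kinesis = botocore.session.get_session().create_client(
        'kinesis', region_name='us-east-1')

    # Doubling (or halving) the open shard count keeps the number of
    # intermediate splits and merges to a minimum, per the note above.
    kinesis.update_shard_count(
        StreamName='my-stream',
        TargetShardCount=4,
        ScalingType='UNIFORM_SCALING')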
},
"shapes":{
@ -457,6 +511,13 @@
},
"documentation":"<p>Represents the input for <a>EnableEnhancedMonitoring</a>.</p>"
},
"EncryptionType":{
"type":"string",
"enum":[
"NONE",
"KMS"
]
},
"EnhancedMetrics":{
"type":"structure",
"members":{
@ -633,6 +694,77 @@
"documentation":"<p>A specified parameter exceeds its restrictions, is not supported, or can't be used. For more information, see the returned message.</p>",
"exception":true
},
"KMSAccessDeniedException":{
"type":"structure",
"members":{
"message":{
"shape":"ErrorMessage",
"documentation":"<p>A message that provides information about the error.</p>"
}
},
"documentation":"<p>The ciphertext references a key that doesn't exist or that you don't have access to.</p>",
"exception":true
},
"KMSDisabledException":{
"type":"structure",
"members":{
"message":{
"shape":"ErrorMessage",
"documentation":"<p>A message that provides information about the error.</p>"
}
},
"documentation":"<p>The request was rejected because the specified CMK isn't enabled.</p>",
"exception":true
},
"KMSInvalidStateException":{
"type":"structure",
"members":{
"message":{
"shape":"ErrorMessage",
"documentation":"<p>A message that provides information about the error.</p>"
}
},
"documentation":"<p>The request was rejected because the state of the specified resource isn't valid for this request. For more information, see <a href=\"http://docs.aws.amazon.com/kms/latest/developerguide/key-state.html\">How Key State Affects Use of a Customer Master Key</a> in the <i>AWS Key Management Service Developer Guide</i>.</p>",
"exception":true
},
"KMSNotFoundException":{
"type":"structure",
"members":{
"message":{
"shape":"ErrorMessage",
"documentation":"<p>A message that provides information about the error.</p>"
}
},
"documentation":"<p>The request was rejected because the specified entity or resource couldn't be found.</p>",
"exception":true
},
"KMSOptInRequired":{
"type":"structure",
"members":{
"message":{
"shape":"ErrorMessage",
"documentation":"<p>A message that provides information about the error.</p>"
}
},
"documentation":"<p>The AWS access key ID needs a subscription for the service.</p>",
"exception":true
},
"KMSThrottlingException":{
"type":"structure",
"members":{
"message":{
"shape":"ErrorMessage",
"documentation":"<p>A message that provides information about the error.</p>"
}
},
"documentation":"<p>The request was denied due to request throttling. For more information about throttling, see <a href=\"http://docs.aws.amazon.com/kms/latest/developerguide/limits.html#requests-per-second\">Limits</a> in the <i>AWS Key Management Service Developer Guide</i>.</p>",
"exception":true
},
"KeyId":{
"type":"string",
"max":2048,
"min":1
},
"LimitExceededException":{
"type":"structure",
"members":{
@ -835,6 +967,10 @@
"SequenceNumber":{
"shape":"SequenceNumber",
"documentation":"<p>The sequence number identifier that was assigned to the put data record. The sequence number for the record is unique across all records in the stream. A sequence number is the identifier associated with every record put into the stream.</p>"
},
"EncryptionType":{
"shape":"EncryptionType",
"documentation":"<p>The encryption type to use on the record. This parameter can be one of the following values:</p> <ul> <li> <p> <code>NONE</code>: Do not encrypt the records in the stream.</p> </li> <li> <p> <code>KMS</code>: Use server-side encryption on the records in the stream using a customer-managed KMS key.</p> </li> </ul>"
}
},
"documentation":"<p>Represents the output for <code>PutRecord</code>.</p>"
@ -868,6 +1004,10 @@
"Records":{
"shape":"PutRecordsResultEntryList",
"documentation":"<p>An array of successfully and unsuccessfully processed record results, correlated with the request by natural ordering. A record that is successfully added to a stream includes <code>SequenceNumber</code> and <code>ShardId</code> in the result. A record that fails to be added to a stream includes <code>ErrorCode</code> and <code>ErrorMessage</code> in the result.</p>"
},
"EncryptionType":{
"shape":"EncryptionType",
"documentation":"<p>The encryption type used on the records. This parameter can be one of the following values:</p> <ul> <li> <p> <code>NONE</code>: Do not encrypt the records.</p> </li> <li> <p> <code>KMS</code>: Use server-side encryption on the records using a customer-managed KMS key.</p> </li> </ul>"
}
},
"documentation":"<p> <code>PutRecords</code> results.</p>"
@ -938,7 +1078,7 @@
"members":{
"SequenceNumber":{
"shape":"SequenceNumber",
"documentation":"<p>The unique identifier of the record in the stream.</p>"
"documentation":"<p>The unique identifier of the record within its shard.</p>"
},
"ApproximateArrivalTimestamp":{
"shape":"Timestamp",
@ -951,6 +1091,10 @@
"PartitionKey":{
"shape":"PartitionKey",
"documentation":"<p>Identifies which shard in the stream the data record is assigned to.</p>"
},
"EncryptionType":{
"shape":"EncryptionType",
"documentation":"<p>The encryption type used on the record. This parameter can be one of the following values:</p> <ul> <li> <p> <code>NONE</code>: Do not encrypt the records in the stream.</p> </li> <li> <p> <code>KMS</code>: Use server-side encryption on the records in the stream using a customer-managed KMS key.</p> </li> </ul>"
}
},
"documentation":"<p>The unit of data of the Amazon Kinesis stream, which is composed of a sequence number, a partition key, and a data blob.</p>"
@ -1106,6 +1250,50 @@
},
"documentation":"<p>Represents the input for <code>SplitShard</code>.</p>"
},
"StartStreamEncryptionInput":{
"type":"structure",
"required":[
"StreamName",
"EncryptionType",
"KeyId"
],
"members":{
"StreamName":{
"shape":"StreamName",
"documentation":"<p>The name of the stream for which to start encrypting records.</p>"
},
"EncryptionType":{
"shape":"EncryptionType",
"documentation":"<p>The encryption type to use. This parameter can be one of the following values:</p> <ul> <li> <p> <code>NONE</code>: Not valid for this operation. An <code>InvalidOperationException</code> will be thrown.</p> </li> <li> <p> <code>KMS</code>: Use server-side encryption on the records in the stream using a customer-managed KMS key.</p> </li> </ul>"
},
"KeyId":{
"shape":"KeyId",
"documentation":"<p>The GUID for the customer-managed KMS key to use for encryption. You can also use a Kinesis-owned master key by specifying the alias <code>aws/kinesis</code>.</p>"
}
}
},
"StopStreamEncryptionInput":{
"type":"structure",
"required":[
"StreamName",
"EncryptionType",
"KeyId"
],
"members":{
"StreamName":{
"shape":"StreamName",
"documentation":"<p>The name of the stream on which to stop encrypting records.</p>"
},
"EncryptionType":{
"shape":"EncryptionType",
"documentation":"<p>The encryption type. This parameter can be one of the following values:</p> <ul> <li> <p> <code>NONE</code>: Not valid for this operation. An <code>InvalidOperationException</code> will be thrown.</p> </li> <li> <p> <code>KMS</code>: Use server-side encryption on the records in the stream using a customer-managed KMS key.</p> </li> </ul>"
},
"KeyId":{
"shape":"KeyId",
"documentation":"<p>The GUID for the customer-managed key that was used for encryption.</p>"
}
}
},
"StreamARN":{"type":"string"},
"StreamDescription":{
"type":"structure",
@ -1151,6 +1339,14 @@
"EnhancedMonitoring":{
"shape":"EnhancedMonitoringList",
"documentation":"<p>Represents the current enhanced monitoring settings of the stream.</p>"
},
"EncryptionType":{
"shape":"EncryptionType",
"documentation":"<p>The server-side encryption type used on the stream. This parameter can be one of the following values:</p> <ul> <li> <p> <code>NONE</code>: Do not encrypt the records in the stream.</p> </li> <li> <p> <code>KMS</code>: Use server-side encryption on the records in the stream using a customer-managed KMS key.</p> </li> </ul>"
},
"KeyId":{
"shape":"KeyId",
"documentation":"<p>The GUID for the customer-managed KMS key used for encryption on the stream.</p>"
}
},
"documentation":"<p>Represents the output for <a>DescribeStream</a>.</p>"

View file

@ -1446,6 +1446,13 @@
},
"documentation":"<p>Contains information about each entry in the key list.</p>"
},
"KeyManagerType":{
"type":"string",
"enum":[
"AWS",
"CUSTOMER"
]
},
"KeyMetadata":{
"type":"structure",
"required":["KeyId"],
@ -1497,6 +1504,10 @@
"ExpirationModel":{
"shape":"ExpirationModelType",
"documentation":"<p>Specifies whether the CMK's key material expires. This value is present only when <code>Origin</code> is <code>EXTERNAL</code>, otherwise this value is omitted.</p>"
},
"KeyManager":{
"shape":"KeyManagerType",
"documentation":"<p>The CMK's manager. CMKs are either customer-managed or AWS-managed. For more information about the difference, see <a href=\"http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys\">Customer Master Keys</a> in the <i>AWS Key Management Service Developer Guide</i>.</p>"
}
},
"documentation":"<p>Contains metadata about a customer master key (CMK).</p> <p>This data type is used as a response element for the <a>CreateKey</a> and <a>DescribeKey</a> operations.</p>"

View file

@ -2551,7 +2551,8 @@
"messages":{
"shape":"ErrorMessages",
"documentation":"<p>Descriptive message for the error response.</p>"
}
},
"message":{"shape":"ErrorMessage"}
},
"documentation":"<p>This exception contains a list of messages that might contain one or more error messages. Each error message indicates one error in the change batch.</p>",
"exception":true

View file

@ -165,7 +165,7 @@
{"shape":"ResourceDataSyncAlreadyExistsException"},
{"shape":"ResourceDataSyncInvalidConfigurationException"}
],
"documentation":"<p>Creates a resource data sync configuration to a single bucket in Amazon S3. This is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data to the Amazon S3 bucket. To check the status of the sync, use the <a href=\"API_ListResourceDataSync.html\">ListResourceDataSync</a> operation.</p> <p>By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. To view an example of a restrictive Amazon S3 bucket policy for Resource Data Sync, see <a href=\"http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-datasync-create.html\">Creating a Resource Data Sync</a>.</p>"
"documentation":"<p>Creates a resource data sync configuration to a single bucket in Amazon S3. This is an asynchronous operation that returns immediately. After a successful initial sync is completed, the system continuously syncs data to the Amazon S3 bucket. To check the status of the sync, use the <a href=\"API_ListResourceDataSync.html\">ListResourceDataSync</a> operation.</p> <p>By default, data is not encrypted in Amazon S3. We strongly recommend that you enable encryption in Amazon S3 to ensure secure data storage. We also recommend that you secure access to the Amazon S3 bucket by creating a restrictive bucket policy. To view an example of a restrictive Amazon S3 bucket policy for Resource Data Sync, see <a href=\"http://docs.aws.amazon.com/systems-manager/latest/userguide/sysman-inventory-configuring.html#sysman-inventory-datasync\">Configuring Resource Data Sync for Inventory</a>.</p>"
},
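A brief, hedged sketch of creating such a resource data sync with botocore; the sync name, bucket, and region are placeholders, and the bucket policy and encryption settings mentioned above still need to be configured separately:

    import botocore.session

    ssm = botocore.session.get_session().create_client(
        'ssm', region_name='us-east-1')

    ssm.create_resource_data_sync(
        SyncName='inventory-sync',
        S3Destination={
            'BucketName': 'my-inventory-bucket',
            'SyncFormat': 'JsonSerDe',
            'Region': 'us-east-1',
        })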
"DeleteActivation":{
"name":"DeleteActivation",
@ -453,9 +453,10 @@
"errors":[
{"shape":"InvalidResourceId"},
{"shape":"DoesNotExistException"},
{"shape":"UnsupportedOperatingSystem"},
{"shape":"InternalServerError"}
],
"documentation":"<p>Retrieves the current effective patches (the patch and the approval state) for the specified patch baseline.</p>"
"documentation":"<p>Retrieves the current effective patches (the patch and the approval state) for the specified patch baseline. Note that this API applies only to Windows patch baselines.</p>"
},
"DescribeInstanceAssociationsStatus":{
"name":"DescribeInstanceAssociationsStatus",
@ -715,7 +716,7 @@
"errors":[
{"shape":"InternalServerError"}
],
"documentation":"<p>Retrieves the default patch baseline.</p>"
"documentation":"<p>Retrieves the default patch baseline. Note that Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.</p>"
},
"GetDeployablePatchSnapshotForInstance":{
"name":"GetDeployablePatchSnapshotForInstance",
@ -726,9 +727,10 @@
"input":{"shape":"GetDeployablePatchSnapshotForInstanceRequest"},
"output":{"shape":"GetDeployablePatchSnapshotForInstanceResult"},
"errors":[
{"shape":"InternalServerError"}
{"shape":"InternalServerError"},
{"shape":"UnsupportedOperatingSystem"}
],
"documentation":"<p>Retrieves the current snapshot for the patch baseline the instance uses. This API is primarily used by the AWS-ApplyPatchBaseline Systems Manager document. </p>"
"documentation":"<p>Retrieves the current snapshot for the patch baseline the instance uses. This API is primarily used by the AWS-RunPatchBaseline Systems Manager document. </p>"
},
"GetDocument":{
"name":"GetDocument",
@ -2519,6 +2521,10 @@
"type":"structure",
"required":["Name"],
"members":{
"OperatingSystem":{
"shape":"OperatingSystem",
"documentation":"<p>Defines the operating system the patch baseline applies to. Supported operating systems include WINDOWS, AMAZON_LINUX, UBUNTU and REDHAT_ENTERPRISE_LINUX. The Default value is WINDOWS.</p>"
},
"Name":{
"shape":"BaselineName",
"documentation":"<p>The name of the patch baseline.</p>"
@ -2535,6 +2541,10 @@
"shape":"PatchIdList",
"documentation":"<p>A list of explicitly approved patches for the baseline.</p>"
},
"ApprovedPatchesComplianceLevel":{
"shape":"PatchComplianceLevel",
"documentation":"<p>Defines the compliance level for approved patches. This means that if an approved patch is reported as missing, this is the severity of the compliance violation. Valid compliance severity levels include the following: CRITICAL, HIGH, MEDIUM, LOW, INFORMATIONAL, UNSPECIFIED. The default value is UNSPECIFIED.</p>"
},
"RejectedPatches":{
"shape":"PatchIdList",
"documentation":"<p>A list of explicitly rejected patches for the baseline.</p>"
@ -3606,6 +3616,10 @@
"documentation":"<p>The maximum number of patch groups to return (per page).</p>",
"box":true
},
"Filters":{
"shape":"PatchOrchestratorFilterList",
"documentation":"<p>One or more filters. Use a filter to return a more specific list of results.</p>"
},
"NextToken":{
"shape":"NextToken",
"documentation":"<p>The token for the next set of items to return. (You received this token from a previous call.)</p>"
@ -4145,6 +4159,10 @@
"GetDefaultPatchBaselineRequest":{
"type":"structure",
"members":{
"OperatingSystem":{
"shape":"OperatingSystem",
"documentation":"<p>Returns the default patch baseline for the specified operating system.</p>"
}
}
},
"GetDefaultPatchBaselineResult":{
@ -4153,6 +4171,10 @@
"BaselineId":{
"shape":"BaselineId",
"documentation":"<p>The ID of the default patch baseline.</p>"
},
"OperatingSystem":{
"shape":"OperatingSystem",
"documentation":"<p>The operating system for the returned patch baseline. </p>"
}
}
},
@ -4187,6 +4209,10 @@
"SnapshotDownloadUrl":{
"shape":"SnapshotDownloadUrl",
"documentation":"<p>A pre-signed Amazon S3 URL that can be used to download the patch snapshot.</p>"
},
"Product":{
"shape":"Product",
"documentation":"<p>Returns the specific operating system (for example Windows Server 2012 or Amazon Linux 2015.09) on the instance for the specified patch snapshot.</p>"
}
}
},
@ -4607,6 +4633,10 @@
"PatchGroup":{
"shape":"PatchGroup",
"documentation":"<p>The name of the patch group whose patch baseline should be retrieved.</p>"
},
"OperatingSystem":{
"shape":"OperatingSystem",
"documentation":"<p>Returns he operating system rule specified for patch groups using the patch baseline.</p>"
}
}
},
@ -4620,6 +4650,10 @@
"PatchGroup":{
"shape":"PatchGroup",
"documentation":"<p>The name of the patch group.</p>"
},
"OperatingSystem":{
"shape":"OperatingSystem",
"documentation":"<p>The operating system rule specified for patch groups using the patch baseline.</p>"
}
}
},
@ -4644,6 +4678,10 @@
"shape":"BaselineName",
"documentation":"<p>The name of the patch baseline.</p>"
},
"OperatingSystem":{
"shape":"OperatingSystem",
"documentation":"<p>Returns the operating system specified for the patch baseline.</p>"
},
"GlobalFilters":{
"shape":"PatchFilterGroup",
"documentation":"<p>A set of global filters used to exclude patches from the baseline.</p>"
@ -4656,6 +4694,10 @@
"shape":"PatchIdList",
"documentation":"<p>A list of explicitly approved patches for the baseline.</p>"
},
"ApprovedPatchesComplianceLevel":{
"shape":"PatchComplianceLevel",
"documentation":"<p>Returns the specified compliance severity level for approved patches in the patch baseline.</p>"
},
"RejectedPatches":{
"shape":"PatchIdList",
"documentation":"<p>A list of explicitly rejected patches for the baseline.</p>"
@ -5070,11 +5112,11 @@
"documentation":"<p>The number of patches from the patch baseline that aren't applicable for the instance and hence aren't installed on the instance.</p>"
},
"OperationStartTime":{
"shape":"PatchOperationStartTime",
"shape":"DateTime",
"documentation":"<p>The time the most recent patching operation was started on the instance.</p>"
},
"OperationEndTime":{
"shape":"PatchOperationEndTime",
"shape":"DateTime",
"documentation":"<p>The time the most recent patching operation completed on the instance.</p>"
},
"Operation":{
@ -6570,6 +6612,15 @@
"Invocation"
]
},
"OperatingSystem":{
"type":"string",
"enum":[
"WINDOWS",
"AMAZON_LINUX",
"UBUNTU",
"REDHAT_ENTERPRISE_LINUX"
]
},
"OwnerInformation":{
"type":"string",
"max":128,
@ -6914,13 +6965,17 @@
"shape":"BaselineName",
"documentation":"<p>The name of the patch baseline.</p>"
},
"OperatingSystem":{
"shape":"OperatingSystem",
"documentation":"<p>Defines the operating system the patch baseline applies to. Supported operating systems include WINDOWS, AMAZON_LINUX, UBUNTU and REDHAT_ENTERPRISE_LINUX. The Default value is WINDOWS. </p>"
},
"BaselineDescription":{
"shape":"BaselineDescription",
"documentation":"<p>The description of the patch baseline.</p>"
},
"DefaultBaseline":{
"shape":"DefaultBaseline",
"documentation":"<p>Whether this is the default baseline.</p>"
"documentation":"<p>Whether this is the default baseline. Note that Systems Manager supports creating multiple default patch baselines. For example, you can create a default patch baseline for each operating system.</p>"
}
},
"documentation":"<p>Defines the basic information about a patch baseline.</p>"
@ -6952,7 +7007,7 @@
},
"KBId":{
"shape":"PatchKbNumber",
"documentation":"<p>The Microsoft Knowledge Base ID of the patch.</p>"
"documentation":"<p>The operating system-specific ID of the patch.</p>"
},
"Classification":{
"shape":"PatchClassification",
@ -6967,8 +7022,8 @@
"documentation":"<p>The state of the patch on the instance (INSTALLED, INSTALLED_OTHER, MISSING, NOT_APPLICABLE or FAILED).</p>"
},
"InstalledTime":{
"shape":"PatchInstalledTime",
"documentation":"<p>The date/time the patch was installed on the instance.</p>"
"shape":"DateTime",
"documentation":"<p>The date/time the patch was installed on the instance. Note that not all operating systems provide this level of information.</p>"
}
},
"documentation":"<p>Information about the state of a patch on a particular instance as it relates to the patch baseline used to patch the instance.</p>"
@ -6987,6 +7042,17 @@
"FAILED"
]
},
"PatchComplianceLevel":{
"type":"string",
"enum":[
"CRITICAL",
"HIGH",
"MEDIUM",
"LOW",
"INFORMATIONAL",
"UNSPECIFIED"
]
},
"PatchComplianceMaxResults":{
"type":"integer",
"max":100,
@ -7039,7 +7105,10 @@
"PRODUCT",
"CLASSIFICATION",
"MSRC_SEVERITY",
"PATCH_ID"
"PATCH_ID",
"SECTION",
"PRIORITY",
"SEVERITY"
]
},
"PatchFilterList":{
@ -7089,7 +7158,8 @@
},
"PatchId":{
"type":"string",
"pattern":"(^KB[0-9]{1,7}$)|(^MS[0-9]{2}\\-[0-9]{3}$)"
"max":100,
"min":1
},
"PatchIdList":{
"type":"list",
@ -7099,7 +7169,6 @@
},
"PatchInstalledCount":{"type":"integer"},
"PatchInstalledOtherCount":{"type":"integer"},
"PatchInstalledTime":{"type":"timestamp"},
"PatchKbNumber":{"type":"string"},
"PatchLanguage":{"type":"string"},
"PatchList":{
@ -7110,8 +7179,6 @@
"PatchMsrcNumber":{"type":"string"},
"PatchMsrcSeverity":{"type":"string"},
"PatchNotApplicableCount":{"type":"integer"},
"PatchOperationEndTime":{"type":"timestamp"},
"PatchOperationStartTime":{"type":"timestamp"},
"PatchOperationType":{
"type":"string",
"enum":[
@ -7166,6 +7233,10 @@
"shape":"PatchFilterGroup",
"documentation":"<p>The patch filter group that defines the criteria for the rule.</p>"
},
"ComplianceLevel":{
"shape":"PatchComplianceLevel",
"documentation":"<p>A compliance severity level for all approved patches in a patch baseline. Valid compliance severity levels include the following: Unspecified, Critical, High, Medium, Low, and Informational.</p>"
},
"ApproveAfterDays":{
"shape":"ApproveAfterDays",
"documentation":"<p>The number of days after the release date of each patch matched by the rule the patch is marked as approved in the patch baseline.</p>",
@ -7199,6 +7270,10 @@
"shape":"PatchDeploymentStatus",
"documentation":"<p>The approval status of a patch (APPROVED, PENDING_APPROVAL, EXPLICIT_APPROVED, EXPLICIT_REJECTED).</p>"
},
"ComplianceLevel":{
"shape":"PatchComplianceLevel",
"documentation":"<p>The compliance severity level for a patch.</p>"
},
"ApprovalDate":{
"shape":"DateTime",
"documentation":"<p>The date the patch was approved (or will be approved if the status is PENDING_APPROVAL).</p>"
@ -7230,6 +7305,7 @@
"locationName":"PlatformType"
}
},
"Product":{"type":"string"},
"PutInventoryRequest":{
"type":"structure",
"required":[
@ -8036,6 +8112,14 @@
"documentation":"<p>Inventory item type schema version has to match supported versions in the service. Check output of GetInventorySchema to see the available schema version for each type.</p>",
"exception":true
},
"UnsupportedOperatingSystem":{
"type":"structure",
"members":{
"Message":{"shape":"String"}
},
"documentation":"<p>The operating systems you specified is not supported, or the operation is not supported for the operating system. Valid operating systems include: Windows, AmazonLinux, RedhatEnterpriseLinux, and Ubuntu.</p>",
"exception":true
},
"UnsupportedParameterType":{
"type":"structure",
"members":{
@ -8299,6 +8383,10 @@
"shape":"PatchIdList",
"documentation":"<p>A list of explicitly approved patches for the baseline.</p>"
},
"ApprovedPatchesComplianceLevel":{
"shape":"PatchComplianceLevel",
"documentation":"<p>Assigns a new compliance severity level to an existing patch baseline.</p>"
},
"RejectedPatches":{
"shape":"PatchIdList",
"documentation":"<p>A list of explicitly rejected patches for the baseline.</p>"
@ -8320,6 +8408,10 @@
"shape":"BaselineName",
"documentation":"<p>The name of the patch baseline.</p>"
},
"OperatingSystem":{
"shape":"OperatingSystem",
"documentation":"<p>The operating system rule used by the updated patch baseline.</p>"
},
"GlobalFilters":{
"shape":"PatchFilterGroup",
"documentation":"<p>A set of global filters used to exclude patches from the baseline.</p>"
@ -8332,6 +8424,10 @@
"shape":"PatchIdList",
"documentation":"<p>A list of explicitly approved patches for the baseline.</p>"
},
"ApprovedPatchesComplianceLevel":{
"shape":"PatchComplianceLevel",
"documentation":"<p>The compliance severity level assigned to the patch baseline after the update completed.</p>"
},
"RejectedPatches":{
"shape":"PatchIdList",
"documentation":"<p>A list of explicitly rejected patches for the baseline.</p>"

View file

@ -178,7 +178,7 @@ class SharedExampleDocumenter(object):
section.write("datetime(%s)," % datetime_str)
def _get_comment(self, path, comments):
key = re.sub('^\.', '', ''.join(path))
key = re.sub(r'^\.', '', ''.join(path))
if comments and key in comments:
return '# ' + comments[key]
else:

View file

@ -266,15 +266,18 @@ class EndpointCreator(object):
def create_endpoint(self, service_model, region_name, endpoint_url,
verify=None, response_parser_factory=None,
timeout=DEFAULT_TIMEOUT,
max_pool_connections=MAX_POOL_CONNECTIONS):
max_pool_connections=MAX_POOL_CONNECTIONS,
proxies=None):
if not is_valid_endpoint_url(endpoint_url):
raise ValueError("Invalid endpoint: %s" % endpoint_url)
if proxies is None:
proxies = self._get_proxies(endpoint_url)
return Endpoint(
endpoint_url,
endpoint_prefix=service_model.endpoint_prefix,
event_emitter=self._event_emitter,
proxies=self._get_proxies(endpoint_url),
proxies=proxies,
verify=self._get_verify_value(verify),
timeout=timeout,
max_pool_connections=max_pool_connections,

View file

@ -54,7 +54,7 @@ REGISTER_LAST = object()
# to be as long as 255 characters, and bucket names can contain any
# combination of uppercase letters, lowercase letters, numbers, periods
# (.), hyphens (-), and underscores (_).
VALID_BUCKET = re.compile('^[a-zA-Z0-9.\-_]{1,255}$')
VALID_BUCKET = re.compile(r'^[a-zA-Z0-9.\-_]{1,255}$')
VERSION_ID_SUFFIX = re.compile(r'\?versionId=[^\s]+$')
@ -671,7 +671,7 @@ def check_openssl_supports_tls_version_1_2(**kwargs):
import ssl
try:
openssl_version_tuple = ssl.OPENSSL_VERSION_INFO
if openssl_version_tuple[0] < 1 or openssl_version_tuple[2] < 1:
if openssl_version_tuple < (1, 0, 1):
warnings.warn(
'Currently installed openssl version: %s does not '
'support TLS 1.2, which is required for use of iot-data. '

View file

@ -39,7 +39,7 @@ METADATA_SECURITY_CREDENTIALS_URL = (
# These are chars that do not need to be urlencoded.
# Based on rfc2986, section 2.3
SAFE_CHARS = '-._~'
LABEL_RE = re.compile('[a-z0-9][a-z0-9\-]*[a-z0-9]')
LABEL_RE = re.compile(r'[a-z0-9][a-z0-9\-]*[a-z0-9]')
RESTRICTED_REGIONS = [
'us-gov-west-1',
'fips-us-gov-west-1',
@ -79,7 +79,7 @@ def get_service_module_name(service_model):
'serviceFullName', service_model.service_name))
name = name.replace('Amazon', '')
name = name.replace('AWS', '')
name = re.sub('\W+', '', name)
name = re.sub(r'\W+', '', name)
return name
@ -637,7 +637,7 @@ def is_valid_endpoint_url(endpoint_url):
if hostname[-1] == ".":
hostname = hostname[:-1]
allowed = re.compile(
"^((?!-)[A-Z\d-]{1,63}(?<!-)\.)*((?!-)[A-Z\d-]{1,63}(?<!-))$",
r"^((?!-)[A-Z\d-]{1,63}(?<!-)\.)*((?!-)[A-Z\d-]{1,63}(?<!-))$",
re.IGNORECASE)
return allowed.match(hostname)

View file

@ -54,7 +54,7 @@ copyright = u'2013, Mitch Garnaat'
# The short X.Y version.
version = '1.5.'
# The full version, including alpha/beta/rc tags.
release = '1.5.78'
release = '1.5.80'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.

View file

@ -10,7 +10,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from nose.tools import assert_equals
from nose.tools import assert_equal
import botocore.session
@ -78,4 +78,4 @@ def test_client_has_correct_class_name():
def _assert_class_name_matches_ref_class_name(client, ref_class_name):
assert_equals(client.__class__.__name__, ref_class_name)
assert_equal(client.__class__.__name__, ref_class_name)

View file

@ -13,7 +13,7 @@
from tests import create_session
import mock
from nose.tools import assert_equals, assert_raises
from nose.tools import assert_equal, assert_raises
from botocore.client import ClientEndpointBridge
from botocore.exceptions import NoRegionError
@ -465,7 +465,7 @@ def _test_single_service_region(service_name, region_name,
bridge = ClientEndpointBridge(resolver, None, None)
result = bridge.resolve(service_name, region_name)
expected = 'https://%s' % expected_endpoint
assert_equals(result['endpoint_url'], expected)
assert_equal(result['endpoint_url'], expected)
# Ensure that all S3 regions use s3v4 instead of v4
@ -491,7 +491,7 @@ def _test_single_service_partition_endpoint(service_name, expected_endpoint,
resolver):
bridge = ClientEndpointBridge(resolver)
result = bridge.resolve(service_name)
assert_equals(result['endpoint_url'], expected_endpoint)
assert_equal(result['endpoint_url'], expected_endpoint)
def test_non_partition_endpoint_requires_region():

View file

@ -0,0 +1,65 @@
"""
Regression test for six issue #98 (https://github.com/benjaminp/six/issues/98)
"""
from mock import patch
import sys
import threading
import time
from botocore.vendored import six
_original_setattr = six.moves.__class__.__setattr__
def _wrapped_setattr(key, value):
# Monkey patch six.moves.__setattr__ to simulate
# a poorly-timed thread context switch
time.sleep(0.1)
return _original_setattr(six.moves, key, value)
def _reload_six():
# Issue #98 is caused by a race condition in six._LazyDescr.__get__
# which is only called once per moved module. Reload six so all the
# moved modules are reset.
if sys.version_info < (3, 0):
reload(six)
elif sys.version_info < (3, 4):
import imp
imp.reload(six)
else:
import importlib
importlib.reload(six)
class _ExampleThread(threading.Thread):
def __init__(self):
super(_ExampleThread, self).__init__()
self.daemon = False
self.exc_info = None
def run(self):
try:
# Simulate use of six by
# botocore.configloader.raw_config_parse()
# Should raise AttributeError if six < 1.9.0
six.moves.configparser.RawConfigParser()
except Exception:
self.exc_info = sys.exc_info()
def test_six_thread_safety():
_reload_six()
with patch('botocore.vendored.six.moves.__class__.__setattr__',
wraps=_wrapped_setattr):
threads = []
for i in range(2):
t = _ExampleThread()
threads.append(t)
t.start()
while threads:
t = threads.pop()
t.join()
if t.exc_info:
six.reraise(*t.exc_info)

View file

@ -14,7 +14,7 @@ import os
import mock
from pprint import pformat
import warnings
from nose.tools import assert_equals, assert_true
from nose.tools import assert_equal, assert_true
from botocore import xform_name
import botocore.session
@ -246,9 +246,9 @@ def _make_client_call(client, operation_name, kwargs):
method = getattr(client, operation_name)
with warnings.catch_warnings(record=True) as caught_warnings:
response = method(**kwargs)
assert_equals(len(caught_warnings), 0,
"Warnings were emitted during smoke test: %s"
% caught_warnings)
assert_equal(len(caught_warnings), 0,
"Warnings were emitted during smoke test: %s"
% caught_warnings)
assert_true('Errors' not in response)

View file

@ -225,6 +225,20 @@ class TestSigV2(unittest.TestCase):
result, ('Foo=%E2%9C%93',
u'VCtWuwaOL0yMffAT8W4y0AFW3W4KUykBqah9S40rB+Q='))
def test_get(self):
request = Request()
request.url = '/'
request.method = 'GET'
request.params = {'Foo': u'\u2713'}
self.signer.add_auth(request)
self.assertEqual(request.params['AWSAccessKeyId'], 'foo')
self.assertEqual(request.params['Foo'], u'\u2713')
self.assertEqual(request.params['Timestamp'], '2014-06-20T08:40:23Z')
self.assertEqual(request.params['Signature'],
u'Un97klqZCONP65bA1+Iv4H3AcB2I40I4DBvw5ZERFPw=')
self.assertEqual(request.params['SignatureMethod'], 'HmacSHA256')
self.assertEqual(request.params['SignatureVersion'], '2')
class TestSigV3(unittest.TestCase):
@ -638,8 +652,8 @@ class TestS3SigV2Presign(BasePresignTest):
self.assertEqual(query_string['AWSAccessKeyId'], self.access_key)
self.assertEqual(query_string['Expires'],
str(int(self.current_epoch_time) + self.expires))
self.assertEquals(query_string['Signature'],
'ZRSgywstwIruKLTLt/Bcrf9H1K4=')
self.assertEqual(query_string['Signature'],
'ZRSgywstwIruKLTLt/Bcrf9H1K4=')
def test_presign_with_x_amz_headers(self):
self.request.headers['x-amz-security-token'] = 'foo'
@ -648,8 +662,8 @@ class TestS3SigV2Presign(BasePresignTest):
query_string = self.get_parsed_query_string(self.request)
self.assertEqual(query_string['x-amz-security-token'], 'foo')
self.assertEqual(query_string['x-amz-acl'], 'read-only')
self.assertEquals(query_string['Signature'],
'5oyMAGiUk1E5Ry2BnFr6cIS3Gus=')
self.assertEqual(query_string['Signature'],
'5oyMAGiUk1E5Ry2BnFr6cIS3Gus=')
def test_presign_with_content_headers(self):
self.request.headers['content-type'] = 'txt'
@ -658,16 +672,16 @@ class TestS3SigV2Presign(BasePresignTest):
query_string = self.get_parsed_query_string(self.request)
self.assertEqual(query_string['content-type'], 'txt')
self.assertEqual(query_string['content-md5'], 'foo')
self.assertEquals(query_string['Signature'],
'/YQRFdQGywXP74WrOx2ET/RUqz8=')
self.assertEqual(query_string['Signature'],
'/YQRFdQGywXP74WrOx2ET/RUqz8=')
def test_presign_with_unused_headers(self):
self.request.headers['user-agent'] = 'botocore'
self.auth.add_auth(self.request)
query_string = self.get_parsed_query_string(self.request)
self.assertNotIn('user-agent', query_string)
self.assertEquals(query_string['Signature'],
'ZRSgywstwIruKLTLt/Bcrf9H1K4=')
self.assertEqual(query_string['Signature'],
'ZRSgywstwIruKLTLt/Bcrf9H1K4=')
class TestSigV4Presign(BasePresignTest):

View file

@ -128,7 +128,35 @@ class TestCreateClientArgs(unittest.TestCase):
m.return_value.create_endpoint.assert_called_with(
mock.ANY, endpoint_url='https://ec2/', region_name='us-west-2',
response_parser_factory=None, timeout=(60, 60), verify=True,
max_pool_connections=20
max_pool_connections=20, proxies=None
)
def test_proxies_from_client_config_forwarded_to_endpoint_creator(self):
args_create = args.ClientArgsCreator(
mock.Mock(), None, None, None, None)
proxies = {'http': 'http://foo.bar:1234',
'https': 'https://foo.bar:4321'}
config = botocore.config.Config(proxies=proxies)
service_model = mock.Mock()
service_model.metadata = {
'serviceFullName': 'MyService',
'protocol': 'query'
}
service_model.operation_names = []
bridge = mock.Mock()
bridge.resolve.return_value = {
'region_name': 'us-west-2', 'signature_version': 'v4',
'endpoint_url': 'https://ec2/',
'signing_name': 'ec2', 'signing_region': 'us-west-2',
'metadata': {}}
with mock.patch('botocore.args.EndpointCreator') as m:
args_create.get_client_args(
service_model, 'us-west-2', True, 'https://ec2/', True,
None, {}, config, bridge)
m.return_value.create_endpoint.assert_called_with(
mock.ANY, endpoint_url='https://ec2/', region_name='us-west-2',
response_parser_factory=None, timeout=(60, 60), verify=True,
proxies=proxies, max_pool_connections=10
)
def test_s3_with_endpoint_url_still_resolves_region(self):
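The new test above pins down the proxies plumbing: a proxies mapping on the client config is forwarded to the endpoint creator. From the caller's side, usage might look like the following sketch; the proxy hosts, service, and region are placeholders, not values from the diff.

import botocore.session
from botocore.config import Config

session = botocore.session.get_session()
config = Config(proxies={'http': 'http://proxy.example.com:3128',
                         'https': 'http://proxy.example.com:3128'})
client = session.create_client('ec2', region_name='us-west-2',
                               config=config)
# Requests made by this client are routed through the configured proxies.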

View file

@ -334,7 +334,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
'myservice', 'us-west-2', credentials=self.credentials)
self.assertEqual(service_client.meta.region_name, 'us-west-2')
call_args = mock_signer.call_args
self.assertEquals(credential_scope_region, call_args[0][1])
self.assertEqual(credential_scope_region, call_args[0][1])
def test_client_uses_signing_region_from_credential_scope(self):
with mock.patch('botocore.args.RequestSigner') as mock_signer:
@ -355,7 +355,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
# Ensure that we use the credential scope region for signing,
# and not the resolved region name.
call_args = mock_signer.call_args
self.assertEquals(credential_scope_region, call_args[0][1])
self.assertEqual(credential_scope_region, call_args[0][1])
def test_client_uses_signing_name_from_credential_scope(self):
with mock.patch('botocore.args.RequestSigner') as mock_signer:
@ -371,8 +371,8 @@ class TestAutoGeneratedClient(unittest.TestCase):
service_name='myservice', region_name='us-west-2',
credentials=self.credentials)
call_args = mock_signer.call_args
self.assertEquals('myservice', call_args[0][0])
self.assertEquals('override', call_args[0][2])
self.assertEqual('myservice', call_args[0][0])
self.assertEqual('override', call_args[0][2])
def test_client_uses_given_region_name_and_endpoint_url_when_present(self):
with mock.patch('botocore.args.RequestSigner') as mock_signer:
@ -390,7 +390,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
credentials=self.credentials, endpoint_url='https://foo')
self.assertEqual(service_client.meta.region_name, 'us-west-2')
call_args = mock_signer.call_args
self.assertEquals('us-west-2', call_args[0][1])
self.assertEqual('us-west-2', call_args[0][1])
def test_client_uses_signing_name_from_model_if_present_if_resolved(self):
self.service_description['metadata']['signingName'] = 'otherName'
@ -407,7 +407,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
credentials=self.credentials, endpoint_url='https://foo')
self.assertEqual(service_client.meta.region_name, 'us-west-2')
call_args = mock_signer.call_args[0]
self.assertEquals('otherName', call_args[2])
self.assertEqual('otherName', call_args[2])
def test_client_uses_signing_name_even_with_no_resolve(self):
self.service_description['metadata']['signingName'] = 'otherName'
@ -419,7 +419,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
credentials=self.credentials, endpoint_url='https://foo')
self.assertEqual(service_client.meta.region_name, 'us-west-2')
call_args = mock_signer.call_args[0]
self.assertEquals('otherName', call_args[2])
self.assertEqual('otherName', call_args[2])
@mock.patch('botocore.args.RequestSigner')
def test_client_signature_no_override(self, request_signer):
@ -639,7 +639,7 @@ class TestAutoGeneratedClient(unittest.TestCase):
self.resolver.construct_endpoint.return_value = None
creator = self.create_client_creator()
client = creator.create_client('myservice', region_name='invalid')
self.assertEquals('invalid', client.meta.region_name)
self.assertEqual('invalid', client.meta.region_name)
def test_client_with_response_parser_factory(self):
factory = mock.Mock()

View file

@ -139,6 +139,12 @@ class TestEndpointFeatures(TestEndpointBase):
# but that feels like testing too much implementation detail.
self.assertEqual(endpoint.max_pool_connections, 50)
def test_can_specify_proxies(self):
proxies = {'http': 'http://foo.bar:1234'}
endpoint = Endpoint('https://ec2.us-west-2.amazonaws.com', 'ec2',
self.event_emitter, proxies=proxies)
self.assertEqual(endpoint.proxies, proxies)
class TestRetryInterface(TestEndpointBase):
def setUp(self):

View file

@ -11,7 +11,7 @@
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from nose.tools import assert_equals
from nose.tools import assert_equal
from botocore import exceptions
@ -19,7 +19,7 @@ from botocore import exceptions
def test_client_error_can_handle_missing_code_or_message():
response = {'Error': {}}
expect = 'An error occurred (Unknown) when calling the blackhole operation: Unknown'
assert_equals(str(exceptions.ClientError(response, 'blackhole')), expect)
assert_equal(str(exceptions.ClientError(response, 'blackhole')), expect)
def test_client_error_has_operation_name_set():
@ -31,7 +31,7 @@ def test_client_error_has_operation_name_set():
def test_client_error_set_correct_operation_name():
response = {'Error': {}}
exception = exceptions.ClientError(response, 'blackhole')
assert_equals(exception.operation_name, 'blackhole')
assert_equal(exception.operation_name, 'blackhole')
def test_retry_info_added_when_present():

View file

@ -36,4 +36,4 @@ class TestIdempotencyInjection(unittest.TestCase):
# RequiredKey is provided, should not be replaced
params = {'RequiredKey': 'already populated'}
generate_idempotent_uuid(params, self.mock_model)
self.assertEquals(params['RequiredKey'], 'already populated')
self.assertEqual(params['RequiredKey'], 'already populated')
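For context, an illustrative sketch (not the diff's code) of what the handler under test does: any idempotency-token member the caller left out is filled with a freshly generated UUID, while caller-supplied values, as asserted above, are left untouched.

import uuid


def _fill_idempotency_tokens(params, idempotent_member_names):
    # Only members the caller did not supply get an auto-generated token.
    for name in idempotent_member_names:
        if name not in params:
            params[name] = str(uuid.uuid4())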

View file

@ -23,7 +23,7 @@ class TestEndpointResolver(unittest.TestCase):
{
'partition': 'aws',
'dnsSuffix': 'amazonaws.com',
'regionRegex': '^(us|eu)\-\w+$',
'regionRegex': r'^(us|eu)\-\w+$',
'defaults': {
'hostname': '{service}.{region}.{dnsSuffix}'
},
@ -86,7 +86,7 @@ class TestEndpointResolver(unittest.TestCase):
{
'partition': 'foo',
'dnsSuffix': 'foo.com',
'regionRegex': '^(foo)\-\w+$',
'regionRegex': r'^(foo)\-\w+$',
'defaults': {
'hostname': '{service}.{region}.{dnsSuffix}',
'protocols': ['http'],
@ -123,29 +123,29 @@ class TestEndpointResolver(unittest.TestCase):
def test_returns_empty_list_when_listing_for_different_partition(self):
resolver = regions.EndpointResolver(self._template())
self.assertEquals([], resolver.get_available_endpoints('ec2', 'bar'))
self.assertEqual([], resolver.get_available_endpoints('ec2', 'bar'))
def test_returns_empty_list_when_no_service_found(self):
resolver = regions.EndpointResolver(self._template())
self.assertEquals([], resolver.get_available_endpoints('what?'))
self.assertEqual([], resolver.get_available_endpoints('what?'))
def test_gets_endpoint_names(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.get_available_endpoints(
'ec2', allow_non_regional=True)
self.assertEquals(['d', 'eu-baz', 'us-bar', 'us-foo'], sorted(result))
self.assertEqual(['d', 'eu-baz', 'us-bar', 'us-foo'], sorted(result))
def test_gets_endpoint_names_for_partition(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.get_available_endpoints(
'ec2', allow_non_regional=True, partition_name='foo')
self.assertEquals(['foo-1', 'foo-2', 'foo-3'], sorted(result))
self.assertEqual(['foo-1', 'foo-2', 'foo-3'], sorted(result))
def test_list_regional_endpoints_only(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.get_available_endpoints(
'ec2', allow_non_regional=False)
self.assertEquals(['eu-baz', 'us-bar', 'us-foo'], sorted(result))
self.assertEqual(['eu-baz', 'us-bar', 'us-foo'], sorted(result))
def test_returns_none_when_no_match(self):
resolver = regions.EndpointResolver(self._template())
@ -154,65 +154,65 @@ class TestEndpointResolver(unittest.TestCase):
def test_constructs_regionalized_endpoints_for_exact_matches(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('not-regionalized', 'eu-baz')
self.assertEquals('not-regionalized.eu-baz.amazonaws.com',
self.assertEqual('not-regionalized.eu-baz.amazonaws.com',
result['hostname'])
self.assertEquals('aws', result['partition'])
self.assertEquals('eu-baz', result['endpointName'])
self.assertEqual('aws', result['partition'])
self.assertEqual('eu-baz', result['endpointName'])
def test_constructs_partition_endpoints_for_real_partition_region(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('not-regionalized', 'us-bar')
self.assertEquals('not-regionalized', result['hostname'])
self.assertEquals('aws', result['partition'])
self.assertEquals('aws', result['endpointName'])
self.assertEqual('not-regionalized', result['hostname'])
self.assertEqual('aws', result['partition'])
self.assertEqual('aws', result['endpointName'])
def test_constructs_partition_endpoints_for_regex_match(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('not-regionalized', 'us-abc')
self.assertEquals('not-regionalized', result['hostname'])
self.assertEqual('not-regionalized', result['hostname'])
def test_constructs_endpoints_for_regionalized_regex_match(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('s3', 'us-abc')
self.assertEquals('s3.us-abc.amazonaws.com', result['hostname'])
self.assertEqual('s3.us-abc.amazonaws.com', result['hostname'])
def test_constructs_endpoints_for_unknown_service_but_known_region(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('unknown', 'us-foo')
self.assertEquals('unknown.us-foo.amazonaws.com', result['hostname'])
self.assertEqual('unknown.us-foo.amazonaws.com', result['hostname'])
def test_merges_service_keys(self):
resolver = regions.EndpointResolver(self._template())
us_foo = resolver.construct_endpoint('merge', 'us-foo')
us_bar = resolver.construct_endpoint('merge', 'us-bar')
self.assertEquals(['http'], us_foo['protocols'])
self.assertEquals(['v4'], us_foo['signatureVersions'])
self.assertEquals(['https'], us_bar['protocols'])
self.assertEquals(['v2'], us_bar['signatureVersions'])
self.assertEqual(['http'], us_foo['protocols'])
self.assertEqual(['v4'], us_foo['signatureVersions'])
self.assertEqual(['https'], us_bar['protocols'])
self.assertEqual(['v2'], us_bar['signatureVersions'])
def test_merges_partition_default_keys_with_no_overwrite(self):
resolver = regions.EndpointResolver(self._template())
resolved = resolver.construct_endpoint('ec2', 'foo-1')
self.assertEquals('baz', resolved['foo'])
self.assertEquals(['http'], resolved['protocols'])
self.assertEqual('baz', resolved['foo'])
self.assertEqual(['http'], resolved['protocols'])
def test_merges_partition_default_keys_with_overwrite(self):
resolver = regions.EndpointResolver(self._template())
resolved = resolver.construct_endpoint('ec2', 'foo-2')
self.assertEquals('bar', resolved['foo'])
self.assertEquals(['http'], resolved['protocols'])
self.assertEqual('bar', resolved['foo'])
self.assertEqual(['http'], resolved['protocols'])
def test_gives_hostname_and_common_name_unaltered(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('s3', 'eu-baz')
self.assertEquals('s3.eu-baz.amazonaws.com', result['sslCommonName'])
self.assertEquals('foo', result['hostname'])
self.assertEqual('s3.eu-baz.amazonaws.com', result['sslCommonName'])
self.assertEqual('foo', result['hostname'])
def tests_uses_partition_endpoint_when_no_region_provided(self):
resolver = regions.EndpointResolver(self._template())
result = resolver.construct_endpoint('not-regionalized')
self.assertEquals('not-regionalized', result['hostname'])
self.assertEquals('aws', result['endpointName'])
self.assertEqual('not-regionalized', result['hostname'])
self.assertEqual('aws', result['endpointName'])
def test_returns_dns_suffix_if_available(self):
resolver = regions.EndpointResolver(self._template())
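Besides the assertEqual renames, the regionRegex patterns in this file were switched to raw string literals. A short illustration of why (not from the diff): escapes such as \- and \w are not valid string escape sequences, so Python 3.6+ emits a DeprecationWarning for them in ordinary literals, whereas a raw string passes the backslashes through to the regex engine unchanged.

import re

# r'' keeps the backslashes literal, so no escape-sequence warning is
# raised and the pattern is exactly what re sees.
pattern = re.compile(r'^(us|eu)\-\w+$')
assert pattern.match('us-abc')
assert not pattern.match('cn-north-1')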

View file

@ -328,7 +328,7 @@ class TestSessionPartitionFiles(BaseSessionTest):
mock_resolver = mock.Mock()
mock_resolver.get_available_partitions.return_value = ['foo']
self.session.register_component('endpoint_resolver', mock_resolver)
self.assertEquals(['foo'], self.session.get_available_partitions())
self.assertEqual(['foo'], self.session.get_available_partitions())
def test_proxies_list_endpoints_to_resolver(self):
resolver = mock.Mock()