diff --git a/PKG-INFO b/PKG-INFO index 57193da6..1850b9eb 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.8.28 +Version: 1.8.36 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/PKG-INFO b/botocore.egg-info/PKG-INFO index 57193da6..1850b9eb 100644 --- a/botocore.egg-info/PKG-INFO +++ b/botocore.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.8.28 +Version: 1.8.36 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/SOURCES.txt b/botocore.egg-info/SOURCES.txt index b3258966..a48c52c5 100644 --- a/botocore.egg-info/SOURCES.txt +++ b/botocore.egg-info/SOURCES.txt @@ -59,6 +59,8 @@ botocore/data/appsync/2017-07-25/paginators-1.json botocore/data/appsync/2017-07-25/service-2.json botocore/data/athena/2017-05-18/paginators-1.json botocore/data/athena/2017-05-18/service-2.json +botocore/data/autoscaling-plans/2018-01-06/paginators-1.json +botocore/data/autoscaling-plans/2018-01-06/service-2.json botocore/data/autoscaling/2011-01-01/examples-1.json botocore/data/autoscaling/2011-01-01/paginators-1.json botocore/data/autoscaling/2011-01-01/service-2.json @@ -447,6 +449,8 @@ botocore/data/support/2013-04-15/paginators-1.json botocore/data/support/2013-04-15/service-2.json botocore/data/swf/2012-01-25/paginators-1.json botocore/data/swf/2012-01-25/service-2.json +botocore/data/transcribe/2017-10-26/paginators-1.json +botocore/data/transcribe/2017-10-26/service-2.json botocore/data/translate/2017-07-01/paginators-1.json botocore/data/translate/2017-07-01/service-2.json botocore/data/waf-regional/2016-11-28/examples-1.json diff --git a/botocore/__init__.py b/botocore/__init__.py index 326bbcf2..14ee2a37 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.8.28' +__version__ = '1.8.36' class NullHandler(logging.Handler): diff --git a/botocore/client.py b/botocore/client.py index f23d0a3c..f1fca3fd 100644 --- a/botocore/client.py +++ b/botocore/client.py @@ -139,7 +139,7 @@ class ClientCreator(object): # Also make sure that the hostname gets switched to # s3-accelerate.amazonaws.com client.meta.events.register_first( - 'request-created.s3', switch_host_s3_accelerate) + 'before-sign.s3', switch_host_s3_accelerate) self._set_s3_presign_signature_version( client.meta, client_config, scoped_config) @@ -618,6 +618,16 @@ class BaseClient(object): def _convert_to_request_dict(self, api_params, operation_model, context=None): + api_params = self._emit_api_params( + api_params, operation_model, context) + request_dict = self._serializer.serialize_to_request( + api_params, operation_model) + prepare_request_dict(request_dict, endpoint_url=self._endpoint.host, + user_agent=self._client_config.user_agent, + context=context) + return request_dict + + def _emit_api_params(self, api_params, operation_model, context): # Given the API params provided by the user and the operation_model # we can serialize the request to a request_dict. 
operation_name = operation_model.name @@ -639,13 +649,7 @@ class BaseClient(object): endpoint_prefix=self._service_model.endpoint_prefix, operation_name=operation_name), params=api_params, model=operation_model, context=context) - - request_dict = self._serializer.serialize_to_request( - api_params, operation_model) - prepare_request_dict(request_dict, endpoint_url=self._endpoint.host, - user_agent=self._client_config.user_agent, - context=context) - return request_dict + return api_params def get_paginator(self, operation_name): """Create a paginator for an operation. diff --git a/botocore/credentials.py b/botocore/credentials.py index d846b1e5..370c6d80 100644 --- a/botocore/credentials.py +++ b/botocore/credentials.py @@ -1207,12 +1207,13 @@ class AssumeRoleProvider(CredentialProvider): def load(self): self._loaded_config = self._load_config() - if self._has_assume_role_config_vars(): + profiles = self._loaded_config.get('profiles', {}) + profile = profiles.get(self._profile_name, {}) + if self._has_assume_role_config_vars(profile): return self._load_creds_via_assume_role(self._profile_name) - def _has_assume_role_config_vars(self): - profiles = self._loaded_config.get('profiles', {}) - return self.ROLE_CONFIG_VAR in profiles.get(self._profile_name, {}) + def _has_assume_role_config_vars(self, profile): + return self.ROLE_CONFIG_VAR in profile def _load_creds_via_assume_role(self, profile_name): role_config = self._get_role_config(profile_name) @@ -1310,27 +1311,46 @@ class AssumeRoleProvider(CredentialProvider): 'valid.' % (credential_source, parent_profile) )) - def _validate_source_profile(self, parent_profile, source_profile): + def _source_profile_has_credentials(self, profile): + return any([ + self._has_static_credentials(profile), + self._has_assume_role_config_vars(profile), + ]) + + def _validate_source_profile(self, parent_profile_name, + source_profile_name): profiles = self._loaded_config.get('profiles', {}) - if source_profile not in profiles: + if source_profile_name not in profiles: raise InvalidConfigError( error_msg=( 'The source_profile "%s" referenced in ' 'the profile "%s" does not exist.' % ( - source_profile, parent_profile) + source_profile_name, parent_profile_name) + ) + ) + + source_profile = profiles[source_profile_name] + + # Ensure the profile has valid credential type + if not self._source_profile_has_credentials(source_profile): + raise InvalidConfigError( + error_msg=( + 'The source_profile "%s" must specify either static ' + 'credentials or an assume role configuration' % ( + source_profile_name) ) ) # Make sure we aren't going into an infinite loop. If we haven't # visited the profile yet, we're good. - if source_profile not in self._visited_profiles: + if source_profile_name not in self._visited_profiles: return # If we have visited the profile and the profile isn't simply # referencing itself, that's an infinite loop. - if source_profile != parent_profile: + if source_profile_name != parent_profile_name: raise InfiniteLoopConfigError( - source_profile=source_profile, + source_profile=source_profile_name, visited_profiles=self._visited_profiles ) @@ -1339,9 +1359,9 @@ class AssumeRoleProvider(CredentialProvider): # profile. This will only ever work for the top level assume # role because the static credentials will otherwise take # precedence. 
- if not self._has_static_credentials(profiles[source_profile]): + if not self._has_static_credentials(source_profile): raise InfiniteLoopConfigError( - source_profile=source_profile, + source_profile=source_profile_name, visited_profiles=self._visited_profiles ) diff --git a/botocore/data/alexaforbusiness/2017-11-09/paginators-1.json b/botocore/data/alexaforbusiness/2017-11-09/paginators-1.json index ea142457..a91b700d 100644 --- a/botocore/data/alexaforbusiness/2017-11-09/paginators-1.json +++ b/botocore/data/alexaforbusiness/2017-11-09/paginators-1.json @@ -1,3 +1,46 @@ { - "pagination": {} + "pagination": { + "ListSkills": { + "result_key": "SkillSummaries", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "SearchUsers": { + "result_key": "Users", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTags": { + "result_key": "Tags", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "SearchProfiles": { + "result_key": "Profiles", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "SearchSkillGroups": { + "result_key": "SkillGroups", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "SearchDevices": { + "result_key": "Devices", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "SearchRooms": { + "result_key": "Rooms", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + } + } } diff --git a/botocore/data/alexaforbusiness/2017-11-09/service-2.json b/botocore/data/alexaforbusiness/2017-11-09/service-2.json index 2c172636..bd4ac445 100644 --- a/botocore/data/alexaforbusiness/2017-11-09/service-2.json +++ b/botocore/data/alexaforbusiness/2017-11-09/service-2.json @@ -805,7 +805,11 @@ }, "DeviceStatus":{ "shape":"DeviceStatus", - "documentation":"

The status of a device.

" + "documentation":"

The status of a device. If the status is not READY, check the DeviceStatusInfo for details.

" + }, + "DeviceStatusInfo":{ + "shape":"DeviceStatusInfo", + "documentation":"

Detailed information about a device's status.

" } }, "documentation":"

A device with attributes.

" @@ -848,6 +852,10 @@ "RoomName":{ "shape":"RoomName", "documentation":"

The name of the room associated with a device.

" + }, + "DeviceStatusInfo":{ + "shape":"DeviceStatusInfo", + "documentation":"

Detailed information about a device's status.

" } }, "documentation":"

Device attributes.

" @@ -874,6 +882,37 @@ "WAS_OFFLINE" ] }, + "DeviceStatusDetail":{ + "type":"structure", + "members":{ + "Code":{ + "shape":"DeviceStatusDetailCode", + "documentation":"

The device status detail code.

" + } + }, + "documentation":"

Details of a device's status.

" + }, + "DeviceStatusDetailCode":{ + "type":"string", + "enum":[ + "DEVICE_SOFTWARE_UPDATE_NEEDED", + "DEVICE_WAS_OFFLINE" + ] + }, + "DeviceStatusDetails":{ + "type":"list", + "member":{"shape":"DeviceStatusDetail"} + }, + "DeviceStatusInfo":{ + "type":"structure", + "members":{ + "DeviceStatusDetails":{ + "shape":"DeviceStatusDetails", + "documentation":"

One or more device status detail descriptions.

" + } + }, + "documentation":"

Detailed information about a device's status.

" + }, "DeviceType":{ "type":"string", "pattern":"[a-zA-Z0-9]{1,200}" @@ -1497,7 +1536,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

The filters to use to list a specified set of devices. Supported filter keys are DeviceName, DeviceStatus, RoomName, DeviceType, DeviceSerialNumber, and UnassociatedOnly.

" + "documentation":"

The filters to use to list a specified set of devices. Supported filter keys are DeviceName, DeviceStatus, DeviceStatusDetailCode, RoomName, DeviceType, DeviceSerialNumber, and UnassociatedOnly.
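As a hedged aside (not part of this changeset), the new DeviceStatusDetailCode filter key pairs naturally with the SearchDevices paginator added above in paginators-1.json. The region, filter value, and field accesses below are illustrative assumptions, not taken from this diff:

```python
import botocore.session

# Sketch only: assumes valid AWS credentials are configured.
session = botocore.session.get_session()
a4b = session.create_client('alexaforbusiness', region_name='us-east-1')

paginator = a4b.get_paginator('search_devices')
pages = paginator.paginate(
    Filters=[{'Key': 'DeviceStatusDetailCode', 'Values': ['DEVICE_WAS_OFFLINE']}],
    PaginationConfig={'PageSize': 50},  # fed to MaxResults via the limit_key
)
for page in pages:
    for device in page['Devices']:  # "Devices" is the paginator's result_key
        print(device.get('DeviceSerialNumber'), device.get('DeviceStatus'))
```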

" }, "SortCriteria":{ "shape":"SortList", diff --git a/botocore/data/application-autoscaling/2016-02-06/service-2.json b/botocore/data/application-autoscaling/2016-02-06/service-2.json index 09b992a5..608b62d6 100644 --- a/botocore/data/application-autoscaling/2016-02-06/service-2.json +++ b/botocore/data/application-autoscaling/2016-02-06/service-2.json @@ -75,7 +75,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Provides descriptive information about the scalable targets in the specified namespace.

You can filter the results using the ResourceIds and ScalableDimension parameters.

To create a scalable target or update an existing one, see RegisterScalableTarget. If you are no longer using a scalable target, you can deregister it using DeregisterScalableTarget.

" + "documentation":"

Gets information about the scalable targets in the specified namespace.

You can filter the results using the ResourceIds and ScalableDimension parameters.

To create a scalable target or update an existing one, see RegisterScalableTarget. If you are no longer using a scalable target, you can deregister it using DeregisterScalableTarget.

" }, "DescribeScalingActivities":{ "name":"DescribeScalingActivities", @@ -142,7 +142,7 @@ {"shape":"FailedResourceAccessException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates or updates a policy for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scaling policy applies to the scalable target identified by those three attributes. You cannot create a scaling policy without first registering a scalable target using RegisterScalableTarget.

To update a policy, specify its policy name and the parameters that you want to change. Any parameters that you don't specify are not changed by this update request.

You can view the scaling policies for a service namespace using DescribeScalingPolicies. If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy.

" + "documentation":"

Creates or updates a policy for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scaling policy applies to the scalable target identified by those three attributes. You cannot create a scaling policy until you register the scalable target using RegisterScalableTarget.

To update a policy, specify its policy name and the parameters that you want to change. Any parameters that you don't specify are not changed by this update request.

You can view the scaling policies for a service namespace using DescribeScalingPolicies. If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy.

" }, "PutScheduledAction":{ "name":"PutScheduledAction", @@ -159,7 +159,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Creates or updates a scheduled action for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scheduled action applies to the scalable target identified by those three attributes. You cannot create a scheduled action without first registering a scalable target using RegisterScalableTarget.

To update an action, specify its name and the parameters that you want to change. If you don't specify start and end times, the old values are deleted. Any other parameters that you don't specify are not changed by this update request.

You can view the scheduled actions using DescribeScheduledActions. If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction.

" + "documentation":"

Creates or updates a scheduled action for an Application Auto Scaling scalable target.

Each scalable target is identified by a service namespace, resource ID, and scalable dimension. A scheduled action applies to the scalable target identified by those three attributes. You cannot create a scheduled action until you register the scalable target using RegisterScalableTarget.

To update an action, specify its name and the parameters that you want to change. If you don't specify start and end times, the old values are deleted. Any other parameters that you don't specify are not changed by this update request.

You can view the scheduled actions using DescribeScheduledActions. If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction.

" }, "RegisterScalableTarget":{ "name":"RegisterScalableTarget", @@ -175,7 +175,7 @@ {"shape":"ConcurrentUpdateException"}, {"shape":"InternalServiceException"} ], - "documentation":"

Registers or updates a scalable target. A scalable target is a resource that Application Auto Scaling can scale out or scale in. After you have registered a scalable target, you can use this operation to update the minimum and maximum values for your scalable dimension.

After you register a scalable target, you can create and apply scaling policies using PutScalingPolicy. You can view the scaling policies for a service namespace using DescribeScalableTargets. If you are no longer using a scalable target, you can deregister it using DeregisterScalableTarget.

" + "documentation":"

Registers or updates a scalable target. A scalable target is a resource that Application Auto Scaling can scale out or scale in. After you have registered a scalable target, you can use this operation to update the minimum and maximum values for its scalable dimension.

After you register a scalable target, you can create and apply scaling policies using PutScalingPolicy. You can view the scaling policies for a service namespace using DescribeScalableTargets. If you no longer need a scalable target, you can deregister it using DeregisterScalableTarget.

" } }, "shapes":{ @@ -357,7 +357,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of scalable target results. This value can be between 1 and 50. The default value is 50.

If this parameter is used, the operation returns up to MaxResults results at a time, along with a NextToken value. To get the next set of results, include the NextToken value in a subsequent call. If this parameter is not used, the operation returns up to 50 results and a NextToken value, if applicable.

" + "documentation":"

The maximum number of scalable targets. This value can be between 1 and 50. The default value is 50.

If this parameter is used, the operation returns up to MaxResults results at a time, along with a NextToken value. To get the next set of results, include the NextToken value in a subsequent call. If this parameter is not used, the operation returns up to 50 results and a NextToken value, if applicable.

" }, "NextToken":{ "shape":"XmlString", @@ -370,7 +370,7 @@ "members":{ "ScalableTargets":{ "shape":"ScalableTargets", - "documentation":"

The list of scalable targets that matches the request parameters.

" + "documentation":"

The scalable targets that match the request parameters.

" }, "NextToken":{ "shape":"XmlString", @@ -396,7 +396,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of scalable target results. This value can be between 1 and 50. The default value is 50.

If this parameter is used, the operation returns up to MaxResults results at a time, along with a NextToken value. To get the next set of results, include the NextToken value in a subsequent call. If this parameter is not used, the operation returns up to 50 results and a NextToken value, if applicable.

" + "documentation":"

The maximum number of scalable targets. This value can be between 1 and 50. The default value is 50.

If this parameter is used, the operation returns up to MaxResults results at a time, along with a NextToken value. To get the next set of results, include the NextToken value in a subsequent call. If this parameter is not used, the operation returns up to 50 results and a NextToken value, if applicable.

" }, "NextToken":{ "shape":"XmlString", @@ -439,7 +439,7 @@ }, "MaxResults":{ "shape":"MaxResults", - "documentation":"

The maximum number of scalable target results. This value can be between 1 and 50. The default value is 50.

If this parameter is used, the operation returns up to MaxResults results at a time, along with a NextToken value. To get the next set of results, include the NextToken value in a subsequent call. If this parameter is not used, the operation returns up to 50 results and a NextToken value, if applicable.

" + "documentation":"

The maximum number of scalable targets. This value can be between 1 and 50. The default value is 50.

If this parameter is used, the operation returns up to MaxResults results at a time, along with a NextToken value. To get the next set of results, include the NextToken value in a subsequent call. If this parameter is not used, the operation returns up to 50 results and a NextToken value, if applicable.

" }, "NextToken":{ "shape":"XmlString", @@ -510,7 +510,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

Failed access to resources caused an exception. This exception is thrown when Application Auto Scaling is unable to retrieve the alarms associated with a scaling policy due to a client error, for example, if the role ARN specified for a scalable target does not have permission to call the CloudWatch DescribeAlarms API operation on behalf of your account.

", + "documentation":"

Failed access to resources caused an exception. This exception is thrown when Application Auto Scaling is unable to retrieve the alarms associated with a scaling policy due to a client error, for example, if the role ARN specified for a scalable target does not have permission to call the CloudWatch DescribeAlarms on your behalf.

", "exception":true }, "InternalServiceException":{ @@ -534,7 +534,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

Your account exceeded a limit. This exception is thrown when a per-account resource limit is exceeded. For more information, see Application Auto Scaling Limits.

", + "documentation":"

A per-account resource limit is exceeded. For more information, see Application Auto Scaling Limits.

", "exception":true }, "MaxResults":{"type":"integer"}, @@ -593,7 +593,9 @@ "RDSReaderAverageDatabaseConnections", "EC2SpotFleetRequestAverageCPUUtilization", "EC2SpotFleetRequestAverageNetworkIn", - "EC2SpotFleetRequestAverageNetworkOut" + "EC2SpotFleetRequestAverageNetworkOut", + "ECSServiceAverageCPUUtilization", + "ECSServiceAverageMemoryUtilization" ] }, "MetricUnit":{"type":"string"}, @@ -603,7 +605,7 @@ "members":{ "Message":{"shape":"ErrorMessage"} }, - "documentation":"

The specified object could not be found. For any Put or Register API operation, which depends on the existence of a scalable target, this exception is thrown if the scalable target with the specified service namespace, resource ID, and scalable dimension does not exist. For any Delete or Deregister API operation, this exception is thrown if the resource that is to be deleted or deregistered cannot be found.

", + "documentation":"

The specified object could not be found. For any operation that depends on the existence of a scalable target, this exception is thrown if the scalable target with the specified service namespace, resource ID, and scalable dimension does not exist. For any operation that deletes or deregisters a resource, this exception is thrown if the resource cannot be found.

", "exception":true }, "PolicyName":{ @@ -625,11 +627,11 @@ "members":{ "PredefinedMetricType":{ "shape":"MetricType", - "documentation":"

The metric type. The ALBRequestCountPerTarget metric type applies only to Spot fleet requests.

" + "documentation":"

The metric type. The ALBRequestCountPerTarget metric type applies only to Spot fleet requests and ECS services.

" }, "ResourceLabel":{ "shape":"ResourceLabel", - "documentation":"

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Spot fleet request.

The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:

" + "documentation":"

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Spot fleet request or ECS service.

The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:

" } }, "documentation":"

Configures a predefined metric for a target tracking policy.

" @@ -661,7 +663,7 @@ }, "PolicyType":{ "shape":"PolicyType", - "documentation":"

The policy type. If you are creating a new policy, this parameter is required. If you are updating a policy, this parameter is not required.

For DynamoDB, only TargetTrackingScaling is supported. For any other service, only StepScaling is supported.

" + "documentation":"

The policy type. This parameter is required if you are creating a policy.

For DynamoDB, only TargetTrackingScaling is supported. For Amazon ECS, Spot Fleet, and Amazon RDS, both StepScaling and TargetTrackingScaling are supported. For any other service, only StepScaling is supported.
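A hedged sketch of the ECS target tracking support described above (the policy name, resource ID, and target value are made up, and the scalable target is assumed to be registered already via RegisterScalableTarget):

```python
import botocore.session

session = botocore.session.get_session()
aas = session.create_client('application-autoscaling', region_name='us-east-1')

# Assumes RegisterScalableTarget was already called for this service/dimension.
aas.put_scaling_policy(
    PolicyName='ecs-cpu-target-tracking',            # hypothetical name
    ServiceNamespace='ecs',
    ResourceId='service/default/web',                # hypothetical cluster/service
    ScalableDimension='ecs:service:DesiredCount',
    PolicyType='TargetTrackingScaling',
    TargetTrackingScalingPolicyConfiguration={
        'PredefinedMetricSpecification': {
            'PredefinedMetricType': 'ECSServiceAverageCPUUtilization',
        },
        'TargetValue': 60.0,
        'ScaleOutCooldown': 60,
        'ScaleInCooldown': 60,
    },
)
```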

" }, "StepScalingPolicyConfiguration":{ "shape":"StepScalingPolicyConfiguration", @@ -669,7 +671,7 @@ }, "TargetTrackingScalingPolicyConfiguration":{ "shape":"TargetTrackingScalingPolicyConfiguration", - "documentation":"

A target tracking policy.

This parameter is required if you are creating a new policy and the policy type is TargetTrackingScaling.

" + "documentation":"

A target tracking policy.

This parameter is required if you are creating a policy and the policy type is TargetTrackingScaling.

" } } }, @@ -713,7 +715,7 @@ }, "ScalableDimension":{ "shape":"ScalableDimension", - "documentation":"

The scalable dimension. This string consists of the service namespace, resource type, and scaling property.

" + "documentation":"

The scalable dimension. This parameter is required if you are creating a scheduled action. This string consists of the service namespace, resource type, and scaling property.

" }, "StartTime":{ "shape":"TimestampType", @@ -756,15 +758,15 @@ }, "MinCapacity":{ "shape":"ResourceCapacity", - "documentation":"

The minimum value to scale to in response to a scale in event. This parameter is required if you are registering a scalable target and optional if you are updating one.

" + "documentation":"

The minimum value to scale to in response to a scale in event. This parameter is required if you are registering a scalable target.

" }, "MaxCapacity":{ "shape":"ResourceCapacity", - "documentation":"

The maximum value to scale to in response to a scale out event. This parameter is required if you are registering a scalable target and optional if you are updating one.

" + "documentation":"

The maximum value to scale to in response to a scale out event. This parameter is required if you are registering a scalable target.

" }, "RoleARN":{ "shape":"ResourceIdMaxLen1600", - "documentation":"

The ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

With Amazon RDS resources, permissions are granted using a service-linked role. For more information, see Service-Linked Roles for Application Auto Scaling.

For resources that are not supported using a service-linked role, this parameter is required when you register a scalable target and optional when you update one.

" + "documentation":"

Application Auto Scaling creates a service-linked role that grants it permissions to modify the scalable target on your behalf. For more information, see Service-Linked Roles for Application Auto Scaling.

For resources that are not supported using a service-linked role, this parameter is required and must specify the ARN of an IAM role that allows Application Auto Scaling to modify the scalable target on your behalf.

" } } }, @@ -1172,5 +1174,5 @@ "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" } }, - "documentation":"

With Application Auto Scaling, you can automatically scale your AWS resources. The experience is similar to that of Auto Scaling. You can use Application Auto Scaling to accomplish the following tasks:

Application Auto Scaling can scale the following AWS resources:

For a list of supported regions, see AWS Regions and Endpoints: Application Auto Scaling in the AWS General Reference.

" + "documentation":"

With Application Auto Scaling, you can automatically scale your AWS resources. The experience is similar to that of Auto Scaling. You can use Application Auto Scaling to accomplish the following tasks:

Application Auto Scaling can scale the following AWS resources:

For a list of supported regions, see AWS Regions and Endpoints: Application Auto Scaling in the AWS General Reference.

" } diff --git a/botocore/data/autoscaling-plans/2018-01-06/paginators-1.json b/botocore/data/autoscaling-plans/2018-01-06/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/autoscaling-plans/2018-01-06/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/autoscaling-plans/2018-01-06/service-2.json b/botocore/data/autoscaling-plans/2018-01-06/service-2.json new file mode 100644 index 00000000..b624a87b --- /dev/null +++ b/botocore/data/autoscaling-plans/2018-01-06/service-2.json @@ -0,0 +1,665 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2018-01-06", + "endpointPrefix":"autoscaling", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"AWS Auto Scaling Plans", + "serviceId":"Auto Scaling Plans", + "signatureVersion":"v4", + "signingName":"autoscaling-plans", + "targetPrefix":"AnyScaleScalingPlannerFrontendService", + "uid":"autoscaling-plans-2018-01-06" + }, + "operations":{ + "CreateScalingPlan":{ + "name":"CreateScalingPlan", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"CreateScalingPlanRequest"}, + "output":{"shape":"CreateScalingPlanResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"LimitExceededException"}, + {"shape":"ConcurrentUpdateException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Creates a scaling plan.

A scaling plan contains a set of instructions used to configure dynamic scaling for the scalable resources in your application. AWS Auto Scaling creates target tracking scaling policies based on the scaling instructions in your scaling plan.

" + }, + "DeleteScalingPlan":{ + "name":"DeleteScalingPlan", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteScalingPlanRequest"}, + "output":{"shape":"DeleteScalingPlanResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"ObjectNotFoundException"}, + {"shape":"ConcurrentUpdateException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Deletes the specified scaling plan.

" + }, + "DescribeScalingPlanResources":{ + "name":"DescribeScalingPlanResources", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScalingPlanResourcesRequest"}, + "output":{"shape":"DescribeScalingPlanResourcesResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ConcurrentUpdateException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Describes the scalable resources in the specified scaling plan.

" + }, + "DescribeScalingPlans":{ + "name":"DescribeScalingPlans", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DescribeScalingPlansRequest"}, + "output":{"shape":"DescribeScalingPlansResponse"}, + "errors":[ + {"shape":"ValidationException"}, + {"shape":"InvalidNextTokenException"}, + {"shape":"ConcurrentUpdateException"}, + {"shape":"InternalServiceException"} + ], + "documentation":"

Describes the specified scaling plans or all of your scaling plans.

" + } + }, + "shapes":{ + "ApplicationSource":{ + "type":"structure", + "members":{ + "CloudFormationStackARN":{ + "shape":"XmlString", + "documentation":"

The Amazon Resource Name (ARN) of a CloudFormation stack.

" + } + }, + "documentation":"

Represents an application source.

" + }, + "ApplicationSources":{ + "type":"list", + "member":{"shape":"ApplicationSource"} + }, + "ConcurrentUpdateException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Concurrent updates caused an exception, for example, if you request an update to a scaling plan that already has a pending update.

", + "exception":true + }, + "Cooldown":{"type":"integer"}, + "CreateScalingPlanRequest":{ + "type":"structure", + "required":[ + "ScalingPlanName", + "ApplicationSource", + "ScalingInstructions" + ], + "members":{ + "ScalingPlanName":{ + "shape":"ScalingPlanName", + "documentation":"

The name of the scaling plan.

" + }, + "ApplicationSource":{ + "shape":"ApplicationSource", + "documentation":"

The source for the application.

" + }, + "ScalingInstructions":{ + "shape":"ScalingInstructions", + "documentation":"

The scaling instructions.

" + } + } + }, + "CreateScalingPlanResponse":{ + "type":"structure", + "required":["ScalingPlanVersion"], + "members":{ + "ScalingPlanVersion":{ + "shape":"ScalingPlanVersion", + "documentation":"

The version of the scaling plan. This value is always 1.

" + } + } + }, + "CustomizedScalingMetricSpecification":{ + "type":"structure", + "required":[ + "MetricName", + "Namespace", + "Statistic" + ], + "members":{ + "MetricName":{ + "shape":"MetricName", + "documentation":"

The name of the metric.

" + }, + "Namespace":{ + "shape":"MetricNamespace", + "documentation":"

The namespace of the metric.

" + }, + "Dimensions":{ + "shape":"MetricDimensions", + "documentation":"

The dimensions of the metric.

" + }, + "Statistic":{ + "shape":"MetricStatistic", + "documentation":"

The statistic of the metric.

" + }, + "Unit":{ + "shape":"MetricUnit", + "documentation":"

The unit of the metric.

" + } + }, + "documentation":"

Represents a customized metric for a target tracking policy.

" + }, + "DeleteScalingPlanRequest":{ + "type":"structure", + "required":[ + "ScalingPlanName", + "ScalingPlanVersion" + ], + "members":{ + "ScalingPlanName":{ + "shape":"ScalingPlanName", + "documentation":"

The name of the scaling plan.

" + }, + "ScalingPlanVersion":{ + "shape":"ScalingPlanVersion", + "documentation":"

The version of the scaling plan.

" + } + } + }, + "DeleteScalingPlanResponse":{ + "type":"structure", + "members":{ + } + }, + "DescribeScalingPlanResourcesRequest":{ + "type":"structure", + "required":[ + "ScalingPlanName", + "ScalingPlanVersion" + ], + "members":{ + "ScalingPlanName":{ + "shape":"ScalingPlanName", + "documentation":"

The name of the scaling plan.

" + }, + "ScalingPlanVersion":{ + "shape":"ScalingPlanVersion", + "documentation":"

The version of the scaling plan.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of scalable resources to return. This value can be between 1 and 50. The default value is 50.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results.

" + } + } + }, + "DescribeScalingPlanResourcesResponse":{ + "type":"structure", + "members":{ + "ScalingPlanResources":{ + "shape":"ScalingPlanResources", + "documentation":"

Information about the scalable resources.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token required to get the next set of results. This value is null if there are no more results to return.

" + } + } + }, + "DescribeScalingPlansRequest":{ + "type":"structure", + "members":{ + "ScalingPlanNames":{ + "shape":"ScalingPlanNames", + "documentation":"

The names of the scaling plans (up to 10). If you specify application sources, you cannot specify scaling plan names.

" + }, + "ScalingPlanVersion":{ + "shape":"ScalingPlanVersion", + "documentation":"

The version of the scaling plan. If you specify a scaling plan version, you must also specify a scaling plan name.

" + }, + "ApplicationSources":{ + "shape":"ApplicationSources", + "documentation":"

The sources for the applications (up to 10). If you specify scaling plan names, you cannot specify application sources.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of scalable resources to return. This value can be between 1 and 50. The default value is 50.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token for the next set of results.

" + } + } + }, + "DescribeScalingPlansResponse":{ + "type":"structure", + "members":{ + "ScalingPlans":{ + "shape":"ScalingPlans", + "documentation":"

Information about the scaling plans.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The token required to get the next set of results. This value is null if there are no more results to return.

" + } + } + }, + "DisableScaleIn":{"type":"boolean"}, + "ErrorMessage":{"type":"string"}, + "InternalServiceException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The service encountered an internal error.

", + "exception":true + }, + "InvalidNextTokenException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The token provided is not valid.

", + "exception":true + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

Your account exceeded a limit. This exception is thrown when a per-account resource limit is exceeded.

", + "exception":true + }, + "MaxResults":{"type":"integer"}, + "MetricDimension":{ + "type":"structure", + "required":[ + "Name", + "Value" + ], + "members":{ + "Name":{ + "shape":"MetricDimensionName", + "documentation":"

The name of the dimension.

" + }, + "Value":{ + "shape":"MetricDimensionValue", + "documentation":"

The value of the dimension.

" + } + }, + "documentation":"

Represents a dimension for a customized metric.

" + }, + "MetricDimensionName":{"type":"string"}, + "MetricDimensionValue":{"type":"string"}, + "MetricDimensions":{ + "type":"list", + "member":{"shape":"MetricDimension"} + }, + "MetricName":{"type":"string"}, + "MetricNamespace":{"type":"string"}, + "MetricScale":{"type":"double"}, + "MetricStatistic":{ + "type":"string", + "enum":[ + "Average", + "Minimum", + "Maximum", + "SampleCount", + "Sum" + ] + }, + "MetricUnit":{"type":"string"}, + "NextToken":{"type":"string"}, + "ObjectNotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

The specified object could not be found.

", + "exception":true + }, + "PolicyName":{ + "type":"string", + "max":256, + "min":1, + "pattern":"\\p{Print}+" + }, + "PolicyType":{ + "type":"string", + "enum":["TargetTrackingScaling"] + }, + "PredefinedScalingMetricSpecification":{ + "type":"structure", + "required":["PredefinedScalingMetricType"], + "members":{ + "PredefinedScalingMetricType":{ + "shape":"ScalingMetricType", + "documentation":"

The metric type. The ALBRequestCountPerTarget metric type applies only to Auto Scaling groups, Spot Fleet requests, and ECS services.

" + }, + "ResourceLabel":{ + "shape":"ResourceLabel", + "documentation":"

Identifies the resource associated with the metric type. You can't specify a resource label unless the metric type is ALBRequestCountPerTarget and there is a target group attached to the Auto Scaling group, Spot Fleet request, or ECS service.

The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>, where:

" + } + }, + "documentation":"

Represents a predefined metric for a target tracking policy.

" + }, + "ResourceCapacity":{"type":"integer"}, + "ResourceIdMaxLen1600":{ + "type":"string", + "max":1600, + "min":1, + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + }, + "ResourceLabel":{ + "type":"string", + "max":1023, + "min":1 + }, + "ScalableDimension":{ + "type":"string", + "enum":[ + "autoscaling:autoScalingGroup:DesiredCapacity", + "ecs:service:DesiredCount", + "ec2:spot-fleet-request:TargetCapacity", + "rds:cluster:ReadReplicaCount", + "dynamodb:table:ReadCapacityUnits", + "dynamodb:table:WriteCapacityUnits", + "dynamodb:index:ReadCapacityUnits", + "dynamodb:index:WriteCapacityUnits" + ] + }, + "ScalingInstruction":{ + "type":"structure", + "required":[ + "ServiceNamespace", + "ResourceId", + "ScalableDimension", + "MinCapacity", + "MaxCapacity", + "TargetTrackingConfigurations" + ], + "members":{ + "ServiceNamespace":{ + "shape":"ServiceNamespace", + "documentation":"

The namespace of the AWS service.

" + }, + "ResourceId":{ + "shape":"ResourceIdMaxLen1600", + "documentation":"

The ID of the resource. This string consists of the resource type and unique identifier.

" + }, + "ScalableDimension":{ + "shape":"ScalableDimension", + "documentation":"

The scalable dimension associated with the resource.

" + }, + "MinCapacity":{ + "shape":"ResourceCapacity", + "documentation":"

The minimum value to scale to in response to a scale in event.

" + }, + "MaxCapacity":{ + "shape":"ResourceCapacity", + "documentation":"

The maximum value to scale to in response to a scale out event.

" + }, + "TargetTrackingConfigurations":{ + "shape":"TargetTrackingConfigurations", + "documentation":"

The target tracking scaling policies (up to 10).

" + } + }, + "documentation":"

Specifies the scaling configuration for a scalable resource.

" + }, + "ScalingInstructions":{ + "type":"list", + "member":{"shape":"ScalingInstruction"} + }, + "ScalingMetricType":{ + "type":"string", + "enum":[ + "ASGAverageCPUUtilization", + "ASGAverageNetworkIn", + "ASGAverageNetworkOut", + "DynamoDBReadCapacityUtilization", + "DynamoDBWriteCapacityUtilization", + "ECSServiceAverageCPUUtilization", + "ECSServiceAverageMemoryUtilization", + "ALBRequestCountPerTarget", + "RDSReaderAverageCPUUtilization", + "RDSReaderAverageDatabaseConnections", + "EC2SpotFleetRequestAverageCPUUtilization", + "EC2SpotFleetRequestAverageNetworkIn", + "EC2SpotFleetRequestAverageNetworkOut" + ] + }, + "ScalingPlan":{ + "type":"structure", + "required":[ + "ScalingPlanName", + "ScalingPlanVersion", + "ApplicationSource", + "ScalingInstructions", + "StatusCode" + ], + "members":{ + "ScalingPlanName":{ + "shape":"ScalingPlanName", + "documentation":"

The name of the scaling plan.

" + }, + "ScalingPlanVersion":{ + "shape":"ScalingPlanVersion", + "documentation":"

The version of the scaling plan.

" + }, + "ApplicationSource":{ + "shape":"ApplicationSource", + "documentation":"

The application source.

" + }, + "ScalingInstructions":{ + "shape":"ScalingInstructions", + "documentation":"

The scaling instructions.

" + }, + "StatusCode":{ + "shape":"ScalingPlanStatusCode", + "documentation":"

The status of the scaling plan.

" + }, + "StatusMessage":{ + "shape":"XmlString", + "documentation":"

A simple message about the current status of the scaling plan.

" + }, + "CreationTime":{ + "shape":"TimestampType", + "documentation":"

The Unix timestamp when the scaling plan was created.

" + } + }, + "documentation":"

Represents a scaling plan.

" + }, + "ScalingPlanName":{ + "type":"string", + "max":128, + "min":1, + "pattern":"[\\p{Print}&&[^|]]+" + }, + "ScalingPlanNames":{ + "type":"list", + "member":{"shape":"ScalingPlanName"} + }, + "ScalingPlanResource":{ + "type":"structure", + "required":[ + "ScalingPlanName", + "ScalingPlanVersion", + "ServiceNamespace", + "ResourceId", + "ScalableDimension", + "ScalingStatusCode" + ], + "members":{ + "ScalingPlanName":{ + "shape":"ScalingPlanName", + "documentation":"

The name of the scaling plan.

" + }, + "ScalingPlanVersion":{ + "shape":"ScalingPlanVersion", + "documentation":"

The version of the scaling plan.

" + }, + "ServiceNamespace":{ + "shape":"ServiceNamespace", + "documentation":"

The namespace of the AWS service.

" + }, + "ResourceId":{ + "shape":"ResourceIdMaxLen1600", + "documentation":"

The ID of the resource. This string consists of the resource type and unique identifier.

" + }, + "ScalableDimension":{ + "shape":"ScalableDimension", + "documentation":"

The scalable dimension for the resource.

" + }, + "ScalingPolicies":{ + "shape":"ScalingPolicies", + "documentation":"

The scaling policies.

" + }, + "ScalingStatusCode":{ + "shape":"ScalingStatusCode", + "documentation":"

The scaling status of the resource.

" + }, + "ScalingStatusMessage":{ + "shape":"XmlString", + "documentation":"

A simple message about the current scaling status of the resource.

" + } + }, + "documentation":"

Represents a scalable resource.

" + }, + "ScalingPlanResources":{ + "type":"list", + "member":{"shape":"ScalingPlanResource"} + }, + "ScalingPlanStatusCode":{ + "type":"string", + "enum":[ + "Active", + "ActiveWithProblems", + "CreationInProgress", + "CreationFailed", + "DeletionInProgress", + "DeletionFailed" + ] + }, + "ScalingPlanVersion":{"type":"long"}, + "ScalingPlans":{ + "type":"list", + "member":{"shape":"ScalingPlan"} + }, + "ScalingPolicies":{ + "type":"list", + "member":{"shape":"ScalingPolicy"} + }, + "ScalingPolicy":{ + "type":"structure", + "required":[ + "PolicyName", + "PolicyType" + ], + "members":{ + "PolicyName":{ + "shape":"PolicyName", + "documentation":"

The name of the scaling policy.

" + }, + "PolicyType":{ + "shape":"PolicyType", + "documentation":"

The type of scaling policy.

" + }, + "TargetTrackingConfiguration":{ + "shape":"TargetTrackingConfiguration", + "documentation":"

The target tracking scaling policy.

" + } + }, + "documentation":"

Represents a scaling policy.

" + }, + "ScalingStatusCode":{ + "type":"string", + "enum":[ + "Inactive", + "PartiallyActive", + "Active" + ] + }, + "ServiceNamespace":{ + "type":"string", + "enum":[ + "autoscaling", + "ecs", + "ec2", + "rds", + "dynamodb" + ] + }, + "TargetTrackingConfiguration":{ + "type":"structure", + "required":["TargetValue"], + "members":{ + "PredefinedScalingMetricSpecification":{ + "shape":"PredefinedScalingMetricSpecification", + "documentation":"

A predefined metric.

" + }, + "CustomizedScalingMetricSpecification":{ + "shape":"CustomizedScalingMetricSpecification", + "documentation":"

A customized metric.

" + }, + "TargetValue":{ + "shape":"MetricScale", + "documentation":"

The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 (Base 10) or 2e-360 to 2e360 (Base 2).

" + }, + "DisableScaleIn":{ + "shape":"DisableScaleIn", + "documentation":"

Indicates whether scale in by the target tracking policy is disabled. If the value is true, scale in is disabled and the target tracking policy won't remove capacity from the scalable resource. Otherwise, scale in is enabled and the target tracking policy can remove capacity from the scalable resource. The default value is false.

" + }, + "ScaleOutCooldown":{ + "shape":"Cooldown", + "documentation":"

The amount of time, in seconds, after a scale out activity completes before another scale out activity can start. This value is not used if the scalable resource is an Auto Scaling group.

While the cooldown period is in effect, the capacity that has been added by the previous scale out event that initiated the cooldown is calculated as part of the desired capacity for the next scale out. The intention is to continuously (but not excessively) scale out.

" + }, + "ScaleInCooldown":{ + "shape":"Cooldown", + "documentation":"

The amount of time, in seconds, after a scale in activity completes before another scale in activity can start. This value is not used if the scalable resource is an Auto Scaling group.

The cooldown period is used to block subsequent scale in requests until it has expired. The intention is to scale in conservatively to protect your application's availability. However, if another alarm triggers a scale out policy during the cooldown period after a scale-in, AWS Auto Scaling scales out your scalable target immediately.

" + }, + "EstimatedInstanceWarmup":{ + "shape":"Cooldown", + "documentation":"

The estimated time, in seconds, until a newly launched instance can contribute to the CloudWatch metrics. This value is used only if the resource is an Auto Scaling group.

" + } + }, + "documentation":"

Represents a target tracking scaling policy.

" + }, + "TargetTrackingConfigurations":{ + "type":"list", + "member":{"shape":"TargetTrackingConfiguration"} + }, + "TimestampType":{"type":"timestamp"}, + "ValidationException":{ + "type":"structure", + "members":{ + "Message":{"shape":"ErrorMessage"} + }, + "documentation":"

An exception was thrown for a validation issue. Review the parameters provided.

", + "exception":true + }, + "XmlString":{ + "type":"string", + "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + } + }, + "documentation":"

Use AWS Auto Scaling to quickly discover all the scalable AWS resources for your application and configure dynamic scaling for your scalable resources.

To get started, create a scaling plan with a set of instructions used to configure dynamic scaling for the scalable resources in your application. AWS Auto Scaling creates target tracking scaling policies for the scalable resources in your scaling plan. Target tracking scaling policies adjust the capacity of your scalable resource as required to maintain resource utilization at the target value that you specified.
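Because this service model is new in this release, a minimal usage sketch may help. Every concrete value below (plan name, stack ARN, ECS service, capacities, target value) is a hypothetical example assembled from the shapes defined above, not from the diff itself:

```python
import botocore.session

session = botocore.session.get_session()
plans = session.create_client('autoscaling-plans', region_name='us-east-1')

response = plans.create_scaling_plan(
    ScalingPlanName='example-plan',
    ApplicationSource={
        # Placeholder ARN; an application source is required.
        'CloudFormationStackARN': 'arn:aws:cloudformation:us-east-1:123456789012:stack/example/0000',
    },
    ScalingInstructions=[{
        'ServiceNamespace': 'ecs',
        'ResourceId': 'service/default/web',
        'ScalableDimension': 'ecs:service:DesiredCount',
        'MinCapacity': 1,
        'MaxCapacity': 10,
        'TargetTrackingConfigurations': [{
            'PredefinedScalingMetricSpecification': {
                'PredefinedScalingMetricType': 'ECSServiceAverageCPUUtilization',
            },
            'TargetValue': 50.0,
        }],
    }],
)
print(response['ScalingPlanVersion'])  # always 1 for a newly created plan, per the doc above
```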

" +} diff --git a/botocore/data/budgets/2016-10-20/service-2.json b/botocore/data/budgets/2016-10-20/service-2.json index 0a5cbec6..c5aa7d7c 100644 --- a/botocore/data/budgets/2016-10-20/service-2.json +++ b/botocore/data/budgets/2016-10-20/service-2.json @@ -331,6 +331,14 @@ "IncludeSupport":{ "shape":"NullableBoolean", "documentation":"A boolean value whether to include support costs in the cost budget." + }, + "IncludeDiscount":{ + "shape":"NullableBoolean", + "documentation":"A boolean value whether to include discounts in the cost budget." + }, + "UseAmortized":{ + "shape":"NullableBoolean", + "documentation":"A boolean value whether to include amortized costs in the cost budget." } }, "documentation":"This includes the options for getting the cost of a budget." diff --git a/botocore/data/cloud9/2017-09-23/paginators-1.json b/botocore/data/cloud9/2017-09-23/paginators-1.json index ea142457..1c4c2ff5 100644 --- a/botocore/data/cloud9/2017-09-23/paginators-1.json +++ b/botocore/data/cloud9/2017-09-23/paginators-1.json @@ -1,3 +1,16 @@ { - "pagination": {} + "pagination": { + "DescribeEnvironmentMemberships": { + "result_key": "memberships", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "ListEnvironments": { + "result_key": "environmentIds", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + } + } } diff --git a/botocore/data/clouddirectory/2016-05-10/paginators-1.json b/botocore/data/clouddirectory/2016-05-10/paginators-1.json index ea142457..22cc439e 100644 --- a/botocore/data/clouddirectory/2016-05-10/paginators-1.json +++ b/botocore/data/clouddirectory/2016-05-10/paginators-1.json @@ -1,3 +1,100 @@ { - "pagination": {} + "pagination": { + "ListObjectParentPaths": { + "result_key": "PathToObjectIdentifiersList", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListFacetNames": { + "result_key": "FacetNames", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListPublishedSchemaArns": { + "result_key": "SchemaArns", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDirectories": { + "result_key": "Directories", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListDevelopmentSchemaArns": { + "result_key": "SchemaArns", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTypedLinkFacetNames": { + "result_key": "FacetNames", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListIndex": { + "result_key": "IndexAttachments", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListFacetAttributes": { + "result_key": "Attributes", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListObjectPolicies": { + "result_key": "AttachedPolicyIds", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTagsForResource": { + "result_key": "Tags", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListAttachedIndices": { + "result_key": "IndexAttachments", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "LookupPolicy": { + "result_key": "PolicyToPathList", + "output_token": "NextToken", + "input_token": 
"NextToken", + "limit_key": "MaxResults" + }, + "ListPolicyAttachments": { + "result_key": "ObjectIdentifiers", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListObjectAttributes": { + "result_key": "Attributes", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListAppliedSchemaArns": { + "result_key": "SchemaArns", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTypedLinkFacetAttributes": { + "result_key": "Attributes", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + } + } } diff --git a/botocore/data/cloudhsmv2/2017-04-28/paginators-1.json b/botocore/data/cloudhsmv2/2017-04-28/paginators-1.json index ea142457..19c403f0 100644 --- a/botocore/data/cloudhsmv2/2017-04-28/paginators-1.json +++ b/botocore/data/cloudhsmv2/2017-04-28/paginators-1.json @@ -1,3 +1,22 @@ { - "pagination": {} + "pagination": { + "DescribeBackups": { + "result_key": "Backups", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "DescribeClusters": { + "result_key": "Clusters", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTags": { + "result_key": "TagList", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + } + } } diff --git a/botocore/data/cloudwatch/2010-08-01/paginators-1.json b/botocore/data/cloudwatch/2010-08-01/paginators-1.json index 7e9f823f..b0bf527f 100644 --- a/botocore/data/cloudwatch/2010-08-01/paginators-1.json +++ b/botocore/data/cloudwatch/2010-08-01/paginators-1.json @@ -12,6 +12,11 @@ "limit_key": "MaxRecords", "result_key": "MetricAlarms" }, + "ListDashboards": { + "input_token": "NextToken", + "output_token": "NextToken", + "result_key": "DashboardEntries" + }, "ListMetrics": { "input_token": "NextToken", "output_token": "NextToken", diff --git a/botocore/data/codebuild/2016-10-06/service-2.json b/botocore/data/codebuild/2016-10-06/service-2.json index 06108b04..d2d29133 100644 --- a/botocore/data/codebuild/2016-10-06/service-2.json +++ b/botocore/data/codebuild/2016-10-06/service-2.json @@ -79,7 +79,7 @@ {"shape":"ResourceAlreadyExistsException"}, {"shape":"ResourceNotFoundException"} ], - "documentation":"

For an existing AWS CodeBuild build project that has its source code stored in a GitHub repository, enables AWS CodeBuild to begin automatically rebuilding the source code every time a code change is pushed to the repository.

If you enable webhooks for an AWS CodeBuild project, and the project is used as a build step in AWS CodePipeline, then two identical builds will be created for each commit. One build is triggered through webhooks, and one through AWS CodePipeline. Because billing is on a per-build basis, you will be billed for both builds. Therefore, if you are using AWS CodePipeline, we recommend that you disable webhooks in CodeBuild. In the AWS CodeBuild console, clear the Webhook box. For more information, see step 9 in Change a Build Project’s Settings.

" + "documentation":"

For an existing AWS CodeBuild build project that has its source code stored in a GitHub repository, enables AWS CodeBuild to begin automatically rebuilding the source code every time a code change is pushed to the repository.

If you enable webhooks for an AWS CodeBuild project, and the project is used as a build step in AWS CodePipeline, then two identical builds will be created for each commit. One build is triggered through webhooks, and one through AWS CodePipeline. Because billing is on a per-build basis, you will be billed for both builds. Therefore, if you are using AWS CodePipeline, we recommend that you disable webhooks in CodeBuild. In the AWS CodeBuild console, clear the Webhook box. For more information, see step 9 in Change a Build Project's Settings.

" }, "DeleteProject":{ "name":"DeleteProject", @@ -721,6 +721,10 @@ "type":"list", "member":{"shape":"EnvironmentVariable"} }, + "GitCloneDepth":{ + "type":"integer", + "min":0 + }, "ImageVersions":{ "type":"list", "member":{"shape":"String"} @@ -1099,6 +1103,10 @@ "privilegedMode":{ "shape":"WrapperBoolean", "documentation":"

If set to true, enables running the Docker daemon inside a Docker container; otherwise, false or not specified (the default). This value must be set to true only if this build project will be used to build Docker images, and the specified build environment image is not one provided by AWS CodeBuild with Docker support. Otherwise, all associated builds that attempt to interact with the Docker daemon will fail. Note that you must also start the Docker daemon so that your builds can interact with it as needed. One way to do this is to initialize the Docker daemon in the install phase of your build spec by running the following build commands. (Do not run the following build commands if the specified build environment image is provided by AWS CodeBuild with Docker support.)

- nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock --host=tcp://0.0.0.0:2375 --storage-driver=overlay& - timeout -t 15 sh -c \"until docker info; do echo .; sleep 1; done\"
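
As a rough botocore sketch of the pattern described above (not part of the upstream change): the project name, service role ARN, and custom image below are hypothetical placeholders, and the inline buildspec simply embeds the install-phase commands quoted above so the builds can talk to the Docker daemon when privilegedMode is enabled.

import botocore.session

codebuild = botocore.session.get_session().create_client('codebuild', region_name='us-east-1')

# Inline buildspec that starts the Docker daemon during the install phase,
# as the documentation above recommends for custom (non AWS-provided) images.
buildspec = (
    'version: 0.2\n'
    'phases:\n'
    '  install:\n'
    '    commands:\n'
    '      - nohup /usr/local/bin/dockerd --host=unix:///var/run/docker.sock '
    '--host=tcp://0.0.0.0:2375 --storage-driver=overlay&\n'
    '      - timeout -t 15 sh -c "until docker info; do echo .; sleep 1; done"\n'
    '  build:\n'
    '    commands:\n'
    '      - docker build -t example/image .\n'
)

codebuild.create_project(
    name='docker-image-builder',                                      # placeholder project name
    serviceRole='arn:aws:iam::123456789012:role/codebuild-example',   # placeholder role ARN
    source={'type': 'GITHUB',
            'location': 'https://github.com/example/repo.git',        # placeholder repository
            'buildspec': buildspec},
    artifacts={'type': 'NO_ARTIFACTS'},
    environment={
        'type': 'LINUX_CONTAINER',
        'computeType': 'BUILD_GENERAL1_SMALL',
        'image': 'example/custom-docker-image',   # custom image, hence privilegedMode below
        'privilegedMode': True,                   # required so builds can reach the Docker daemon
    },
)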

" + }, + "certificate":{ + "shape":"String", + "documentation":"

The certificate to use with this build project.

" } }, "documentation":"

Information about the build environment of the build project.

" @@ -1135,6 +1143,10 @@ "shape":"String", "documentation":"

Information about the location of the source code to be built. Valid values include:

" }, + "gitCloneDepth":{ + "shape":"GitCloneDepth", + "documentation":"

Information about the git clone depth for the build project.

" + }, "buildspec":{ "shape":"String", "documentation":"

The build spec declaration to use for the builds in this build project.

If this value is not specified, a build spec must be included along with the source code to be built.

" @@ -1142,6 +1154,10 @@ "auth":{ "shape":"SourceAuth", "documentation":"

Information about the authorization settings for AWS CodeBuild to access the source code to be built.

This information is for the AWS CodeBuild console's use only. Your code should not get or set this information directly (unless the build project's source type value is BITBUCKET or GITHUB).

" + }, + "insecureSsl":{ + "shape":"WrapperBoolean", + "documentation":"

Enable this flag to ignore SSL warnings while connecting to the project source code.

" } }, "documentation":"

Information about the build input source code for the build project.
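
A minimal sketch of how the new source members added in this revision (gitCloneDepth, insecureSsl, and the GITHUB_ENTERPRISE type) might be used together; the project name and repository URL are placeholders, and the dict is simply passed as the source argument of create_project or update_project.

import botocore.session

codebuild = botocore.session.get_session().create_client('codebuild', region_name='us-east-1')

source = {
    'type': 'GITHUB_ENTERPRISE',                          # new enum value in this revision
    'location': 'https://ghe.example.com/org/repo.git',   # placeholder GHE URL
    'gitCloneDepth': 1,     # new member: shallow clone of recent history only
    'insecureSsl': True,    # new member: ignore SSL warnings, e.g. self-signed GHE certificates
}

codebuild.update_project(name='docker-image-builder', source=source)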

" @@ -1202,7 +1218,8 @@ "CODEPIPELINE", "GITHUB", "S3", - "BITBUCKET" + "BITBUCKET", + "GITHUB_ENTERPRISE" ] }, "StartBuildInput":{ @@ -1225,6 +1242,10 @@ "shape":"EnvironmentVariables", "documentation":"

A set of environment variables that overrides, for this build only, the latest ones already defined in the build project.

" }, + "gitCloneDepthOverride":{ + "shape":"GitCloneDepth", + "documentation":"

The user-defined depth of history, with a minimum value of 0, that overrides, for this build only, any previous depth of history defined in the build project.

" + }, "buildspecOverride":{ "shape":"String", "documentation":"

A build spec declaration that overrides, for this build only, the latest one already defined in the build project.
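
A short sketch of a one-off build that uses these per-build overrides, including the gitCloneDepthOverride added in this revision; the project name, buildspec text, and variable values are placeholders.

import botocore.session

codebuild = botocore.session.get_session().create_client('codebuild', region_name='us-east-1')

# Overrides apply to this build only; project defaults are left untouched.
codebuild.start_build(
    projectName='docker-image-builder',
    buildspecOverride='version: 0.2\nphases:\n  build:\n    commands:\n      - make test\n',
    gitCloneDepthOverride=5,                       # new in this revision
    environmentVariablesOverride=[
        {'name': 'STAGE', 'value': 'test'},
    ],
)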

" @@ -1399,6 +1420,14 @@ "url":{ "shape":"NonEmptyString", "documentation":"

The URL to the webhook.

" + }, + "payloadUrl":{ + "shape":"NonEmptyString", + "documentation":"

This is the server endpoint that will receive the webhook payload.

" + }, + "secret":{ + "shape":"NonEmptyString", + "documentation":"

Use this secret while creating a webhook in GitHub for Enterprise. The secret allows webhook requests sent by GitHub for Enterprise to be authenticated by AWS CodeBuild.

" } }, "documentation":"

Information about a webhook in GitHub that connects repository events to a build project in AWS CodeBuild.
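
A hedged sketch of reading the new payloadUrl and secret members after creating a webhook; the project name is a placeholder, and it assumes the response exposes the structure under the webhook key, which is what the CreateWebhook output shape suggests. For GitHub Enterprise, these two values are what you paste into the repository's webhook configuration.

import botocore.session

codebuild = botocore.session.get_session().create_client('codebuild', region_name='us-east-1')

response = codebuild.create_webhook(projectName='docker-image-builder')
webhook = response.get('webhook', {})
print('payload URL:', webhook.get('payloadUrl'))   # new member in this revision
print('secret:', webhook.get('secret'))            # new member, used to authenticate GHE requests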

" diff --git a/botocore/data/codecommit/2015-04-13/paginators-1.json b/botocore/data/codecommit/2015-04-13/paginators-1.json index dce8bf1a..b3310fca 100644 --- a/botocore/data/codecommit/2015-04-13/paginators-1.json +++ b/botocore/data/codecommit/2015-04-13/paginators-1.json @@ -9,6 +9,36 @@ "input_token": "nextToken", "output_token": "nextToken", "result_key": "repositories" + }, + "GetCommentsForComparedCommit": { + "result_key": "commentsForComparedCommitData", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "DescribePullRequestEvents": { + "result_key": "pullRequestEvents", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "GetCommentsForPullRequest": { + "result_key": "commentsForPullRequestData", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "ListPullRequests": { + "result_key": "pullRequestIds", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "GetDifferences": { + "result_key": "differences", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/botocore/data/comprehend/2017-11-27/paginators-1.json b/botocore/data/comprehend/2017-11-27/paginators-1.json index ea142457..2d021747 100644 --- a/botocore/data/comprehend/2017-11-27/paginators-1.json +++ b/botocore/data/comprehend/2017-11-27/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "ListTopicsDetectionJobs": { + "result_key": "TopicsDetectionJobPropertiesList", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + } + } } diff --git a/botocore/data/devicefarm/2015-06-23/service-2.json b/botocore/data/devicefarm/2015-06-23/service-2.json index 7a7f1318..32b18f89 100644 --- a/botocore/data/devicefarm/2015-06-23/service-2.json +++ b/botocore/data/devicefarm/2015-06-23/service-2.json @@ -474,7 +474,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Gets information about jobs.

" + "documentation":"

Gets information about jobs for a given test run.

" }, "ListNetworkProfiles":{ "name":"ListNetworkProfiles", @@ -621,7 +621,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Gets information about suites.

" + "documentation":"

Gets information about test suites for a given job.

" }, "ListTests":{ "name":"ListTests", @@ -637,7 +637,7 @@ {"shape":"LimitExceededException"}, {"shape":"ServiceAccountException"} ], - "documentation":"

Gets information about tests.

" + "documentation":"

Gets information about tests in a given test suite.

" }, "ListUniqueProblems":{ "name":"ListUniqueProblems", @@ -1169,17 +1169,29 @@ "shape":"Boolean", "documentation":"

Set to true if you want to access devices remotely for debugging in your remote access session.

" }, + "remoteRecordEnabled":{ + "shape":"Boolean", + "documentation":"

Set to true to enable remote recording for the remote access session.

" + }, + "remoteRecordAppArn":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) for the app to be recorded in the remote access session.

" + }, "name":{ "shape":"Name", "documentation":"

The name of the remote access session that you wish to create.

" }, "clientId":{ "shape":"ClientId", - "documentation":"

Unique identifier for the client. If you want access to multiple devices on the same client, you should pass the same clientId value in each call to CreateRemoteAccessSession. This is required only if remoteDebugEnabled is set to true true.

" + "documentation":"

Unique identifier for the client. If you want access to multiple devices on the same client, you should pass the same clientId value in each call to CreateRemoteAccessSession. This is required only if remoteDebugEnabled is set to true.

" }, "configuration":{ "shape":"CreateRemoteAccessSessionConfiguration", "documentation":"

The configuration information for the remote access session request.

" + }, + "interactionMode":{ + "shape":"InteractionMode", + "documentation":"

The interaction mode of the remote access session. Valid values are INTERACTIVE, NO_VIDEO, and VIDEO_ONLY.

" } }, "documentation":"

Creates and submits a request to start a remote access session.

" @@ -1373,6 +1385,10 @@ "shape":"String", "documentation":"

The device's model name.

" }, + "modelId":{ + "shape":"String", + "documentation":"

The device's model ID.

" + }, "formFactor":{ "shape":"DeviceFormFactor", "documentation":"

The device's form factor.

Allowed values include:

" @@ -1948,6 +1964,16 @@ "documentation":"

Represents the response from the server after AWS Device Farm makes a request to install to a remote access session.

" }, "Integer":{"type":"integer"}, + "InteractionMode":{ + "type":"string", + "enum":[ + "INTERACTIVE", + "NO_VIDEO", + "VIDEO_ONLY" + ], + "max":64, + "min":0 + }, "IosPaths":{ "type":"list", "member":{"shape":"String"} @@ -2125,7 +2151,7 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

The jobs' ARNs.

" + "documentation":"

The run's Amazon Resource Name (ARN).

" }, "nextToken":{ "shape":"PaginationToken", @@ -2370,7 +2396,7 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

The suites' ARNs.

" + "documentation":"

The job's Amazon Resource Name (ARN).

" }, "nextToken":{ "shape":"PaginationToken", @@ -2399,7 +2425,7 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

The tests' ARNs.

" + "documentation":"

The test suite's Amazon Resource Name (ARN).
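
The clarified ARN semantics above chain together as in the sketch below: a run ARN feeds ListJobs, each job ARN feeds ListSuites, and each suite ARN feeds ListTests. The run ARN is a placeholder.

import botocore.session

devicefarm = botocore.session.get_session().create_client('devicefarm', region_name='us-west-2')

run_arn = 'arn:aws:devicefarm:us-west-2:123456789012:run:EXAMPLE'   # placeholder
for job in devicefarm.list_jobs(arn=run_arn)['jobs']:
    for suite in devicefarm.list_suites(arn=job['arn'])['suites']:
        for test in devicefarm.list_tests(arn=suite['arn'])['tests']:
            print(job['name'], suite['name'], test['name'], test['result'])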

" }, "nextToken":{ "shape":"PaginationToken", @@ -2953,6 +2979,14 @@ "shape":"Boolean", "documentation":"

This flag is set to true if remote debugging is enabled for the remote access session.

" }, + "remoteRecordEnabled":{ + "shape":"Boolean", + "documentation":"

This flag is set to true if remote recording is enabled for the remote access session.

" + }, + "remoteRecordAppArn":{ + "shape":"AmazonResourceName", + "documentation":"

The Amazon Resource Name (ARN) for the app to be recorded in the remote access session.

" + }, "hostAddress":{ "shape":"HostAddress", "documentation":"

IP address of the EC2 host where you need to connect to remotely debug devices. Only returned if remote debugging is enabled for the remote access session.

" @@ -2976,6 +3010,10 @@ "deviceUdid":{ "shape":"String", "documentation":"

Unique device identifier for the remote device. Only returned if remote debugging is enabled for the remote access session.

" + }, + "interactionMode":{ + "shape":"InteractionMode", + "documentation":"

The interaction mode of the remote access session. Valid values are INTERACTIVE, NO_VIDEO, and VIDEO_ONLY.

" } }, "documentation":"

Represents information about the remote access session.

" @@ -3130,9 +3168,45 @@ "shape":"ExecutionResultCode", "documentation":"

Supporting field for the result field. Set only if result is SKIPPED. PARSING_FAILED if the result is skipped because of test package parsing failure.

" }, + "seed":{ + "shape":"Integer", + "documentation":"

For fuzz tests, this is a seed to use for randomizing the UI fuzz test. Using the same seed value between tests ensures identical event sequences.

" + }, + "appUpload":{ + "shape":"AmazonResourceName", + "documentation":"

An app to upload or that has been uploaded.

" + }, + "eventCount":{ + "shape":"Integer", + "documentation":"

For fuzz tests, this is the number of events, between 1 and 10000, that the UI fuzz test should perform.

" + }, + "jobTimeoutMinutes":{ + "shape":"JobTimeoutMinutes", + "documentation":"

The number of minutes the job will execute before it times out.

" + }, + "devicePoolArn":{ + "shape":"AmazonResourceName", + "documentation":"

The ARN of the device pool for the run.

" + }, + "locale":{ + "shape":"String", + "documentation":"

Information about the locale that is used for the run.

" + }, + "radios":{ + "shape":"Radios", + "documentation":"

Information about the radio states for the run.

" + }, + "location":{ + "shape":"Location", + "documentation":"

Information about the location that is used for the run.

" + }, "customerArtifactPaths":{ "shape":"CustomerArtifactPaths", "documentation":"

Output CustomerArtifactPaths object for the test run.

" + }, + "webUrl":{ + "shape":"String", + "documentation":"

A pre-signed Amazon S3 URL that can be used with a corresponding GET request to download the symbol file for the run.

" } }, "documentation":"

Represents a test run on a set of devices with a given app package, test parameters, etc.
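
A hedged sketch of reading the Run members added in this revision (seed, eventCount, devicePoolArn, webUrl and friends); the run ARN is a placeholder and the fields are only present when the service returns them for the run's test type.

import botocore.session

devicefarm = botocore.session.get_session().create_client('devicefarm', region_name='us-west-2')

run = devicefarm.get_run(arn='arn:aws:devicefarm:us-west-2:123456789012:run:EXAMPLE')['run']
# The new read-only members may be absent depending on the test type.
print('device pool:', run.get('devicePoolArn'))
print('fuzz seed:', run.get('seed'), 'events:', run.get('eventCount'))
print('symbol file URL:', run.get('webUrl'))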

" @@ -3468,6 +3542,7 @@ "enum":[ "BUILTIN_FUZZ", "BUILTIN_EXPLORER", + "WEB_PERFORMANCE_PROFILE", "APPIUM_JAVA_JUNIT", "APPIUM_JAVA_TESTNG", "APPIUM_PYTHON", @@ -3479,7 +3554,9 @@ "UIAUTOMATION", "UIAUTOMATOR", "XCTEST", - "XCTEST_UI" + "XCTEST_UI", + "REMOTE_ACCESS_RECORD", + "REMOTE_ACCESS_REPLAY" ] }, "Tests":{ @@ -3571,7 +3648,7 @@ "members":{ "arn":{ "shape":"AmazonResourceName", - "documentation":"

The Amazon Resource Name (ARN) of the project that you wish to update network profile settings.

" + "documentation":"

The Amazon Resource Name (ARN) of the project for which you want to update network profile settings.

" }, "name":{ "shape":"Name", diff --git a/botocore/data/dms/2016-01-01/paginators-1.json b/botocore/data/dms/2016-01-01/paginators-1.json index ea142457..be68c9ea 100644 --- a/botocore/data/dms/2016-01-01/paginators-1.json +++ b/botocore/data/dms/2016-01-01/paginators-1.json @@ -1,3 +1,82 @@ { - "pagination": {} + "pagination": { + "DescribeSchemas": { + "result_key": "Schemas", + "output_token": "Marker", + "input_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeCertificates": { + "result_key": "Certificates", + "output_token": "Marker", + "input_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeEndpoints": { + "result_key": "Endpoints", + "output_token": "Marker", + "input_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeEventSubscriptions": { + "result_key": "EventSubscriptionsList", + "output_token": "Marker", + "input_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeEndpointTypes": { + "result_key": "SupportedEndpointTypes", + "output_token": "Marker", + "input_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeReplicationInstances": { + "result_key": "ReplicationInstances", + "output_token": "Marker", + "input_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeTableStatistics": { + "result_key": "TableStatistics", + "output_token": "Marker", + "input_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeConnections": { + "result_key": "Connections", + "output_token": "Marker", + "input_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeReplicationTaskAssessmentResults": { + "result_key": "ReplicationTaskAssessmentResults", + "output_token": "Marker", + "input_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeEvents": { + "result_key": "Events", + "output_token": "Marker", + "input_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeOrderableReplicationInstances": { + "result_key": "OrderableReplicationInstances", + "output_token": "Marker", + "input_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeReplicationSubnetGroups": { + "result_key": "ReplicationSubnetGroups", + "output_token": "Marker", + "input_token": "Marker", + "limit_key": "MaxRecords" + }, + "DescribeReplicationTasks": { + "result_key": "ReplicationTasks", + "output_token": "Marker", + "input_token": "Marker", + "limit_key": "MaxRecords" + } + } } diff --git a/botocore/data/ds/2015-04-16/paginators-1.json b/botocore/data/ds/2015-04-16/paginators-1.json index ea142457..409e2250 100644 --- a/botocore/data/ds/2015-04-16/paginators-1.json +++ b/botocore/data/ds/2015-04-16/paginators-1.json @@ -1,3 +1,10 @@ { - "pagination": {} + "pagination": { + "DescribeDomainControllers": { + "result_key": "DomainControllers", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "Limit" + } + } } diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index 8dcde66b..302e3a02 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -650,7 +650,7 @@ }, "input":{"shape":"CreateVpcPeeringConnectionRequest"}, "output":{"shape":"CreateVpcPeeringConnectionResult"}, - "documentation":"

Requests a VPC peering connection between two VPCs: a requester VPC that you own and an accepter VPC with which to create the connection. The accepter VPC can belong to another AWS account and can be in a different region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed.

" + "documentation":"

Requests a VPC peering connection between two VPCs: a requester VPC that you own and an accepter VPC with which to create the connection. The accepter VPC can belong to another AWS account and can be in a different region to the requester VPC. The requester VPC and accepter VPC cannot have overlapping CIDR blocks.

Limitations and rules apply to a VPC peering connection. For more information, see the limitations section in the VPC Peering Guide.

The owner of the accepter VPC must accept the peering request to activate the peering connection. The VPC peering connection request expires after 7 days, after which it cannot be accepted or rejected.

If you create a VPC peering connection request between VPCs with overlapping CIDR blocks, the VPC peering connection has a status of failed.
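
A minimal sketch of the request-then-accept flow described above for a same-account, cross-region peering; all VPC IDs and the account ID are placeholders, and PeerRegion is assumed to be the parameter that names the accepter region (omit it when both VPCs are in the same region).

import botocore.session

ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

peering = ec2.create_vpc_peering_connection(
    VpcId='vpc-11111111',            # requester VPC (placeholder)
    PeerVpcId='vpc-22222222',        # accepter VPC (placeholder)
    PeerOwnerId='123456789012',      # accepter account (placeholder)
    PeerRegion='us-west-2',          # assumed cross-region parameter
)
pcx_id = peering['VpcPeeringConnection']['VpcPeeringConnectionId']

# The accepter side must activate the request within 7 days.
accepter = botocore.session.get_session().create_client('ec2', region_name='us-west-2')
accepter.accept_vpc_peering_connection(VpcPeeringConnectionId=pcx_id)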

" }, "CreateVpnConnection":{ "name":"CreateVpnConnection", @@ -2231,7 +2231,7 @@ }, "input":{"shape":"ModifyVpcEndpointServicePermissionsRequest"}, "output":{"shape":"ModifyVpcEndpointServicePermissionsResult"}, - "documentation":"

Modifies the permissions for your VPC endpoint service. You can add or remove permissions for service consumers (IAM users, IAM roles, and AWS accounts) to discover your endpoint service.

" + "documentation":"

Modifies the permissions for your VPC endpoint service. You can add or remove permissions for service consumers (IAM users, IAM roles, and AWS accounts) to connect to your endpoint service.
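
A hedged sketch of granting a consumer permission to connect to an endpoint service; the service ID and principal ARN are placeholders, and the parameter names assume the ServiceId / AddAllowedPrincipals form of this request.

import botocore.session

ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

# Allow one consumer account's root principal to connect to the endpoint service.
ec2.modify_vpc_endpoint_service_permissions(
    ServiceId='vpce-svc-0123456789abcdef0',                 # placeholder
    AddAllowedPrincipals=['arn:aws:iam::123456789012:root'],  # placeholder
)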

" }, "ModifyVpcPeeringConnectionOptions":{ "name":"ModifyVpcPeeringConnectionOptions", @@ -5801,7 +5801,7 @@ }, "ServiceName":{ "shape":"String", - "documentation":"

The service name. To get a list of available services, use the DescribeVpcEndpointServices request.

" + "documentation":"

The service name. To get a list of available services, use the DescribeVpcEndpointServices request, or get the name from the service provider.

" }, "PolicyDocument":{ "shape":"String", @@ -6905,7 +6905,7 @@ "members":{ "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters. Filter names and values are case-sensitive.

", + "documentation":"

One or more filters. Filter names and values are case-sensitive.

", "locationName":"Filter" }, "PublicIps":{ @@ -8034,7 +8034,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "NextToken":{ @@ -9296,7 +9296,7 @@ }, "Filters":{ "shape":"FilterList", - "documentation":"

One or more filters.

", + "documentation":"

One or more filters.

", "locationName":"Filter" }, "MaxResults":{ @@ -13995,7 +13995,7 @@ "members":{ "State":{ "shape":"AttachmentStatus", - "documentation":"

The current state of the attachment.

", + "documentation":"

The current state of the attachment. For an Internet gateway, the state is available when attached to a VPC; otherwise, this value is not returned.

", "locationName":"state" }, "VpcId":{ @@ -19480,7 +19480,7 @@ }, "ImageId":{ "shape":"String", - "documentation":"

The ID of the AMI, which you can get by calling DescribeImages.

" + "documentation":"

The ID of the AMI, which you can get by calling DescribeImages. An AMI is required to launch an instance and must be specified here or in a launch template.
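
As a rough illustration of the clarified wording, a RunInstances call can omit ImageId when a launch template supplies the AMI; the template name below is a placeholder, and the LaunchTemplate parameter shape is assumed from the launch-template feature rather than shown in this hunk.

import botocore.session

ec2 = botocore.session.get_session().create_client('ec2', region_name='us-east-1')

# No ImageId here: the AMI comes from the referenced launch template.
ec2.run_instances(
    MinCount=1,
    MaxCount=1,
    LaunchTemplate={'LaunchTemplateName': 'web-server-template',   # placeholder
                    'Version': '$Default'},
)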

" }, "InstanceType":{ "shape":"InstanceType", @@ -22204,7 +22204,7 @@ }, "GroupName":{ "shape":"String", - "documentation":"

The name of the security group. In a request, use this parameter for a security group in EC2-Classic or a default VPC only. For a security group in a nondefault VPC, use the security group ID.

", + "documentation":"

The name of the security group. In a request, use this parameter for a security group in EC2-Classic or a default VPC only. For a security group in a nondefault VPC, use the security group ID.

For a referenced security group in another VPC, this value is not returned if the referenced security group is deleted.

", "locationName":"groupName" }, "PeeringStatus":{ @@ -22214,7 +22214,7 @@ }, "UserId":{ "shape":"String", - "documentation":"

The ID of an AWS account. For a referenced security group in another VPC, the account ID of the referenced security group is returned.

[EC2-Classic] Required when adding or removing rules that reference a security group in another AWS account.

", + "documentation":"

The ID of an AWS account.

For a referenced security group in another VPC, the account ID of the referenced security group is returned in the response. If the referenced security group is deleted, this value is not returned.

[EC2-Classic] Required when adding or removing rules that reference a security group in another AWS account.

", "locationName":"userId" }, "VpcId":{ @@ -23440,5 +23440,5 @@ ] } }, - "documentation":"Amazon Elastic Compute Cloud

Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity in the Amazon Web Services (AWS) cloud. Using Amazon EC2 eliminates your need to invest in hardware up front, so you can develop and deploy applications faster.

" + "documentation":"Amazon Elastic Compute Cloud

Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity in the AWS Cloud. Using Amazon EC2 eliminates your need to invest in hardware up front, so you can develop and deploy applications faster.

" } diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 30a69e44..07ad6683 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -180,6 +180,22 @@ "us-west-2" : { } } }, + "autoscaling-plans" : { + "defaults" : { + "credentialScope" : { + "service" : "autoscaling-plans" + }, + "hostname" : "autoscaling.{region}.amazonaws.com", + "protocols" : [ "http", "https" ] + }, + "endpoints" : { + "ap-southeast-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "batch" : { "endpoints" : { "ap-northeast-1" : { }, @@ -543,6 +559,7 @@ "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, + "eu-west-3" : { }, "sa-east-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -824,10 +841,13 @@ "firehose" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, "eu-central-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, + "us-west-1" : { }, "us-west-2" : { } } }, @@ -872,6 +892,7 @@ }, "glue" : { "endpoints" : { + "ap-northeast-1" : { }, "eu-west-1" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1246,6 +1267,7 @@ "endpoints" : { "eu-west-1" : { }, "us-east-1" : { }, + "us-east-2" : { }, "us-west-2" : { } } }, @@ -1386,6 +1408,7 @@ "ap-northeast-2" : { }, "ap-south-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-3" : { }, @@ -2078,6 +2101,11 @@ "us-gov-west-1" : { } } }, + "ecs" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "elasticache" : { "endpoints" : { "us-gov-west-1" : { } diff --git a/botocore/data/es/2015-01-01/paginators-1.json b/botocore/data/es/2015-01-01/paginators-1.json index ea142457..b6fefece 100644 --- a/botocore/data/es/2015-01-01/paginators-1.json +++ b/botocore/data/es/2015-01-01/paginators-1.json @@ -1,3 +1,16 @@ { - "pagination": {} + "pagination": { + "ListElasticsearchInstanceTypes": { + "result_key": "ElasticsearchInstanceTypes", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListElasticsearchVersions": { + "result_key": "ElasticsearchVersions", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + } + } } diff --git a/botocore/data/glue/2017-03-31/paginators-1.json b/botocore/data/glue/2017-03-31/paginators-1.json index ea142457..e90ffafe 100644 --- a/botocore/data/glue/2017-03-31/paginators-1.json +++ b/botocore/data/glue/2017-03-31/paginators-1.json @@ -1,3 +1,82 @@ { - "pagination": {} + "pagination": { + "GetJobs": { + "result_key": "Jobs", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "GetPartitions": { + "result_key": "Partitions", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "GetDatabases": { + "result_key": "DatabaseList", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "GetClassifiers": { + "result_key": "Classifiers", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "GetTableVersions": { + "result_key": "TableVersions", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "GetCrawlers": { + "result_key": "Crawlers", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "GetDevEndpoints": { + "result_key": "DevEndpoints", + "output_token": "NextToken", + "input_token": "NextToken", + 
"limit_key": "MaxResults" + }, + "GetJobRuns": { + "result_key": "JobRuns", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "GetTriggers": { + "result_key": "Triggers", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "GetTables": { + "result_key": "TableList", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "GetUserDefinedFunctions": { + "result_key": "UserDefinedFunctions", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "GetCrawlerMetrics": { + "result_key": "CrawlerMetricsList", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "GetConnections": { + "result_key": "ConnectionList", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + } + } } diff --git a/botocore/data/glue/2017-03-31/service-2.json b/botocore/data/glue/2017-03-31/service-2.json index b6dd1bb5..a8681aec 100644 --- a/botocore/data/glue/2017-03-31/service-2.json +++ b/botocore/data/glue/2017-03-31/service-2.json @@ -75,6 +75,22 @@ ], "documentation":"

Deletes multiple tables at once.

" }, + "BatchDeleteTableVersion":{ + "name":"BatchDeleteTableVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"BatchDeleteTableVersionRequest"}, + "output":{"shape":"BatchDeleteTableVersionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Deletes a specified batch of versions of a table.

" + }, "BatchGetPartition":{ "name":"BatchGetPartition", "http":{ @@ -132,7 +148,8 @@ "errors":[ {"shape":"AlreadyExistsException"}, {"shape":"InvalidInputException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"ResourceNumberLimitExceededException"} ], "documentation":"

Creates a connection definition in the Data Catalog.

" }, @@ -291,7 +308,8 @@ {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, {"shape":"EntityNotFoundException"}, - {"shape":"OperationTimeoutException"} + {"shape":"OperationTimeoutException"}, + {"shape":"ResourceNumberLimitExceededException"} ], "documentation":"

Creates a new function definition in the Data Catalog.

" }, @@ -418,6 +436,22 @@ ], "documentation":"

Removes a table definition from the Data Catalog.

" }, + "DeleteTableVersion":{ + "name":"DeleteTableVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"DeleteTableVersionRequest"}, + "output":{"shape":"DeleteTableVersionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Deletes a specified version of a table.

" + }, "DeleteTrigger":{ "name":"DeleteTrigger", "http":{ @@ -780,6 +814,22 @@ ], "documentation":"

Retrieves the Table definition in a Data Catalog for a specified table.

" }, + "GetTableVersion":{ + "name":"GetTableVersion", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTableVersionRequest"}, + "output":{"shape":"GetTableVersionResponse"}, + "errors":[ + {"shape":"EntityNotFoundException"}, + {"shape":"InvalidInputException"}, + {"shape":"InternalServiceException"}, + {"shape":"OperationTimeoutException"} + ], + "documentation":"

Retrieves a specified version of a table.

" + }, "GetTableVersions":{ "name":"GetTableVersions", "http":{ @@ -1167,7 +1217,8 @@ {"shape":"InvalidInputException"}, {"shape":"InternalServiceException"}, {"shape":"OperationTimeoutException"}, - {"shape":"ConcurrentModificationException"} + {"shape":"ConcurrentModificationException"}, + {"shape":"ResourceNumberLimitExceededException"} ], "documentation":"

Updates a metadata table in the Data Catalog.

" }, @@ -1369,7 +1420,7 @@ }, "DatabaseName":{ "shape":"NameString", - "documentation":"

The name of the catalog database where the tables to delete reside.

" + "documentation":"

The name of the catalog database where the tables to delete reside. For Hive compatibility, this name is entirely lowercase.

" }, "TablesToDelete":{ "shape":"BatchDeleteTableNameList", @@ -1386,6 +1437,47 @@ } } }, + "BatchDeleteTableVersionList":{ + "type":"list", + "member":{"shape":"VersionString"}, + "max":100, + "min":0 + }, + "BatchDeleteTableVersionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "VersionIds" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table. For Hive compatibility, this name is entirely lowercase.

" + }, + "VersionIds":{ + "shape":"BatchDeleteTableVersionList", + "documentation":"

A list of the IDs of versions to be deleted.

" + } + } + }, + "BatchDeleteTableVersionResponse":{ + "type":"structure", + "members":{ + "Errors":{ + "shape":"TableVersionErrors", + "documentation":"

A list of errors encountered while trying to delete the specified table versions.

" + } + } + }, "BatchGetPartitionRequest":{ "type":"structure", "required":[ @@ -1508,6 +1600,7 @@ "member":{"shape":"BatchStopJobRunSuccessfulSubmission"} }, "Boolean":{"type":"boolean"}, + "BooleanNullable":{"type":"boolean"}, "BooleanValue":{"type":"boolean"}, "BoundedPartitionValueList":{ "type":"list", @@ -1798,6 +1891,11 @@ }, "ConnectionInput":{ "type":"structure", + "required":[ + "Name", + "ConnectionType", + "ConnectionProperties" + ], "members":{ "Name":{ "shape":"NameString", @@ -2424,7 +2522,7 @@ }, "DatabaseName":{ "shape":"NameString", - "documentation":"

The catalog database in which to create the new table.

" + "documentation":"

The catalog database in which to create the new table. For Hive compatibility, this name is entirely lowercase.

" }, "TableInput":{ "shape":"TableInput", @@ -2549,7 +2647,7 @@ "members":{ "Name":{ "shape":"NameString", - "documentation":"

Name of the database.

" + "documentation":"

Name of the database. For Hive compatibility, this is folded to lowercase when it is stored.

" }, "Description":{ "shape":"DescriptionString", @@ -2576,7 +2674,7 @@ "members":{ "Name":{ "shape":"NameString", - "documentation":"

Name of the database.

" + "documentation":"

Name of the database. For Hive compatibility, this is folded to lowercase when it is stored.

" }, "Description":{ "shape":"DescriptionString", @@ -2591,7 +2689,7 @@ "documentation":"

A list of key-value pairs that define parameters and properties of the database.

" } }, - "documentation":"

The structure used to create or updata a database.

" + "documentation":"

The structure used to create or update a database.

" }, "DatabaseList":{ "type":"list", @@ -2671,7 +2769,7 @@ }, "Name":{ "shape":"NameString", - "documentation":"

The name of the Database to delete.

" + "documentation":"

The name of the Database to delete. For Hive compatibility, this must be all lowercase.

" } } }, @@ -2758,11 +2856,11 @@ }, "DatabaseName":{ "shape":"NameString", - "documentation":"

The name of the catalog database in which the table resides.

" + "documentation":"

The name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase.

" }, "Name":{ "shape":"NameString", - "documentation":"

The name of the table to be deleted.

" + "documentation":"

The name of the table to be deleted. For Hive compatibility, this name is entirely lowercase.

" } } }, @@ -2771,6 +2869,37 @@ "members":{ } }, + "DeleteTableVersionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName", + "VersionId" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table. For Hive compatibility, this name is entirely lowercase.

" + }, + "VersionId":{ + "shape":"VersionString", + "documentation":"

The ID of the table version to be deleted.

" + } + } + }, + "DeleteTableVersionResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteTriggerRequest":{ "type":"structure", "required":["Name"], @@ -3202,7 +3331,7 @@ }, "Name":{ "shape":"NameString", - "documentation":"

The name of the database to retrieve.

" + "documentation":"

The name of the database to retrieve. For Hive compatibility, this should be all lowercase.

" } } }, @@ -3587,11 +3716,11 @@ }, "DatabaseName":{ "shape":"NameString", - "documentation":"

The name of the database in the catalog in which the table resides.

" + "documentation":"

The name of the database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.

" }, "Name":{ "shape":"NameString", - "documentation":"

The name of the table for which to retrieve the definition.

" + "documentation":"

The name of the table for which to retrieve the definition. For Hive compatibility, this name is entirely lowercase.

" } } }, @@ -3604,6 +3733,40 @@ } } }, + "GetTableVersionRequest":{ + "type":"structure", + "required":[ + "DatabaseName", + "TableName" + ], + "members":{ + "CatalogId":{ + "shape":"CatalogIdString", + "documentation":"

The ID of the Data Catalog where the tables reside. If none is supplied, the AWS account ID is used by default.

" + }, + "DatabaseName":{ + "shape":"NameString", + "documentation":"

The database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.

" + }, + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table. For Hive compatibility, this name is entirely lowercase.

" + }, + "VersionId":{ + "shape":"VersionString", + "documentation":"

The ID value of the table version to be retrieved.

" + } + } + }, + "GetTableVersionResponse":{ + "type":"structure", + "members":{ + "TableVersion":{ + "shape":"TableVersion", + "documentation":"

The requested table version.

" + } + } + }, "GetTableVersionsList":{ "type":"list", "member":{"shape":"TableVersion"} @@ -3621,11 +3784,11 @@ }, "DatabaseName":{ "shape":"NameString", - "documentation":"

The database in the catalog in which the table resides.

" + "documentation":"

The database in the catalog in which the table resides. For Hive compatibility, this name is entirely lowercase.

" }, "TableName":{ "shape":"NameString", - "documentation":"

The name of the table.

" + "documentation":"

The name of the table. For Hive compatibility, this name is entirely lowercase.

" }, "NextToken":{ "shape":"Token", @@ -3660,7 +3823,7 @@ }, "DatabaseName":{ "shape":"NameString", - "documentation":"

The database in the catalog whose tables to list.

" + "documentation":"

The database in the catalog whose tables to list. For Hive compatibility, this name is entirely lowercase.

" }, "Expression":{ "shape":"FilterString", @@ -4376,7 +4539,7 @@ }, "ParametersMapValue":{ "type":"string", - "max":51200 + "max":512000 }, "Partition":{ "type":"structure", @@ -4499,7 +4662,7 @@ }, "AvailabilityZone":{ "shape":"NameString", - "documentation":"

The connection's availability zone.

" + "documentation":"

The connection's availability zone. This field is deprecated and has no effect.

" } }, "documentation":"

Specifies the physical requirements for a connection.

" @@ -4959,11 +5122,11 @@ "members":{ "Name":{ "shape":"NameString", - "documentation":"

Name of the table.

" + "documentation":"

Name of the table. For Hive compatibility, this must be entirely lowercase.

" }, "DatabaseName":{ "shape":"NameString", - "documentation":"

Name of the metadata database where the table metadata resides.

" + "documentation":"

Name of the metadata database where the table metadata resides. For Hive compatibility, this must be all lowercase.

" }, "Description":{ "shape":"DescriptionString", @@ -5029,7 +5192,7 @@ "members":{ "TableName":{ "shape":"NameString", - "documentation":"

Name of the table.

" + "documentation":"

Name of the table. For Hive compatibility, this must be entirely lowercase.

" }, "ErrorDetail":{ "shape":"ErrorDetail", @@ -5048,7 +5211,7 @@ "members":{ "Name":{ "shape":"NameString", - "documentation":"

Name of the table.

" + "documentation":"

Name of the table. For Hive compatibility, this is folded to lowercase when it is stored.

" }, "Description":{ "shape":"DescriptionString", @@ -5125,6 +5288,28 @@ }, "documentation":"

Specifies a version of a table.

" }, + "TableVersionError":{ + "type":"structure", + "members":{ + "TableName":{ + "shape":"NameString", + "documentation":"

The name of the table in question.

" + }, + "VersionId":{ + "shape":"VersionString", + "documentation":"

The ID value of the version in question.

" + }, + "ErrorDetail":{ + "shape":"ErrorDetail", + "documentation":"

Detail about the error.

" + } + }, + "documentation":"

An error record for table-version operations.

" + }, + "TableVersionErrors":{ + "type":"list", + "member":{"shape":"TableVersionError"} + }, "Timestamp":{"type":"timestamp"}, "TimestampValue":{"type":"timestamp"}, "Token":{"type":"string"}, @@ -5362,7 +5547,7 @@ }, "Name":{ "shape":"NameString", - "documentation":"

The name of the metadata database to update in the catalog.

" + "documentation":"

The name of the database to update in the catalog. For Hive compatibility, this is folded to lowercase.

" }, "DatabaseInput":{ "shape":"DatabaseInput", @@ -5500,11 +5685,15 @@ }, "DatabaseName":{ "shape":"NameString", - "documentation":"

The name of the catalog database in which the table resides.

" + "documentation":"

The name of the catalog database in which the table resides. For Hive compatibility, this name is entirely lowercase.

" }, "TableInput":{ "shape":"TableInput", "documentation":"

An updated TableInput object to define the metadata table in the catalog.

" + }, + "SkipArchive":{ + "shape":"BooleanNullable", + "documentation":"

By default, UpdateTable always creates an archived version of the table before updating it. If skipArchive is set to true, however, UpdateTable does not create the archived version.
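
A minimal sketch tying SkipArchive to the new table-version operations in this revision; the database and table names are placeholders, and the comment about ordering of returned versions is an assumption worth verifying.

import botocore.session

glue = botocore.session.get_session().create_client('glue', region_name='us-east-1')

db, table = 'sales_db', 'events'   # placeholder names (lowercase for Hive compatibility)

# Update the table definition without archiving the previous version.
current = glue.get_table(DatabaseName=db, Name=table)['Table']
table_input = {
    'Name': current['Name'],
    'StorageDescriptor': current['StorageDescriptor'],
    'PartitionKeys': current.get('PartitionKeys', []),
    'TableType': current.get('TableType', 'EXTERNAL_TABLE'),
}
glue.update_table(DatabaseName=db, TableInput=table_input, SkipArchive=True)

# Inspect and prune older versions with the new table-version operations.
versions = glue.get_table_versions(DatabaseName=db, TableName=table)['TableVersions']
old_ids = [v['VersionId'] for v in versions[1:]]   # assumes the first entry is the newest
if old_ids:
    glue.batch_delete_table_version(DatabaseName=db, TableName=table, VersionIds=old_ids)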

" } } }, @@ -5689,7 +5878,7 @@ }, "ViewTextString":{ "type":"string", - "max":2048 + "max":409600 }, "XMLClassifier":{ "type":"structure", diff --git a/botocore/data/guardduty/2017-11-28/service-2.json b/botocore/data/guardduty/2017-11-28/service-2.json index b199d31d..39d0dbf9 100644 --- a/botocore/data/guardduty/2017-11-28/service-2.json +++ b/botocore/data/guardduty/2017-11-28/service-2.json @@ -891,6 +891,32 @@ "type" : "structure", "members" : { } }, + "AccessKeyDetails" : { + "type" : "structure", + "members" : { + "AccessKeyId" : { + "shape" : "__string", + "locationName" : "accessKeyId", + "documentation" : "Access key ID of the user." + }, + "PrincipalId" : { + "shape" : "__string", + "locationName" : "principalId", + "documentation" : "The principal ID of the user." + }, + "UserName" : { + "shape" : "__string", + "locationName" : "userName", + "documentation" : "The name of the user." + }, + "UserType" : { + "shape" : "__string", + "locationName" : "userType", + "documentation" : "The type of the user." + } + }, + "documentation" : "The IAM access key details (IAM user information) of a user that engaged in the activity that prompted GuardDuty to generate a finding." + }, "AccountDetail" : { "type" : "structure", "members" : { @@ -2704,6 +2730,10 @@ "Resource" : { "type" : "structure", "members" : { + "AccessKeyDetails" : { + "shape" : "AccessKeyDetails", + "locationName" : "accessKeyDetails" + }, "InstanceDetails" : { "shape" : "InstanceDetails", "locationName" : "instanceDetails" diff --git a/botocore/data/inspector/2016-02-16/paginators-1.json b/botocore/data/inspector/2016-02-16/paginators-1.json index ea142457..83d2ad2c 100644 --- a/botocore/data/inspector/2016-02-16/paginators-1.json +++ b/botocore/data/inspector/2016-02-16/paginators-1.json @@ -1,3 +1,52 @@ { - "pagination": {} + "pagination": { + "ListFindings": { + "result_key": "findingArns", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "ListAssessmentTemplates": { + "result_key": "assessmentTemplateArns", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "PreviewAgents": { + "result_key": "agentPreviews", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "ListEventSubscriptions": { + "result_key": "subscriptions", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "ListRulesPackages": { + "result_key": "rulesPackageArns", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "ListAssessmentRunAgents": { + "result_key": "assessmentRunAgents", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "ListAssessmentRuns": { + "result_key": "assessmentRunArns", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "ListAssessmentTargets": { + "result_key": "assessmentTargetArns", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + } + } } diff --git a/botocore/data/lambda/2015-03-31/service-2.json b/botocore/data/lambda/2015-03-31/service-2.json index f5b6ab5c..f4c6de3f 100644 --- a/botocore/data/lambda/2015-03-31/service-2.json +++ b/botocore/data/lambda/2015-03-31/service-2.json @@ -5,6 +5,7 @@ "endpointPrefix":"lambda", "protocol":"rest-json", "serviceFullName":"AWS Lambda", + "serviceId":"Lambda", "signatureVersion":"v4", "uid":"lambda-2015-03-31" }, @@ -24,7 +25,8 @@ 
{"shape":"ResourceConflictException"}, {"shape":"InvalidParameterValueException"}, {"shape":"PolicyLengthExceededException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"PreconditionFailedException"} ], "documentation":"

Adds a permission to the resource policy associated with the specified AWS Lambda function. You use resource policies to grant permissions to event sources that use the push model. In a push model, event sources (such as Amazon S3 and custom applications) invoke your Lambda function. Each permission you add to the resource policy grants an event source permission to invoke the Lambda function.

For information about the push model, see AWS Lambda: How it Works.

If you are using versioning, the permissions you add are specific to the Lambda function version or alias you specify in the AddPermission request via the Qualifier parameter. For more information about versioning, see AWS Lambda Function Versioning and Aliases.

This operation requires permission for the lambda:AddPermission action.

" }, @@ -146,7 +148,7 @@ {"shape":"TooManyRequestsException"}, {"shape":"InvalidParameterValueException"} ], - "documentation":"

Removes concurrent execution limits from this function.

" + "documentation":"

Removes concurrent execution limits from this function. For more information, see concurrent-executions.

" }, "GetAccountSettings":{ "name":"GetAccountSettings", @@ -278,7 +280,7 @@ {"shape":"KMSNotFoundException"}, {"shape":"InvalidRuntimeException"} ], - "documentation":"

Invokes a specific Lambda function. For an example, see Create the Lambda Function and Test It Manually.

If you are using the versioning feature, you can invoke the specific function version by providing function version or alias name that is pointing to the function version using the Qualifier parameter in the request. If you don't provide the Qualifier parameter, the $LATEST version of the Lambda function is invoked. Invocations occur at least once in response to an event and functions must be idempotent to handle this. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

This operation requires permission for the lambda:InvokeFunction action.

" + "documentation":"

Invokes a specific Lambda function. For an example, see Create the Lambda Function and Test It Manually.

If you are using the versioning feature, you can invoke the specific function version by providing function version or alias name that is pointing to the function version using the Qualifier parameter in the request. If you don't provide the Qualifier parameter, the $LATEST version of the Lambda function is invoked. Invocations occur at least once in response to an event and functions must be idempotent to handle this. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

This operation requires permission for the lambda:InvokeFunction action.

The TooManyRequestsException noted below will return the following: ConcurrentInvocationLimitExceeded will be returned if you have no functions with reserved concurrency and have exceeded your account concurrent limit or if a function without reserved concurrency exceeds the account's unreserved concurrency limit. ReservedFunctionConcurrentInvocationLimitExceeded will be returned when a function with reserved concurrency exceeds its configured concurrency limit.
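
A short sketch of a qualified invocation and of reading the ExecutedVersion value mentioned later in this model; the function name, alias, and payload are placeholders.

import json

import botocore.session

lam = botocore.session.get_session().create_client('lambda', region_name='us-east-1')

resp = lam.invoke(
    FunctionName='order-processor',     # placeholder
    Qualifier='PROD',                   # invoke the alias rather than $LATEST
    InvocationType='RequestResponse',
    Payload=json.dumps({'orderId': 42}).encode('utf-8'),
)
print('executed version:', resp.get('ExecutedVersion'))   # which version the alias routed to
print('result:', resp['Payload'].read())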

" }, "InvokeAsync":{ "name":"InvokeAsync", @@ -395,7 +397,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"}, - {"shape":"CodeStorageExceededException"} + {"shape":"CodeStorageExceededException"}, + {"shape":"PreconditionFailedException"} ], "documentation":"

Publishes a version of your function from the current snapshot of $LATEST. That is, AWS Lambda takes a snapshot of the function code and configuration information from $LATEST and publishes a new version. The code and configuration cannot be modified after publication. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

" }, @@ -414,7 +417,7 @@ {"shape":"ResourceNotFoundException"}, {"shape":"TooManyRequestsException"} ], - "documentation":"

Sets a limit on the number of concurrent executions available to this function. It is a subset of your account's total concurrent execution limit per region. Note that Lambda automatically reserves a buffer of 100 concurrent executions for functions without any reserved concurrency limit. This means if your account limit is 1000, you have a total of 900 available to allocate to individual functions.

" + "documentation":"

Sets a limit on the number of concurrent executions available to this function. It is a subset of your account's total concurrent execution limit per region. Note that Lambda automatically reserves a buffer of 100 concurrent executions for functions without any reserved concurrency limit. This means if your account limit is 1000, you have a total of 900 available to allocate to individual functions. For more information, see concurrent-executions.
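
A minimal sketch of reserving, inspecting, and removing a per-function concurrency limit; the function name and the reserved value are placeholders.

import botocore.session

lam = botocore.session.get_session().create_client('lambda', region_name='us-east-1')

# Reserve 50 concurrent executions for one function, check what remains
# unreserved at the account level, then remove the limit again.
lam.put_function_concurrency(FunctionName='order-processor',
                             ReservedConcurrentExecutions=50)
settings = lam.get_account_settings()
print(settings['AccountLimit']['UnreservedConcurrentExecutions'])
lam.delete_function_concurrency(FunctionName='order-processor')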

" }, "RemovePermission":{ "name":"RemovePermission", @@ -428,7 +431,8 @@ {"shape":"ServiceException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"PreconditionFailedException"} ], "documentation":"

You can remove individual permissions from a resource policy associated with a Lambda function by providing a statement ID that you provided when you added the permission.

If you are using versioning, the permissions you remove are specific to the Lambda function version or alias you specify in the AddPermission request via the Qualifier parameter. For more information about versioning, see AWS Lambda Function Versioning and Aliases.

Note that removal of a permission will cause an active event source to lose permission to the function.

You need permission for the lambda:RemovePermission action.

" }, @@ -477,7 +481,8 @@ {"shape":"ServiceException"}, {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, - {"shape":"TooManyRequestsException"} + {"shape":"TooManyRequestsException"}, + {"shape":"PreconditionFailedException"} ], "documentation":"

Using this API you can update the function version to which the alias points and the alias description. For more information, see Introduction to AWS Lambda Aliases.

This requires permission for the lambda:UpdateAlias action.

" }, @@ -513,7 +518,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"}, - {"shape":"CodeStorageExceededException"} + {"shape":"CodeStorageExceededException"}, + {"shape":"PreconditionFailedException"} ], "documentation":"

Updates the code for the specified Lambda function. This operation must only be used on an existing Lambda function and cannot be used to update the function configuration.

If you are using the versioning feature, note this API will always update the $LATEST version of your Lambda function. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

This operation requires permission for the lambda:UpdateFunctionCode action.

" }, @@ -531,7 +537,8 @@ {"shape":"ResourceNotFoundException"}, {"shape":"InvalidParameterValueException"}, {"shape":"TooManyRequestsException"}, - {"shape":"ResourceConflictException"} + {"shape":"ResourceConflictException"}, + {"shape":"PreconditionFailedException"} ], "documentation":"

Updates the configuration parameters for the specified Lambda function by using the values provided in the request. You provide only the parameters you want to change. This operation must only be used on an existing Lambda function and cannot be used to update the function's code.

If you are using the versioning feature, note this API will always update the $LATEST version of your Lambda function. For information about the versioning feature, see AWS Lambda Function Versioning and Aliases.

This operation requires permission for the lambda:UpdateFunctionConfiguration action.

" } @@ -558,7 +565,7 @@ }, "UnreservedConcurrentExecutions":{ "shape":"UnreservedConcurrentExecutions", - "documentation":"

The number of concurrent executions available to functions that do not have concurrency limits set.

" + "documentation":"

The number of concurrent executions available to functions that do not have concurrency limits set. For more information, see concurrent-executions.

" } }, "documentation":"

Provides limits of code size and concurrency associated with the current account and region.

" @@ -625,6 +632,10 @@ "documentation":"

You can use this optional query parameter to describe a qualified ARN using a function version or an alias name. The permission will then apply to the specific qualified ARN. For example, if you specify function version 2 as the qualifier, the permission applies only when the request is made using the qualified function ARN:

arn:aws:lambda:aws-region:acct-id:function:function-name:2

If you specify an alias name, for example PROD, then the permission is valid only for requests made using the alias ARN:

arn:aws:lambda:aws-region:acct-id:function:function-name:PROD

If the qualifier is not specified, the permission is valid only when the request is made using the unqualified function ARN.

arn:aws:lambda:aws-region:acct-id:function:function-name
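
A sketch of scoping a permission to an alias via the Qualifier parameter described above; the function name, alias, statement ID, and bucket ARN are placeholders.

import botocore.session

lam = botocore.session.get_session().create_client('lambda', region_name='us-east-1')

# Grant Amazon S3 permission to invoke only the PROD alias of the function.
lam.add_permission(
    FunctionName='order-processor',        # placeholder
    Qualifier='PROD',                      # permission applies to the alias ARN only
    StatementId='s3-prod-invoke',          # placeholder statement ID
    Action='lambda:InvokeFunction',
    Principal='s3.amazonaws.com',
    SourceArn='arn:aws:s3:::example-bucket',   # placeholder
)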

", "location":"querystring", "locationName":"Qualifier" + }, + "RevisionId":{ + "shape":"String", + "documentation":"

An optional value you can use to ensure you are updating the latest revision of the function version or alias. If the RevisionId you pass doesn't match the latest RevisionId of the function or alias, the call fails with an error message advising you to retrieve the latest function version or alias RevisionId using either or .

" } }, "documentation":"

" @@ -678,6 +689,10 @@ "RoutingConfig":{ "shape":"AliasRoutingConfiguration", "documentation":"

Specifies an additional function version the alias points to, allowing you to dictate what percentage of traffic will invoke each version. For more information, see lambda-traffic-shifting-using-aliases.

" + }, + "RevisionId":{ + "shape":"String", + "documentation":"

Represents the latest updated revision of the function or alias.

" } }, "documentation":"

Provides configuration information about a Lambda function version alias.

" @@ -691,7 +706,7 @@ "members":{ "AdditionalVersionWeights":{ "shape":"AdditionalVersionWeights", - "documentation":"

Set this property value to dictate what percentage of traffic will invoke the updated function version. If set to an empty string, 100 percent of traffic will invoke function-version.

" + "documentation":"

Set this value to dictate what percentage of traffic will invoke the updated function version. If set to an empty string, 100 percent of traffic will invoke function-version. For more information, see lambda-traffic-shifting-using-aliases.

" } }, "documentation":"

The parent object that implements what percentage of traffic will invoke each function version. For more information, see lambda-traffic-shifting-using-aliases.
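
A hedged sketch of traffic shifting with AdditionalVersionWeights; the function name, alias, and version numbers are placeholders, and it assumes UpdateAlias accepts the RoutingConfig member introduced alongside this structure.

import botocore.session

lam = botocore.session.get_session().create_client('lambda', region_name='us-east-1')

# Send 5 percent of PROD traffic to version 2 while version 1 keeps the rest.
lam.update_alias(
    FunctionName='order-processor',   # placeholder
    Name='PROD',
    FunctionVersion='1',
    RoutingConfig={'AdditionalVersionWeights': {'2': 0.05}},
)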

" @@ -732,7 +747,7 @@ "members":{ "ReservedConcurrentExecutions":{ "shape":"ReservedConcurrentExecutions", - "documentation":"

The number of concurrent executions reserved for this function.

" + "documentation":"

The number of concurrent executions reserved for this function. For more information, see concurrent-executions.

" } } }, @@ -819,7 +834,7 @@ }, "Runtime":{ "shape":"Runtime", - "documentation":"

The runtime environment for the Lambda function you are uploading.

To use the Python runtime v3.6, set the value to \"python3.6\". To use the Python runtime v2.7, set the value to \"python2.7\". To use the Node.js runtime v6.10, set the value to \"nodejs6.10\". To use the Node.js runtime v4.3, set the value to \"nodejs4.3\".

Node v0.10.42 is currently marked as deprecated. You must migrate existing functions to the newer Node.js runtime versions available on AWS Lambda (nodejs4.3 or nodejs6.10) as soon as possible. Failure to do so will result in an invalid parmaeter error being returned. Note that you will have to follow this procedure for each region that contains functions written in the Node v0.10.42 runtime.

" + "documentation":"

The runtime environment for the Lambda function you are uploading.

To use the Python runtime v3.6, set the value to \"python3.6\". To use the Python runtime v2.7, set the value to \"python2.7\". To use the Node.js runtime v6.10, set the value to \"nodejs6.10\". To use the Node.js runtime v4.3, set the value to \"nodejs4.3\".

Node v0.10.42 is currently marked as deprecated. You must migrate existing functions to the newer Node.js runtime versions available on AWS Lambda (nodejs4.3 or nodejs6.10) as soon as possible. Failure to do so will result in an invalid parameter error being returned. Note that you will have to follow this procedure for each region that contains functions written in the Node v0.10.42 runtime.

" }, "Role":{ "shape":"RoleArn", @@ -924,7 +939,7 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the function you are removing concurrent execution limits from.

", + "documentation":"

The name of the function you are removing concurrent execution limits from. For more information, see concurrent-executions.

", "location":"uri", "locationName":"FunctionName" } @@ -1215,6 +1230,10 @@ "MasterArn":{ "shape":"FunctionArn", "documentation":"

Returns the ARN (Amazon Resource Name) of the master function.

" + }, + "RevisionId":{ + "shape":"String", + "documentation":"

Represents the latest updated revision of the function or alias.

" } }, "documentation":"

A complex type that describes function metadata.

" @@ -1328,7 +1347,7 @@ }, "Concurrency":{ "shape":"Concurrency", - "documentation":"

The concurrent execution limit set for this function.

" + "documentation":"

The concurrent execution limit set for this function. For more information, see concurrent-executions.

" } }, "documentation":"

This response contains the object for the Lambda function location (see FunctionCodeLocation).

" @@ -1358,6 +1377,10 @@ "Policy":{ "shape":"String", "documentation":"

The resource policy associated with the specified function. The response returns the policy as a string, using a backslash (\"\\\") as an escape character in the JSON.

" + }, + "RevisionId":{ + "shape":"String", + "documentation":"

Represents the latest updated revision of the function or alias.

" } }, "documentation":"

" @@ -1509,7 +1532,7 @@ }, "ExecutedVersion":{ "shape":"Version", - "documentation":"

The function version that has been executed. This value is returned only if the invocation type is RequestResponse.

", + "documentation":"

The function version that has been executed. This value is returned only if the invocation type is RequestResponse. For more information, see lambda-traffic-shifting-using-aliases.

", "location":"header", "locationName":"X-Amz-Executed-Version" } @@ -1842,6 +1865,22 @@ "error":{"httpStatusCode":400}, "exception":true }, + "PreconditionFailedException":{ + "type":"structure", + "members":{ + "Type":{ + "shape":"String", + "documentation":"

" + }, + "message":{ + "shape":"String", + "documentation":"

" + } + }, + "documentation":"

The RevisionId provided does not match the latest RevisionId for the Lambda function or alias. Call the GetFunction or the GetAlias API to retrieve the latest RevisionId for your resource.
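A minimal sketch of the optimistic-locking pattern this exception implies, assuming a boto3 Lambda client and a hypothetical alias named "prod"; treating the error code as the exception name shown here is an assumption:

import boto3
from botocore.exceptions import ClientError

lam = boto3.client('lambda')

alias = lam.get_alias(FunctionName='my-function', Name='prod')
try:
    lam.update_alias(
        FunctionName='my-function',
        Name='prod',
        FunctionVersion='2',
        RevisionId=alias['RevisionId'],  # apply only if nothing changed since the read
    )
except ClientError as error:
    if error.response['Error']['Code'] == 'PreconditionFailedException':
        # Someone else updated the alias first: re-read the RevisionId and retry.
        pass
    else:
        raise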

", + "error":{"httpStatusCode":412}, + "exception":true + }, "Principal":{ "type":"string", "pattern":".*" @@ -1863,6 +1902,10 @@ "Description":{ "shape":"Description", "documentation":"

The description for the version you are publishing. If not provided, AWS Lambda copies the description from the $LATEST version.

" + }, + "RevisionId":{ + "shape":"String", + "documentation":"

An optional value you can use to ensure you are updating the latest revision of the function or alias. If the RevisionId you pass doesn't match the latest RevisionId of the function or alias, the request fails with an error message advising you to retrieve the latest RevisionId for your resource by calling GetFunction or GetAlias.

" } }, "documentation":"

" @@ -1876,13 +1919,13 @@ "members":{ "FunctionName":{ "shape":"FunctionName", - "documentation":"

The name of the function you are setting concurrent execution limits on.

", + "documentation":"

The name of the function you are setting concurrent execution limits on. For more information, see concurrent-executions.

", "location":"uri", "locationName":"FunctionName" }, "ReservedConcurrentExecutions":{ "shape":"ReservedConcurrentExecutions", - "documentation":"

The concurrent execution limit reserved for this function.

" + "documentation":"

The concurrent execution limit reserved for this function. For more information, see concurrent-executions.
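As a brief, illustrative boto3 usage of these fields (the function name and limit are placeholders): reserve capacity, read it back, and release it.

import boto3

lam = boto3.client('lambda')

lam.put_function_concurrency(
    FunctionName='my-function',
    ReservedConcurrentExecutions=25,
)

resp = lam.get_function(FunctionName='my-function')
print(resp.get('Concurrency'))  # e.g. {'ReservedConcurrentExecutions': 25}

lam.delete_function_concurrency(FunctionName='my-function')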

" } } }, @@ -1916,6 +1959,12 @@ "documentation":"

You can specify this optional parameter to remove permission associated with a specific function version or function alias. If you don't specify this parameter, the API removes permission associated with the unqualified function ARN.

", "location":"querystring", "locationName":"Qualifier" + }, + "RevisionId":{ + "shape":"String", + "documentation":"

An optional value you can use to ensure you are updating the latest revision of the function or alias. If the RevisionId you pass doesn't match the latest RevisionId of the function or alias, the request fails with an error message advising you to retrieve the latest RevisionId for your resource by calling GetFunction or GetAlias.

", + "location":"querystring", + "locationName":"RevisionId" } }, "documentation":"

" @@ -1978,7 +2027,9 @@ "python2.7", "python3.6", "dotnetcore1.0", - "nodejs4.3-edge" + "dotnetcore2.0", + "nodejs4.3-edge", + "go1.x" ] }, "S3Bucket":{ @@ -2102,7 +2153,7 @@ "message":{"shape":"String"}, "Reason":{"shape":"ThrottleReason"} }, - "documentation":"

You will get this exception for the following reasons. ConcurrentInvocationLimitExceeded is returned if you have no functions with reserved-concurrency and have exceeded your account concurrent limit or if a function without reserved concurrency exceeds the account's unreserved concurrency limit. ReservedFunctionConcurrentInvocationLimitExceeded is returned when a function with reserved concurrency exceeds its configured concurrent limit. CallerRateLimitExceeded is returned when your account limit is exceeded and you have not reserved concurrency on any function. For more information, see concurrent-executions

", + "documentation":"

", "error":{"httpStatusCode":429}, "exception":true }, @@ -2198,6 +2249,10 @@ "RoutingConfig":{ "shape":"AliasRoutingConfiguration", "documentation":"

Specifies an additional version your alias can point to, allowing you to dictate what percentage of traffic will invoke each version. For more information, see lambda-traffic-shifting-using-aliases.

" + }, + "RevisionId":{ + "shape":"String", + "documentation":"

An optional value you can use to ensure you are updating the latest revision of the function or alias. If the RevisionId you pass doesn't match the latest RevisionId of the function or alias, the request fails with an error message advising you to retrieve the latest RevisionId for your resource by calling GetFunction or GetAlias.

" } } }, @@ -2259,6 +2314,10 @@ "DryRun":{ "shape":"Boolean", "documentation":"

This boolean parameter can be used to test your request to AWS Lambda to update the Lambda function and publish a version as an atomic operation. It will do all necessary computation and validation of your code but will not upload it or publish a version. Each time this operation is invoked, the CodeSha256 hash value of the provided code will also be computed and returned in the response.
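A hedged sketch of how DryRun might be exercised to validate a package without changing anything (the file and function names are placeholders):

import boto3

lam = boto3.client('lambda')

with open('deployment.zip', 'rb') as f:
    resp = lam.update_function_code(
        FunctionName='my-function',
        ZipFile=f.read(),
        DryRun=True,  # compute and validate only; nothing is uploaded or published
    )

print(resp['CodeSha256'])  # hash of the code that would have been deployed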

" + }, + "RevisionId":{ + "shape":"String", + "documentation":"

An optional value you can use to ensure you are updating the latest revision of the function or alias. If the RevisionId you pass doesn't match the latest RevisionId of the function or alias, the request fails with an error message advising you to retrieve the latest RevisionId for your resource by calling GetFunction or GetAlias.

" } }, "documentation":"

" @@ -2313,6 +2372,10 @@ "TracingConfig":{ "shape":"TracingConfig", "documentation":"

The parent object that contains your function's tracing settings.

" + }, + "RevisionId":{ + "shape":"String", + "documentation":"

An optional value you can use to ensure you are updating the latest revision of the function or alias. If the RevisionId you pass doesn't match the latest RevisionId of the function or alias, the request fails with an error message advising you to retrieve the latest RevisionId for your resource by calling GetFunction or GetAlias.

" } }, "documentation":"

" diff --git a/botocore/data/lex-models/2017-04-19/paginators-1.json b/botocore/data/lex-models/2017-04-19/paginators-1.json index ea142457..02d23082 100644 --- a/botocore/data/lex-models/2017-04-19/paginators-1.json +++ b/botocore/data/lex-models/2017-04-19/paginators-1.json @@ -1,3 +1,64 @@ { - "pagination": {} + "pagination": { + "GetSlotTypeVersions": { + "result_key": "slotTypes", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "GetSlotTypes": { + "result_key": "slotTypes", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "GetIntents": { + "result_key": "intents", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "GetBotChannelAssociations": { + "result_key": "botChannelAssociations", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "GetBots": { + "result_key": "bots", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "GetBuiltinSlotTypes": { + "result_key": "slotTypes", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "GetIntentVersions": { + "result_key": "intents", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "GetBotAliases": { + "result_key": "BotAliases", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "GetBuiltinIntents": { + "result_key": "intents", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "GetBotVersions": { + "result_key": "bots", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + } + } } diff --git a/botocore/data/medialive/2017-10-14/service-2.json b/botocore/data/medialive/2017-10-14/service-2.json index 50e1d7bb..c4356d97 100644 --- a/botocore/data/medialive/2017-10-14/service-2.json +++ b/botocore/data/medialive/2017-10-14/service-2.json @@ -277,6 +277,10 @@ "shape": "BadGatewayException", "documentation": "Bad Gateway Error" }, + { + "shape": "NotFoundException", + "documentation": "Input Security Group not found" + }, { "shape": "GatewayTimeoutException", "documentation": "Gateway Timeout Error" @@ -1649,6 +1653,10 @@ "locationName": "inputAttachments", "documentation": "List of input attachments for channel." }, + "InputSpecification": { + "shape": "InputSpecification", + "locationName": "inputSpecification" + }, "Name": { "shape": "__string", "locationName": "name", @@ -1740,6 +1748,10 @@ "locationName": "inputAttachments", "documentation": "List of input attachments for channel." }, + "InputSpecification": { + "shape": "InputSpecification", + "locationName": "inputSpecification" + }, "Name": { "shape": "__string", "locationName": "name", @@ -1792,6 +1804,11 @@ "locationName": "inputAttachments", "documentation": "List of input attachments for channel." }, + "InputSpecification": { + "shape": "InputSpecification", + "locationName": "inputSpecification", + "documentation": "Specification of input for this channel (max. bitrate, resolution, codec, etc.)" + }, "Name": { "shape": "__string", "locationName": "name", @@ -1832,6 +1849,11 @@ "locationName": "inputAttachments", "documentation": "List of input attachments for channel." }, + "InputSpecification": { + "shape": "InputSpecification", + "locationName": "inputSpecification", + "documentation": "Specification of input for this channel (max. 
bitrate, resolution, codec, etc.)" + }, "Name": { "shape": "__string", "locationName": "name", @@ -1882,7 +1904,7 @@ "Destinations": { "shape": "ListOfInputDestinationRequest", "locationName": "destinations", - "documentation": "settings required for PUSH-type inputs; one per redundancy group.\nOnly one of sources and destinations can be specified.\nNote: there are currently no settings required for PUSH-type inputs\n" + "documentation": "Destination settings for PUSH type inputs." }, "InputSecurityGroups": { "shape": "ListOf__string", @@ -1897,13 +1919,13 @@ "RequestId": { "shape": "__string", "locationName": "requestId", - "documentation": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries\n", + "documentation": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.\n", "idempotencyToken": true }, "Sources": { "shape": "ListOfInputSourceRequest", "locationName": "sources", - "documentation": "settings required for PULL-type inputs; one per redundancy group\nOnly one of sources and destinations can be specified\n" + "documentation": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty.\n" }, "Type": { "shape": "InputType", @@ -1918,7 +1940,7 @@ "Destinations": { "shape": "ListOfInputDestinationRequest", "locationName": "destinations", - "documentation": "settings required for PUSH-type inputs; one per redundancy group.\nOnly one of sources and destinations can be specified.\nNote: there are currently no settings required for PUSH-type inputs\n" + "documentation": "Destination settings for PUSH type inputs." }, "InputSecurityGroups": { "shape": "ListOf__string", @@ -1933,13 +1955,13 @@ "RequestId": { "shape": "__string", "locationName": "requestId", - "documentation": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries\n", + "documentation": "Unique identifier of the request to ensure the request is handled\nexactly once in case of retries.\n", "idempotencyToken": true }, "Sources": { "shape": "ListOfInputSourceRequest", "locationName": "sources", - "documentation": "settings required for PULL-type inputs; one per redundancy group\nOnly one of sources and destinations can be specified\n" + "documentation": "The source URLs for a PULL-type input. Every PULL type input needs\nexactly two source URLs for redundancy.\nOnly specify sources for PULL type Inputs. Leave Destinations empty.\n" }, "Type": { "shape": "InputType", @@ -2046,6 +2068,10 @@ "locationName": "inputAttachments", "documentation": "List of input attachments for channel." }, + "InputSpecification": { + "shape": "InputSpecification", + "locationName": "inputSpecification" + }, "Name": { "shape": "__string", "locationName": "name", @@ -2157,6 +2183,10 @@ "locationName": "inputAttachments", "documentation": "List of input attachments for channel." }, + "InputSpecification": { + "shape": "InputSpecification", + "locationName": "inputSpecification" + }, "Name": { "shape": "__string", "locationName": "name", @@ -2200,37 +2230,37 @@ "Arn": { "shape": "__string", "locationName": "arn", - "documentation": "Unique ARN of input (generated, immutable)" + "documentation": "The Unique ARN of the input (generated, immutable)." 
}, "AttachedChannels": { "shape": "ListOf__string", "locationName": "attachedChannels", - "documentation": "List of channel IDs that that input is attached to (currently an input can only be attached to one channel)" + "documentation": "A list of channel IDs that that input is attached to (currently an input can only be attached to one channel)." }, "Destinations": { "shape": "ListOfInputDestination", "locationName": "destinations", - "documentation": "List of destinations of input (PULL-type)" + "documentation": "A list of the destinations of the input (PUSH-type)." }, "Id": { "shape": "__string", "locationName": "id", - "documentation": "generated ID of input (unique for user account, immutable)" + "documentation": "The generated ID of the input (unique for user account, immutable)." }, "Name": { "shape": "__string", "locationName": "name", - "documentation": "user-assigned name (mutable)" + "documentation": "The user-assigned name (This is a mutable value)." }, "SecurityGroups": { "shape": "ListOf__string", "locationName": "securityGroups", - "documentation": "List of IDs for all the security groups attached to the input." + "documentation": "A list of IDs for all the security groups attached to the input." }, "Sources": { "shape": "ListOfInputSource", "locationName": "sources", - "documentation": "List of sources of input (PULL-type)" + "documentation": "A list of the sources of the input (PULL-type)." }, "State": { "shape": "InputState", @@ -3541,7 +3571,7 @@ "IndexNSegments": { "shape": "__integer", "locationName": "indexNSegments", - "documentation": "Number of segments to keep in the playlist (.m3u8) file. mode must be \"vod\" for this setting to have an effect, and this number should be less than or equal to keepSegments." + "documentation": "If mode is \"live\", the number of segments to retain in the manifest (.m3u8) file. This number must be less than or equal to keepSegments. If mode is \"vod\", this parameter has no effect." }, "InputLossAction": { "shape": "InputLossActionForHlsOut", @@ -3561,7 +3591,7 @@ "KeepSegments": { "shape": "__integer", "locationName": "keepSegments", - "documentation": "Number of segments to retain in the destination directory. mode must be \"live\" for this setting to have an effect." + "documentation": "If mode is \"live\", the number of TS segments to retain in the destination directory. If mode is \"vod\", this parameter has no effect." }, "KeyFormat": { "shape": "__string", @@ -3596,7 +3626,7 @@ "Mode": { "shape": "HlsMode", "locationName": "mode", - "documentation": "If set to \"vod\", keeps and indexes all segments starting with the first segment. If set to \"live\" segments will age out and only the last keepSegments number of segments will be retained." + "documentation": "If \"vod\", all segments are indexed and kept permanently in the destination and manifest. If \"live\", only the number segments specified in keepSegments and indexNSegments are kept; newer segments replace older segments, which may prevent players from rewinding all the way to the beginning of the event.\n\nVOD mode uses HLS EXT-X-PLAYLIST-TYPE of EVENT while the channel is running, converting it to a \"VOD\" type manifest on completion of the stream." }, "OutputSelection": { "shape": "HlsOutputSelection", @@ -3889,37 +3919,37 @@ "Arn": { "shape": "__string", "locationName": "arn", - "documentation": "Unique ARN of input (generated, immutable)" + "documentation": "The Unique ARN of the input (generated, immutable)." 
}, "AttachedChannels": { "shape": "ListOf__string", "locationName": "attachedChannels", - "documentation": "List of channel IDs that that input is attached to (currently an input can only be attached to one channel)" + "documentation": "A list of channel IDs that that input is attached to (currently an input can only be attached to one channel)." }, "Destinations": { "shape": "ListOfInputDestination", "locationName": "destinations", - "documentation": "List of destinations of input (PULL-type)" + "documentation": "A list of the destinations of the input (PUSH-type)." }, "Id": { "shape": "__string", "locationName": "id", - "documentation": "generated ID of input (unique for user account, immutable)" + "documentation": "The generated ID of the input (unique for user account, immutable)." }, "Name": { "shape": "__string", "locationName": "name", - "documentation": "user-assigned name (mutable)" + "documentation": "The user-assigned name (This is a mutable value)." }, "SecurityGroups": { "shape": "ListOf__string", "locationName": "securityGroups", - "documentation": "List of IDs for all the security groups attached to the input." + "documentation": "A list of IDs for all the security groups attached to the input." }, "Sources": { "shape": "ListOfInputSource", "locationName": "sources", - "documentation": "List of sources of input (PULL-type)" + "documentation": "A list of the sources of the input (PULL-type)." }, "State": { "shape": "InputState", @@ -3964,6 +3994,15 @@ }, "documentation": "Placeholder documentation for InputChannelLevel" }, + "InputCodec": { + "type": "string", + "documentation": "codec in increasing order of complexity", + "enum": [ + "MPEG2", + "AVC", + "HEVC" + ] + }, "InputDeblockFilter": { "type": "string", "enum": [ @@ -3986,12 +4025,12 @@ "Ip": { "shape": "__string", "locationName": "ip", - "documentation": "system-generated static IP address of endpoint.\nRemains fixed for the lifetime of the input\n" + "documentation": "The system-generated static IP address of endpoint.\nIt remains fixed for the lifetime of the input.\n" }, "Port": { "shape": "__string", "locationName": "port", - "documentation": "port for input" + "documentation": "The port number for the input." }, "Url": { "shape": "__string", @@ -3999,7 +4038,7 @@ "documentation": "This represents the endpoint that the customer stream will be\npushed to.\n" } }, - "documentation": "Settings for a PUSH type input" + "documentation": "The settings for a PUSH type input." }, "InputDestinationRequest": { "type": "structure", @@ -4010,7 +4049,7 @@ "documentation": "A unique name for the location the RTMP stream is being pushed\nto.\n" } }, - "documentation": "Endpoint settings for a PUSH type input" + "documentation": "Endpoint settings for a PUSH type input." }, "InputFilter": { "type": "string", @@ -4106,6 +4145,24 @@ ], "documentation": "Placeholder documentation for InputLossImageType" }, + "InputMaximumBitrate": { + "type": "string", + "documentation": "Maximum input bitrate in megabits per second. 
Bitrates up to 50 Mbps are supported currently.", + "enum": [ + "MAX_10_MBPS", + "MAX_20_MBPS", + "MAX_50_MBPS" + ] + }, + "InputResolution": { + "type": "string", + "documentation": "Input resolution based on lines of vertical resolution in the input; SD is less than 720 lines, HD is 720 to 1080 lines, UHD is greater than 1080 lines\n", + "enum": [ + "SD", + "HD", + "UHD" + ] + }, "InputSecurityGroup": { "type": "structure", "members": { @@ -4195,7 +4252,7 @@ "PasswordParam": { "shape": "__string", "locationName": "passwordParam", - "documentation": "key used to extract the password from EC2 Parameter store" + "documentation": "The key used to extract the password from EC2 Parameter store." }, "Url": { "shape": "__string", @@ -4205,10 +4262,10 @@ "Username": { "shape": "__string", "locationName": "username", - "documentation": "username for input source" + "documentation": "The username for the input source." } }, - "documentation": "Settings for a PULL type input" + "documentation": "The settings for a PULL type input." }, "InputSourceEndBehavior": { "type": "string", @@ -4224,7 +4281,7 @@ "PasswordParam": { "shape": "__string", "locationName": "passwordParam", - "documentation": "key used to extract the password from EC2 Parameter store" + "documentation": "The key used to extract the password from EC2 Parameter store." }, "Url": { "shape": "__string", @@ -4234,10 +4291,31 @@ "Username": { "shape": "__string", "locationName": "username", - "documentation": "username for input source" + "documentation": "The username for the input source." } }, - "documentation": "Settings for for a PULL type input" + "documentation": "Settings for for a PULL type input." + }, + "InputSpecification": { + "type": "structure", + "members": { + "Codec": { + "shape": "InputCodec", + "locationName": "codec", + "documentation": "Input codec" + }, + "MaximumBitrate": { + "shape": "InputMaximumBitrate", + "locationName": "maximumBitrate", + "documentation": "Maximum input bitrate, categorized coarsely" + }, + "Resolution": { + "shape": "InputResolution", + "locationName": "resolution", + "documentation": "Input resolution, categorized coarsely" + } + }, + "documentation": "Placeholder documentation for InputSpecification" }, "InputState": { "type": "string", @@ -4899,7 +4977,7 @@ "EcmPid": { "shape": "__string", "locationName": "ecmPid", - "documentation": "Packet Identifier (PID) for ECM in the transport stream. Only enabled when Simulcrypt is enabled. Can be entered as a decimal or hexadecimal value. Valid values are 32 (or 0x20)..8182 (or 0x1ff6)." + "documentation": "This field is unused and deprecated." }, "EsRateInPes": { "shape": "M2tsEsRateInPes", @@ -5069,7 +5147,7 @@ "EcmPid": { "shape": "__string", "locationName": "ecmPid", - "documentation": "ThePlatform-protected transport streams using 'microsoft' as Target Client include an ECM stream. This ECM stream contains the size, IV, and PTS of every sample in the transport stream. This stream PID is specified here. This PID has no effect on non ThePlatform-protected streams." + "documentation": "This parameter is unused and deprecated." }, "PatInterval": { "shape": "__integer", @@ -5770,6 +5848,10 @@ "locationName": "inputAttachments", "documentation": "List of input attachments for channel." 
}, + "InputSpecification": { + "shape": "InputSpecification", + "locationName": "inputSpecification" + }, "Name": { "shape": "__string", "locationName": "name", @@ -5855,6 +5937,10 @@ "locationName": "inputAttachments", "documentation": "List of input attachments for channel." }, + "InputSpecification": { + "shape": "InputSpecification", + "locationName": "inputSpecification" + }, "Name": { "shape": "__string", "locationName": "name", diff --git a/botocore/data/mobile/2017-07-01/paginators-1.json b/botocore/data/mobile/2017-07-01/paginators-1.json index ea142457..e86bb7d0 100644 --- a/botocore/data/mobile/2017-07-01/paginators-1.json +++ b/botocore/data/mobile/2017-07-01/paginators-1.json @@ -1,3 +1,16 @@ { - "pagination": {} + "pagination": { + "ListBundles": { + "result_key": "bundleList", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + }, + "ListProjects": { + "result_key": "projects", + "output_token": "nextToken", + "input_token": "nextToken", + "limit_key": "maxResults" + } + } } diff --git a/botocore/data/mturk/2017-01-17/paginators-1.json b/botocore/data/mturk/2017-01-17/paginators-1.json index ea142457..ea50cacc 100644 --- a/botocore/data/mturk/2017-01-17/paginators-1.json +++ b/botocore/data/mturk/2017-01-17/paginators-1.json @@ -1,3 +1,58 @@ { - "pagination": {} + "pagination": { + "ListAssignmentsForHIT": { + "result_key": "Assignments", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListQualificationTypes": { + "result_key": "QualificationTypes", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListHITs": { + "result_key": "HITs", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListWorkerBlocks": { + "result_key": "WorkerBlocks", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListReviewableHITs": { + "result_key": "HITs", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListHITsForQualificationType": { + "result_key": "HITs", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListQualificationRequests": { + "result_key": "QualificationRequests", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListWorkersWithQualificationType": { + "result_key": "Qualifications", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListBonusPayments": { + "result_key": "BonusPayments", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + } + } } diff --git a/botocore/data/mturk/2017-01-17/service-2.json b/botocore/data/mturk/2017-01-17/service-2.json index 3d613604..3b9a089a 100644 --- a/botocore/data/mturk/2017-01-17/service-2.json +++ b/botocore/data/mturk/2017-01-17/service-2.json @@ -241,7 +241,7 @@ {"shape":"ServiceFault"}, {"shape":"RequestError"} ], - "documentation":"

The GetFileUploadURL operation generates and returns a temporary URL. You use the temporary URL to retrieve a file uploaded by a Worker as an answer to a FileUploadAnswer question for a HIT. The temporary URL is generated the instant the GetFileUploadURL operation is called, and is valid for 60 seconds. You can get a temporary file upload URL any time until the HIT is disposed. After the HIT is disposed, any uploaded files are deleted, and cannot be retrieved.

", + "documentation":"

The GetFileUploadURL operation generates and returns a temporary URL. You use the temporary URL to retrieve a file uploaded by a Worker as an answer to a FileUploadAnswer question for a HIT. The temporary URL is generated the instant the GetFileUploadURL operation is called, and is valid for 60 seconds. You can get a temporary file upload URL any time until the HIT is disposed. After the HIT is disposed, any uploaded files are deleted, and cannot be retrieved. Pending Deprecation on December 12, 2017. The Answer Specification structure will no longer support the FileUploadAnswer element to be used for the QuestionForm data structure. Instead, we recommend that Requesters who want to create HITs asking Workers to upload files use Amazon S3.

", "idempotent":true }, "GetHIT":{ diff --git a/botocore/data/organizations/2016-11-28/paginators-1.json b/botocore/data/organizations/2016-11-28/paginators-1.json index a2a9966a..1c8f978b 100644 --- a/botocore/data/organizations/2016-11-28/paginators-1.json +++ b/botocore/data/organizations/2016-11-28/paginators-1.json @@ -71,6 +71,12 @@ "output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Targets" + }, + "ListAWSServiceAccessForOrganization": { + "result_key": "EnabledServicePrincipals", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index 8d79520c..1eca5789 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -1998,6 +1998,20 @@ }, "documentation":"

This data type is used as a response element in the action DescribeDBEngineVersions.

" }, + "CloudwatchLogsExportConfiguration":{ + "type":"structure", + "members":{ + "EnableLogTypes":{ + "shape":"LogTypeList", + "documentation":"

The list of log types to enable.

" + }, + "DisableLogTypes":{ + "shape":"LogTypeList", + "documentation":"

The list of log types to disable.

" + } + }, + "documentation":"

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.
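For instance, a ModifyDBInstance call might toggle exports like this (the instance identifier and log type names are illustrative; the log types an engine actually supports can be read from DescribeDBEngineVersions, as sketched further below):

import boto3

rds = boto3.client('rds')

rds.modify_db_instance(
    DBInstanceIdentifier='my-db-instance',
    CloudwatchLogsExportConfiguration={
        'EnableLogTypes': ['error', 'slowquery'],  # illustrative log type names
        'DisableLogTypes': ['general'],
    },
    ApplyImmediately=True,
)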

" + }, "CopyDBClusterParameterGroupMessage":{ "type":"structure", "required":[ @@ -2492,6 +2506,10 @@ "PerformanceInsightsKMSKeyId":{ "shape":"String", "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

" + }, + "EnableCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

The list of log types that need to be enabled for exporting to CloudWatch Logs.

" } }, "documentation":"

" @@ -2525,7 +2543,7 @@ }, "MultiAZ":{ "shape":"BooleanOptional", - "documentation":"

Specifies whether the read replica is in a Multi-AZ deployment.

" + "documentation":"

Specifies whether the read replica is in a Multi-AZ deployment.

You can create a Read Replica as a Multi-AZ DB instance. RDS creates a standby of your replica in another Availability Zone for failover support for the replica. Creating your Read Replica as a Multi-AZ DB instance is independent of whether the source database is a Multi-AZ DB instance.

Currently, PostgreSQL Read Replicas can only be created as single-AZ DB instances.
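A minimal sketch of the Multi-AZ replica case described above (the identifiers are placeholders, and the source engine is assumed to support Multi-AZ replicas):

import boto3

rds = boto3.client('rds')

rds.create_db_instance_read_replica(
    DBInstanceIdentifier='my-replica',
    SourceDBInstanceIdentifier='my-source-instance',
    MultiAZ=True,  # RDS keeps a standby of the replica in another Availability Zone
)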

" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -2583,6 +2601,10 @@ "PerformanceInsightsKMSKeyId":{ "shape":"String", "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

" + }, + "EnableCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

The list of logs that the new DB instance is to export to CloudWatch Logs.

" } } }, @@ -3382,6 +3404,14 @@ "SupportedTimezones":{ "shape":"SupportedTimezonesList", "documentation":"

A list of the time zones supported by this engine for the Timezone parameter of the CreateDBInstance action.

" + }, + "ExportableLogTypes":{ + "shape":"LogTypeList", + "documentation":"

The types of logs that the database engine has available for export to CloudWatch Logs.

" + }, + "SupportsLogExportsToCloudwatchLogs":{ + "shape":"Boolean", + "documentation":"

A value that indicates whether the engine version supports exporting the log types specified by ExportableLogTypes to CloudWatch Logs.
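These two fields can be checked before enabling exports; a rough example (the engine name is illustrative):

import boto3

rds = boto3.client('rds')

versions = rds.describe_db_engine_versions(Engine='mysql')
for version in versions['DBEngineVersions']:
    if version.get('SupportsLogExportsToCloudwatchLogs'):
        print(version['EngineVersion'], version['ExportableLogTypes'])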

" } }, "documentation":"

This data type is used as a response element in the action DescribeDBEngineVersions.

" @@ -3613,6 +3643,10 @@ "PerformanceInsightsKMSKeyId":{ "shape":"String", "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

" + }, + "EnabledCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

A list of log types that this DB instance is configured to export to CloudWatch Logs.

" } }, "documentation":"

Contains the details of an Amazon RDS DB instance.

This data type is used as a response element in the DescribeDBInstances action.

", @@ -5944,6 +5978,10 @@ }, "documentation":"

" }, + "LogTypeList":{ + "type":"list", + "member":{"shape":"String"} + }, "Long":{"type":"long"}, "ModifyDBClusterMessage":{ "type":"structure", @@ -6199,6 +6237,10 @@ "PerformanceInsightsKMSKeyId":{ "shape":"String", "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the KMS encryption key.

" + }, + "CloudwatchLogsExportConfiguration":{ + "shape":"CloudwatchLogsExportConfiguration", + "documentation":"

The configuration setting for the log types to be enabled for export to CloudWatch Logs for a specific DB instance or DB cluster.

" } }, "documentation":"

" @@ -6959,6 +7001,20 @@ "locationName":"Parameter" } }, + "PendingCloudwatchLogsExports":{ + "type":"structure", + "members":{ + "LogTypesToEnable":{ + "shape":"LogTypeList", + "documentation":"

Log types that are in the process of being enabled. After they are enabled, these log types are exported to CloudWatch Logs.

" + }, + "LogTypesToDisable":{ + "shape":"LogTypeList", + "documentation":"

Log types that are in the process of being deactivated. After they are deactivated, these log types aren't exported to CloudWatch Logs.

" + } + }, + "documentation":"

A list of the log types whose configuration is still pending. In other words, these log types are in the process of being activated or deactivated.

" + }, "PendingMaintenanceAction":{ "type":"structure", "members":{ @@ -7071,7 +7127,8 @@ "DBSubnetGroupName":{ "shape":"String", "documentation":"

The new DB subnet group for the DB instance.

" - } + }, + "PendingCloudwatchLogsExports":{"shape":"PendingCloudwatchLogsExports"} }, "documentation":"

This data type is used as a response element in the ModifyDBInstance action.

" }, @@ -7923,6 +7980,10 @@ "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.

You can enable IAM database authentication for the following database engines

Default: false

" + }, + "EnableCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs.

" } }, "documentation":"

" @@ -8096,6 +8157,10 @@ "PerformanceInsightsKMSKeyId":{ "shape":"String", "documentation":"

The AWS KMS key identifier for encryption of Performance Insights data. The KMS key ID is the Amazon Resource Name (ARN), the KMS key identifier, or the KMS key alias for the KMS encryption key.

" + }, + "EnableCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs.

" } } }, @@ -8114,7 +8179,7 @@ "members":{ "SourceDBInstanceIdentifier":{ "shape":"String", - "documentation":"

The identifier of the source DB instance from which to restore.

Constraints:

" + "documentation":"

The identifier of the source DB instance from which to restore.

Constraints:

" }, "TargetDBInstanceIdentifier":{ "shape":"String", @@ -8204,6 +8269,10 @@ "EnableIAMDatabaseAuthentication":{ "shape":"BooleanOptional", "documentation":"

True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.

You can enable IAM database authentication for the following database engines

Default: false

" + }, + "EnableCloudwatchLogsExports":{ + "shape":"LogTypeList", + "documentation":"

The list of logs that the restored DB instance is to export to CloudWatch Logs.

" } }, "documentation":"

" diff --git a/botocore/data/rekognition/2016-06-27/paginators-1.json b/botocore/data/rekognition/2016-06-27/paginators-1.json index ba06a2cf..ea009a1b 100644 --- a/botocore/data/rekognition/2016-06-27/paginators-1.json +++ b/botocore/data/rekognition/2016-06-27/paginators-1.json @@ -17,6 +17,12 @@ "non_aggregate_keys": [ "FaceModelVersion" ] + }, + "ListStreamProcessors": { + "result_key": "StreamProcessors", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/botocore/data/resource-groups/2017-11-27/paginators-1.json b/botocore/data/resource-groups/2017-11-27/paginators-1.json index ea142457..a0ddf9b4 100644 --- a/botocore/data/resource-groups/2017-11-27/paginators-1.json +++ b/botocore/data/resource-groups/2017-11-27/paginators-1.json @@ -1,3 +1,22 @@ { - "pagination": {} + "pagination": { + "ListGroups": { + "result_key": "Groups", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "SearchResources": { + "result_key": "ResourceIdentifiers", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListGroupResources": { + "result_key": "ResourceIdentifiers", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + } + } } diff --git a/botocore/data/sagemaker/2017-07-24/paginators-1.json b/botocore/data/sagemaker/2017-07-24/paginators-1.json index ea142457..f6d33286 100644 --- a/botocore/data/sagemaker/2017-07-24/paginators-1.json +++ b/botocore/data/sagemaker/2017-07-24/paginators-1.json @@ -1,3 +1,40 @@ { - "pagination": {} + "pagination": { + "ListTrainingJobs": { + "result_key": "TrainingJobSummaries", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListEndpoints": { + "result_key": "Endpoints", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListEndpointConfigs": { + "result_key": "EndpointConfigs", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListNotebookInstances": { + "result_key": "NotebookInstances", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListTags": { + "result_key": "Tags", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListModels": { + "result_key": "Models", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + } + } } diff --git a/botocore/data/sagemaker/2017-07-24/service-2.json b/botocore/data/sagemaker/2017-07-24/service-2.json index 8755c75b..7842c9eb 100644 --- a/botocore/data/sagemaker/2017-07-24/service-2.json +++ b/botocore/data/sagemaker/2017-07-24/service-2.json @@ -315,7 +315,7 @@ "errors":[ {"shape":"ResourceLimitExceeded"} ], - "documentation":"

Updates variant weight, capacity, or both of one or more variants associated with an endpoint. This operation updates weight, capacity, or both for the previously provisioned endpoint. When it receives the request, Amazon SageMaker sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.

" + "documentation":"

Updates variant weight of one or more variants associated with an existing endpoint, or capacity of one variant associated with an existing endpoint. When it receives the request, Amazon SageMaker sets the endpoint status to Updating. After updating the endpoint, it sets the status to InService. To check the status of an endpoint, use the DescribeEndpoint API.
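A hedged example of the call described above (the endpoint name, variant names, weights, and instance count are placeholders):

import boto3

sm = boto3.client('sagemaker')

sm.update_endpoint_weights_and_capacities(
    EndpointName='my-endpoint',
    DesiredWeightsAndCapacities=[
        {'VariantName': 'variant-a', 'DesiredWeight': 0.7},
        {'VariantName': 'variant-b', 'DesiredWeight': 0.3, 'DesiredInstanceCount': 2},
    ],
)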

" }, "UpdateNotebookInstance":{ "name":"UpdateNotebookInstance", @@ -473,6 +473,10 @@ "Tags":{ "shape":"TagList", "documentation":"

An array of key-value pairs. For more information, see Using Cost Allocation Tags in the AWS Billing and Cost Management User Guide.

" + }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The Amazon Resource Name (ARN) of an AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance that hosts the endpoint.
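As a sketch, the new key is passed alongside the production variants when the endpoint configuration is created (all names and the key ARN are placeholders):

import boto3

sm = boto3.client('sagemaker')

sm.create_endpoint_config(
    EndpointConfigName='my-endpoint-config',
    ProductionVariants=[{
        'VariantName': 'variant-a',
        'ModelName': 'my-model',
        'InitialInstanceCount': 1,
        'InstanceType': 'ml.m4.xlarge',
    }],
    KmsKeyId='arn:aws:kms:us-east-1:123456789012:key/example-key-id',  # placeholder ARN
)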

" } } }, @@ -788,6 +792,10 @@ "shape":"ProductionVariantList", "documentation":"

An array of ProductionVariant objects, one for each model that you want to host at this endpoint.

" }, + "KmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The AWS KMS key ID that Amazon SageMaker uses to encrypt data when storing it on the ML storage volume attached to the instance.

" + }, "CreationTime":{ "shape":"Timestamp", "documentation":"

A timestamp that shows when the endpoint configuration was created.

" @@ -1860,6 +1868,10 @@ "VolumeSizeInGB":{ "shape":"VolumeSizeInGB", "documentation":"

The size of the ML storage volume that you want to provision.

ML storage volumes store model artifacts and incremental states. Training algorithms might also use the ML storage volume for scratch space. If you want to store the training data in the ML storage volume, choose File as the TrainingInputMode in the algorithm specification.

You must specify sufficient ML storage for your scenario.

Amazon SageMaker supports only the General Purpose SSD (gp2) ML storage volume type.

" + }, + "VolumeKmsKeyId":{ + "shape":"KmsKeyId", + "documentation":"

The Amazon Resource Name (ARN) of an AWS Key Management Service key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the training job.

" } }, "documentation":"

Describes the resources, including ML compute instances and ML storage volumes, to use for model training.

" diff --git a/botocore/data/servicecatalog/2015-12-10/paginators-1.json b/botocore/data/servicecatalog/2015-12-10/paginators-1.json index ea142457..bb399615 100644 --- a/botocore/data/servicecatalog/2015-12-10/paginators-1.json +++ b/botocore/data/servicecatalog/2015-12-10/paginators-1.json @@ -1,3 +1,58 @@ { - "pagination": {} + "pagination": { + "SearchProductsAsAdmin": { + "result_key": "ProductViewDetails", + "output_token": "NextPageToken", + "input_token": "PageToken", + "limit_key": "PageSize" + }, + "ListAcceptedPortfolioShares": { + "result_key": "PortfolioDetails", + "output_token": "NextPageToken", + "input_token": "PageToken", + "limit_key": "PageSize" + }, + "ListPortfolios": { + "result_key": "PortfolioDetails", + "output_token": "NextPageToken", + "input_token": "PageToken", + "limit_key": "PageSize" + }, + "ListConstraintsForPortfolio": { + "result_key": "ConstraintDetails", + "output_token": "NextPageToken", + "input_token": "PageToken", + "limit_key": "PageSize" + }, + "ListLaunchPaths": { + "result_key": "LaunchPathSummaries", + "output_token": "NextPageToken", + "input_token": "PageToken", + "limit_key": "PageSize" + }, + "ListTagOptions": { + "result_key": "TagOptionDetails", + "output_token": "PageToken", + "input_token": "PageToken", + "limit_key": "PageSize" + }, + "ListPortfoliosForProduct": { + "result_key": "PortfolioDetails", + "output_token": "NextPageToken", + "input_token": "PageToken", + "limit_key": "PageSize" + }, + "ListPrincipalsForPortfolio": { + "result_key": "Principals", + "output_token": "NextPageToken", + "input_token": "PageToken", + "limit_key": "PageSize" + }, + "ListResourcesForTagOption": { + "result_key": "ResourceDetails", + "output_token": "PageToken", + "input_token": "PageToken", + "limit_key": "PageSize" + } + } } diff --git a/botocore/data/servicediscovery/2017-03-14/paginators-1.json b/botocore/data/servicediscovery/2017-03-14/paginators-1.json index ea142457..f58df70e 100644 --- a/botocore/data/servicediscovery/2017-03-14/paginators-1.json +++ b/botocore/data/servicediscovery/2017-03-14/paginators-1.json @@ -1,3 +1,28 @@ { - "pagination": {} + "pagination": { + "ListServices": { + "result_key": "Services", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListInstances": { + "result_key": "Instances", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListNamespaces": { + "result_key": "Namespaces", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListOperations": { + "result_key": "Operations", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + } + } } diff --git a/botocore/data/ses/2010-12-01/paginators-1.json b/botocore/data/ses/2010-12-01/paginators-1.json index af0e93d1..1d61869e 100644 --- a/botocore/data/ses/2010-12-01/paginators-1.json +++ b/botocore/data/ses/2010-12-01/paginators-1.json @@ -5,6 +5,12 @@ "output_token": "NextToken", "limit_key": "MaxItems", "result_key": "Identities" + }, + "ListCustomVerificationEmailTemplates": { + "result_key": "CustomVerificationEmailTemplates", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/botocore/data/ssm/2014-11-06/paginators-1.json b/botocore/data/ssm/2014-11-06/paginators-1.json index e3b92f76..483ca416 100644 --- a/botocore/data/ssm/2014-11-06/paginators-1.json +++ b/botocore/data/ssm/2014-11-06/paginators-1.json @@ -41,6 +41,24 @@ 
"output_token": "NextToken", "limit_key": "MaxResults", "result_key": "Parameters" + }, + "GetParametersByPath": { + "input_token": "NextToken", + "output_token": "NextToken", + "limit_key": "MaxResults", + "result_key": "Parameters" + }, + "GetParameterHistory": { + "result_key": "Parameters", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "GetParametersByPath": { + "result_key": "Parameters", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" } } } diff --git a/botocore/data/transcribe/2017-10-26/paginators-1.json b/botocore/data/transcribe/2017-10-26/paginators-1.json new file mode 100644 index 00000000..ea142457 --- /dev/null +++ b/botocore/data/transcribe/2017-10-26/paginators-1.json @@ -0,0 +1,3 @@ +{ + "pagination": {} +} diff --git a/botocore/data/transcribe/2017-10-26/service-2.json b/botocore/data/transcribe/2017-10-26/service-2.json new file mode 100644 index 00000000..c9377447 --- /dev/null +++ b/botocore/data/transcribe/2017-10-26/service-2.json @@ -0,0 +1,353 @@ +{ + "version":"2.0", + "metadata":{ + "apiVersion":"2017-10-26", + "endpointPrefix":"transcribe", + "jsonVersion":"1.1", + "protocol":"json", + "serviceFullName":"Amazon Transcribe Service", + "signatureVersion":"v4", + "signingName":"transcribe", + "targetPrefix":"Transcribe", + "uid":"transcribe-2017-10-26" + }, + "operations":{ + "GetTranscriptionJob":{ + "name":"GetTranscriptionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetTranscriptionJobRequest"}, + "output":{"shape":"GetTranscriptionJobResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"NotFoundException"} + ], + "documentation":"

Returns information about a transcription job. To see the status of the job, check the TranscriptionJobStatus field. If the status is COMPLETED, the job is finished and you can find the results at the location specified in the TranscriptFileUri field.

" + }, + "ListTranscriptionJobs":{ + "name":"ListTranscriptionJobs", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"ListTranscriptionJobsRequest"}, + "output":{"shape":"ListTranscriptionJobsResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"} + ], + "documentation":"

Lists transcription jobs with the specified status.

" + }, + "StartTranscriptionJob":{ + "name":"StartTranscriptionJob", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"StartTranscriptionJobRequest"}, + "output":{"shape":"StartTranscriptionJobResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"ConflictException"} + ], + "documentation":"

Starts an asynchronous job to transcribe speech to text.
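Putting the three operations together, a rough start-and-poll workflow against the new client might look like the following (the job name and media URI are placeholders; field names match the shapes defined below):

import time

import boto3

transcribe = boto3.client('transcribe')

transcribe.start_transcription_job(
    TranscriptionJobName='example-job',
    LanguageCode='en-US',
    MediaFormat='mp4',
    Media={'MediaFileUri': 'https://s3-us-west-2.amazonaws.com/examplebucket/example.mp4'},
)

while True:
    job = transcribe.get_transcription_job(
        TranscriptionJobName='example-job')['TranscriptionJob']
    if job['TranscriptionJobStatus'] in ('COMPLETED', 'FAILED'):
        break
    time.sleep(30)

if job['TranscriptionJobStatus'] == 'COMPLETED':
    print(job['Transcript']['TranscriptFileUri'])
else:
    print(job.get('FailureReason'))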

" + } + }, + "shapes":{ + "BadRequestException":{ + "type":"structure", + "members":{ + "Message":{"shape":"FailureReason"} + }, + "documentation":"

There is a problem with one of the input fields. Check the S3 bucket name, make sure that the job name is not a duplicate, and confirm that you are using the correct file format. Then resend your request.

", + "exception":true + }, + "ConflictException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

The TranscriptionJobName field is a duplicate of a previously entered job name. Resend your request with a different name.

", + "exception":true + }, + "DateTime":{"type":"timestamp"}, + "FailureReason":{"type":"string"}, + "GetTranscriptionJobRequest":{ + "type":"structure", + "required":["TranscriptionJobName"], + "members":{ + "TranscriptionJobName":{ + "shape":"TranscriptionJobName", + "documentation":"

The name of the job.

" + } + } + }, + "GetTranscriptionJobResponse":{ + "type":"structure", + "members":{ + "TranscriptionJob":{ + "shape":"TranscriptionJob", + "documentation":"

An object that contains the results of the transcription job.

" + } + } + }, + "InternalFailureException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

There was an internal error. Check the error message and try your request again.

", + "exception":true, + "fault":true + }, + "LanguageCode":{ + "type":"string", + "enum":[ + "en-US", + "es-US" + ] + }, + "LimitExceededException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

Either you have sent too many requests or your input file is longer than 2 hours. Wait before you resend your request, or use a smaller file and resend the request.

", + "exception":true + }, + "ListTranscriptionJobsRequest":{ + "type":"structure", + "required":["Status"], + "members":{ + "Status":{ + "shape":"TranscriptionJobStatus", + "documentation":"

When specified, returns only transcription jobs with the specified status.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

If the result of the previous request to ListTranscriptionJobs was truncated, include the NextToken to fetch the next set of jobs.

" + }, + "MaxResults":{ + "shape":"MaxResults", + "documentation":"

The maximum number of jobs to return in the response.

" + } + } + }, + "ListTranscriptionJobsResponse":{ + "type":"structure", + "members":{ + "Status":{ + "shape":"TranscriptionJobStatus", + "documentation":"

The requested status of the jobs returned.

" + }, + "NextToken":{ + "shape":"NextToken", + "documentation":"

The ListTranscriptionJobs operation returns a page of jobs at a time. The size of the page is set by the MaxResults parameter. If there are more jobs in the list than the page size, Amazon Transcribe returns a NextToken. Include the token in the next request to the ListTranscriptionJobs operation to return the next page of jobs.
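Because the paginators file added for this service is still empty, callers would page through results by hand with NextToken and MaxResults, roughly like this (the status value is one of the enum values defined below):

import boto3

transcribe = boto3.client('transcribe')

kwargs = {'Status': 'COMPLETED', 'MaxResults': 100}
while True:
    page = transcribe.list_transcription_jobs(**kwargs)
    for summary in page.get('TranscriptionJobSummaries', []):
        print(summary['TranscriptionJobName'], summary['TranscriptionJobStatus'])
    if 'NextToken' not in page:
        break
    kwargs['NextToken'] = page['NextToken']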

" + }, + "TranscriptionJobSummaries":{ + "shape":"TranscriptionJobSummaries", + "documentation":"

A list of objects containing summary information for a transcription job.

" + } + } + }, + "MaxResults":{ + "type":"integer", + "max":100, + "min":1 + }, + "Media":{ + "type":"structure", + "members":{ + "MediaFileUri":{ + "shape":"Uri", + "documentation":"

The S3 location of the input media file. The general form is:

https://<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>

For example:

https://s3-us-west-2.amazonaws.com/examplebucket/example.mp4

https://s3-us-west-2.amazonaws.com/examplebucket/mediadocs/example.mp4

" + } + }, + "documentation":"

Describes the input media file in a transcription request.

" + }, + "MediaFormat":{ + "type":"string", + "enum":[ + "mp3", + "mp4", + "wav", + "flac" + ] + }, + "MediaSampleRateHertz":{ + "type":"integer", + "max":48000, + "min":8000 + }, + "NextToken":{ + "type":"string", + "max":8192 + }, + "NotFoundException":{ + "type":"structure", + "members":{ + "Message":{"shape":"String"} + }, + "documentation":"

We can't find the requested job. Check the job name and try your request again.

", + "exception":true + }, + "StartTranscriptionJobRequest":{ + "type":"structure", + "required":[ + "TranscriptionJobName", + "LanguageCode", + "MediaFormat", + "Media" + ], + "members":{ + "TranscriptionJobName":{ + "shape":"TranscriptionJobName", + "documentation":"

The name of the job. The name must be unique within an AWS account.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code for the language used in the input media file.

" + }, + "MediaSampleRateHertz":{ + "shape":"MediaSampleRateHertz", + "documentation":"

The sample rate, in Hertz, of the audio track in the input media file.

" + }, + "MediaFormat":{ + "shape":"MediaFormat", + "documentation":"

The format of the input media file.

" + }, + "Media":{ + "shape":"Media", + "documentation":"

An object that describes the input media for a transcription job.

" + } + } + }, + "StartTranscriptionJobResponse":{ + "type":"structure", + "members":{ + "TranscriptionJob":{ + "shape":"TranscriptionJob", + "documentation":"

An object containing details of the asynchronous transcription job.

" + } + } + }, + "String":{"type":"string"}, + "Transcript":{ + "type":"structure", + "members":{ + "TranscriptFileUri":{ + "shape":"Uri", + "documentation":"

The S3 location where the transcription result is stored. The general form of this Uri is:

https://<aws-region>.amazonaws.com/<bucket-name>/<keyprefix>/<objectkey>

For example:

https://s3-us-west-2.amazonaws.com/examplebucket/example.json

https://s3-us-west-2.amazonaws.com/examplebucket/mediadocs/example.json

" + } + }, + "documentation":"

Describes the output of a transcription job.

" + }, + "TranscriptionJob":{ + "type":"structure", + "members":{ + "TranscriptionJobName":{ + "shape":"TranscriptionJobName", + "documentation":"

A name to identify the transcription job.

" + }, + "TranscriptionJobStatus":{ + "shape":"TranscriptionJobStatus", + "documentation":"

The status of the transcription job.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code for the input speech.

" + }, + "MediaSampleRateHertz":{ + "shape":"MediaSampleRateHertz", + "documentation":"

The sample rate, in Hertz, of the audio track in the input media file.

" + }, + "MediaFormat":{ + "shape":"MediaFormat", + "documentation":"

The format of the input media file.

" + }, + "Media":{ + "shape":"Media", + "documentation":"

An object that describes the input media for a transcription job.

" + }, + "Transcript":{ + "shape":"Transcript", + "documentation":"

An object that describes the output of the transcription job.

" + }, + "CreationTime":{ + "shape":"DateTime", + "documentation":"

Timestamp of the date and time that the job was created.

" + }, + "CompletionTime":{ + "shape":"DateTime", + "documentation":"

Timestamp of the date and time that the job completed.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the TranscriptionJobStatus field is FAILED, this field contains information about why the job failed.

" + } + }, + "documentation":"

Describes an asynchronous transcription job that was created with the StartTranscriptionJob operation.

" + }, + "TranscriptionJobName":{ + "type":"string", + "max":200, + "min":1, + "pattern":"^[0-9a-zA-Z._-]+" + }, + "TranscriptionJobStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "FAILED", + "COMPLETED" + ] + }, + "TranscriptionJobSummaries":{ + "type":"list", + "member":{"shape":"TranscriptionJobSummary"} + }, + "TranscriptionJobSummary":{ + "type":"structure", + "members":{ + "TranscriptionJobName":{ + "shape":"TranscriptionJobName", + "documentation":"

The name assigned to the transcription job when it was created.

" + }, + "CreationTime":{ + "shape":"DateTime", + "documentation":"

Timestamp of the date and time that the job was created.

" + }, + "CompletionTime":{ + "shape":"DateTime", + "documentation":"

Timestamp of the date and time that the job completed.

" + }, + "LanguageCode":{ + "shape":"LanguageCode", + "documentation":"

The language code for the input speech.

" + }, + "TranscriptionJobStatus":{ + "shape":"TranscriptionJobStatus", + "documentation":"

The status of the transcription job. When the status is COMPLETED, use the GetTranscriptionJob operation to get the results of the transcription.

" + }, + "FailureReason":{ + "shape":"FailureReason", + "documentation":"

If the TranscriptionJobStatus field is FAILED, this field contains a description of the error.

" + } + }, + "documentation":"

Provides a summary of information about a transcription job.

" + }, + "Uri":{ + "type":"string", + "max":2000, + "min":1 + } + }, + "documentation":"

Operations and objects for transcribing speech to text.

" +} diff --git a/botocore/data/workmail/2017-10-01/paginators-1.json b/botocore/data/workmail/2017-10-01/paginators-1.json index ea142457..d5de8253 100644 --- a/botocore/data/workmail/2017-10-01/paginators-1.json +++ b/botocore/data/workmail/2017-10-01/paginators-1.json @@ -1,3 +1,40 @@ { - "pagination": {} + "pagination": { + "ListUsers": { + "result_key": "Users", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListGroupMembers": { + "result_key": "Members", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListOrganizations": { + "result_key": "OrganizationSummaries", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListGroups": { + "result_key": "Groups", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListResources": { + "result_key": "Resources", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + }, + "ListAliases": { + "result_key": "Aliases", + "output_token": "NextToken", + "input_token": "NextToken", + "limit_key": "MaxResults" + } + } } diff --git a/botocore/docs/bcdoc/style.py b/botocore/docs/bcdoc/style.py index 1a448e64..f385c705 100644 --- a/botocore/docs/bcdoc/style.py +++ b/botocore/docs/bcdoc/style.py @@ -402,3 +402,9 @@ class ReSTStyle(BaseStyle): self.doc.write('`%s <%s>`_' % (title, link)) else: self.doc.write(title) + + def internal_link(self, title, page): + if self.doc.target == 'html': + self.doc.write(':doc:`%s <%s>`' % (title, page)) + else: + self.doc.write(title) diff --git a/botocore/handlers.py b/botocore/handlers.py index 65f2eb8b..a5e2c469 100644 --- a/botocore/handlers.py +++ b/botocore/handlers.py @@ -25,7 +25,7 @@ import warnings import uuid from botocore.compat import unquote, json, six, unquote_str, \ - ensure_bytes, get_md5, MD5_AVAILABLE + ensure_bytes, get_md5, MD5_AVAILABLE, OrderedDict from botocore.docs.utils import AutoPopulatedParam from botocore.docs.utils import HideParamFromOperations from botocore.docs.utils import AppendParamDocumentation @@ -171,7 +171,8 @@ def decode_quoted_jsondoc(value): def json_decode_template_body(parsed, **kwargs): if 'TemplateBody' in parsed: try: - value = json.loads(parsed['TemplateBody']) + value = json.loads( + parsed['TemplateBody'], object_pairs_hook=OrderedDict) parsed['TemplateBody'] = value except (ValueError, TypeError): logger.debug('error loading JSON', exc_info=True) diff --git a/botocore/signers.py b/botocore/signers.py index 64a9411d..79ac7315 100644 --- a/botocore/signers.py +++ b/botocore/signers.py @@ -131,7 +131,9 @@ class RequestSigner(object): 'before-sign.{0}.{1}'.format(self._service_name, operation_name), request=request, signing_name=signing_name, region_name=self._region_name, - signature_version=signature_version, request_signer=self) + signature_version=signature_version, request_signer=self, + operation_name=operation_name + ) if signature_version != botocore.UNSIGNED: kwargs = { @@ -551,8 +553,13 @@ def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600, """ client_method = ClientMethod params = Params + if params is None: + params = {} expires_in = ExpiresIn http_method = HttpMethod + context = { + 'is_presign_request': True + } request_signer = self._request_signer serializer = self._serializer @@ -565,6 +572,8 @@ def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600, operation_model = 
self.meta.service_model.operation_model( operation_name) + params = self._emit_api_params(params, operation_model, context) + # Create a request dict based on the params to serialize. request_dict = serializer.serialize_to_request( params, operation_model) @@ -575,8 +584,7 @@ def generate_presigned_url(self, ClientMethod, Params=None, ExpiresIn=3600, # Prepare the request dict by including the client's endpoint url. prepare_request_dict( - request_dict, endpoint_url=self.meta.endpoint_url - ) + request_dict, endpoint_url=self.meta.endpoint_url, context=context) # Generate the presigned url. return request_signer.generate_presigned_url( diff --git a/botocore/stub.py b/botocore/stub.py index 7b7b386a..68f5a167 100644 --- a/botocore/stub.py +++ b/botocore/stub.py @@ -331,12 +331,14 @@ class Stubber(object): operation_name=model.name, reason='Operation mismatch: found response for %s.' % name) - def _get_response_handler(self, model, params, **kwargs): + def _get_response_handler(self, model, params, context, **kwargs): self._assert_expected_call_order(model, params) # Pop off the entire response once everything has been validated return self._queue.popleft()['response'] - def _assert_expected_params(self, model, params, **kwargs): + def _assert_expected_params(self, model, params, context, **kwargs): + if self._should_not_stub(context): + return self._assert_expected_call_order(model, params) expected_params = self._queue[0]['expected_params'] if expected_params is None: @@ -357,6 +359,13 @@ class Stubber(object): reason='Expected parameters:\n%s,\nbut received:\n%s' % ( pformat(expected_params), pformat(params))) + def _should_not_stub(self, context): + # Do not include presign requests when processing stubbed client calls + # as a presign request will never have an HTTP request sent over the + # wire for it and therefore not receive a response back. + if context and context.get('is_presign_request'): + return True + def _validate_response(self, operation_name, service_response): service_model = self.client.meta.service_model operation_model = service_model.operation_model(operation_name) diff --git a/docs/source/conf.py b/docs/source/conf.py index 9f899b40..5034be62 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -54,7 +54,7 @@ copyright = u'2013, Mitch Garnaat' # The short X.Y version. version = '1.8.' # The full version, including alpha/beta/rc tags. -release = '1.8.28' +release = '1.8.36' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/tests/functional/test_credentials.py b/tests/functional/test_credentials.py index 37ea1404..ef9908d0 100644 --- a/tests/functional/test_credentials.py +++ b/tests/functional/test_credentials.py @@ -321,6 +321,20 @@ class TestAssumeRole(BaseEnvVar): session, _ = self.create_session(profile='A') session.get_credentials() + def test_misconfigured_source_profile(self): + config = ( + '[profile A]\n' + 'role_arn = arn:aws:iam::123456789:role/RoleA\n' + 'source_profile = B\n' + '[profile B]\n' + 'credential_process = command\n' + ) + self.write_config(config) + + with self.assertRaises(InvalidConfigError): + session, _ = self.create_session(profile='A') + session.get_credentials() + def test_recursive_assume_role(self): config = ( '[profile A]\n' diff --git a/tests/functional/test_endpoints.py b/tests/functional/test_endpoints.py index 025d21ea..804f47b8 100644 --- a/tests/functional/test_endpoints.py +++ b/tests/functional/test_endpoints.py @@ -20,6 +20,7 @@ from botocore.session import get_session SERVICE_RENAMES = { 'application-autoscaling': 'autoscaling', 'appstream': 'appstream2', + 'autoscaling-plans': 'autoscaling', 'dynamodbstreams': 'streams.dynamodb', 'cloudwatch': 'monitoring', 'efs': 'elasticfilesystem', diff --git a/tests/functional/test_s3.py b/tests/functional/test_s3.py index d0b2e2b2..84ac25b9 100644 --- a/tests/functional/test_s3.py +++ b/tests/functional/test_s3.py @@ -350,6 +350,49 @@ class TestGeneratePresigned(BaseS3OperationTest): self.assertEqual( 'https://s3.us-east-2.amazonaws.com/', url) + def test_presign_url_with_ssec(self): + config = Config(signature_version='s3') + client = self.session.create_client('s3', 'us-east-1', config=config) + url = client.generate_presigned_url( + ClientMethod='get_object', + Params={ + 'Bucket': 'mybucket', + 'Key': 'mykey', + 'SSECustomerKey': 'a' * 32, + 'SSECustomerAlgorithm': 'AES256' + } + ) + # The md5 of the sse-c key will be injected when parameters are + # built so it should show up in the presigned url as well. 
+ self.assertIn( + 'x-amz-server-side-encryption-customer-key-md5=', url + ) + + def test_presign_s3_accelerate(self): + config = Config(signature_version=botocore.UNSIGNED, + s3={'use_accelerate_endpoint': True}) + client = self.session.create_client('s3', 'us-east-1', config=config) + url = client.generate_presigned_url( + ClientMethod='get_object', + Params={'Bucket': 'mybucket', 'Key': 'mykey'} + ) + # The url should be the accelerate endpoint + self.assertEqual( + 'https://mybucket.s3-accelerate.amazonaws.com/mykey', url) + + def test_presign_post_s3_accelerate(self): + config = Config(signature_version=botocore.UNSIGNED, + s3={'use_accelerate_endpoint': True}) + client = self.session.create_client('s3', 'us-east-1', config=config) + parts = client.generate_presigned_post( + Bucket='mybucket', Key='mykey') + # The url should be the accelerate endpoint + expected = { + 'fields': {'key': 'mykey'}, + 'url': 'https://mybucket.s3-accelerate.amazonaws.com/' + } + self.assertEqual(parts, expected) + def test_correct_url_used_for_s3(): # Test that given various sets of config options and bucket names, diff --git a/tests/functional/test_stub.py b/tests/functional/test_stub.py index 48a93ac1..f44b722b 100644 --- a/tests/functional/test_stub.py +++ b/tests/functional/test_stub.py @@ -28,9 +28,12 @@ import botocore.translate class TestStubber(unittest.TestCase): def setUp(self): session = botocore.session.get_session() - config = botocore.config.Config(signature_version=botocore.UNSIGNED) - self.client = session.create_client('s3', config=config) - + config = botocore.config.Config( + signature_version=botocore.UNSIGNED, + s3={'addressing_style': 'path'} + ) + self.client = session.create_client( + 's3', region_name='us-east-1', config=config) self.stubber = Stubber(self.client) def test_stubber_returns_response(self): @@ -270,3 +273,43 @@ class TestStubber(unittest.TestCase): except StubAssertionError: self.fail( "Stubber inappropriately raised error for same parameters.") + + def test_no_stub_for_presign_url(self): + try: + with self.stubber: + url = self.client.generate_presigned_url( + ClientMethod='get_object', + Params={ + 'Bucket': 'mybucket', + 'Key': 'mykey' + } + ) + self.assertEqual( + url, 'https://s3.amazonaws.com/mybucket/mykey') + except StubResponseError: + self.fail( + 'Stubbed responses should not be required for generating ' + 'presigned requests' + ) + + def test_can_stub_with_presign_url_mixed_in(self): + desired_response = {} + expected_params = { + 'Bucket': 'mybucket', + 'Prefix': 'myprefix', + } + self.stubber.add_response( + 'list_objects', desired_response, expected_params) + with self.stubber: + url = self.client.generate_presigned_url( + ClientMethod='get_object', + Params={ + 'Bucket': 'myotherbucket', + 'Key': 'myotherkey' + } + ) + self.assertEqual( + url, 'https://s3.amazonaws.com/myotherbucket/myotherkey') + actual_response = self.client.list_objects(**expected_params) + self.assertEqual(desired_response, actual_response) + self.stubber.assert_no_pending_responses() diff --git a/tests/unit/docs/bcdoc/test_style.py b/tests/unit/docs/bcdoc/test_style.py index 273106fd..4355b7cb 100644 --- a/tests/unit/docs/bcdoc/test_style.py +++ b/tests/unit/docs/bcdoc/test_style.py @@ -317,9 +317,23 @@ class TestStyle(unittest.TestCase): self.assertEqual(style.doc.getvalue(), six.b('`MyLink `_')) - def test_external_link_in_man_page(self): style = ReSTStyle(ReSTDocument()) style.doc.target = 'man' style.external_link('MyLink', 'http://example.com/foo') 
self.assertEqual(style.doc.getvalue(), six.b('MyLink')) + + def test_internal_link(self): + style = ReSTStyle(ReSTDocument()) + style.doc.target = 'html' + style.internal_link('MyLink', '/index') + self.assertEqual( + style.doc.getvalue(), + six.b(':doc:`MyLink `') + ) + + def test_internal_link_in_man_page(self): + style = ReSTStyle(ReSTDocument()) + style.doc.target = 'man' + style.internal_link('MyLink', '/index') + self.assertEqual(style.doc.getvalue(), six.b('MyLink')) diff --git a/tests/unit/test_handlers.py b/tests/unit/test_handlers.py index d6164cef..8f461917 100644 --- a/tests/unit/test_handlers.py +++ b/tests/unit/test_handlers.py @@ -17,9 +17,11 @@ import base64 import mock import copy import os +import json import botocore import botocore.session +from botocore.compat import OrderedDict from botocore.exceptions import ParamValidationError, MD5UnavailableError from botocore.exceptions import AliasConflictParameterError from botocore.awsrequest import AWSRequest @@ -575,6 +577,24 @@ class TestHandlers(BaseSessionTest): # an error response. self.assertEqual(original, handler_input) + def test_does_decode_template_body_in_order(self): + expected_ordering = OrderedDict([ + ('TemplateVersion', 1.0), + ('APropertyOfSomeKind', 'a value'), + ('list', [1, 2, 3]), + ('nested', OrderedDict([('key', 'value'), + ('foo', 'bar')])) + ]) + template_string = json.dumps(expected_ordering) + parsed_response = {'TemplateBody': template_string} + + handlers.json_decode_template_body(parsed=parsed_response) + result = parsed_response['TemplateBody'] + + self.assertTrue(isinstance(result, OrderedDict)) + for element, expected_element in zip(result, expected_ordering): + self.assertEqual(element, expected_element) + def test_decode_json_policy(self): parsed = { 'Document': '{"foo": "foobarbaz"}', @@ -710,8 +730,6 @@ class TestHandlers(BaseSessionTest): handlers.switch_host_with_param(request, 'PredictEndpoint') self.assertEqual(request.url, new_endpoint) - - def test_invalid_char_in_bucket_raises_exception(self): params = { 'Bucket': 'bad/bucket/name', diff --git a/tests/unit/test_signers.py b/tests/unit/test_signers.py index 3a4db0dc..c0ad4d9a 100644 --- a/tests/unit/test_signers.py +++ b/tests/unit/test_signers.py @@ -22,6 +22,7 @@ import botocore.auth from botocore.config import Config from botocore.credentials import Credentials from botocore.credentials import ReadOnlyCredentials +from botocore.hooks import HierarchicalEmitter from botocore.exceptions import NoRegionError, UnknownSignatureVersionError from botocore.exceptions import UnknownClientMethodError, ParamValidationError from botocore.exceptions import UnsupportedSignatureVersionError @@ -128,7 +129,7 @@ class TestSigner(BaseSignerTest): 'before-sign.service_name.operation_name', request=mock.ANY, signing_name='signing_name', region_name='region_name', signature_version='v4', - request_signer=self.signer) + request_signer=self.signer, operation_name='operation_name') def test_disable_signing(self): # Returning botocore.UNSIGNED from choose-signer disables signing! @@ -727,7 +728,11 @@ class TestGenerateUrl(unittest.TestCase): 'query_string': {}, 'url_path': u'/mybucket/mykey', 'method': u'GET', - 'context': {}} + # mock.ANY is used because client parameter related events + # inject values into the context. So using the context's exact + # value for these tests will be a maintenance burden if + # anymore customizations are added that inject into the context. 
+ 'context': mock.ANY} self.generate_url_mock.assert_called_with( request_dict=ref_request_dict, expires_in=3600, operation_name='GetObject') @@ -748,7 +753,7 @@ class TestGenerateUrl(unittest.TestCase): 'query_string': {u'response-content-disposition': disposition}, 'url_path': u'/mybucket/mykey', 'method': u'GET', - 'context': {}} + 'context': mock.ANY} self.generate_url_mock.assert_called_with( request_dict=ref_request_dict, expires_in=3600, operation_name='GetObject') @@ -772,7 +777,7 @@ class TestGenerateUrl(unittest.TestCase): 'query_string': {}, 'url_path': u'/mybucket/mykey', 'method': u'GET', - 'context': {}} + 'context': mock.ANY} self.generate_url_mock.assert_called_with( request_dict=ref_request_dict, expires_in=20, operation_name='GetObject') @@ -788,11 +793,44 @@ class TestGenerateUrl(unittest.TestCase): 'query_string': {}, 'url_path': u'/mybucket/mykey', 'method': u'PUT', - 'context': {}} + 'context': mock.ANY} self.generate_url_mock.assert_called_with( request_dict=ref_request_dict, expires_in=3600, operation_name='GetObject') + def test_generate_presigned_url_emits_param_events(self): + emitter = mock.Mock(HierarchicalEmitter) + emitter.emit.return_value = [] + self.client.meta.events = emitter + self.client.generate_presigned_url( + 'get_object', Params={'Bucket': self.bucket, 'Key': self.key}) + events_emitted = [ + emit_call[0][0] for emit_call in emitter.emit.call_args_list + ] + self.assertEqual( + events_emitted, + [ + 'provide-client-params.s3.GetObject', + 'before-parameter-build.s3.GetObject' + ] + ) + + def test_generate_presign_url_emits_is_presign_in_context(self): + emitter = mock.Mock(HierarchicalEmitter) + emitter.emit.return_value = [] + self.client.meta.events = emitter + self.client.generate_presigned_url( + 'get_object', Params={'Bucket': self.bucket, 'Key': self.key}) + kwargs_emitted = [ + emit_call[1] for emit_call in emitter.emit.call_args_list + ] + for kwargs in kwargs_emitted: + self.assertTrue( + kwargs.get('context', {}).get('is_presign_request'), + 'The context did not have is_presign_request set to True for ' + 'the following kwargs emitted: %s' % kwargs + ) + class TestGeneratePresignedPost(unittest.TestCase): def setUp(self):