diff --git a/PKG-INFO b/PKG-INFO index cb68c695..97ba406a 100644 --- a/PKG-INFO +++ b/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.8.40 +Version: 1.8.48 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/PKG-INFO b/botocore.egg-info/PKG-INFO index cb68c695..97ba406a 100644 --- a/botocore.egg-info/PKG-INFO +++ b/botocore.egg-info/PKG-INFO @@ -1,6 +1,6 @@ Metadata-Version: 1.1 Name: botocore -Version: 1.8.40 +Version: 1.8.48 Summary: Low-level, data-driven core of boto 3. Home-page: https://github.com/boto/botocore Author: Amazon Web Services diff --git a/botocore.egg-info/SOURCES.txt b/botocore.egg-info/SOURCES.txt index 9f26a743..417cd9fe 100644 --- a/botocore.egg-info/SOURCES.txt +++ b/botocore.egg-info/SOURCES.txt @@ -409,6 +409,7 @@ botocore/data/sagemaker/2017-07-24/service-2.json botocore/data/sagemaker/2017-07-24/waiters-2.json botocore/data/sdb/2009-04-15/paginators-1.json botocore/data/sdb/2009-04-15/service-2.json +botocore/data/serverlessrepo/2017-09-08/paginators-1.json botocore/data/serverlessrepo/2017-09-08/service-2.json botocore/data/servicecatalog/2015-12-10/examples-1.json botocore/data/servicecatalog/2015-12-10/paginators-1.json diff --git a/botocore/__init__.py b/botocore/__init__.py index df1e0840..de13b218 100644 --- a/botocore/__init__.py +++ b/botocore/__init__.py @@ -16,7 +16,7 @@ import os import re import logging -__version__ = '1.8.40' +__version__ = '1.8.48' class NullHandler(logging.Handler): diff --git a/botocore/client.py b/botocore/client.py index f1fca3fd..18b0243f 100644 --- a/botocore/client.py +++ b/botocore/client.py @@ -132,7 +132,9 @@ class ClientCreator(object): return S3RegionRedirector(endpoint_bridge, client).register() self._set_s3_addressing_style( - endpoint_url, client.meta.config.s3, client.meta.events) + endpoint_url, client.meta.config.s3, client.meta.events, + client.meta.partition + ) # 
Enable accelerate if the configuration is set to to true or the # endpoint being used matches one of the accelerate endpoints. if self._is_s3_accelerate(endpoint_url, client.meta.config.s3): @@ -144,14 +146,15 @@ class ClientCreator(object): self._set_s3_presign_signature_version( client.meta, client_config, scoped_config) - def _set_s3_addressing_style(self, endpoint_url, s3_config, event_emitter): + def _set_s3_addressing_style(self, endpoint_url, s3_config, event_emitter, + partition): if s3_config is None: s3_config = {} addressing_style = self._get_s3_addressing_style( endpoint_url, s3_config) handler = self._get_s3_addressing_handler( - endpoint_url, s3_config, addressing_style) + endpoint_url, s3_config, addressing_style, partition) if handler is not None: event_emitter.register('before-sign.s3', handler) @@ -168,7 +171,7 @@ class ClientCreator(object): return configured_addressing_style def _get_s3_addressing_handler(self, endpoint_url, s3_config, - addressing_style): + addressing_style, partition): # If virtual host style was configured, use it regardless of whether # or not the bucket looks dns compatible. if addressing_style == 'virtual': @@ -188,7 +191,11 @@ class ClientCreator(object): # For dual stack mode, we need to clear the default endpoint url in # order to use the existing netloc if the bucket is dns compatible. - if s3_config.get('use_dualstack_endpoint', False): + # Also, the default_endpoint_url of 's3.amazonaws.com' only works + # if we're in the 'aws' partition. Anywhere else we should + # just use the existing netloc. 
+ if s3_config.get('use_dualstack_endpoint', False) or \ + partition != 'aws': return functools.partial( fix_s3_host, default_endpoint_url=None) diff --git a/botocore/data/appsync/2017-07-25/service-2.json b/botocore/data/appsync/2017-07-25/service-2.json index d68d7d09..0601454c 100644 --- a/botocore/data/appsync/2017-07-25/service-2.json +++ b/botocore/data/appsync/2017-07-25/service-2.json @@ -27,7 +27,8 @@ {"shape":"UnauthorizedException"}, {"shape":"LimitExceededException"}, {"shape":"InternalFailureException"}, - {"shape":"ApiKeyLimitExceededException"} + {"shape":"ApiKeyLimitExceededException"}, + {"shape":"ApiKeyValidityOutOfBoundsException"} ], "documentation":"
Creates a unique key that you can distribute to clients who are executing your API.
" }, @@ -377,6 +378,24 @@ ], "documentation":"Adds a new schema to your GraphQL API.
This operation is asynchronous. Use to determine when it has completed.
" }, + "UpdateApiKey":{ + "name":"UpdateApiKey", + "http":{ + "method":"POST", + "requestUri":"/v1/apis/{apiId}/apikeys/{id}" + }, + "input":{"shape":"UpdateApiKeyRequest"}, + "output":{"shape":"UpdateApiKeyResponse"}, + "errors":[ + {"shape":"BadRequestException"}, + {"shape":"NotFoundException"}, + {"shape":"UnauthorizedException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"ApiKeyValidityOutOfBoundsException"} + ], + "documentation":"Updates an API key.
" + }, "UpdateDataSource":{ "name":"UpdateDataSource", "http":{ @@ -459,7 +478,7 @@ }, "expires":{ "shape":"Long", - "documentation":"The time when the API key expires.
" + "documentation":"The time after which the API key expires. The date is represented as seconds since the epoch, rounded down to the nearest hour.
" } }, "documentation":"Describes an API key.
" @@ -473,6 +492,15 @@ "error":{"httpStatusCode":400}, "exception":true }, + "ApiKeyValidityOutOfBoundsException":{ + "type":"structure", + "members":{ + "message":{"shape":"String"} + }, + "documentation":"The API key expiration must be set to a value between 1 and 365 days.
", + "error":{"httpStatusCode":400}, + "exception":true + }, "ApiKeys":{ "type":"list", "member":{"shape":"ApiKey"} @@ -527,6 +555,10 @@ "description":{ "shape":"String", "documentation":"A description of the purpose of the API key.
" + }, + "expires":{ + "shape":"Long", + "documentation":"The time after which the API key expires. The date is represented as seconds since the epoch, rounded down to the nearest hour. The default value for this parameter is 7 days from creation time.
" } } }, @@ -600,7 +632,7 @@ ], "members":{ "name":{ - "shape":"ResourceName", + "shape":"String", "documentation":"A user-supplied name for the GraphqlApi
.
The mapping template to be used for requests.
A resolver use a request mapping template to convert a GraphQL expression into a format that a data source can understand. Mapping templates are written in Apache Velocity Template Language (VTL).
" + "documentation":"The mapping template to be used for requests.
A resolver uses a request mapping template to convert a GraphQL expression into a format that a data source can understand. Mapping templates are written in Apache Velocity Template Language (VTL).
" }, "responseMappingTemplate":{ "shape":"MappingTemplate", @@ -721,7 +753,7 @@ }, "type":{ "shape":"DataSourceType", - "documentation":"The type of the data source.
" + "documentation":"The type of the data source.
AMAZON_DYNAMODB: The data source is an Amazon DynamoDB table.
AMAZON_ELASTICSEARCH: The data source is an Amazon Elasticsearch Service domain.
AWS_LAMBDA: The data source is an AWS Lambda function.
NONE: There is no data source. This type is used when the required information can be computed on the fly without connecting to a back-end data source.
The ID for the GraphQL API
", + "location":"uri", + "locationName":"apiId" + }, + "id":{ + "shape":"String", + "documentation":"The API key ID.
", + "location":"uri", + "locationName":"id" + }, + "description":{ + "shape":"String", + "documentation":"A description of the purpose of the API key.
" + }, + "expires":{ + "shape":"Long", + "documentation":"The time after which the API key expires. The date is represented as seconds since the epoch.
" + } + } + }, + "UpdateApiKeyResponse":{ + "type":"structure", + "members":{ + "apiKey":{ + "shape":"ApiKey", + "documentation":"The API key.
" + } + } + }, "UpdateDataSourceRequest":{ "type":"structure", "required":[ @@ -1606,7 +1677,7 @@ "locationName":"apiId" }, "name":{ - "shape":"ResourceName", + "shape":"String", "documentation":"The new name for the GraphqlApi
object.
The udpated GraphqlApi
object.
The updated GraphqlApi
object.
Attaches one or more EC2 instances to the specified Auto Scaling group.
When you attach instances, Auto Scaling increases the desired capacity of the group by the number of instances being attached. If the number of instances being attached plus the desired capacity of the group exceeds the maximum size of the group, the operation fails.
If there is a Classic Load Balancer attached to your Auto Scaling group, the instances are also registered with the load balancer. If there are target groups attached to your Auto Scaling group, the instances are also registered with the target groups.
For more information, see Attach EC2 Instances to Your Auto Scaling Group in the Auto Scaling User Guide.
" }, @@ -34,7 +35,8 @@ "resultWrapper":"AttachLoadBalancerTargetGroupsResult" }, "errors":[ - {"shape":"ResourceContentionFault"} + {"shape":"ResourceContentionFault"}, + {"shape":"ServiceLinkedRoleFailure"} ], "documentation":"Attaches one or more target groups to the specified Auto Scaling group.
To describe the target groups for an Auto Scaling group, use DescribeLoadBalancerTargetGroups. To detach the target group from the Auto Scaling group, use DetachLoadBalancerTargetGroups.
For more information, see Attach a Load Balancer to Your Auto Scaling Group in the Auto Scaling User Guide.
" }, @@ -50,7 +52,8 @@ "resultWrapper":"AttachLoadBalancersResult" }, "errors":[ - {"shape":"ResourceContentionFault"} + {"shape":"ResourceContentionFault"}, + {"shape":"ServiceLinkedRoleFailure"} ], "documentation":"Attaches one or more Classic Load Balancers to the specified Auto Scaling group.
To attach an Application Load Balancer instead, see AttachLoadBalancerTargetGroups.
To describe the load balancers for an Auto Scaling group, use DescribeLoadBalancers. To detach the load balancer from the Auto Scaling group, use DetachLoadBalancers.
For more information, see Attach a Load Balancer to Your Auto Scaling Group in the Auto Scaling User Guide.
" }, @@ -80,9 +83,10 @@ "errors":[ {"shape":"AlreadyExistsFault"}, {"shape":"LimitExceededFault"}, - {"shape":"ResourceContentionFault"} + {"shape":"ResourceContentionFault"}, + {"shape":"ServiceLinkedRoleFailure"} ], - "documentation":"Creates an Auto Scaling group with the specified name and attributes.
If you exceed your maximum limit of Auto Scaling groups, which by default is 20 per region, the call fails. For information about viewing and updating this limit, see DescribeAccountLimits.
For more information, see Auto Scaling Groups in the Auto Scaling User Guide.
" + "documentation":"Creates an Auto Scaling group with the specified name and attributes.
If you exceed your maximum limit of Auto Scaling groups, the call fails. For information about viewing this limit, see DescribeAccountLimits. For information about updating this limit, see Auto Scaling Limits in the Auto Scaling User Guide.
For more information, see Auto Scaling Groups in the Auto Scaling User Guide.
" }, "CreateLaunchConfiguration":{ "name":"CreateLaunchConfiguration", @@ -96,7 +100,7 @@ {"shape":"LimitExceededFault"}, {"shape":"ResourceContentionFault"} ], - "documentation":"Creates a launch configuration.
If you exceed your maximum limit of launch configurations, which by default is 100 per region, the call fails. For information about viewing and updating this limit, see DescribeAccountLimits.
For more information, see Launch Configurations in the Auto Scaling User Guide.
" + "documentation":"Creates a launch configuration.
If you exceed your maximum limit of launch configurations, the call fails. For information about viewing this limit, see DescribeAccountLimits. For information about updating this limit, see Auto Scaling Limits in the Auto Scaling User Guide.
For more information, see Launch Configurations in the Auto Scaling User Guide.
" }, "CreateOrUpdateTags":{ "name":"CreateOrUpdateTags", @@ -176,7 +180,8 @@ }, "input":{"shape":"DeletePolicyType"}, "errors":[ - {"shape":"ResourceContentionFault"} + {"shape":"ResourceContentionFault"}, + {"shape":"ServiceLinkedRoleFailure"} ], "documentation":"Deletes the specified Auto Scaling policy.
Deleting a policy deletes the underlying alarm action, but does not delete the alarm, even if it no longer has an associated action.
" }, @@ -218,7 +223,7 @@ "errors":[ {"shape":"ResourceContentionFault"} ], - "documentation":"Describes the current Auto Scaling resource limits for your AWS account.
For information about requesting an increase in these limits, see AWS Service Limits in the Amazon Web Services General Reference.
" + "documentation":"Describes the current Auto Scaling resource limits for your AWS account.
For information about requesting an increase in these limits, see Auto Scaling Limits in the Auto Scaling User Guide.
" }, "DescribeAdjustmentTypes":{ "name":"DescribeAdjustmentTypes", @@ -409,7 +414,8 @@ }, "errors":[ {"shape":"InvalidNextToken"}, - {"shape":"ResourceContentionFault"} + {"shape":"ResourceContentionFault"}, + {"shape":"ServiceLinkedRoleFailure"} ], "documentation":"Describes the policies for the specified Auto Scaling group.
" }, @@ -637,7 +643,8 @@ "input":{"shape":"PutNotificationConfigurationType"}, "errors":[ {"shape":"LimitExceededFault"}, - {"shape":"ResourceContentionFault"} + {"shape":"ResourceContentionFault"}, + {"shape":"ServiceLinkedRoleFailure"} ], "documentation":"Configures an Auto Scaling group to send notifications when specified events take place. Subscribers to the specified topic can have messages delivered to an endpoint such as a web server or an email address.
This configuration overwrites any existing configuration.
For more information see Getting SNS Notifications When Your Auto Scaling Group Scales in the Auto Scaling User Guide.
" }, @@ -654,7 +661,8 @@ }, "errors":[ {"shape":"LimitExceededFault"}, - {"shape":"ResourceContentionFault"} + {"shape":"ResourceContentionFault"}, + {"shape":"ServiceLinkedRoleFailure"} ], "documentation":"Creates or updates a policy for an Auto Scaling group. To update an existing policy, use the existing policy name and set the parameters you want to change. Any existing parameter not changed in an update to an existing policy is not changed in this update request.
If you exceed your maximum limit of step adjustments, which by default is 20 per region, the call fails. For information about updating this limit, see AWS Service Limits in the Amazon Web Services General Reference.
" }, @@ -782,7 +790,8 @@ "input":{"shape":"UpdateAutoScalingGroupType"}, "errors":[ {"shape":"ScalingActivityInProgressFault"}, - {"shape":"ResourceContentionFault"} + {"shape":"ResourceContentionFault"}, + {"shape":"ServiceLinkedRoleFailure"} ], "documentation":"Updates the configuration for the specified Auto Scaling group.
The new settings take effect on any scaling activities after this call returns. Scaling activities that are currently in progress aren't affected.
To update an Auto Scaling group with a launch configuration with InstanceMonitoring
set to false
, you must first disable the collection of group metrics. Otherwise, you will get an error. If you have previously enabled the collection of group metrics, you can disable it using DisableMetricsCollection.
Note the following:
If you specify a new value for MinSize
without specifying a value for DesiredCapacity
, and the new MinSize
is larger than the current size of the group, we implicitly call SetDesiredCapacity to set the size of the group to the new value of MinSize
.
If you specify a new value for MaxSize
without specifying a value for DesiredCapacity
, and the new MaxSize
is smaller than the current size of the group, we implicitly call SetDesiredCapacity to set the size of the group to the new value of MaxSize
.
All other optional parameters are left unchanged if not specified.
One or more instance IDs.
" + "documentation":"The IDs of the instances. You can specify up to 20 instances.
" }, "AutoScalingGroupName":{ "shape":"ResourceName", @@ -959,7 +968,7 @@ }, "TargetGroupARNs":{ "shape":"TargetGroupARNs", - "documentation":"The Amazon Resource Names (ARN) of the target groups.
" + "documentation":"The Amazon Resource Names (ARN) of the target groups. You can specify up to 10 target groups.
" } } }, @@ -981,7 +990,7 @@ }, "LoadBalancerNames":{ "shape":"LoadBalancerNames", - "documentation":"One or more load balancer names.
" + "documentation":"The names of the load balancers. You can specify up to 10 load balancers.
" } } }, @@ -1089,6 +1098,10 @@ "NewInstancesProtectedFromScaleIn":{ "shape":"InstanceProtected", "documentation":"Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.
" + }, + "ServiceLinkedRoleARN":{ + "shape":"ResourceName", + "documentation":"The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf.
" } }, "documentation":"Describes an Auto Scaling group.
" @@ -1374,6 +1387,10 @@ "Tags":{ "shape":"Tags", "documentation":"One or more tags.
For more information, see Tagging Auto Scaling Groups and Instances in the Auto Scaling User Guide.
" + }, + "ServiceLinkedRoleARN":{ + "shape":"ResourceName", + "documentation":"The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf. By default, Auto Scaling uses a service-linked role named AWSServiceRoleForAutoScaling, which it creates if it does not exist.
" } } }, @@ -1447,7 +1464,7 @@ }, "AssociatePublicIpAddress":{ "shape":"AssociatePublicIpAddress", - "documentation":"Used for groups that launch instances into a virtual private cloud (VPC). Specifies whether to assign a public IP address to each instance. For more information, see Launching Auto Scaling Instances in a VPC in the Auto Scaling User Guide.
If you specify this parameter, be sure to specify at least one subnet when you create your group.
Default: If the instance is launched into a default subnet, the default is true
. If the instance is launched into a nondefault subnet, the default is false
. For more information, see Supported Platforms in the Amazon Elastic Compute Cloud User Guide.
Used for groups that launch instances into a virtual private cloud (VPC). Specifies whether to assign a public IP address to each instance. For more information, see Launching Auto Scaling Instances in a VPC in the Auto Scaling User Guide.
If you specify this parameter, be sure to specify at least one subnet when you create your group.
Default: If the instance is launched into a default subnet, the default is to assign a public IP address. If the instance is launched into a nondefault subnet, the default is not to assign a public IP address.
" }, "PlacementTenancy":{ "shape":"XmlStringMaxLen64", @@ -1629,7 +1646,7 @@ }, "MaxRecords":{ "shape":"MaxRecords", - "documentation":"The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.
" + "documentation":"The maximum number of items to return with this call. The default value is 50 and the maximum value is 50.
" }, "NextToken":{ "shape":"XmlString", @@ -1692,7 +1709,7 @@ }, "MaxRecords":{ "shape":"MaxRecords", - "documentation":"The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.
" + "documentation":"The maximum number of items to return with this call. The default value is 100 and the maximum value is 100.
" } } }, @@ -1723,7 +1740,7 @@ }, "MaxRecords":{ "shape":"MaxRecords", - "documentation":"The maximum number of items to return with this call. The default value is 50 and the maximum value is 100.
" + "documentation":"The maximum number of items to return with this call. The default value is 100 and the maximum value is 100.
" } } }, @@ -1822,7 +1839,7 @@ }, "MaxRecords":{ "shape":"MaxRecords", - "documentation":"The maximum number of items to return with this call. The default value is 100.
" + "documentation":"The maximum number of items to return with this call. The default value is 100 and the maximum value is 100.
" }, "NextToken":{ "shape":"XmlString", @@ -1903,7 +1920,7 @@ "members":{ "InstanceIds":{ "shape":"InstanceIds", - "documentation":"One or more instance IDs.
" + "documentation":"The IDs of the instances. You can specify up to 20 instances.
" }, "AutoScalingGroupName":{ "shape":"ResourceName", @@ -1911,7 +1928,7 @@ }, "ShouldDecrementDesiredCapacity":{ "shape":"ShouldDecrementDesiredCapacity", - "documentation":"If True
, the Auto Scaling group decrements the desired capacity value by the number of instances detached.
Indicates whether the Auto Scaling group decrements the desired capacity value by the number of instances detached.
" } } }, @@ -1933,7 +1950,7 @@ }, "TargetGroupARNs":{ "shape":"TargetGroupARNs", - "documentation":"The Amazon Resource Names (ARN) of the target groups.
" + "documentation":"The Amazon Resource Names (ARN) of the target groups. You can specify up to 10 target groups.
" } } }, @@ -1955,7 +1972,7 @@ }, "LoadBalancerNames":{ "shape":"LoadBalancerNames", - "documentation":"One or more load balancer names.
" + "documentation":"The names of the load balancers. You can specify up to 10 load balancers.
" } } }, @@ -1991,7 +2008,7 @@ }, "DeleteOnTermination":{ "shape":"BlockDeviceEbsDeleteOnTermination", - "documentation":"Indicates whether the volume is deleted on instance termination.
Default: true
Indicates whether the volume is deleted on instance termination. The default is true
.
One or more instances to move into Standby
mode. You must specify at least one instance ID.
The IDs of the instances. You can specify up to 20 instances.
" }, "AutoScalingGroupName":{ "shape":"ResourceName", @@ -2070,7 +2087,7 @@ }, "ShouldDecrementDesiredCapacity":{ "shape":"ShouldDecrementDesiredCapacity", - "documentation":"Specifies whether the instances moved to Standby
mode count as part of the Auto Scaling group's desired capacity. If set, the desired capacity for the Auto Scaling group decrements by the number of instances moved to Standby
mode.
Indicates whether to decrement the desired capacity of the Auto Scaling group by the number of instances moved to Standby
mode.
If this parameter is true, Auto Scaling waits for the cooldown period to complete before executing the policy. Otherwise, Auto Scaling executes the policy without waiting for the cooldown period to complete.
This parameter is not supported if the policy type is StepScaling
.
For more information, see Auto Scaling Cooldowns in the Auto Scaling User Guide.
" + "documentation":"Indicates whether Auto Scaling waits for the cooldown period to complete before executing the policy.
This parameter is not supported if the policy type is StepScaling
.
For more information, see Auto Scaling Cooldowns in the Auto Scaling User Guide.
" }, "MetricValue":{ "shape":"MetricScale", @@ -2116,7 +2133,7 @@ "members":{ "InstanceIds":{ "shape":"InstanceIds", - "documentation":"One or more instance IDs. You must specify at least one instance ID.
" + "documentation":"The IDs of the instances. You can specify up to 20 instances.
" }, "AutoScalingGroupName":{ "shape":"ResourceName", @@ -2379,7 +2396,7 @@ }, "Version":{ "shape":"XmlStringMaxLen255", - "documentation":"The version number. By default, the default version of the launch template is used.
" + "documentation":"The version number, $Latest
, or $Default
. If the value is $Latest
, Auto Scaling selects the latest version of the launch template when launching instances. If the value is $Default
, Auto Scaling selects the default version of the launch template when launching instances. The default value is $Default
.
Describes a launch template.
" @@ -3181,6 +3198,19 @@ "type":"list", "member":{"shape":"XmlString"} }, + "ServiceLinkedRoleFailure":{ + "type":"structure", + "members":{ + "message":{"shape":"XmlStringMaxLen255"} + }, + "documentation":"The service-linked role is not yet ready for use.
", + "error":{ + "code":"ServiceLinkedRoleFailure", + "httpStatusCode":500, + "senderFault":true + }, + "exception":true + }, "SetDesiredCapacityType":{ "type":"structure", "required":[ @@ -3198,7 +3228,7 @@ }, "HonorCooldown":{ "shape":"HonorCooldown", - "documentation":"By default, SetDesiredCapacity
overrides any cooldown period associated with the Auto Scaling group. Specify True
to make Auto Scaling to wait for the cool-down period associated with the Auto Scaling group to complete before initiating a scaling activity to set your Auto Scaling group to its new capacity.
Indicates whether Auto Scaling waits for the cooldown period to complete before initiating a scaling activity to set your Auto Scaling group to its new capacity. By default, Auto Scaling does not honor the cooldown period during manual scaling activities.
" } } }, @@ -3406,7 +3436,7 @@ }, "DisableScaleIn":{ "shape":"DisableScaleIn", - "documentation":"Indicates whether scale in by the target tracking policy is disabled. If the value is true
, scale in is disabled and the target tracking policy won't remove instances from the Auto Scaling group. Otherwise, scale in is enabled and the target tracking policy can remove instances from the Auto Scaling group. The default value is false
.
Indicates whether scale in by the target tracking policy is disabled. If scale in is disabled, the target tracking policy won't remove instances from the Auto Scaling group. Otherwise, the target tracking policy can remove instances from the Auto Scaling group. The default is disabled.
" } }, "documentation":"Represents a target tracking policy configuration.
" @@ -3424,7 +3454,7 @@ }, "ShouldDecrementDesiredCapacity":{ "shape":"ShouldDecrementDesiredCapacity", - "documentation":"If true
, terminating the instance also decrements the size of the Auto Scaling group.
Indicates whether terminating the instance also decrements the size of the Auto Scaling group.
" } } }, @@ -3443,11 +3473,11 @@ }, "LaunchConfigurationName":{ "shape":"ResourceName", - "documentation":"The name of the launch configuration. You must specify either a launch configuration or a launch template.
" + "documentation":"The name of the launch configuration. If you specify a launch configuration, you can't specify a launch template.
" }, "LaunchTemplate":{ "shape":"LaunchTemplateSpecification", - "documentation":"The launch template to use to specify the updates. You must specify a launch configuration or a launch template.
" + "documentation":"The launch template to use to specify the updates. If you specify a launch template, you can't specify a launch configuration.
" }, "MinSize":{ "shape":"AutoScalingGroupMinSize", @@ -3492,6 +3522,10 @@ "NewInstancesProtectedFromScaleIn":{ "shape":"InstanceProtected", "documentation":"Indicates whether newly launched instances are protected from termination by Auto Scaling when scaling in.
" + }, + "ServiceLinkedRoleARN":{ + "shape":"ResourceName", + "documentation":"The Amazon Resource Name (ARN) of the service-linked role that the Auto Scaling group uses to call other AWS services on your behalf.
" } } }, @@ -3557,5 +3591,5 @@ "pattern":"[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" } }, - "documentation":"Auto Scaling is designed to automatically launch or terminate EC2 instances based on user-defined policies, schedules, and health checks. Use this service in conjunction with the Amazon CloudWatch and Elastic Load Balancing services.
" + "documentation":"Amazon EC2 Auto Scaling is designed to automatically launch or terminate EC2 instances based on user-defined policies, schedules, and health checks. Use this service in conjunction with the AWS Auto Scaling, Amazon CloudWatch, and Elastic Load Balancing services.
" } diff --git a/botocore/data/codecommit/2015-04-13/service-2.json b/botocore/data/codecommit/2015-04-13/service-2.json index 3093a2cd..d3c23312 100644 --- a/botocore/data/codecommit/2015-04-13/service-2.json +++ b/botocore/data/codecommit/2015-04-13/service-2.json @@ -7,6 +7,7 @@ "protocol":"json", "serviceAbbreviation":"CodeCommit", "serviceFullName":"AWS CodeCommit", + "serviceId":"CodeCommit", "signatureVersion":"v4", "targetPrefix":"CodeCommit_20150413", "uid":"codecommit-2015-04-13" @@ -642,6 +643,45 @@ "documentation":"Posts a comment in reply to an existing comment on a comparison between commits or a pull request.
", "idempotent":true }, + "PutFile":{ + "name":"PutFile", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutFileInput"}, + "output":{"shape":"PutFileOutput"}, + "errors":[ + {"shape":"RepositoryNameRequiredException"}, + {"shape":"InvalidRepositoryNameException"}, + {"shape":"RepositoryDoesNotExistException"}, + {"shape":"ParentCommitIdRequiredException"}, + {"shape":"InvalidParentCommitIdException"}, + {"shape":"ParentCommitDoesNotExistException"}, + {"shape":"ParentCommitIdOutdatedException"}, + {"shape":"FileContentRequiredException"}, + {"shape":"FileContentSizeLimitExceededException"}, + {"shape":"PathRequiredException"}, + {"shape":"InvalidPathException"}, + {"shape":"BranchNameRequiredException"}, + {"shape":"InvalidBranchNameException"}, + {"shape":"BranchDoesNotExistException"}, + {"shape":"BranchNameIsTagNameException"}, + {"shape":"InvalidFileModeException"}, + {"shape":"NameLengthExceededException"}, + {"shape":"InvalidEmailException"}, + {"shape":"CommitMessageLengthExceededException"}, + {"shape":"EncryptionIntegrityChecksFailedException"}, + {"shape":"EncryptionKeyAccessDeniedException"}, + {"shape":"EncryptionKeyDisabledException"}, + {"shape":"EncryptionKeyNotFoundException"}, + {"shape":"EncryptionKeyUnavailableException"}, + {"shape":"SameFileContentException"}, + {"shape":"FileNameConflictsWithDirectoryNameException"}, + {"shape":"DirectoryNameConflictsWithFileNameException"} + ], + "documentation":"Adds or updates a file in an AWS CodeCommit repository.
" + }, "PutRepositoryTriggers":{ "name":"PutRepositoryTriggers", "http":{ @@ -958,6 +998,13 @@ "documentation":"The specified branch name already exists.
", "exception":true }, + "BranchNameIsTagNameException":{ + "type":"structure", + "members":{ + }, + "documentation":"The specified branch name is not valid because it is a tag name. Type the name of a current branch in the repository. For a list of valid branch names, use ListBranches.
", + "exception":true + }, "BranchNameList":{ "type":"list", "member":{"shape":"BranchName"} @@ -1165,7 +1212,7 @@ }, "parents":{ "shape":"ParentList", - "documentation":"The parent list for the specified commit.
" + "documentation":"A list of parent commits for the specified commit. Each parent commit ID is the full commit ID.
" }, "message":{ "shape":"Message", @@ -1208,6 +1255,13 @@ "documentation":"A commit ID was not specified.
", "exception":true }, + "CommitMessageLengthExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"The commit message is too long. Provide a shorter string.
", + "exception":true + }, "CommitName":{"type":"string"}, "CommitRequiredException":{ "type":"structure", @@ -1444,6 +1498,13 @@ "type":"list", "member":{"shape":"Difference"} }, + "DirectoryNameConflictsWithFileNameException":{ + "type":"structure", + "members":{ + }, + "documentation":"A file cannot be added to the repository because the specified path name has the same name as a file that already exists in this repository. Either provide a different name for the file, or specify a different path for the file.
", + "exception":true + }, "Email":{"type":"string"}, "EncryptionIntegrityChecksFailedException":{ "type":"structure", @@ -1482,6 +1543,39 @@ "exception":true }, "EventDate":{"type":"timestamp"}, + "FileContent":{ + "type":"blob", + "max":6291456 + }, + "FileContentRequiredException":{ + "type":"structure", + "members":{ + }, + "documentation":"The file cannot be added because it is empty. Empty files cannot be added to the repository with this API.
", + "exception":true + }, + "FileContentSizeLimitExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"The file cannot be added because it is too large. The maximum file size that can be added using PutFile is 6 MB. For files larger than 6 MB but smaller than 2 GB, add them using a Git client.
", + "exception":true + }, + "FileModeTypeEnum":{ + "type":"string", + "enum":[ + "EXECUTABLE", + "NORMAL", + "SYMLINK" + ] + }, + "FileNameConflictsWithDirectoryNameException":{ + "type":"structure", + "members":{ + }, + "documentation":"A file cannot be added to the repository because the specified file name has the same name as a directory in this repository. Either provide another name for the file, or add the file in a directory that does not match the file name.
", + "exception":true + }, "FileTooLargeException":{ "type":"structure", "members":{ @@ -1924,6 +2018,13 @@ "documentation":"The destination commit specifier is not valid. You must provide a valid branch name, tag, or full commit ID.
", "exception":true }, + "InvalidEmailException":{ + "type":"structure", + "members":{ + }, + "documentation":"The specified email address either contains one or more characters that are not allowed, or it exceeds the maximum number of characters allowed for an email address.
", + "exception":true + }, "InvalidFileLocationException":{ "type":"structure", "members":{ @@ -1931,6 +2032,13 @@ "documentation":"The location of the file is not valid. Make sure that you include the extension of the file as well as the file name.
", "exception":true }, + "InvalidFileModeException":{ + "type":"structure", + "members":{ + }, + "documentation":"The specified file mode permission is not valid. For a list of valid file mode permissions, see PutFile.
", + "exception":true + }, "InvalidFilePositionException":{ "type":"structure", "members":{ @@ -1959,6 +2067,13 @@ "documentation":"The specified sort order is not valid.
", "exception":true }, + "InvalidParentCommitIdException":{ + "type":"structure", + "members":{ + }, + "documentation":"The parent commit ID is not valid. The commit ID cannot be empty, and must match the head commit ID for the branch of the repository where you want to add or update a file.
", + "exception":true + }, "InvalidPathException":{ "type":"structure", "members":{ @@ -2327,6 +2442,13 @@ "exception":true }, "Name":{"type":"string"}, + "NameLengthExceededException":{ + "type":"structure", + "members":{ + }, + "documentation":"The file name is not valid because it has exceeded the character limit for file names. File names, including the path to the file, cannot exceed the character limit.
", + "exception":true + }, "NextToken":{"type":"string"}, "ObjectId":{"type":"string"}, "OrderEnum":{ @@ -2336,6 +2458,27 @@ "descending" ] }, + "ParentCommitDoesNotExistException":{ + "type":"structure", + "members":{ + }, + "documentation":"The parent commit ID is not valid. The specified parent commit ID does not exist in the specified branch of the repository.
", + "exception":true + }, + "ParentCommitIdOutdatedException":{ + "type":"structure", + "members":{ + }, + "documentation":"The file could not be added because the provided parent commit ID is not the current tip of the specified branch. To view the full commit ID of the current head of the branch, use GetBranch.
", + "exception":true + }, + "ParentCommitIdRequiredException":{ + "type":"structure", + "members":{ + }, + "documentation":"A parent commit ID is required. To view the full commit ID of a branch in a repository, use GetBranch or a Git command (for example, git pull or git log).
", + "exception":true + }, "ParentList":{ "type":"list", "member":{"shape":"ObjectId"} @@ -2742,6 +2885,75 @@ "type":"list", "member":{"shape":"PullRequestTarget"} }, + "PutFileInput":{ + "type":"structure", + "required":[ + "repositoryName", + "branchName", + "fileContent", + "filePath" + ], + "members":{ + "repositoryName":{ + "shape":"RepositoryName", + "documentation":"The name of the repository where you want to add or update the file.
" + }, + "branchName":{ + "shape":"BranchName", + "documentation":"The name of the branch where you want to add or update the file.
" + }, + "fileContent":{ + "shape":"FileContent", + "documentation":"The content of the file, in binary object format.
" + }, + "filePath":{ + "shape":"Path", + "documentation":"The name of the file you want to add or update, including the relative path to the file in the repository.
If the path does not currently exist in the repository, the path will be created as part of adding the file.
The file mode permissions of the blob. Valid file mode permissions are listed below.
" + }, + "parentCommitId":{ + "shape":"CommitId", + "documentation":"The full commit ID of the head commit in the branch where you want to add or update the file. If the commit ID does not match the ID of the head commit at the time of the operation, an error will occur, and the file will not be added or updated.
" + }, + "commitMessage":{ + "shape":"Message", + "documentation":"A message about why this file was added or updated. While optional, adding a message is strongly encouraged in order to provide a more useful commit history for your repository.
" + }, + "name":{ + "shape":"Name", + "documentation":"The name of the person adding or updating the file. While optional, adding a name is strongly encouraged in order to provide a more useful commit history for your repository.
" + }, + "email":{ + "shape":"Email", + "documentation":"An email address for the person adding or updating the file.
" + } + } + }, + "PutFileOutput":{ + "type":"structure", + "required":[ + "commitId", + "blobId", + "treeId" + ], + "members":{ + "commitId":{ + "shape":"ObjectId", + "documentation":"The full SHA of the commit that contains this file change.
" + }, + "blobId":{ + "shape":"ObjectId", + "documentation":"The ID of the blob, which is its SHA-1 pointer.
" + }, + "treeId":{ + "shape":"ObjectId", + "documentation":"Tree information for the commit that contains this file change.
" + } + } + }, "PutRepositoryTriggersInput":{ "type":"structure", "required":[ @@ -3037,6 +3249,13 @@ "documentation":"The list of triggers for the repository is required but was not specified.
", "exception":true }, + "SameFileContentException":{ + "type":"structure", + "members":{ + }, + "documentation":"The file was not added or updated because the content of the file is exactly the same as the content of that file in the repository and branch that you specified.
", + "exception":true + }, "SortByEnum":{ "type":"string", "enum":[ @@ -3319,12 +3538,12 @@ }, "date":{ "shape":"Date", - "documentation":"The date when the specified commit was pushed to the repository.
" + "documentation":"The date when the specified commit was commited, in timestamp format with GMT offset.
" } }, "documentation":"Information about the user who made a specified commit.
" }, "blob":{"type":"blob"} }, - "documentation":"This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.
You can use the AWS CodeCommit API to work with the following objects:
Repositories, by calling the following:
BatchGetRepositories, which returns information about one or more repositories associated with your AWS account.
CreateRepository, which creates an AWS CodeCommit repository.
DeleteRepository, which deletes an AWS CodeCommit repository.
GetRepository, which returns information about a specified repository.
ListRepositories, which lists all AWS CodeCommit repositories associated with your AWS account.
UpdateRepositoryDescription, which sets or updates the description of the repository.
UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository will be able to access it until you send them the new HTTPS or SSH URL to use.
Branches, by calling the following:
CreateBranch, which creates a new branch in a specified repository.
DeleteBranch, which deletes the specified branch in a repository unless it is the default branch.
GetBranch, which returns information about a specified branch.
ListBranches, which lists all branches for a specified repository.
UpdateDefaultBranch, which changes the default branch for a repository.
Information about committed code in a repository, by calling the following:
GetBlob, which returns the base-64 encoded content of an individual Git blob object within a repository.
GetCommit, which returns information about a commit, including commit messages and author and committer information.
GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID or other fully qualified reference).
Pull requests, by calling the following:
CreatePullRequest, which creates a pull request in a specified repository.
DescribePullRequestEvents, which returns information about one or more pull request events.
GetCommentsForPullRequest, which returns information about comments on a specified pull request.
GetMergeConflicts, which returns information about merge conflicts between the source and destination branch in a pull request.
GetPullRequest, which returns information about a specified pull request.
ListPullRequests, which lists all pull requests for a repository.
MergePullRequestByFastForward, which merges the source destination branch of a pull request into the specified destination branch for that pull request using the fast-forward merge option.
PostCommentForPullRequest, which posts a comment to a pull request at the specified line, file, or request.
UpdatePullRequestDescription, which updates the description of a pull request.
UpdatePullRequestStatus, which updates the status of a pull request.
UpdatePullRequestTitle, which updates the title of a pull request.
Information about comments in a repository, by calling the following:
DeleteCommentContent, which deletes the content of a comment on a commit in a repository.
GetComment, which returns information about a comment on a commit.
GetCommentsForComparedCommit, which returns information about comments on the comparison between two commit specifiers in a repository.
PostCommentForComparedCommit, which creates a comment on the comparison between two commit specifiers in a repository.
PostCommentReply, which creates a reply to a comment.
UpdateComment, which updates the content of a comment on a commit in a repository.
Triggers, by calling the following:
GetRepositoryTriggers, which returns information about triggers configured for a repository.
PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers.
TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target.
For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.
" + "documentation":"This is the AWS CodeCommit API Reference. This reference provides descriptions of the operations and data types for AWS CodeCommit API along with usage examples.
You can use the AWS CodeCommit API to work with the following objects:
Repositories, by calling the following:
BatchGetRepositories, which returns information about one or more repositories associated with your AWS account.
CreateRepository, which creates an AWS CodeCommit repository.
DeleteRepository, which deletes an AWS CodeCommit repository.
GetRepository, which returns information about a specified repository.
ListRepositories, which lists all AWS CodeCommit repositories associated with your AWS account.
UpdateRepositoryDescription, which sets or updates the description of the repository.
UpdateRepositoryName, which changes the name of the repository. If you change the name of a repository, no other users of that repository will be able to access it until you send them the new HTTPS or SSH URL to use.
Branches, by calling the following:
CreateBranch, which creates a new branch in a specified repository.
DeleteBranch, which deletes the specified branch in a repository unless it is the default branch.
GetBranch, which returns information about a specified branch.
ListBranches, which lists all branches for a specified repository.
UpdateDefaultBranch, which changes the default branch for a repository.
Files, by calling the following:
PutFile, which adds or modifies a file in a specified repository and branch.
Information about committed code in a repository, by calling the following:
GetBlob, which returns the base-64 encoded content of an individual Git blob object within a repository.
GetCommit, which returns information about a commit, including commit messages and author and committer information.
GetDifferences, which returns information about the differences in a valid commit specifier (such as a branch, tag, HEAD, commit ID or other fully qualified reference).
Pull requests, by calling the following:
CreatePullRequest, which creates a pull request in a specified repository.
DescribePullRequestEvents, which returns information about one or more pull request events.
GetCommentsForPullRequest, which returns information about comments on a specified pull request.
GetMergeConflicts, which returns information about merge conflicts between the source and destination branch in a pull request.
GetPullRequest, which returns information about a specified pull request.
ListPullRequests, which lists all pull requests for a repository.
MergePullRequestByFastForward, which merges the source branch of a pull request into the specified destination branch for that pull request using the fast-forward merge option.
PostCommentForPullRequest, which posts a comment to a pull request at the specified line, file, or request.
UpdatePullRequestDescription, which updates the description of a pull request.
UpdatePullRequestStatus, which updates the status of a pull request.
UpdatePullRequestTitle, which updates the title of a pull request.
Information about comments in a repository, by calling the following:
DeleteCommentContent, which deletes the content of a comment on a commit in a repository.
GetComment, which returns information about a comment on a commit.
GetCommentsForComparedCommit, which returns information about comments on the comparison between two commit specifiers in a repository.
PostCommentForComparedCommit, which creates a comment on the comparison between two commit specifiers in a repository.
PostCommentReply, which creates a reply to a comment.
UpdateComment, which updates the content of a comment on a commit in a repository.
Triggers, by calling the following:
GetRepositoryTriggers, which returns information about triggers configured for a repository.
PutRepositoryTriggers, which replaces all triggers for a repository and can be used to create or delete triggers.
TestRepositoryTriggers, which tests the functionality of a repository trigger by sending data to the trigger target.
For information about how to use AWS CodeCommit, see the AWS CodeCommit User Guide.
" } diff --git a/botocore/data/cognito-idp/2016-04-18/service-2.json b/botocore/data/cognito-idp/2016-04-18/service-2.json index 9bc7cda9..45a28a33 100644 --- a/botocore/data/cognito-idp/2016-04-18/service-2.json +++ b/botocore/data/cognito-idp/2016-04-18/service-2.json @@ -1160,6 +1160,20 @@ ], "documentation":"Gets the specified identity provider.
" }, + "GetSigningCertificate":{ + "name":"GetSigningCertificate", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"GetSigningCertificateRequest"}, + "output":{"shape":"GetSigningCertificateResponse"}, + "errors":[ + {"shape":"InternalErrorException"}, + {"shape":"ResourceNotFoundException"} + ], + "documentation":"This method takes a user pool ID, and returns the signing certificate.
" + }, "GetUICustomization":{ "name":"GetUICustomization", "http":{ @@ -2337,11 +2351,11 @@ }, "AuthFlow":{ "shape":"AuthFlowType", - "documentation":"The authentication flow for this call to execute. The API action will depend on this value. For example:
REFRESH_TOKEN_AUTH
will take in a valid refresh token and return new tokens.
USER_SRP_AUTH
will take in USERNAME
and SRP_A
and return the SRP variables to be used for next challenge execution.
Valid values include:
USER_SRP_AUTH
: Authentication flow for the Secure Remote Password (SRP) protocol.
REFRESH_TOKEN_AUTH
/REFRESH_TOKEN
: Authentication flow for refreshing the access token and ID token by supplying a valid refresh token.
CUSTOM_AUTH
: Custom authentication flow.
ADMIN_NO_SRP_AUTH
: Non-SRP authentication flow; you can pass in the USERNAME and PASSWORD directly if the flow is enabled for calling the app client.
The authentication flow for this call to execute. The API action will depend on this value. For example:
REFRESH_TOKEN_AUTH
will take in a valid refresh token and return new tokens.
USER_SRP_AUTH
will take in USERNAME
and SRP_A
and return the SRP variables to be used for next challenge execution.
USER_PASSWORD_AUTH
will take in USERNAME
and PASSWORD
and return the next challenge or tokens.
Valid values include:
USER_SRP_AUTH
: Authentication flow for the Secure Remote Password (SRP) protocol.
REFRESH_TOKEN_AUTH
/REFRESH_TOKEN
: Authentication flow for refreshing the access token and ID token by supplying a valid refresh token.
CUSTOM_AUTH
: Custom authentication flow.
ADMIN_NO_SRP_AUTH
: Non-SRP authentication flow; you can pass in the USERNAME and PASSWORD directly if the flow is enabled for calling the app client.
USER_PASSWORD_AUTH
: Non-SRP authentication flow; USERNAME and PASSWORD are passed directly. If a user migration Lambda trigger is set, this flow will invoke the user migration Lambda if the USERNAME is not found in the user pool.
The authentication parameters. These are inputs corresponding to the AuthFlow
that you are invoking. The required values depend on the value of AuthFlow
:
For USER_SRP_AUTH
: USERNAME
(required), SRP_A
(required), SECRET_HASH
(required if the app client is configured with a client secret), DEVICE_KEY
For REFRESH_TOKEN_AUTH/REFRESH_TOKEN
: USERNAME
(required), SECRET_HASH
(required if the app client is configured with a client secret), REFRESH_TOKEN
(required), DEVICE_KEY
For ADMIN_NO_SRP_AUTH
: USERNAME
(required), SECRET_HASH
(if app client is configured with client secret), PASSWORD
(required), DEVICE_KEY
For CUSTOM_AUTH
: USERNAME
(required), SECRET_HASH
(if app client is configured with client secret), DEVICE_KEY
The authentication parameters. These are inputs corresponding to the AuthFlow
that you are invoking. The required values depend on the value of AuthFlow
:
For USER_SRP_AUTH
: USERNAME
(required), SRP_A
(required), SECRET_HASH
(required if the app client is configured with a client secret), DEVICE_KEY
For REFRESH_TOKEN_AUTH/REFRESH_TOKEN
: REFRESH_TOKEN
(required), SECRET_HASH
(required if the app client is configured with a client secret), DEVICE_KEY
For ADMIN_NO_SRP_AUTH
: USERNAME
(required), SECRET_HASH
(if app client is configured with client secret), PASSWORD
(required), DEVICE_KEY
For CUSTOM_AUTH
: USERNAME
(required), SECRET_HASH
(if app client is configured with client secret), DEVICE_KEY
The challenge parameters. These are returned to you in the AdminInitiateAuth
response if you need to pass another challenge. The responses in this parameter should be used to compute inputs to the next call (AdminRespondToAuthChallenge
).
All challenges require USERNAME
and SECRET_HASH
(if applicable).
The value of the USER_IF_FOR_SRP
attribute will be the user's actual username, not an alias (such as email address or phone number), even if you specified an alias in your call to AdminInitiateAuth
. This is because, in the AdminRespondToAuthChallenge
API ChallengeResponses
, the USERNAME
attribute cannot be an alias.
The challenge parameters. These are returned to you in the AdminInitiateAuth
response if you need to pass another challenge. The responses in this parameter should be used to compute inputs to the next call (AdminRespondToAuthChallenge
).
All challenges require USERNAME
and SECRET_HASH
(if applicable).
The value of the USER_ID_FOR_SRP
attribute will be the user's actual username, not an alias (such as email address or phone number), even if you specified an alias in your call to AdminInitiateAuth
. This is because, in the AdminRespondToAuthChallenge
API ChallengeResponses
, the USERNAME
attribute cannot be an alias.
The user pool username.
" + "documentation":"The user pool username or an alias.
" }, "MaxResults":{ "shape":"QueryLimitType", @@ -2647,7 +2661,7 @@ }, "Username":{ "shape":"UsernameType", - "documentation":"The user pool username.
" + "documentation":"The user pool username or alias.
" }, "UserPoolId":{ "shape":"UserPoolIdType", @@ -3009,7 +3023,8 @@ "REFRESH_TOKEN_AUTH", "REFRESH_TOKEN", "CUSTOM_AUTH", - "ADMIN_NO_SRP_AUTH" + "ADMIN_NO_SRP_AUTH", + "USER_PASSWORD_AUTH" ] }, "AuthParametersType":{ @@ -3708,7 +3723,7 @@ }, "LambdaConfig":{ "shape":"LambdaConfigType", - "documentation":"The Lambda trigger configuration information for the new user pool.
" + "documentation":"The Lambda trigger configuration information for the new user pool.
In a push model, event sources (such as Amazon S3 and custom applications) need permission to invoke a function. So you will need to make an extra call to add permission for these event sources to invoke your Lambda function.
For more information on using the Lambda API to add permission, see AddPermission .
For adding permission using the AWS CLI, see add-permission .
The user pool ID.
" + } + }, + "documentation":"Request to get a signing certificate from Cognito.
" + }, + "GetSigningCertificateResponse":{ + "type":"structure", + "members":{ + "Certificate":{ + "shape":"StringType", + "documentation":"The signing certificate.
" + } + }, + "documentation":"Response from Cognito for a signing certificate request.
" + }, "GetUICustomizationRequest":{ "type":"structure", "required":["UserPoolId"], @@ -4900,11 +4937,11 @@ "members":{ "AuthFlow":{ "shape":"AuthFlowType", - "documentation":"The authentication flow for this call to execute. The API action will depend on this value. For example:
REFRESH_TOKEN_AUTH
will take in a valid refresh token and return new tokens.
USER_SRP_AUTH
will take in USERNAME
and SRP_A
and return the SRP variables to be used for next challenge execution.
Valid values include:
USER_SRP_AUTH
: Authentication flow for the Secure Remote Password (SRP) protocol.
REFRESH_TOKEN_AUTH
/REFRESH_TOKEN
: Authentication flow for refreshing the access token and ID token by supplying a valid refresh token.
CUSTOM_AUTH
: Custom authentication flow.
ADMIN_NO_SRP_AUTH
is not a valid value.
The authentication flow for this call to execute. The API action will depend on this value. For example:
REFRESH_TOKEN_AUTH
will take in a valid refresh token and return new tokens.
USER_SRP_AUTH
will take in USERNAME
and SRP_A
and return the SRP variables to be used for next challenge execution.
USER_PASSWORD_AUTH
will take in USERNAME
and PASSWORD
and return the next challenge or tokens.
Valid values include:
USER_SRP_AUTH
: Authentication flow for the Secure Remote Password (SRP) protocol.
REFRESH_TOKEN_AUTH
/REFRESH_TOKEN
: Authentication flow for refreshing the access token and ID token by supplying a valid refresh token.
CUSTOM_AUTH
: Custom authentication flow.
USER_PASSWORD_AUTH
: Non-SRP authentication flow; USERNAME and PASSWORD are passed directly. If a user migration Lambda trigger is set, this flow will invoke the user migration Lambda if the USERNAME is not found in the user pool.
ADMIN_NO_SRP_AUTH
is not a valid value.
The authentication parameters. These are inputs corresponding to the AuthFlow
that you are invoking. The required values depend on the value of AuthFlow
:
For USER_SRP_AUTH
: USERNAME
(required), SRP_A
(required), SECRET_HASH
(required if the app client is configured with a client secret), DEVICE_KEY
For REFRESH_TOKEN_AUTH/REFRESH_TOKEN
: USERNAME
(required), SECRET_HASH
(required if the app client is configured with a client secret), REFRESH_TOKEN
(required), DEVICE_KEY
For CUSTOM_AUTH
: USERNAME
(required), SECRET_HASH
(if app client is configured with client secret), DEVICE_KEY
The authentication parameters. These are inputs corresponding to the AuthFlow
that you are invoking. The required values depend on the value of AuthFlow
:
For USER_SRP_AUTH
: USERNAME
(required), SRP_A
(required), SECRET_HASH
(required if the app client is configured with a client secret), DEVICE_KEY
For REFRESH_TOKEN_AUTH/REFRESH_TOKEN
: REFRESH_TOKEN
(required), SECRET_HASH
(required if the app client is configured with a client secret), DEVICE_KEY
For CUSTOM_AUTH
: USERNAME
(required), SECRET_HASH
(if app client is configured with client secret), DEVICE_KEY
A Lambda trigger that is invoked before token generation.
" + }, + "UserMigration":{ + "shape":"ArnType", + "documentation":"The user migration Lambda config type.
" } }, "documentation":"Specifies the configuration for AWS Lambda triggers.
" diff --git a/botocore/data/config/2014-11-12/service-2.json b/botocore/data/config/2014-11-12/service-2.json index bcc7772b..399f029f 100644 --- a/botocore/data/config/2014-11-12/service-2.json +++ b/botocore/data/config/2014-11-12/service-2.json @@ -809,10 +809,11 @@ "ConfigurationItemStatus":{ "type":"string", "enum":[ - "Ok", - "Failed", - "Discovered", - "Deleted" + "OK", + "ResourceDiscovered", + "ResourceNotRecorded", + "ResourceDeleted", + "ResourceDeletedNotRecorded" ] }, "ConfigurationRecorder":{ diff --git a/botocore/data/ec2/2016-11-15/service-2.json b/botocore/data/ec2/2016-11-15/service-2.json index 3b366a1b..8555aebb 100644 --- a/botocore/data/ec2/2016-11-15/service-2.json +++ b/botocore/data/ec2/2016-11-15/service-2.json @@ -561,7 +561,7 @@ }, "input":{"shape":"CreateSnapshotRequest"}, "output":{"shape":"Snapshot"}, - "documentation":"Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.
When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.
You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending
.
To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.
Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.
For more information, see Amazon Elastic Block Store and Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.
" + "documentation":"Creates a snapshot of an EBS volume and stores it in Amazon S3. You can use snapshots for backups, to make copies of EBS volumes, and to save data before shutting down an instance.
When a snapshot is created, any AWS Marketplace product codes that are associated with the source volume are propagated to the snapshot.
You can take a snapshot of an attached volume that is in use. However, snapshots only capture data that has been written to your EBS volume at the time the snapshot command is issued; this may exclude any data that has been cached by any applications or the operating system. If you can pause any file systems on the volume long enough to take a snapshot, your snapshot should be complete. However, if you cannot pause all file writes to the volume, you should unmount the volume from within the instance, issue the snapshot command, and then remount the volume to ensure a consistent and complete snapshot. You may remount and use your volume while the snapshot status is pending
.
To create a snapshot for EBS volumes that serve as root devices, you should stop the instance before taking the snapshot.
Snapshots that are taken from encrypted volumes are automatically encrypted. Volumes that are created from encrypted snapshots are also automatically encrypted. Your encrypted volumes and any associated snapshots always remain protected.
You can tag your snapshots during creation. For more information, see Tagging Your Amazon EC2 Resources.
For more information, see Amazon Elastic Block Store and Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.
" }, "CreateSpotDatafeedSubscription":{ "name":"CreateSpotDatafeedSubscription", @@ -988,7 +988,7 @@ }, "input":{"shape":"DescribeAccountAttributesRequest"}, "output":{"shape":"DescribeAccountAttributesResult"}, - "documentation":"Describes attributes of your AWS account. The following are the supported account attributes:
supported-platforms
: Indicates whether your account can launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC.
default-vpc
: The ID of the default VPC for your account, or none
.
max-instances
: The maximum number of On-Demand instances that you can run.
vpc-max-security-groups-per-interface
: The maximum number of security groups that you can assign to a network interface.
max-elastic-ips
: The maximum number of Elastic IP addresses that you can allocate for use with EC2-Classic.
vpc-max-elastic-ips
: The maximum number of Elastic IP addresses that you can allocate for use with EC2-VPC.
Describes attributes of your AWS account. The following are the supported account attributes:
supported-platforms
: Indicates whether your account can launch instances into EC2-Classic and EC2-VPC, or only into EC2-VPC.
default-vpc
: The ID of the default VPC for your account, or none
.
max-instances
: The maximum number of On-Demand Instances that you can run.
vpc-max-security-groups-per-interface
: The maximum number of security groups that you can assign to a network interface.
max-elastic-ips
: The maximum number of Elastic IP addresses that you can allocate for use with EC2-Classic.
vpc-max-elastic-ips
: The maximum number of Elastic IP addresses that you can allocate for use with EC2-VPC.
The attribute value. Note that the value is case-sensitive.
", + "documentation":"The attribute value. The value is case-sensitive.
", "locationName":"value" } }, @@ -4561,7 +4561,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when encrypting the snapshots of an image during a copy operation. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms
namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key
namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. The specified CMK must exist in the region that the snapshot is being copied to. If a KmsKeyId
is specified, the Encrypted
flag must also be set.
An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId
is specified, the Encrypted
flag must also be set.
The CMK identifier may be provided in any of the following formats:
Key ID
Key alias
ARN using key ID. The ID ARN contains the arn:aws:kms
namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key
namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
ARN using key alias. The alias ARN contains the arn:aws:kms
namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the alias
namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
AWS parses KmsKeyId
asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. This action will eventually report failure.
The specified CMK must exist in the region that the snapshot is being copied to.
", "locationName":"kmsKeyId" }, "Name":{ @@ -4618,7 +4618,7 @@ }, "KmsKeyId":{ "shape":"String", - "documentation":"The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when creating the snapshot copy. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms
namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key
namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. The specified CMK must exist in the region that the snapshot is being copied to. If a KmsKeyId
is specified, the Encrypted
flag must also be set.
An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId
is specified, the Encrypted
flag must also be set.
The CMK identifier may be provided in any of the following formats:
Key ID
Key alias
ARN using key ID. The ID ARN contains the arn:aws:kms
namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key
namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
ARN using key alias. The alias ARN contains the arn:aws:kms
namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the alias
namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
AWS parses KmsKeyId
asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. The action will eventually fail.
The ID of the EBS volume.
" }, + "TagSpecifications":{ + "shape":"TagSpecificationList", + "documentation":"The tags to apply to the snapshot during creation.
", + "locationName":"TagSpecification" + }, "DryRun":{ "shape":"Boolean", "documentation":"Checks whether you have the required permissions for the action, without actually making the request, and provides an error response. If you have the required permissions, the error response is DryRunOperation
. Otherwise, it is UnauthorizedOperation
.
The full ARN of the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. The ARN contains the arn:aws:kms
namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key
namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef. If a KmsKeyId
is specified, the Encrypted
flag must also be set.
An identifier for the AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating the encrypted volume. This parameter is only required if you want to use a non-default CMK; if this parameter is not specified, the default CMK for EBS is used. If a KmsKeyId
is specified, the Encrypted
flag must also be set.
The CMK identifier may be provided in any of the following formats:
Key ID
Key alias
ARN using key ID. The ID ARN contains the arn:aws:kms
namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the key
namespace, and then the CMK ID. For example, arn:aws:kms:us-east-1:012345678910:key/abcd1234-a123-456a-a12b-a123b4cd56ef.
ARN using key alias. The alias ARN contains the arn:aws:kms
namespace, followed by the region of the CMK, the AWS account ID of the CMK owner, the alias
namespace, and then the CMK alias. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
AWS parses KmsKeyId
asynchronously, meaning that the action you call may appear to complete even though you provided an invalid identifier. The action will eventually fail.
Indicates whether all resrouces types in the region are configured to use longer IDs. This value will only be true
if all users are configured to use longer IDs for all resources types in the region.
Indicates whether all resource types in the region are configured to use longer IDs. This value is only true
if all users are configured to use longer IDs for all resource types in the region.
ID for a user-managed CMK under which the EBS volume is encrypted.
Note: This parameter is only supported on BlockDeviceMapping
objects called by RunInstances, RequestSpotFleet, and RequestSpotInstances.
Identifier (key ID, key alias, ID ARN, or alias ARN) for a user-managed CMK under which the EBS volume is encrypted.
Note: This parameter is only supported on BlockDeviceMapping
objects called by RunInstances, RequestSpotFleet, and RequestSpotInstances.
The instance type. For more information about the instance types that you can import, see Instance Types in the VM Import/Export User Guide.
", + "documentation":"The instance type. For more information about the instance types that you can import, see Instance Types in the VM Import/Export User Guide.
", "locationName":"instanceType" }, "Monitoring":{ @@ -16780,6 +16785,7 @@ "type":"string", "enum":[ "available", + "associated", "attaching", "in-use", "detaching" @@ -23551,5 +23557,5 @@ ] } }, - "documentation":"Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity in the AWS Cloud. Using Amazon EC2 eliminates your need to invest in hardware up front, so you can develop and deploy applications faster.
" + "documentation":"Amazon Elastic Compute Cloud (Amazon EC2) provides resizable computing capacity in the AWS Cloud. Using Amazon EC2 eliminates the need to invest in hardware up front, so you can develop and deploy applications faster.
" } diff --git a/botocore/data/endpoints.json b/botocore/data/endpoints.json index 0be5b6c7..508f691a 100644 --- a/botocore/data/endpoints.json +++ b/botocore/data/endpoints.json @@ -225,6 +225,7 @@ "endpoints" : { "ap-southeast-1" : { }, "ap-southeast-2" : { }, + "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, "us-east-1" : { }, @@ -279,6 +280,11 @@ } }, "cloudhsmv2" : { + "defaults" : { + "credentialScope" : { + "service" : "cloudhsm" + } + }, "endpoints" : { "ap-northeast-1" : { }, "ap-south-1" : { }, @@ -400,6 +406,7 @@ "codestar" : { "endpoints" : { "ap-northeast-1" : { }, + "ap-northeast-2" : { }, "ap-southeast-1" : { }, "ap-southeast-2" : { }, "ca-central-1" : { }, @@ -512,6 +519,8 @@ "endpoints" : { "ap-northeast-1" : { }, "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, "eu-west-1" : { }, "sa-east-1" : { }, "us-east-1" : { }, @@ -1002,6 +1011,15 @@ "us-west-2" : { } } }, + "kinesisvideo" : { + "endpoints" : { + "ap-northeast-1" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, "kms" : { "endpoints" : { "ap-northeast-1" : { }, @@ -1084,6 +1102,41 @@ "us-east-1" : { } } }, + "mediaconvert" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-south-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-west-1" : { }, + "us-west-2" : { } + } + }, + "medialive" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-west-1" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, + "mediapackage" : { + "endpoints" : { + "ap-northeast-1" : { }, + "ap-southeast-1" : { }, + "ap-southeast-2" : { }, + "eu-central-1" : { }, + "eu-west-1" : { }, + "eu-west-3" : { }, + "us-east-1" : { }, + "us-west-2" : { } + } + }, "metering.marketplace" : { "defaults" : { "credentialScope" : { @@ -1310,24 +1363,68 @@ "hostname" : "s3.ap-northeast-1.amazonaws.com", 
"signatureVersions" : [ "s3", "s3v4" ] }, + "ap-northeast-1-dualstack" : { + "hostname" : "s3.dualstack.ap-northeast-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "ap-northeast-2" : { }, + "ap-northeast-2-dualstack" : { + "hostname" : "s3.dualstack.ap-northeast-2.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, + "ap-northeast-3-dualstack" : { + "hostname" : "s3.dualstack.ap-northeast-3.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "ap-south-1" : { }, + "ap-south-1-dualstack" : { + "hostname" : "s3.dualstack.ap-south-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "ap-southeast-1" : { "hostname" : "s3.ap-southeast-1.amazonaws.com", "signatureVersions" : [ "s3", "s3v4" ] }, + "ap-southeast-1-dualstack" : { + "hostname" : "s3.dualstack.ap-southeast-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "ap-southeast-2" : { "hostname" : "s3.ap-southeast-2.amazonaws.com", "signatureVersions" : [ "s3", "s3v4" ] }, + "ap-southeast-2-dualstack" : { + "hostname" : "s3.dualstack.ap-southeast-2.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "ca-central-1" : { }, + "ca-central-1-dualstack" : { + "hostname" : "s3.dualstack.ca-central-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "eu-central-1" : { }, + "eu-central-1-dualstack" : { + "hostname" : "s3.dualstack.eu-central-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "eu-west-1" : { "hostname" : "s3.eu-west-1.amazonaws.com", "signatureVersions" : [ "s3", "s3v4" ] }, + "eu-west-1-dualstack" : { + "hostname" : "s3.dualstack.eu-west-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "eu-west-2" : { }, + "eu-west-2-dualstack" : { + "hostname" : "s3.dualstack.eu-west-2.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "eu-west-3" : { }, + "eu-west-3-dualstack" : { + "hostname" : "s3.dualstack.eu-west-3.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "s3-external-1" : { 
"credentialScope" : { "region" : "us-east-1" @@ -1339,18 +1436,38 @@ "hostname" : "s3.sa-east-1.amazonaws.com", "signatureVersions" : [ "s3", "s3v4" ] }, + "sa-east-1-dualstack" : { + "hostname" : "s3.dualstack.sa-east-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "us-east-1" : { "hostname" : "s3.amazonaws.com", "signatureVersions" : [ "s3", "s3v4" ] }, + "us-east-1-dualstack" : { + "hostname" : "s3.dualstack.us-east-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "us-east-2" : { }, + "us-east-2-dualstack" : { + "hostname" : "s3.dualstack.us-east-2.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "us-west-1" : { "hostname" : "s3.us-west-1.amazonaws.com", "signatureVersions" : [ "s3", "s3v4" ] }, + "us-west-1-dualstack" : { + "hostname" : "s3.dualstack.us-west-1.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] + }, "us-west-2" : { "hostname" : "s3.us-west-2.amazonaws.com", "signatureVersions" : [ "s3", "s3v4" ] + }, + "us-west-2-dualstack" : { + "hostname" : "s3.dualstack.us-west-2.amazonaws.com", + "signatureVersions" : [ "s3", "s3v4" ] } }, "isRegionalized" : true, @@ -1393,6 +1510,14 @@ "us-west-2" : { } } }, + "servicediscovery" : { + "endpoints" : { + "eu-west-1" : { }, + "us-east-1" : { }, + "us-east-2" : { }, + "us-west-2" : { } + } + }, "shield" : { "defaults" : { "protocols" : [ "https" ], @@ -1412,6 +1537,7 @@ "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, + "eu-west-2" : { }, "eu-west-3" : { }, "us-east-1" : { }, "us-east-2" : { }, @@ -1424,6 +1550,7 @@ "ap-northeast-1" : { }, "ap-south-1" : { }, "ap-southeast-2" : { }, + "ca-central-1" : { }, "eu-central-1" : { }, "eu-west-1" : { }, "eu-west-2" : { }, @@ -2111,6 +2238,11 @@ "us-gov-west-1" : { } } }, + "ecr" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "ecs" : { "endpoints" : { "us-gov-west-1" : { } @@ -2140,6 +2272,11 @@ } } }, + "es" : { + "endpoints" : { + "us-gov-west-1" : { } + } + }, "events" : { "endpoints" : { 
"us-gov-west-1" : { } diff --git a/botocore/data/gamelift/2015-10-01/service-2.json b/botocore/data/gamelift/2015-10-01/service-2.json index b4936bde..8d085da3 100644 --- a/botocore/data/gamelift/2015-10-01/service-2.json +++ b/botocore/data/gamelift/2015-10-01/service-2.json @@ -25,7 +25,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.
When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE
. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.
To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING
, where a new game session is created for the match.
If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where all players accepted the match, the ticket status is returned to SEARCHING
to find a new match. For tickets where one or more players failed to accept the match, the ticket status is set to FAILED
, and processing is terminated. A new matchmaking request for these players can be submitted as needed.
Matchmaking-related operations include:
" + "documentation":"Registers a player's acceptance or rejection of a proposed FlexMatch match. A matchmaking configuration may require player acceptance; if so, then matches built with that configuration cannot be completed unless all players accept the proposed match within a specified time limit.
When FlexMatch builds a match, all the matchmaking tickets involved in the proposed match are placed into status REQUIRES_ACCEPTANCE
. This is a trigger for your game to get acceptance from all players in the ticket. Acceptances are only valid for tickets when they are in this status; all other acceptances result in an error.
To register acceptance, specify the ticket ID, a response, and one or more players. Once all players have registered acceptance, the matchmaking tickets advance to status PLACING
, where a new game session is created for the match.
If any player rejects the match, or if acceptances are not received before a specified timeout, the proposed match is dropped. The matchmaking tickets are then handled in one of two ways: For tickets where all players accepted the match, the ticket status is returned to SEARCHING
to find a new match. For tickets where one or more players failed to accept the match, the ticket status is set to FAILED
, and processing is terminated. A new matchmaking request for these players can be submitted as needed.
Matchmaking-related operations include:
" }, "CreateAlias":{ "name":"CreateAlias", @@ -76,7 +76,7 @@ {"shape":"InvalidRequestException"}, {"shape":"UnauthorizedException"} ], - "documentation":"Creates a new fleet to run your game servers. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances, each of which can run multiple server processes to host game sessions. You configure a fleet to create instances with certain hardware specifications (see Amazon EC2 Instance Types for more information), and deploy a specified game build to each instance. A newly created fleet passes through several statuses; once it reaches the ACTIVE
status, it can begin hosting game sessions.
To create a new fleet, you must specify the following: (1) fleet name, (2) build ID of an uploaded game build, (3) an EC2 instance type, and (4) a run-time configuration that describes which server processes to run on each instance in the fleet. (Although the run-time configuration is not a required parameter, the fleet cannot be successfully activated without it.)
You can also configure the new fleet with the following settings:
Fleet description
Access permissions for inbound traffic
Fleet-wide game session protection
Resource creation limit
If you use Amazon CloudWatch for metrics, you can add the new fleet to a metric group. This allows you to view aggregated metrics for a set of fleets. Once you specify a metric group, the new fleet's metrics are included in the metric group's data.
You have the option of creating a VPC peering connection with the new fleet. For more information, see VPC Peering with Amazon GameLift Fleets.
If the CreateFleet call is successful, Amazon GameLift performs the following tasks:
Creates a fleet record and sets the status to NEW
(followed by other statuses as the fleet is activated).
Sets the fleet's target capacity to 1 (desired instances), which causes Amazon GameLift to start one new EC2 instance.
Starts launching server processes on the instance. If the fleet is configured to run multiple server processes per instance, Amazon GameLift staggers each launch by a few seconds.
Begins writing events to the fleet event log, which can be accessed in the Amazon GameLift console.
Sets the fleet's status to ACTIVE
as soon as one server process in the fleet is ready to host a game session.
Fleet-related operations include:
Describe fleets:
Update fleets:
Manage fleet capacity:
PutScalingPolicy (automatic scaling)
DescribeScalingPolicies (automatic scaling)
DeleteScalingPolicy (automatic scaling)
Creates a new fleet to run your game servers. A fleet is a set of Amazon Elastic Compute Cloud (Amazon EC2) instances, each of which can run multiple server processes to host game sessions. You set up a fleet to use instances with certain hardware specifications (see Amazon EC2 Instance Types for more information), and deploy your game build to run on each instance.
To create a new fleet, you must specify the following: (1) a fleet name, (2) the build ID of a successfully uploaded game build, (3) an EC2 instance type, and (4) a run-time configuration, which describes the server processes to run on each instance in the fleet. If you don't specify a fleet type (on-demand or spot), the new fleet uses on-demand instances by default.
You can also configure the new fleet with the following settings:
Fleet description
Access permissions for inbound traffic
Fleet-wide game session protection
Resource usage limits
VPC peering connection (see VPC Peering with Amazon GameLift Fleets)
If you use Amazon CloudWatch for metrics, you can add the new fleet to a metric group. By adding multiple fleets to a metric group, you can view aggregated metrics for all the fleets in the group.
If the CreateFleet
call is successful, Amazon GameLift performs the following tasks. You can track the process of a fleet by checking the fleet status or by monitoring fleet creation events:
Creates a fleet record. Status: NEW
.
Begins writing events to the fleet event log, which can be accessed in the Amazon GameLift console.
Sets the fleet's target capacity to 1 (desired instances), which triggers Amazon GameLift to start one new EC2 instance.
Downloads the game build to the new instance and installs it. Statuses: DOWNLOADING
, VALIDATING
, BUILDING
.
Starts launching server processes on the instance. If the fleet is configured to run multiple server processes per instance, Amazon GameLift staggers each launch by a few seconds. Status: ACTIVATING
.
Sets the fleet's status to ACTIVE
as soon as one server process is ready to host a game session.
Fleet-related operations include:
Describe fleets:
Update fleets:
Manage fleet capacity:
PutScalingPolicy (automatic scaling)
DescribeScalingPolicies (automatic scaling)
DeleteScalingPolicy (automatic scaling)
Defines a new matchmaking configuration for use with FlexMatch. A matchmaking configuration sets out guidelines for matching players and getting the matches into games. You can set up multiple matchmaking configurations to handle the scenarios needed for your game. Each matchmaking request (StartMatchmaking) specifies a configuration for the match and provides player attributes to support the configuration being used.
To create a matchmaking configuration, at a minimum you must specify the following: configuration name; a rule set that governs how to evaluate players and find acceptable matches; a game session queue to use when placing a new game session for the match; and the maximum time allowed for a matchmaking attempt.
Player acceptance -- In each configuration, you have the option to require that all players accept participation in a proposed match. To enable this feature, set AcceptanceRequired to true and specify a time limit for player acceptance. Players have the option to accept or reject a proposed match, and a match does not move ahead to game session placement unless all matched players accept.
Matchmaking status notification -- There are two ways to track the progress of matchmaking tickets: (1) polling ticket status with DescribeMatchmaking; or (2) receiving notifications with Amazon Simple Notification Service (SNS). To use notifications, you first need to set up an SNS topic to receive the notifications, and provide the topic ARN in the matchmaking configuration (see Setting up Notifications for Matchmaking). Since notifications promise only \"best effort\" delivery, we recommend calling DescribeMatchmaking
if no notifications are received within 30 seconds.
Operations related to match configurations and rule sets include:
Defines a new matchmaking configuration for use with FlexMatch. A matchmaking configuration sets out guidelines for matching players and getting the matches into games. You can set up multiple matchmaking configurations to handle the scenarios needed for your game. Each matchmaking ticket (StartMatchmaking or StartMatchBackfill) specifies a configuration for the match and provides player attributes to support the configuration being used.
To create a matchmaking configuration, at a minimum you must specify the following: configuration name; a rule set that governs how to evaluate players and find acceptable matches; a game session queue to use when placing a new game session for the match; and the maximum time allowed for a matchmaking attempt.
Player acceptance -- In each configuration, you have the option to require that all players accept participation in a proposed match. To enable this feature, set AcceptanceRequired to true and specify a time limit for player acceptance. Players have the option to accept or reject a proposed match, and a match does not move ahead to game session placement unless all matched players accept.
Matchmaking status notification -- There are two ways to track the progress of matchmaking tickets: (1) polling ticket status with DescribeMatchmaking; or (2) receiving notifications with Amazon Simple Notification Service (SNS). To use notifications, you first need to set up an SNS topic to receive the notifications, and provide the topic ARN in the matchmaking configuration (see Setting up Notifications for Matchmaking). Since notifications promise only \"best effort\" delivery, we recommend calling DescribeMatchmaking
if no notifications are received within 30 seconds.
Operations related to match configurations and rule sets include:
Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams, and sets the parameters for acceptable player matches, such as minimum skill level or character type. Rule sets are used in matchmaking configurations, which define how matchmaking requests are handled. Each MatchmakingConfiguration uses one rule set; you can set up multiple rule sets to handle the scenarios that suit your game (such as for different game modes), and create a separate matchmaking configuration for each rule set. See additional information on rule set content in the MatchmakingRuleSet structure. For help creating rule sets, including useful examples, see the topic Adding FlexMatch to Your Game.
Once created, matchmaking rule sets cannot be changed or deleted, so we recommend checking the rule set syntax using ValidateMatchmakingRuleSetbefore creating the rule set.
To create a matchmaking rule set, provide the set of rules and a unique name. Rule sets must be defined in the same region as the matchmaking configuration they will be used with. Rule sets cannot be edited or deleted. If you need to change a rule set, create a new one with the necessary edits and then update matchmaking configurations to use the new rule set.
Operations related to match configurations and rule sets include:
Creates a new rule set for FlexMatch matchmaking. A rule set describes the type of match to create, such as the number and size of teams, and sets the parameters for acceptable player matches, such as minimum skill level or character type. Rule sets are used in matchmaking configurations, which define how matchmaking requests are handled. Each MatchmakingConfiguration uses one rule set; you can set up multiple rule sets to handle the scenarios that suit your game (such as for different game modes), and create a separate matchmaking configuration for each rule set. See additional information on rule set content in the MatchmakingRuleSet structure. For help creating rule sets, including useful examples, see the topic Adding FlexMatch to Your Game.
Once created, matchmaking rule sets cannot be changed or deleted, so we recommend checking the rule set syntax using ValidateMatchmakingRuleSet before creating the rule set.
To create a matchmaking rule set, provide the set of rules and a unique name. Rule sets must be defined in the same region as the matchmaking configuration they will be used with. Rule sets cannot be edited or deleted. If you need to change a rule set, create a new one with the necessary edits and then update matchmaking configurations to use the new rule set.
Operations related to match configurations and rule sets include:
Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including status and--once a successful match is made--acquire connection information for the resulting new game session.
You can use this operation to track the progress of matchmaking requests (through polling) as an alternative to using event notifications. See more details on tracking matchmaking requests through polling or notifications in StartMatchmaking.
To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.
Matchmaking-related operations include:
" + "documentation":"Retrieves one or more matchmaking tickets. Use this operation to retrieve ticket information, including status and--once a successful match is made--acquire connection information for the resulting new game session.
You can use this operation to track the progress of matchmaking requests (through polling) as an alternative to using event notifications. See more details on tracking matchmaking requests through polling or notifications in StartMatchmaking.
To request matchmaking tickets, provide a list of up to 10 ticket IDs. If the request is successful, a ticket object is returned for each requested ID that currently exists.
Matchmaking-related operations include:
" }, "DescribeMatchmakingConfigurations":{ "name":"DescribeMatchmakingConfigurations", @@ -851,7 +851,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.
To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed. For more detail how to set up backfilling, see Set up Match Backfilling.
The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once the a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.
Matchmaking-related operations include:
" + "documentation":"Finds new players to fill open slots in an existing game session. This operation can be used to add players to matched games that start with fewer than the maximum number of players or to replace players when they drop out. By backfilling with the same matchmaker used to create the original match, you ensure that new players meet the match criteria and maintain a consistent experience throughout the game session. You can backfill a match anytime after a game session has been created.
To request a match backfill, specify a unique ticket ID, the existing game session's ARN, a matchmaking configuration, and a set of data that describes all current players in the game session. If successful, a match backfill ticket is created and returned with status set to QUEUED. The ticket is placed in the matchmaker's ticket pool and processed. Track the status of the ticket to respond as needed. For more detail how to set up backfilling, see Backfill Existing Games with FlexMatch.
The process of finding backfill matches is essentially identical to the initial matchmaking process. The matchmaker searches the pool and groups tickets together to form potential matches, allowing only one backfill ticket per potential match. Once the a match is formed, the matchmaker creates player sessions for the new players. All tickets in the match are updated with the game session's connection information, and the GameSession object is updated to include matchmaker data on the new players. For more detail on how match backfill requests are processed, see How Amazon GameLift FlexMatch Works.
Matchmaking-related operations include:
" }, "StartMatchmaking":{ "name":"StartMatchmaking", @@ -867,7 +867,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules, and starts a new game for the matched players. Each matchmaking request specifies the type of match to build (team configuration, rules for an acceptable match, etc.). The request also specifies the players to find a match for and where to host the new game session for optimal performance. A matchmaking request might start with a single player or a group of players who want to play together. FlexMatch finds additional players as needed to fill the match. Match type, rules, and the queue used to place a new game session are defined in a MatchmakingConfiguration
. For complete information on setting up and using FlexMatch, see the topic Adding FlexMatch to Your Game.
To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include a set of player attributes relevant for the matchmaking configuration. If successful, a matchmaking ticket is returned with status set to QUEUED
. Track the status of the ticket to respond as needed and acquire game session connection information for successfully completed matches.
Tracking ticket status -- A couple of options are available for tracking the status of matchmaking requests:
Polling -- Call DescribeMatchmaking
. This operation returns the full ticket object, including current status and (for completed tickets) game session connection info. We recommend polling no more than once every 10 seconds.
Notifications -- Get event notifications for changes in ticket status using Amazon Simple Notification Service (SNS). Notifications are easy to set up (see CreateMatchmakingConfiguration) and typically deliver match status changes faster and more efficiently than polling. We recommend that you use polling to back up to notifications (since delivery is not guaranteed) and call DescribeMatchmaking
only when notifications are not received within 30 seconds.
Processing a matchmaking request -- FlexMatch handles a matchmaking request as follows:
Your client code submits a StartMatchmaking
request for one or more players and tracks the status of the request ticket.
FlexMatch uses this ticket and others in process to build an acceptable match. When a potential match is identified, all tickets in the proposed match are advanced to the next status.
If the match requires player acceptance (set in the matchmaking configuration), the tickets move into status REQUIRES_ACCEPTANCE
. This status triggers your client code to solicit acceptance from all players in every ticket involved in the match, and then call AcceptMatch for each player. If any player rejects or fails to accept the match before a specified timeout, the proposed match is dropped (see AcceptMatch
for more details).
Once a match is proposed and accepted, the matchmaking tickets move into status PLACING
. FlexMatch locates resources for a new game session using the game session queue (set in the matchmaking configuration) and creates the game session based on the match data.
When the match is successfully placed, the matchmaking tickets move into COMPLETED
status. Connection information (including game session endpoint and player session) is added to the matchmaking tickets. Matched players can use the connection information to join the game.
Matchmaking-related operations include:
" + "documentation":"Uses FlexMatch to create a game match for a group of players based on custom matchmaking rules, and starts a new game for the matched players. Each matchmaking request specifies the type of match to build (team configuration, rules for an acceptable match, etc.). The request also specifies the players to find a match for and where to host the new game session for optimal performance. A matchmaking request might start with a single player or a group of players who want to play together. FlexMatch finds additional players as needed to fill the match. Match type, rules, and the queue used to place a new game session are defined in a MatchmakingConfiguration
. For complete information on setting up and using FlexMatch, see the topic Adding FlexMatch to Your Game.
To start matchmaking, provide a unique ticket ID, specify a matchmaking configuration, and include the players to be matched. You must also include a set of player attributes relevant for the matchmaking configuration. If successful, a matchmaking ticket is returned with status set to QUEUED
. Track the status of the ticket to respond as needed and acquire game session connection information for successfully completed matches.
Tracking ticket status -- A couple of options are available for tracking the status of matchmaking requests:
Polling -- Call DescribeMatchmaking
. This operation returns the full ticket object, including current status and (for completed tickets) game session connection info. We recommend polling no more than once every 10 seconds.
Notifications -- Get event notifications for changes in ticket status using Amazon Simple Notification Service (SNS). Notifications are easy to set up (see CreateMatchmakingConfiguration) and typically deliver match status changes faster and more efficiently than polling. We recommend that you use polling to back up to notifications (since delivery is not guaranteed) and call DescribeMatchmaking
only when notifications are not received within 30 seconds.
Processing a matchmaking request -- FlexMatch handles a matchmaking request as follows:
Your client code submits a StartMatchmaking
request for one or more players and tracks the status of the request ticket.
FlexMatch uses this ticket and others in process to build an acceptable match. When a potential match is identified, all tickets in the proposed match are advanced to the next status.
If the match requires player acceptance (set in the matchmaking configuration), the tickets move into status REQUIRES_ACCEPTANCE
. This status triggers your client code to solicit acceptance from all players in every ticket involved in the match, and then call AcceptMatch for each player. If any player rejects or fails to accept the match before a specified timeout, the proposed match is dropped (see AcceptMatch
for more details).
Once a match is proposed and accepted, the matchmaking tickets move into status PLACING
. FlexMatch locates resources for a new game session using the game session queue (set in the matchmaking configuration) and creates the game session based on the match data.
When the match is successfully placed, the matchmaking tickets move into COMPLETED
status. Connection information (including game session endpoint and player session) is added to the matchmaking tickets. Matched players can use the connection information to join the game.
Matchmaking-related operations include:
" }, "StopGameSessionPlacement":{ "name":"StopGameSessionPlacement", @@ -899,7 +899,7 @@ {"shape":"InternalServiceException"}, {"shape":"UnsupportedRegionException"} ], - "documentation":"Cancels a matchmaking ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED
.
Matchmaking-related operations include:
" + "documentation":"Cancels a matchmaking ticket that is currently being processed. To stop the matchmaking operation, specify the ticket ID. If successful, work on the ticket is stopped, and the ticket status is changed to CANCELLED
.
Matchmaking-related operations include:
" }, "UpdateAlias":{ "name":"UpdateAlias", @@ -1401,6 +1401,10 @@ "PeerVpcId":{ "shape":"NonZeroAndMaxString", "documentation":"Unique identifier for a VPC with resources to be accessed by your Amazon GameLift fleet. The VPC must be in the same region where your fleet is deployed. To get VPC information, including IDs, use the Virtual Private Cloud service tools, including the VPC Dashboard in the AWS Management Console.
" + }, + "FleetType":{ + "shape":"FleetType", + "documentation":"Indicates whether to use on-demand instances or spot instances for this fleet. If empty, the default is ON_DEMAND. Both categories of instances use identical hardware and configurations, based on the instance type selected for this fleet. You can acquire on-demand instances at any time for a fixed price and keep them as long as you need them. Spot instances have lower prices, but spot pricing is variable, and while in use they can be interrupted (with a two-minute notification). Learn more about Amazon GameLift spot instances with at Choose Computing Resources.
" } }, "documentation":"Represents the input for a request action.
" @@ -2607,7 +2611,7 @@ }, "EventCode":{ "shape":"EventCode", - "documentation":"Type of event being logged. The following events are currently in use:
General events:
GENERIC_EVENT -- An unspecified event has occurred.
Fleet creation events:
FLEET_CREATED -- A fleet record was successfully created with a status of NEW
. Event messaging includes the fleet ID.
FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW
to DOWNLOADING
. The compressed build has started downloading to a fleet instance for installation.
FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.
FLEET_CREATION_EXTRACTING_BUILD – The game server build was successfully downloaded to an instance, and the build files are now being extracted from the uploaded build and saved to an instance. Failure at this stage prevents a fleet from moving to ACTIVE
status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.
FLEET_CREATION_RUNNING_INSTALLER – The game server build files were successfully extracted, and the Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE
status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.
FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and the Amazon GameLift is now verifying that the game server launch paths, which are specified in the fleet's run-time configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE
status. Logs for this stage list the launch paths in the run-time configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.
FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING
to VALIDATING
.
FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the run-time configuration failed because the executable specified in a launch path does not exist on the instance.
FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING
to BUILDING
.
FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the run-time configuration failed because the executable specified in a launch path failed to run on the fleet instance.
FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING
to ACTIVATING
.
FLEET_ACTIVATION_FAILED - The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. A possible reason for failure is that the game server is not reporting \"process ready\" to the Amazon GameLift service.
FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING
to ACTIVE
. The fleet is now ready to host game sessions.
VPC peering events:
FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your AWS account.
FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information (see DescribeVpcPeeringConnections) provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your AWS account. For more information on VPC peering failures, see http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html
FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.
Other fleet events:
FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.
FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.
FLEET_DELETED -- A request to delete a fleet was initiated.
Type of event being logged. The following events are currently in use:
Fleet creation events:
FLEET_CREATED -- A fleet record was successfully created with a status of NEW
. Event messaging includes the fleet ID.
FLEET_STATE_DOWNLOADING -- Fleet status changed from NEW
to DOWNLOADING
. The compressed build has started downloading to a fleet instance for installation.
FLEET_BINARY_DOWNLOAD_FAILED -- The build failed to download to the fleet instance.
FLEET_CREATION_EXTRACTING_BUILD – The game server build was successfully downloaded to an instance, and the build files are now being extracted from the uploaded build and saved to an instance. Failure at this stage prevents a fleet from moving to ACTIVE
status. Logs for this stage display a list of the files that are extracted and saved on the instance. Access the logs by using the URL in PreSignedLogUrl.
FLEET_CREATION_RUNNING_INSTALLER – The game server build files were successfully extracted, and the Amazon GameLift is now running the build's install script (if one is included). Failure in this stage prevents a fleet from moving to ACTIVE
status. Logs for this stage list the installation steps and whether or not the install completed successfully. Access the logs by using the URL in PreSignedLogUrl.
FLEET_CREATION_VALIDATING_RUNTIME_CONFIG -- The build process was successful, and the Amazon GameLift is now verifying that the game server launch paths, which are specified in the fleet's run-time configuration, exist. If any listed launch path exists, Amazon GameLift tries to launch a game server process and waits for the process to report ready. Failures in this stage prevent a fleet from moving to ACTIVE
status. Logs for this stage list the launch paths in the run-time configuration and indicate whether each is found. Access the logs by using the URL in PreSignedLogUrl.
FLEET_STATE_VALIDATING -- Fleet status changed from DOWNLOADING
to VALIDATING
.
FLEET_VALIDATION_LAUNCH_PATH_NOT_FOUND -- Validation of the run-time configuration failed because the executable specified in a launch path does not exist on the instance.
FLEET_STATE_BUILDING -- Fleet status changed from VALIDATING
to BUILDING
.
FLEET_VALIDATION_EXECUTABLE_RUNTIME_FAILURE -- Validation of the run-time configuration failed because the executable specified in a launch path failed to run on the fleet instance.
FLEET_STATE_ACTIVATING -- Fleet status changed from BUILDING
to ACTIVATING
.
FLEET_ACTIVATION_FAILED - The fleet failed to successfully complete one of the steps in the fleet activation process. This event code indicates that the game build was successfully downloaded to a fleet instance, built, and validated, but was not able to start a server process. A possible reason for failure is that the game server is not reporting \"process ready\" to the Amazon GameLift service.
FLEET_STATE_ACTIVE -- The fleet's status changed from ACTIVATING
to ACTIVE
. The fleet is now ready to host game sessions.
VPC peering events:
FLEET_VPC_PEERING_SUCCEEDED -- A VPC peering connection has been established between the VPC for an Amazon GameLift fleet and a VPC in your AWS account.
FLEET_VPC_PEERING_FAILED -- A requested VPC peering connection has failed. Event details and status information (see DescribeVpcPeeringConnections) provide additional detail. A common reason for peering failure is that the two VPCs have overlapping CIDR blocks of IPv4 addresses. To resolve this, change the CIDR block for the VPC in your AWS account. For more information on VPC peering failures, see http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide/invalid-peering-configurations.html
FLEET_VPC_PEERING_DELETED -- A VPC peering connection has been successfully deleted.
Spot instance events:
INSTANCE_INTERRUPTED -- A spot instance was interrupted by EC2 with a two-minute notification.
Other fleet events:
FLEET_SCALING_EVENT -- A change was made to the fleet's capacity settings (desired instances, minimum/maximum scaling limits). Event messaging includes the new capacity settings.
FLEET_NEW_GAME_SESSION_PROTECTION_POLICY_UPDATED -- A change was made to the fleet's game session protection policy setting. Event messaging includes both the old and new policy setting.
FLEET_DELETED -- A request to delete a fleet was initiated.
GENERIC_EVENT -- An unspecified event has occurred.
Identifier for a fleet that is unique across all regions.
" }, + "FleetType":{ + "shape":"FleetType", + "documentation":"Indicates whether the fleet uses on-demand or spot instances. A spot instance in use may be interrupted with a two-minute notification.
" + }, + "InstanceType":{ + "shape":"EC2InstanceType", + "documentation":"EC2 instance type indicating the computing resources of each instance in the fleet, including CPU, memory, storage, and networking capacity. See Amazon EC2 Instance Types for detailed descriptions.
" + }, "Description":{ "shape":"NonZeroAndMaxString", "documentation":"Human-readable description of the fleet.
" @@ -2788,6 +2801,13 @@ "TERMINATED" ] }, + "FleetType":{ + "type":"string", + "enum":[ + "ON_DEMAND", + "SPOT" + ] + }, "FleetUtilization":{ "type":"structure", "members":{ @@ -2886,6 +2906,10 @@ "shape":"GameSessionStatus", "documentation":"Current status of the game session. A game session must have an ACTIVE
status to have player sessions.
Provides additional information about game session status. INTERRUPTED
indicates that the game session was hosted on a spot instance that was reclaimed, causing the active game session to be terminated.
Set of custom properties for a game session, formatted as key:value pairs. These properties are passed to a game server process in the GameSession object with a request to start a new game session (see Start a Game Session). You can search for active game sessions based on this custom data with SearchGameSessions.
" @@ -3048,7 +3072,7 @@ }, "MatchmakerData":{ "shape":"MatchmakerData", - "documentation":"Information on the matchmaking process for this game. Data is in JSON syntax, formated as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see http://docs.aws.amazon.com/gamelift/latest/developerguide/match-server.html#match-server-data.
" + "documentation":"Information on the matchmaking process for this game. Data is in JSON syntax, formated as a string. It identifies the matchmaking configuration used to create the match, and contains data on all players assigned to the match, including player attributes and team assignments. For more details on matchmaker data, see Match Data.
" } }, "documentation":"Object that describes a StartGameSessionPlacement request. This object includes the full details of the original request plus the current status and start/end time stamps.
Game session placement-related operations include:
" @@ -3126,6 +3150,10 @@ "ERROR" ] }, + "GameSessionStatusReason":{ + "type":"string", + "enum":["INTERRUPTED"] + }, "GetGameSessionLogUrlInput":{ "type":"structure", "required":["GameSessionId"], @@ -3644,7 +3672,7 @@ }, "EndTime":{ "shape":"Timestamp", - "documentation":"Time stamp indicating when the matchmaking request stopped being processed due to successful completion, timeout, or cancellation. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").
" + "documentation":"Time stamp indicating when this matchmaking request stopped being processed due to success, failure, or cancellation. Format is a number expressed in Unix time as milliseconds (for example \"1469498468.057\").
" }, "Players":{ "shape":"PlayerList", @@ -4337,7 +4365,7 @@ }, "Players":{ "shape":"PlayerList", - "documentation":"Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.
PlayerID, PlayerAttributes, Team -- This information is maintained in the GameSession object, MatchmakerData
property, for all players who are currently assigned to the game session. The matchmaker data is in JSON syntax, formatted as a string. For more details, see Match Data.
LatencyInMs -- If the matchmaker uses player latency, include a latency value, in milliseconds, for the region that the game session is currently in. Do not include latency values for any other region.
Match information on all players that are currently assigned to the game session. This information is used by the matchmaker to find new players and add them to the existing game.
PlayerID, PlayerAttributes, Team -\\\\- This information is maintained in the GameSession object, MatchmakerData
property, for all players who are currently assigned to the game session. The matchmaker data is in JSON syntax, formatted as a string. For more details, see Match Data.
LatencyInMs -\\\\- If the matchmaker uses player latency, include a latency value, in milliseconds, for the region that the game session is currently in. Do not include latency values for any other region.
Represents the input for a request action.
" @@ -4888,5 +4916,5 @@ "min":0 } }, - "documentation":"Amazon GameLift is a managed service for developers who need a scalable, dedicated server solution for their multiplayer games. Use Amazon GameLift for these tasks: (1) set up computing resources and deploy your game servers, (2) run game sessions and get players into games, (3) automatically scale your resources to meet player demand and manage costs, and (4) track in-depth metrics on game server performance and player usage.
The Amazon GameLift service API includes two important function sets:
Manage game sessions and player access -- Retrieve information on available game sessions; create new game sessions; send player requests to join a game session.
Configure and manage game server resources -- Manage builds, fleets, queues, and aliases; set autoscaling policies; retrieve logs and metrics.
This reference guide describes the low-level service API for Amazon GameLift. You can use the API functionality with these tools:
The Amazon Web Services software development kit (AWS SDK) is available in multiple languages including C++ and C#. Use the SDK to access the API programmatically from an application, such as a game client.
The AWS command-line interface (CLI) tool is primarily useful for handling administrative actions, such as setting up and managing Amazon GameLift settings and resources. You can use the AWS CLI to manage all of your AWS services.
The AWS Management Console for Amazon GameLift provides a web interface to manage your Amazon GameLift settings and resources. The console includes a dashboard for tracking key resources, including builds and fleets, and displays usage and performance metrics for your games as customizable graphs.
Amazon GameLift Local is a tool for testing your game's integration with Amazon GameLift before deploying it on the service. This tools supports a subset of key API actions, which can be called from either the AWS CLI or programmatically. See Testing an Integration.
Learn more
Developer Guide -- Read about Amazon GameLift features and how to use them.
Tutorials -- Get started fast with walkthroughs and sample projects.
GameDev Blog -- Stay up to date with new features and techniques.
GameDev Forums -- Connect with the GameDev community.
Release notes and document history -- Stay current with updates to the Amazon GameLift service, SDKs, and documentation.
API SUMMARY
This list offers a functional overview of the Amazon GameLift service API.
Managing Games and Players
Use these actions to start new game sessions, find existing game sessions, track game session status and other information, and enable player access to game sessions.
Discover existing game sessions
SearchGameSessions -- Retrieve all available game sessions or search for game sessions that match a set of criteria.
Start new game sessions
Start new games with Queues to find the best available hosting resources across multiple regions, minimize player latency, and balance game session activity for efficiency and cost effectiveness.
StartGameSessionPlacement -- Request a new game session placement and add one or more players to it.
DescribeGameSessionPlacement -- Get details on a placement request, including status.
StopGameSessionPlacement -- Cancel a placement request.
CreateGameSession -- Start a new game session on a specific fleet. Available in Amazon GameLift Local.
Start new game sessions with FlexMatch matchmaking
StartMatchmaking -- Request matchmaking for one players or a group who want to play together.
DescribeMatchmaking -- Get details on a matchmaking request, including status.
AcceptMatch -- Register that a player accepts a proposed match, for matches that require player acceptance.
StartMatchBackfill - Request additional player matches to fill empty slots in an existing game session.
StopMatchmaking -- Cancel a matchmaking request.
Manage game session data
DescribeGameSessions -- Retrieve metadata for one or more game sessions, including length of time active and current player count. Available in Amazon GameLift Local.
DescribeGameSessionDetails -- Retrieve metadata and the game session protection setting for one or more game sessions.
UpdateGameSession -- Change game session settings, such as maximum player count and join policy.
GetGameSessionLogUrl -- Get the location of saved logs for a game session.
Manage player sessions
CreatePlayerSession -- Send a request for a player to join a game session. Available in Amazon GameLift Local.
CreatePlayerSessions -- Send a request for multiple players to join a game session. Available in Amazon GameLift Local.
DescribePlayerSessions -- Get details on player activity, including status, playing time, and player data. Available in Amazon GameLift Local.
Setting Up and Managing Game Servers
When setting up Amazon GameLift resources for your game, you first create a game build and upload it to Amazon GameLift. You can then use these actions to configure and manage a fleet of resources to run your game servers, scale capacity to meet player demand, access performance and utilization metrics, and more.
Manage game builds
CreateBuild -- Create a new build using files stored in an Amazon S3 bucket. To create a build and upload files from a local path, use the AWS CLI command upload-build
.
ListBuilds -- Get a list of all builds uploaded to a Amazon GameLift region.
DescribeBuild -- Retrieve information associated with a build.
UpdateBuild -- Change build metadata, including build name and version.
DeleteBuild -- Remove a build from Amazon GameLift.
Manage fleets
CreateFleet -- Configure and activate a new fleet to run a build's game servers.
ListFleets -- Get a list of all fleet IDs in a Amazon GameLift region (all statuses).
DeleteFleet -- Terminate a fleet that is no longer running game servers or hosting players.
View / update fleet configurations.
DescribeFleetAttributes / UpdateFleetAttributes -- View or change a fleet's metadata and settings for game session protection and resource creation limits.
DescribeFleetPortSettings / UpdateFleetPortSettings -- View or change the inbound permissions (IP address and port setting ranges) allowed for a fleet.
DescribeRuntimeConfiguration / UpdateRuntimeConfiguration -- View or change what server processes (and how many) to run on each instance in a fleet.
Control fleet capacity
DescribeEC2InstanceLimits -- Retrieve maximum number of instances allowed for the current AWS account and the current usage level.
DescribeFleetCapacity / UpdateFleetCapacity -- Retrieve the capacity settings and the current number of instances in a fleet; adjust fleet capacity settings to scale up or down.
Autoscale -- Manage autoscaling rules and apply them to a fleet.
PutScalingPolicy -- Create a new autoscaling policy, or update an existing one.
DescribeScalingPolicies -- Retrieve an existing autoscaling policy.
DeleteScalingPolicy -- Delete an autoscaling policy and stop it from affecting a fleet's capacity.
Manage VPC peering connections for fleets
CreateVpcPeeringAuthorization -- Authorize a peering connection to one of your VPCs.
DescribeVpcPeeringAuthorizations -- Retrieve valid peering connection authorizations.
DeleteVpcPeeringAuthorization -- Delete a peering connection authorization.
CreateVpcPeeringConnection -- Establish a peering connection between the VPC for a Amazon GameLift fleet and one of your VPCs.
DescribeVpcPeeringConnections -- Retrieve information on active or pending VPC peering connections with a Amazon GameLift fleet.
DeleteVpcPeeringConnection -- Delete a VPC peering connection with a Amazon GameLift fleet.
Access fleet activity statistics
DescribeFleetUtilization -- Get current data on the number of server processes, game sessions, and players currently active on a fleet.
DescribeFleetEvents -- Get a fleet's logged events for a specified time span.
DescribeGameSessions -- Retrieve metadata associated with one or more game sessions, including length of time active and current player count.
Remotely access an instance
DescribeInstances -- Get information on each instance in a fleet, including instance ID, IP address, and status.
GetInstanceAccess -- Request access credentials needed to remotely connect to a specified instance in a fleet.
Manage fleet aliases
CreateAlias -- Define a new alias and optionally assign it to a fleet.
ListAliases -- Get all fleet aliases defined in a Amazon GameLift region.
DescribeAlias -- Retrieve information on an existing alias.
UpdateAlias -- Change settings for a alias, such as redirecting it from one fleet to another.
DeleteAlias -- Remove an alias from the region.
ResolveAlias -- Get the fleet ID that a specified alias points to.
Manage game session queues
CreateGameSessionQueue -- Create a queue for processing requests for new game sessions.
DescribeGameSessionQueues -- Retrieve game session queues defined in a Amazon GameLift region.
UpdateGameSessionQueue -- Change the configuration of a game session queue.
DeleteGameSessionQueue -- Remove a game session queue from the region.
Manage FlexMatch resources
CreateMatchmakingConfiguration -- Create a matchmaking configuration with instructions for building a player group and placing in a new game session.
DescribeMatchmakingConfigurations -- Retrieve matchmaking configurations defined a Amazon GameLift region.
UpdateMatchmakingConfiguration -- Change settings for matchmaking configuration. queue.
DeleteMatchmakingConfiguration -- Remove a matchmaking configuration from the region.
CreateMatchmakingRuleSet -- Create a set of rules to use when searching for player matches.
DescribeMatchmakingRuleSets -- Retrieve matchmaking rule sets defined in a Amazon GameLift region.
ValidateMatchmakingRuleSet -- Verify syntax for a set of matchmaking rules.
Amazon GameLift is a managed service for developers who need a scalable, dedicated server solution for their multiplayer games. Use Amazon GameLift for these tasks: (1) set up computing resources and deploy your game servers, (2) run game sessions and get players into games, (3) automatically scale your resources to meet player demand and manage costs, and (4) track in-depth metrics on game server performance and player usage.
The Amazon GameLift service API includes two important function sets:
Manage game sessions and player access -- Retrieve information on available game sessions; create new game sessions; send player requests to join a game session.
Configure and manage game server resources -- Manage builds, fleets, queues, and aliases; set autoscaling policies; retrieve logs and metrics.
This reference guide describes the low-level service API for Amazon GameLift. You can use the API functionality with these tools:
The Amazon Web Services software development kit (AWS SDK) is available in multiple languages including C++ and C#. Use the SDK to access the API programmatically from an application, such as a game client.
The AWS command-line interface (CLI) tool is primarily useful for handling administrative actions, such as setting up and managing Amazon GameLift settings and resources. You can use the AWS CLI to manage all of your AWS services.
The AWS Management Console for Amazon GameLift provides a web interface to manage your Amazon GameLift settings and resources. The console includes a dashboard for tracking key resources, including builds and fleets, and displays usage and performance metrics for your games as customizable graphs.
Amazon GameLift Local is a tool for testing your game's integration with Amazon GameLift before deploying it on the service. This tool supports a subset of key API actions, which can be called from either the AWS CLI or programmatically. See Testing an Integration.
Learn more
Developer Guide -- Read about Amazon GameLift features and how to use them.
Tutorials -- Get started fast with walkthroughs and sample projects.
GameDev Blog -- Stay up to date with new features and techniques.
GameDev Forums -- Connect with the GameDev community.
Release notes and document history -- Stay current with updates to the Amazon GameLift service, SDKs, and documentation.
API SUMMARY
This list offers a functional overview of the Amazon GameLift service API.
Managing Games and Players
Use these actions to start new game sessions, find existing game sessions, track game session status and other information, and enable player access to game sessions.
Discover existing game sessions
SearchGameSessions -- Retrieve all available game sessions or search for game sessions that match a set of criteria.
Start new game sessions
Start new games with Queues to find the best available hosting resources across multiple regions, minimize player latency, and balance game session activity for efficiency and cost effectiveness.
StartGameSessionPlacement -- Request a new game session placement and add one or more players to it.
DescribeGameSessionPlacement -- Get details on a placement request, including status.
StopGameSessionPlacement -- Cancel a placement request.
CreateGameSession -- Start a new game session on a specific fleet. Available in Amazon GameLift Local.
Match players to game sessions with FlexMatch matchmaking
StartMatchmaking -- Request matchmaking for one player or a group who want to play together.
StartMatchBackfill - Request additional player matches to fill empty slots in an existing game session.
DescribeMatchmaking -- Get details on a matchmaking request, including status.
AcceptMatch -- Register that a player accepts a proposed match, for matches that require player acceptance.
StopMatchmaking -- Cancel a matchmaking request.
Manage game session data
DescribeGameSessions -- Retrieve metadata for one or more game sessions, including length of time active and current player count. Available in Amazon GameLift Local.
DescribeGameSessionDetails -- Retrieve metadata and the game session protection setting for one or more game sessions.
UpdateGameSession -- Change game session settings, such as maximum player count and join policy.
GetGameSessionLogUrl -- Get the location of saved logs for a game session.
Manage player sessions
CreatePlayerSession -- Send a request for a player to join a game session. Available in Amazon GameLift Local.
CreatePlayerSessions -- Send a request for multiple players to join a game session. Available in Amazon GameLift Local.
DescribePlayerSessions -- Get details on player activity, including status, playing time, and player data. Available in Amazon GameLift Local.
Setting Up and Managing Game Servers
When setting up Amazon GameLift resources for your game, you first create a game build and upload it to Amazon GameLift. You can then use these actions to configure and manage a fleet of resources to run your game servers, scale capacity to meet player demand, access performance and utilization metrics, and more.
Manage game builds
CreateBuild -- Create a new build using files stored in an Amazon S3 bucket. To create a build and upload files from a local path, use the AWS CLI command upload-build
.
ListBuilds -- Get a list of all builds uploaded to an Amazon GameLift region.
DescribeBuild -- Retrieve information associated with a build.
UpdateBuild -- Change build metadata, including build name and version.
DeleteBuild -- Remove a build from Amazon GameLift.
Manage fleets
CreateFleet -- Configure and activate a new fleet to run a build's game servers.
ListFleets -- Get a list of all fleet IDs in an Amazon GameLift region (all statuses).
DeleteFleet -- Terminate a fleet that is no longer running game servers or hosting players.
View / update fleet configurations.
DescribeFleetAttributes / UpdateFleetAttributes -- View or change a fleet's metadata and settings for game session protection and resource creation limits.
DescribeFleetPortSettings / UpdateFleetPortSettings -- View or change the inbound permissions (IP address and port setting ranges) allowed for a fleet.
DescribeRuntimeConfiguration / UpdateRuntimeConfiguration -- View or change what server processes (and how many) to run on each instance in a fleet.
Control fleet capacity
DescribeEC2InstanceLimits -- Retrieve maximum number of instances allowed for the current AWS account and the current usage level.
DescribeFleetCapacity / UpdateFleetCapacity -- Retrieve the capacity settings and the current number of instances in a fleet; adjust fleet capacity settings to scale up or down.
Autoscale -- Manage autoscaling rules and apply them to a fleet.
PutScalingPolicy -- Create a new autoscaling policy, or update an existing one.
DescribeScalingPolicies -- Retrieve an existing autoscaling policy.
DeleteScalingPolicy -- Delete an autoscaling policy and stop it from affecting a fleet's capacity.
Manage VPC peering connections for fleets
CreateVpcPeeringAuthorization -- Authorize a peering connection to one of your VPCs.
DescribeVpcPeeringAuthorizations -- Retrieve valid peering connection authorizations.
DeleteVpcPeeringAuthorization -- Delete a peering connection authorization.
CreateVpcPeeringConnection -- Establish a peering connection between the VPC for an Amazon GameLift fleet and one of your VPCs.
DescribeVpcPeeringConnections -- Retrieve information on active or pending VPC peering connections with an Amazon GameLift fleet.
DeleteVpcPeeringConnection -- Delete a VPC peering connection with an Amazon GameLift fleet.
Access fleet activity statistics
DescribeFleetUtilization -- Get current data on the number of server processes, game sessions, and players currently active on a fleet.
DescribeFleetEvents -- Get a fleet's logged events for a specified time span.
DescribeGameSessions -- Retrieve metadata associated with one or more game sessions, including length of time active and current player count.
Remotely access an instance
DescribeInstances -- Get information on each instance in a fleet, including instance ID, IP address, and status.
GetInstanceAccess -- Request access credentials needed to remotely connect to a specified instance in a fleet.
Manage fleet aliases
CreateAlias -- Define a new alias and optionally assign it to a fleet.
ListAliases -- Get all fleet aliases defined in an Amazon GameLift region.
DescribeAlias -- Retrieve information on an existing alias.
UpdateAlias -- Change settings for an alias, such as redirecting it from one fleet to another.
DeleteAlias -- Remove an alias from the region.
ResolveAlias -- Get the fleet ID that a specified alias points to.
Manage game session queues
CreateGameSessionQueue -- Create a queue for processing requests for new game sessions.
DescribeGameSessionQueues -- Retrieve game session queues defined in an Amazon GameLift region.
UpdateGameSessionQueue -- Change the configuration of a game session queue.
DeleteGameSessionQueue -- Remove a game session queue from the region.
Manage FlexMatch resources
CreateMatchmakingConfiguration -- Create a matchmaking configuration with instructions for building a player group and placing in a new game session.
DescribeMatchmakingConfigurations -- Retrieve matchmaking configurations defined in an Amazon GameLift region.
UpdateMatchmakingConfiguration -- Change settings for a matchmaking configuration.
DeleteMatchmakingConfiguration -- Remove a matchmaking configuration from the region.
CreateMatchmakingRuleSet -- Create a set of rules to use when searching for player matches.
DescribeMatchmakingRuleSets -- Retrieve matchmaking rule sets defined in an Amazon GameLift region.
ValidateMatchmakingRuleSet -- Verify syntax for a set of matchmaking rules.
Optional. If the encryption type is aws:kms
, you can use this value to specify the encryption context for the restore results.
Optional. If the encryption type is aws:kms
, you can use this value to specify the encryption context for the job results.
Contains information about the encryption used to store the job results in Amazon S3.
" @@ -1360,7 +1360,7 @@ }, "Tier":{ "shape":"string", - "documentation":"The retrieval option to use for the archive retrieval. Valid values are Expedited
, Standard
, or Bulk
. Standard
is the default.
The tier to use for a select or an archive retrieval. Valid values are Expedited
, Standard
, or Bulk
. Standard
is the default.
Contains the parameters that define a select job.
" + "documentation":"Contains the parameters used for a select.
" }, "OutputLocation":{ "shape":"OutputLocation", @@ -1686,7 +1686,7 @@ }, "Tier":{ "shape":"string", - "documentation":"The retrieval option to use for a select or archive retrieval job. Valid values are Expedited
, Standard
, or Bulk
. Standard
is the default.
The tier to use for a select or an archive retrieval job. Valid values are Expedited
, Standard
, or Bulk
. Standard
is the default.
Describes an S3 location that will receive the results of the restore request.
" + "documentation":"Describes an S3 location that will receive the results of the job request.
" } }, "documentation":"Contains information about the location where the select job results are stored.
" @@ -2214,11 +2214,11 @@ "members":{ "BucketName":{ "shape":"string", - "documentation":"The name of the bucket where the restore results are stored.
" + "documentation":"The name of the Amazon S3 bucket where the job results are stored.
" }, "Prefix":{ "shape":"string", - "documentation":"The prefix that is prepended to the restore results for this request.
" + "documentation":"The prefix that is prepended to the results for this request.
" }, "Encryption":{ "shape":"Encryption", @@ -2226,7 +2226,7 @@ }, "CannedACL":{ "shape":"CannedACL", - "documentation":"The canned ACL to apply to the restore results.
" + "documentation":"The canned access control list (ACL) to apply to the job results.
" }, "AccessControlList":{ "shape":"AccessControlPolicyList", @@ -2234,15 +2234,15 @@ }, "Tagging":{ "shape":"hashmap", - "documentation":"The tag-set that is applied to the restore results.
" + "documentation":"The tag-set that is applied to the job results.
" }, "UserMetadata":{ "shape":"hashmap", - "documentation":"A map of metadata to store with the restore results in Amazon S3.
" + "documentation":"A map of metadata to store with the job results in Amazon S3.
" }, "StorageClass":{ "shape":"StorageClass", - "documentation":"The storage class used to store the restore results.
" + "documentation":"The storage class used to store the job results.
" } }, "documentation":"Contains information about the location in Amazon S3 where the select job results are stored.
" diff --git a/botocore/data/guardduty/2017-11-28/service-2.json b/botocore/data/guardduty/2017-11-28/service-2.json index 39d0dbf9..e52a0afb 100644 --- a/botocore/data/guardduty/2017-11-28/service-2.json +++ b/botocore/data/guardduty/2017-11-28/service-2.json @@ -973,6 +973,11 @@ "shape" : "NetworkConnectionAction", "locationName" : "networkConnectionAction", "documentation" : "Information about the NETWORK_CONNECTION action described in this finding." + }, + "PortProbeAction" : { + "shape" : "PortProbeAction", + "locationName" : "portProbeAction", + "documentation" : "Information about the PORT_PROBE action described in this finding." } }, "documentation" : "Information about the activity described in a finding." @@ -2329,6 +2334,12 @@ } } }, + "ListOfPortProbeDetail" : { + "type" : "list", + "member" : { + "shape" : "PortProbeDetail" + } + }, "ListThreatIntelSetsRequest" : { "type" : "structure", "members" : { @@ -2626,6 +2637,38 @@ }, "documentation" : "ISP Organization information of the remote IP address." }, + "PortProbeAction" : { + "type" : "structure", + "members" : { + "Blocked" : { + "shape" : "__boolean", + "locationName" : "blocked", + "documentation" : "Port probe blocked information." + }, + "PortProbeDetails" : { + "shape" : "ListOfPortProbeDetail", + "locationName" : "portProbeDetails", + "documentation" : "A list of port probe details objects." + } + }, + "documentation" : "Information about the PORT_PROBE action described in this finding." + }, + "PortProbeDetail" : { + "type" : "structure", + "members" : { + "LocalPortDetails" : { + "shape" : "LocalPortDetails", + "locationName" : "localPortDetails", + "documentation" : "Local port information of the connection." + }, + "RemoteIpDetails" : { + "shape" : "RemoteIpDetails", + "locationName" : "remoteIpDetails", + "documentation" : "Remote IP information of the connection." + } + }, + "documentation" : "Details about the port probe finding." 
+ }, "PrivateDnsName" : { "type" : "string", "documentation" : "Private DNS name of the EC2 instance." diff --git a/botocore/data/kms/2014-11-01/service-2.json b/botocore/data/kms/2014-11-01/service-2.json index 6d4238af..79be1560 100644 --- a/botocore/data/kms/2014-11-01/service-2.json +++ b/botocore/data/kms/2014-11-01/service-2.json @@ -519,6 +519,7 @@ }, "input":{"shape":"RetireGrantRequest"}, "errors":[ + {"shape":"InvalidArnException"}, {"shape":"InvalidGrantTokenException"}, {"shape":"InvalidGrantIdException"}, {"shape":"NotFoundException"}, diff --git a/botocore/data/lex-models/2017-04-19/service-2.json b/botocore/data/lex-models/2017-04-19/service-2.json index c22a0a3b..21e445c4 100644 --- a/botocore/data/lex-models/2017-04-19/service-2.json +++ b/botocore/data/lex-models/2017-04-19/service-2.json @@ -411,6 +411,23 @@ ], "documentation":"Exports the contents of a Amazon Lex resource in a specified format.
" }, + "GetImport":{ + "name":"GetImport", + "http":{ + "method":"GET", + "requestUri":"/imports/{importId}", + "responseCode":200 + }, + "input":{"shape":"GetImportRequest"}, + "output":{"shape":"GetImportResponse"}, + "errors":[ + {"shape":"NotFoundException"}, + {"shape":"LimitExceededException"}, + {"shape":"InternalFailureException"}, + {"shape":"BadRequestException"} + ], + "documentation":"Gets information about an import job started with the StartImport
operation.
Creates a custom slot type or replaces an existing custom slot type.
To create a custom slot type, specify a name for the slot type and a set of enumeration values, which are the values that a slot of this type can assume. For more information, see how-it-works.
If you specify the name of an existing slot type, the fields in the request replace the existing values in the $LATEST
version of the slot type. Amazon Lex removes the fields that you don't provide in the request. If you don't specify required fields, Amazon Lex throws an exception. When you update the $LATEST
version of a slot type, if a bot uses the $LATEST
version of an intent that contains the slot type, the bot's status
field is set to NOT_BUILT
.
This operation requires permissions for the lex:PutSlotType
action.
Starts a job to import a resource to Amazon Lex.
" } }, "shapes":{ @@ -624,6 +657,7 @@ "error":{"httpStatusCode":400}, "exception":true }, + "Blob":{"type":"blob"}, "Boolean":{"type":"boolean"}, "BotAliasMetadata":{ "type":"structure", @@ -1305,7 +1339,10 @@ }, "ExportType":{ "type":"string", - "enum":["ALEXA_SKILLS_KIT"] + "enum":[ + "ALEXA_SKILLS_KIT", + "LEX" + ] }, "FollowUpPrompt":{ "type":"structure", @@ -1905,6 +1942,51 @@ } } }, + "GetImportRequest":{ + "type":"structure", + "required":["importId"], + "members":{ + "importId":{ + "shape":"String", + "documentation":"The identifier of the import job information to return.
", + "location":"uri", + "locationName":"importId" + } + } + }, + "GetImportResponse":{ + "type":"structure", + "members":{ + "name":{ + "shape":"Name", + "documentation":"The name given to the import job.
" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"The type of resource imported.
" + }, + "mergeStrategy":{ + "shape":"MergeStrategy", + "documentation":"The action taken when there was a conflict between an existing resource and a resource in the import file.
" + }, + "importId":{ + "shape":"String", + "documentation":"The identifier for the specific import job.
" + }, + "importStatus":{ + "shape":"ImportStatus", + "documentation":"The status of the import job. If the status is FAILED
, you can get the reason for the failure from the failureReason
field.
A string that describes why an import job failed to complete.
" + }, + "createdDate":{ + "shape":"Timestamp", + "documentation":"A timestamp for the date and time that the import job was created.
" + } + } + }, "GetIntentRequest":{ "type":"structure", "required":[ @@ -2242,6 +2324,14 @@ "max":5, "min":1 }, + "ImportStatus":{ + "type":"string", + "enum":[ + "IN_PROGRESS", + "COMPLETE", + "FAILED" + ] + }, "Intent":{ "type":"structure", "required":[ @@ -2346,7 +2436,11 @@ }, "Locale":{ "type":"string", - "enum":["en-US"] + "enum":[ + "en-US", + "en-GB", + "de-DE" + ] }, "LocaleList":{ "type":"list", @@ -2358,6 +2452,13 @@ "max":50, "min":1 }, + "MergeStrategy":{ + "type":"string", + "enum":[ + "OVERWRITE_LATEST", + "FAIL_ON_CONFLICT" + ] + }, "Message":{ "type":"structure", "required":[ @@ -2393,7 +2494,7 @@ }, "Name":{ "type":"string", - "max":64, + "max":100, "min":1, "pattern":"[a-zA-Z_]+" }, @@ -2572,7 +2673,7 @@ }, "processBehavior":{ "shape":"ProcessBehavior", - "documentation":"If you set the processBehavior
element to Build
, Amazon Lex builds the bot so that it can be run. If you set the element to Save
Amazon Lex saves the bot, but doesn't build it.
If you don't specify this value, the default value is Save
.
If you set the processBehavior
element to BUILD
, Amazon Lex builds the bot so that it can be run. If you set the element to SAVE
Amazon Lex saves the bot, but doesn't build it.
If you don't specify this value, the default value is BUILD
.
For each Amazon Lex bot created with the Amazon Lex Model Building Service, you must specify whether your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to the Children's Online Privacy Protection Act (COPPA) by specifying true
or false
in the childDirected
field. By specifying true
in the childDirected
field, you confirm that your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. By specifying false
in the childDirected
field, you confirm that your use of Amazon Lex is not related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. You may not specify a default value for the childDirected
field that does not accurately reflect whether your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA.
If your use of Amazon Lex relates to a website, program, or other application that is directed in whole or in part, to children under age 13, you must obtain any required verifiable parental consent under COPPA. For information regarding the use of Amazon Lex in connection with websites, programs, or other applications that are directed or targeted, in whole or in part, to children under age 13, see the Amazon Lex FAQ.
" - } + }, + "createVersion":{"shape":"Boolean"} } }, "PutBotResponse":{ @@ -2646,7 +2748,8 @@ "childDirected":{ "shape":"Boolean", "documentation":"For each Amazon Lex bot created with the Amazon Lex Model Building Service, you must specify whether your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to the Children's Online Privacy Protection Act (COPPA) by specifying true
or false
in the childDirected
field. By specifying true
in the childDirected
field, you confirm that your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. By specifying false
in the childDirected
field, you confirm that your use of Amazon Lex is not related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA. You may not specify a default value for the childDirected
field that does not accurately reflect whether your use of Amazon Lex is related to a website, program, or other application that is directed or targeted, in whole or in part, to children under age 13 and subject to COPPA.
If your use of Amazon Lex relates to a website, program, or other application that is directed in whole or in part, to children under age 13, you must obtain any required verifiable parental consent under COPPA. For information regarding the use of Amazon Lex in connection with websites, programs, or other applications that are directed or targeted, in whole or in part, to children under age 13, see the Amazon Lex FAQ.
" - } + }, + "createVersion":{"shape":"Boolean"} } }, "PutIntentRequest":{ @@ -2702,7 +2805,8 @@ "checksum":{ "shape":"String", "documentation":"Identifies a specific revision of the $LATEST
version.
When you create a new intent, leave the checksum
field blank. If you specify a checksum you get a BadRequestException
exception.
When you want to update a intent, set the checksum
field to the checksum of the most recent revision of the $LATEST
version. If you don't specify the checksum
field, or if the checksum does not match the $LATEST
version, you get a PreconditionFailedException
exception.
Checksum of the $LATEST
version of the intent created or updated.
Determines the slot resolution strategy that Amazon Lex uses to return slot type values. The field can be set to one of the following values:
ORIGINAL_VALUE
- Returns the value entered by the user, if the user value is similar to the slot value.
TOP_RESOLUTION
- If there is a resolution list for the slot, return the first value in the resolution list as the slot type value. If there is no resolution list, null is returned.
If you don't specify the valueSelectionStrategy
, the default is ORIGINAL_VALUE
.
The slot resolution strategy that Amazon Lex uses to determine the value of the slot. For more information, see PutSlotType.
" - } + }, + "createVersion":{"shape":"Boolean"} } }, "ReferenceType":{ @@ -2870,7 +2977,11 @@ }, "ResourceType":{ "type":"string", - "enum":["BOT"] + "enum":[ + "BOT", + "INTENT", + "SLOT_TYPE" + ] }, "ResponseCard":{ "type":"string", @@ -2996,6 +3107,57 @@ "TOP_RESOLUTION" ] }, + "StartImportRequest":{ + "type":"structure", + "required":[ + "payload", + "resourceType", + "mergeStrategy" + ], + "members":{ + "payload":{ + "shape":"Blob", + "documentation":"A zip archive in binary format. The archive should contain one file, a JSON file containing the resource to import. The resource should match the type specified in the resourceType
field.
Specifies the type of resource to export. Each resource also exports any resources that it depends on.
A bot exports dependent intents.
An intent exports dependent slot types.
Specifies the action that the StartImport
operation should take when there is an existing resource with the same name.
FAIL_ON_CONFLICT - The import operation is stopped on the first conflict between a resource in the import file and an existing resource. The name of the resource causing the conflict is in the failureReason
field of the response to the GetImport
operation.
OVERWRITE_LATEST - The import operation proceeds even if there is a conflict with an existing resource. The $LATEST version of the existing resource is overwritten with the data from the import file.
The name given to the import job.
" + }, + "resourceType":{ + "shape":"ResourceType", + "documentation":"The type of resource to import.
" + }, + "mergeStrategy":{ + "shape":"MergeStrategy", + "documentation":"The action to take when there is a merge conflict.
" + }, + "importId":{ + "shape":"String", + "documentation":"The identifier for the specific import job.
" + }, + "importStatus":{ + "shape":"ImportStatus", + "documentation":"The status of the import job. If the status is FAILED
, you can get the reason for the failure using the GetImport
operation.
A timestamp for the date and time that the import job was requested.
" + } + } + }, "Statement":{ "type":"structure", "required":["messages"], @@ -3028,6 +3190,10 @@ ] }, "String":{"type":"string"}, + "StringList":{ + "type":"list", + "member":{"shape":"String"} + }, "SynonymList":{ "type":"list", "member":{"shape":"Value"} diff --git a/botocore/data/mediaconvert/2017-08-29/service-2.json b/botocore/data/mediaconvert/2017-08-29/service-2.json index 9a6674ea..97ef8b8b 100644 --- a/botocore/data/mediaconvert/2017-08-29/service-2.json +++ b/botocore/data/mediaconvert/2017-08-29/service-2.json @@ -1138,8 +1138,7 @@ "members": { "AudioNormalizationSettings": { "shape": "AudioNormalizationSettings", - "locationName": "audioNormalizationSettings", - "documentation": "Settings for Audio Normalization" + "locationName": "audioNormalizationSettings" }, "AudioSourceName": { "shape": "__string", @@ -1263,7 +1262,7 @@ "ExternalAudioFileInput": { "shape": "__string", "locationName": "externalAudioFileInput", - "documentation": "Specifies audio data from an external file source. Auto populated when Infer External Filename is checked" + "documentation": "Specifies audio data from an external file source." }, "LanguageCode": { "shape": "LanguageCode", @@ -1566,8 +1565,7 @@ }, "DestinationType": { "shape": "CaptionDestinationType", - "locationName": "destinationType", - "documentation": "Type of Caption output, including Burn-In, Embedded, SCC, SRT, TTML, WebVTT, DVB-Sub, Teletext." + "locationName": "destinationType" }, "DvbSubDestinationSettings": { "shape": "DvbSubDestinationSettings", @@ -1583,14 +1581,14 @@ }, "TtmlDestinationSettings": { "shape": "TtmlDestinationSettings", - "locationName": "ttmlDestinationSettings", - "documentation": "Settings specific to TTML caption outputs, including Pass style information (TtmlStylePassthrough)." + "locationName": "ttmlDestinationSettings" } }, "documentation": "Specific settings required by destination type. 
Note that burnin_destination_settings are not available if the source of the caption data is Embedded or Teletext." }, "CaptionDestinationType": { "type": "string", + "documentation": "Type of Caption output, including Burn-In, Embedded, SCC, SRT, TTML, WebVTT, DVB-Sub, Teletext.", "enum": [ "BURN_IN", "DVB_SUB", @@ -2680,7 +2678,7 @@ "SourceFile": { "shape": "__string", "locationName": "sourceFile", - "documentation": "External caption file used for loading captions. Accepted file extensions are 'scc', 'ttml', 'dfxp', 'stl', 'srt', and 'smi'. Auto-populated when Infer External Filename is checked." + "documentation": "External caption file used for loading captions. Accepted file extensions are 'scc', 'ttml', 'dfxp', 'stl', 'srt', and 'smi'." }, "TimeDelta": { "shape": "__integer", @@ -4401,7 +4399,7 @@ "Type": { "shape": "Type", "locationName": "type", - "documentation": "A job template can be of two types: system or custom. System or built-in job templates can’t be modified or deleted by the user." + "documentation": "A job template can be of two types: system or custom. System or built-in job templates can't be modified or deleted by the user." } }, "documentation": "A job template is a pre-made set of encoding instructions that you can use to quickly create a job." @@ -5009,6 +5007,14 @@ "EXCLUDE" ] }, + "M2tsNielsenId3": { + "type": "string", + "documentation": "If INSERT, Nielsen inaudible tones for media tracking will be detected in the input audio and an equivalent ID3 tag will be inserted in the output.", + "enum": [ + "INSERT", + "NONE" + ] + }, "M2tsPcrControl": { "type": "string", "documentation": "When set to PCR_EVERY_PES_PACKET, a Program Clock Reference value is inserted for every Packetized Elementary Stream (PES) header. 
This is effective only when the PCR PID is the same as the video or audio elementary stream.", @@ -5027,7 +5033,7 @@ }, "M2tsScte35Source": { "type": "string", - "documentation": "Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from input to output. This is only available for certain containers.", + "documentation": "Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from input to output.", "enum": [ "PASSTHROUGH", "NONE" @@ -5068,7 +5074,7 @@ "AudioPids": { "shape": "ListOf__integer", "locationName": "audioPids", - "documentation": "Packet Identifier (PID) of the elementary audio stream(s) in the transport stream. Multiple values are accepted, and can be entered in ranges and/or by comma separation. Can be entered as decimal or hexadecimal values." + "documentation": "Packet Identifier (PID) of the elementary audio stream(s) in the transport stream. Multiple values are accepted, and can be entered in ranges and/or by comma separation." }, "Bitrate": { "shape": "__integer", @@ -5090,7 +5096,7 @@ "DvbSubPids": { "shape": "ListOf__integer", "locationName": "dvbSubPids", - "documentation": "Packet Identifier (PID) for input source DVB Subtitle data to this output. Multiple values are accepted, and can be entered in ranges and/or by comma separation. Can be entered as decimal or hexadecimal values." + "documentation": "Packet Identifier (PID) for input source DVB Subtitle data to this output. Multiple values are accepted, and can be entered in ranges and/or by comma separation." }, "DvbTdtSettings": { "shape": "DvbTdtSettings", @@ -5099,7 +5105,7 @@ "DvbTeletextPid": { "shape": "__integer", "locationName": "dvbTeletextPid", - "documentation": "Packet Identifier (PID) for input source DVB Teletext data to this output. Can be entered as a decimal or hexadecimal value." + "documentation": "Packet Identifier (PID) for input source DVB Teletext data to this output." 
}, "EbpAudioInterval": { "shape": "M2tsEbpAudioInterval", @@ -5128,6 +5134,10 @@ "locationName": "minEbpInterval", "documentation": "When set, enforces that Encoder Boundary Points do not come within the specified time interval of each other by looking ahead at input video. If another EBP is going to come in within the specified time interval, the current EBP is not emitted, and the segment is \"stretched\" to the next marker. The lookahead value does not add latency to the system. The Live Event must be configured elsewhere to create sufficient latency to make the lookahead accurate." }, + "NielsenId3": { + "shape": "M2tsNielsenId3", + "locationName": "nielsenId3" + }, "NullPacketBitrate": { "shape": "__double", "locationName": "nullPacketBitrate", @@ -5145,7 +5155,7 @@ "PcrPid": { "shape": "__integer", "locationName": "pcrPid", - "documentation": "Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport stream. When no value is given, the encoder will assign the same value as the Video PID. Can be entered as a decimal or hexadecimal value." + "documentation": "Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport stream. When no value is given, the encoder will assign the same value as the Video PID." }, "PmtInterval": { "shape": "__integer", @@ -5155,12 +5165,12 @@ "PmtPid": { "shape": "__integer", "locationName": "pmtPid", - "documentation": "Packet Identifier (PID) for the Program Map Table (PMT) in the transport stream. Can be entered as a decimal or hexadecimal value." + "documentation": "Packet Identifier (PID) for the Program Map Table (PMT) in the transport stream." }, "PrivateMetadataPid": { "shape": "__integer", "locationName": "privateMetadataPid", - "documentation": "Packet Identifier (PID) of the private metadata stream in the transport stream. Can be entered as a decimal or hexadecimal value." + "documentation": "Packet Identifier (PID) of the private metadata stream in the transport stream." 
}, "ProgramNumber": { "shape": "__integer", @@ -5174,7 +5184,7 @@ "Scte35Pid": { "shape": "__integer", "locationName": "scte35Pid", - "documentation": "Packet Identifier (PID) of the SCTE-35 stream in the transport stream. Can be entered as a decimal or hexadecimal value." + "documentation": "Packet Identifier (PID) of the SCTE-35 stream in the transport stream." }, "Scte35Source": { "shape": "M2tsScte35Source", @@ -5193,6 +5203,11 @@ "locationName": "segmentationTime", "documentation": "The length in seconds of each segment. Required unless markers is set to _none_." }, + "TimedMetadataPid": { + "shape": "__integer", + "locationName": "timedMetadataPid", + "documentation": "Packet Identifier (PID) of the timed metadata stream in the transport stream." + }, "TransportStreamId": { "shape": "__integer", "locationName": "transportStreamId", @@ -5201,11 +5216,19 @@ "VideoPid": { "shape": "__integer", "locationName": "videoPid", - "documentation": "Packet Identifier (PID) of the elementary video stream in the transport stream. Can be entered as a decimal or hexadecimal value." + "documentation": "Packet Identifier (PID) of the elementary video stream in the transport stream." } }, "documentation": "Settings for M2TS Container." }, + "M3u8NielsenId3": { + "type": "string", + "documentation": "If INSERT, Nielsen inaudible tones for media tracking will be detected in the input audio and an equivalent ID3 tag will be inserted in the output.", + "enum": [ + "INSERT", + "NONE" + ] + }, "M3u8PcrControl": { "type": "string", "documentation": "When set to PCR_EVERY_PES_PACKET a Program Clock Reference value is inserted for every Packetized Elementary Stream (PES) header. This parameter is effective only when the PCR PID is the same as the video or audio elementary stream.", @@ -5216,7 +5239,7 @@ }, "M3u8Scte35Source": { "type": "string", - "documentation": "Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from input to output. 
This is only available for certain containers.", + "documentation": "Enables SCTE-35 passthrough (scte35Source) to pass any SCTE-35 signals from input to output.", "enum": [ "PASSTHROUGH", "NONE" @@ -5233,7 +5256,11 @@ "AudioPids": { "shape": "ListOf__integer", "locationName": "audioPids", - "documentation": "Packet Identifier (PID) of the elementary audio stream(s) in the transport stream. Multiple values are accepted, and can be entered in ranges and/or by comma separation. Can be entered as decimal or hexadecimal values." + "documentation": "Packet Identifier (PID) of the elementary audio stream(s) in the transport stream. Multiple values are accepted, and can be entered in ranges and/or by comma separation." + }, + "NielsenId3": { + "shape": "M3u8NielsenId3", + "locationName": "nielsenId3" }, "PatInterval": { "shape": "__integer", @@ -5247,7 +5274,7 @@ "PcrPid": { "shape": "__integer", "locationName": "pcrPid", - "documentation": "Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport stream. When no value is given, the encoder will assign the same value as the Video PID. Can be entered as a decimal or hexadecimal value." + "documentation": "Packet Identifier (PID) of the Program Clock Reference (PCR) in the transport stream. When no value is given, the encoder will assign the same value as the Video PID." }, "PmtInterval": { "shape": "__integer", @@ -5257,12 +5284,12 @@ "PmtPid": { "shape": "__integer", "locationName": "pmtPid", - "documentation": "Packet Identifier (PID) for the Program Map Table (PMT) in the transport stream. Can be entered as a decimal or hexadecimal value." + "documentation": "Packet Identifier (PID) for the Program Map Table (PMT) in the transport stream." }, "PrivateMetadataPid": { "shape": "__integer", "locationName": "privateMetadataPid", - "documentation": "Packet Identifier (PID) of the private metadata stream in the transport stream. Can be entered as a decimal or hexadecimal value." 
+ "documentation": "Packet Identifier (PID) of the private metadata stream in the transport stream." }, "ProgramNumber": { "shape": "__integer", @@ -5272,7 +5299,7 @@ "Scte35Pid": { "shape": "__integer", "locationName": "scte35Pid", - "documentation": "Packet Identifier (PID) of the SCTE-35 stream in the transport stream. Can be entered as a decimal or hexadecimal value." + "documentation": "Packet Identifier (PID) of the SCTE-35 stream in the transport stream." }, "Scte35Source": { "shape": "M3u8Scte35Source", @@ -5285,7 +5312,7 @@ "TimedMetadataPid": { "shape": "__integer", "locationName": "timedMetadataPid", - "documentation": "Packet Identifier (PID) of the timed metadata stream in the transport stream. Can be entered as a decimal or hexadecimal value." + "documentation": "Packet Identifier (PID) of the timed metadata stream in the transport stream." }, "TransportStreamId": { "shape": "__integer", @@ -5295,7 +5322,7 @@ "VideoPid": { "shape": "__integer", "locationName": "videoPid", - "documentation": "Packet Identifier (PID) of the elementary video stream in the transport stream. Can be entered as a decimal or hexadecimal value." + "documentation": "Packet Identifier (PID) of the elementary video stream in the transport stream." } }, "documentation": "Settings for TS segments in HLS" @@ -5794,7 +5821,7 @@ "FragmentLength": { "shape": "__integer", "locationName": "fragmentLength", - "documentation": "Use Fragment length (FragmentLength) to specify the mp4 fragment sizes in seconds. Fragment length must be compatible with GOP size and framerate." + "documentation": "Use Fragment length (FragmentLength) to specify the mp4 fragment sizes in seconds. Fragment length must be compatible with GOP size and framerate." 
}, "ManifestEncoding": { "shape": "MsSmoothManifestEncoding", @@ -5937,7 +5964,7 @@ "NameModifier": { "shape": "__string", "locationName": "nameModifier", - "documentation": "Use Name modifier (NameModifier) to have the service add a string to the end of each output filename. You specify the base filename as part of your destination URI. When you create multiple outputs in the same output group, Name modifier is required. Name modifier also accepts format identifiers. For DASH ISO outputs, if you use the format identifiers $Number$ or $Time$ in one output, you must use them in the same way in all outputs of the output group." + "documentation": "Use Name modifier (NameModifier) to have the service add a string to the end of each output filename. You specify the base filename as part of your destination URI. When you create multiple outputs in the same output group, Name modifier (NameModifier) is required. Name modifier also accepts format identifiers. For DASH ISO outputs, if you use the format identifiers $Number$ or $Time$ in one output, you must use them in the same way in all outputs of the output group." }, "OutputSettings": { "shape": "OutputSettings", @@ -6039,14 +6066,14 @@ }, "Type": { "shape": "OutputGroupType", - "locationName": "type", - "documentation": "Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth Streaming)" + "locationName": "type" } }, "documentation": "Output Group settings, including type" }, "OutputGroupType": { "type": "string", + "documentation": "Type of output group (File group, Apple HLS, DASH ISO, Microsoft Smooth Streaming)", "enum": [ "HLS_GROUP_SETTINGS", "DASH_ISO_GROUP_SETTINGS", @@ -6056,7 +6083,7 @@ }, "OutputSdt": { "type": "string", - "documentation": "Selects method of inserting SDT information into output stream. \"Follow input SDT\" copies SDT information from input stream to output stream. 
\"Follow input SDT if present\" copies SDT information from input stream to output stream if SDT information is present in the input, otherwise it will fall back on the user-defined values. Enter \"SDT Manually\" means user will enter the SDT information. \"No SDT\" means output stream will not contain SDT information.", + "documentation": "Selects method of inserting SDT information into output stream. \"Follow input SDT\" copies SDT information from input stream to output stream. \"Follow input SDT if present\" copies SDT information from input stream to output stream if SDT information is present in the input, otherwise it will fall back on the user-defined values. Enter \"SDT Manually\" means user will enter the SDT information. \"No SDT\" means output stream will not contain SDT information.", "enum": [ "SDT_FOLLOW", "SDT_FOLLOW_IF_PRESENT", @@ -6114,7 +6141,7 @@ "Type": { "shape": "Type", "locationName": "type", - "documentation": "A preset can be of two types: system or custom. System or built-in preset can’t be modified or deleted by the user." + "documentation": "A preset can be of two types: system or custom. System or built-in preset can't be modified or deleted by the user." } }, "documentation": "A preset is a collection of preconfigured media conversion settings that you want MediaConvert to apply to the output during the conversion process." @@ -6303,7 +6330,7 @@ "Type": { "shape": "Type", "locationName": "type", - "documentation": "A queue can be of two types: system or custom. System or built-in queues can’t be modified or deleted by the user." + "documentation": "A queue can be of two types: system or custom. System or built-in queues can't be modified or deleted by the user." } }, "documentation": "MediaConvert jobs are submitted to a queue. Unless specified otherwise jobs are submitted to a built-in default queue. User can create additional queues to separate the jobs of different categories or priority." 
@@ -6460,7 +6487,7 @@ "PageNumber": { "shape": "__string", "locationName": "pageNumber", - "documentation": "Set pageNumber to the Teletext page number for the destination captions for this output. This value must be a three-digit hexadecimal string; strings ending in -FF are invalid. If you are passing through the entire set of Teletext data, do not use this field." + "documentation": "Set pageNumber to the Teletext page number for the destination captions for this output. This value must be a three-digit hexadecimal string; strings ending in -FF are invalid. If you are passing through the entire set of Teletext data, do not use this field." } }, "documentation": "Settings for Teletext caption output" @@ -6531,7 +6558,7 @@ "TimestampOffset": { "shape": "__string", "locationName": "timestampOffset", - "documentation": "Only applies to outputs that support program-date-time stamp. Use Time stamp offset (TimestampOffset) to overwrite the timecode date without affecting the time and frame number. Provide the new date as a string in the format \"yyyy-mm-dd\". To use Time stamp offset, you must also enable Insert program-date-time (InsertProgramDateTime) in the output settings." + "documentation": "Only applies to outputs that support program-date-time stamp. Use Time stamp offset (TimestampOffset) to overwrite the timecode date without affecting the time and frame number. Provide the new date as a string in the format \"yyyy-mm-dd\". To use Time stamp offset, you must also enable Insert program-date-time (InsertProgramDateTime) in the output settings." } }, "documentation": "Contains settings used to acquire and adjust timecode information from inputs." @@ -6547,7 +6574,7 @@ }, "TimedMetadata": { "type": "string", - "documentation": "If PASSTHROUGH, inserts ID3 timed metadata from the timed_metadata REST command into this output. 
Only available for certain containers.", + "documentation": "If PASSTHROUGH, inserts ID3 timed metadata from the timed_metadata REST command into this output.", "enum": [ "PASSTHROUGH", "NONE" @@ -6607,7 +6634,7 @@ "locationName": "stylePassthrough" } }, - "documentation": "Settings for TTML caption output" + "documentation": "Settings specific to TTML caption outputs, including Pass style information (TtmlStylePassthrough)." }, "TtmlStylePassthrough": { "type": "string", diff --git a/botocore/data/rds/2014-10-31/service-2.json b/botocore/data/rds/2014-10-31/service-2.json index 1eca5789..8e802053 100644 --- a/botocore/data/rds/2014-10-31/service-2.json +++ b/botocore/data/rds/2014-10-31/service-2.json @@ -319,7 +319,7 @@ {"shape":"StorageTypeNotSupportedFault"}, {"shape":"KMSKeyNotAccessibleFault"} ], - "documentation":"Creates a new DB instance that acts as a Read Replica for an existing source DB instance. You can create a Read Replica for a DB instance running MySQL, MariaDB, or PostgreSQL. For more information, see Working with PostgreSQL, MySQL, and MariaDB Read Replicas.
Amazon Aurora does not support this action. You must call the CreateDBInstance
action to create a DB instance for an Aurora DB cluster.
All Read Replica DB instances are created with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified below.
Your source DB instance must have backup retention enabled.
Creates a new DB instance that acts as a Read Replica for an existing source DB instance. You can create a Read Replica for a DB instance running MySQL, MariaDB, or PostgreSQL. For more information, see Working with PostgreSQL, MySQL, and MariaDB Read Replicas.
Amazon Aurora doesn't support this action. You must call the CreateDBInstance
action to create a DB instance for an Aurora DB cluster.
All Read Replica DB instances are created with backups disabled. All other DB instance attributes (including DB security groups and DB parameter groups) are inherited from the source DB instance, except as specified following.
Your source DB instance must have backup retention enabled.
Lists all of the attributes for a customer account. The attributes include Amazon RDS quotas for the account, such as the number of DB instances allowed. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value.
This command does not take any parameters.
" + "documentation":"Lists all of the attributes for a customer account. The attributes include Amazon RDS quotas for the account, such as the number of DB instances allowed. The description for a quota includes the quota name, current usage toward that quota, and the quota's maximum value.
This command doesn't take any parameters.
" }, "DescribeCertificates":{ "name":"DescribeCertificates", @@ -1298,7 +1298,7 @@ {"shape":"InvalidDBInstanceStateFault"}, {"shape":"DBInstanceNotFoundFault"} ], - "documentation":"Promotes a Read Replica DB instance to a standalone DB instance.
We recommend that you enable automated backups on your Read Replica before promoting the Read Replica. This ensures that no backup is taken during the promotion process. Once the instance is promoted to a primary instance, backups are taken based on your backup settings.
Promotes a Read Replica DB instance to a standalone DB instance.
We recommend that you enable automated backups on your Read Replica before promoting the Read Replica. This ensures that no backup is taken during the promotion process. Once the instance is promoted to a primary instance, backups are taken based on your backup settings.
This command doesn't apply to Aurora MySQL and Aurora PostgreSQL.
Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with the most of original configuration with the default security group and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored AZ deployment and not a single-AZ deployment.
If your intent is to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot action. RDS does not allow two DB instances with the same name. Once you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot action. The result is that you will replace the original DB instance with the DB instance created from the snapshot.
If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier
must be the ARN of the shared DB snapshot.
Creates a new DB instance from a DB snapshot. The target database is created from the source database restore point with most of the original configuration with the default security group and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored AZ deployment and not a single-AZ deployment.
If your intent is to replace your original DB instance with the new, restored DB instance, then rename your original DB instance before you call the RestoreDBInstanceFromDBSnapshot action. RDS doesn't allow two DB instances with the same name. Once you have renamed your original DB instance with a different identifier, then you can pass the original name of the DB instance as the DBInstanceIdentifier in the call to the RestoreDBInstanceFromDBSnapshot action. The result is that you will replace the original DB instance with the DB instance created from the snapshot.
If you are restoring from a shared manual DB snapshot, the DBSnapshotIdentifier
must be the ARN of the shared DB snapshot.
This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterFromSnapshot.
Restores a DB instance to an arbitrary point in time. You can restore to any point in time before the time identified by the LatestRestorableTime property. You can restore to a point up to the number of days specified by the BackupRetentionPeriod property.
The target database is created with most of the original configuration, but in a system-selected availability zone, with the default security group, the default subnet group, and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored deployment and not a single-AZ deployment.
" + "documentation":"Restores a DB instance to an arbitrary point in time. You can restore to any point in time before the time identified by the LatestRestorableTime property. You can restore to a point up to the number of days specified by the BackupRetentionPeriod property.
The target database is created with most of the original configuration, but in a system-selected availability zone, with the default security group, the default subnet group, and the default DB parameter group. By default, the new DB instance is created as a single-AZ deployment except when the instance is a SQL Server instance that has an option group that is associated with mirroring; in this case, the instance becomes a mirrored deployment and not a single-AZ deployment.
This command doesn't apply to Aurora MySQL and Aurora PostgreSQL. For Aurora, use RestoreDBClusterToPointInTime.
Starts a DB instance that was stopped using the AWS console, the stop-db-instance AWS CLI command, or the StopDBInstance action. For more information, see Stopping and Starting a DB instance in the AWS RDS user guide.
This command does not apply to Aurora MySQL and Aurora PostgreSQL.
Starts a DB instance that was stopped using the AWS console, the stop-db-instance AWS CLI command, or the StopDBInstance action. For more information, see Stopping and Starting a DB instance in the AWS RDS user guide.
This command doesn't apply to Aurora MySQL and Aurora PostgreSQL.
Stops a DB instance. When you stop a DB instance, Amazon RDS retains the DB instance's metadata, including its endpoint, DB parameter group, and option group membership. Amazon RDS also retains the transaction logs so you can do a point-in-time restore if necessary. For more information, see Stopping and Starting a DB instance in the AWS RDS user guide.
This command does not apply to Aurora MySQL and Aurora PostgreSQL.
Stops a DB instance. When you stop a DB instance, Amazon RDS retains the DB instance's metadata, including its endpoint, DB parameter group, and option group membership. Amazon RDS also retains the transaction logs so you can do a point-in-time restore if necessary. For more information, see Stopping and Starting a DB instance in the AWS RDS user guide.
This command doesn't apply to Aurora MySQL and Aurora PostgreSQL.
The name of the database engine to be used for this DB cluster.
Valid Values: aurora
, aurora-postgresql
The name of the database engine to be used for this DB cluster.
Valid Values: aurora
(for MySQL 5.6-compatible Aurora), aurora-mysql
(for MySQL 5.7-compatible Aurora), and aurora-postgresql
The version number of the database engine to use.
Aurora
Example: 5.6.10a
The version number of the database engine to use.
Aurora MySQL
Example: 5.6.10a
, 5.7.12
Aurora PostgreSQL
Example: 9.6.3
The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.
" + "documentation":"The DB cluster parameter group family name. A DB cluster parameter group can be associated with one and only one DB cluster parameter group family, and can be applied only to a DB cluster running a database engine and engine version compatible with that DB cluster parameter group family.
Aurora MySQL
Example: aurora5.6
, aurora-mysql5.7
Aurora PostgreSQL
Example: aurora-postgresql9.6
The name of the database engine to be used for this instance.
Not every database engine is available for every AWS Region.
Valid Values:
aurora
aurora-postgresql
mariadb
mysql
oracle-ee
oracle-se2
oracle-se1
oracle-se
postgres
sqlserver-ee
sqlserver-se
sqlserver-ex
sqlserver-web
The name of the database engine to be used for this instance.
Not every database engine is available for every AWS Region.
Valid Values:
aurora
(for MySQL 5.6-compatible Aurora)
aurora-mysql
(for MySQL 5.7-compatible Aurora)
aurora-postgresql
mariadb
mysql
oracle-ee
oracle-se2
oracle-se1
oracle-se
postgres
sqlserver-ee
sqlserver-se
sqlserver-ex
sqlserver-web
Specifies whether the read replica is in a Multi-AZ deployment.
You can create a Read Replica as a Multi-AZ DB instance. RDS creates a standby of your replica in another Availability Zone for failover support for the replica. Creating your Read Replica as a Multi-AZ DB instance is independent of whether the source database is a Multi-AZ DB instance.
Currently PostgreSQL Read Replicas can only be created as single-AZ DB instances.
Specifies whether the Read Replica is in a Multi-AZ deployment.
You can create a Read Replica as a Multi-AZ DB instance. RDS creates a standby of your replica in another Availability Zone for failover support for the replica. Creating your Read Replica as a Multi-AZ DB instance is independent of whether the source database is a Multi-AZ DB instance.
" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", @@ -3412,6 +3412,10 @@ "SupportsLogExportsToCloudwatchLogs":{ "shape":"Boolean", "documentation":"A value that indicates whether the engine version supports exporting the log types specified by ExportableLogTypes to CloudWatch Logs.
" + }, + "SupportsReadReplica":{ + "shape":"Boolean", + "documentation":"Indicates whether the database engine version supports read replicas.
" } }, "documentation":"This data type is used as a response element in the action DescribeDBEngineVersions.
" @@ -6021,7 +6025,7 @@ }, "OptionGroupName":{ "shape":"String", - "documentation":"A value that indicates that the DB cluster should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case, and the change is applied during the next maintenance window unless the ApplyImmediately
parameter is set to true
for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.
Permanent options can't be removed from an option group. The option group can't be removed from a DB cluster once it is associated with a DB cluster.
" + "documentation":"A value that indicates that the DB cluster should be associated with the specified option group. Changing this parameter doesn't result in an outage except in the following case, and the change is applied during the next maintenance window unless the ApplyImmediately
parameter is set to true
for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.
Permanent options can't be removed from an option group. The option group can't be removed from a DB cluster once it is associated with a DB cluster.
" }, "PreferredBackupWindow":{ "shape":"String", @@ -6116,7 +6120,7 @@ }, "DBSecurityGroups":{ "shape":"DBSecurityGroupNameList", - "documentation":"A list of DB security groups to authorize on this DB instance. Changing this setting does not result in an outage and the change is asynchronously applied as soon as possible.
Constraints:
If supplied, must match existing DBSecurityGroups.
A list of DB security groups to authorize on this DB instance. Changing this setting doesn't result in an outage and the change is asynchronously applied as soon as possible.
Constraints:
If supplied, must match existing DBSecurityGroups.
The new password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\".
Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword
element exists in the PendingModifiedValues
element of the operation response.
Amazon Aurora
Not applicable. The password for the master user is managed by the DB cluster. For more information, see ModifyDBCluster.
Default: Uses existing setting
MariaDB
Constraints: Must contain from 8 to 41 characters.
Microsoft SQL Server
Constraints: Must contain from 8 to 128 characters.
MySQL
Constraints: Must contain from 8 to 41 characters.
Oracle
Constraints: Must contain from 8 to 30 characters.
PostgreSQL
Constraints: Must contain from 8 to 128 characters.
Amazon RDS API actions never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked.
The new password for the master user. The password can include any printable ASCII character except \"/\", \"\"\", or \"@\".
Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible. Between the time of the request and the completion of the request, the MasterUserPassword
element exists in the PendingModifiedValues
element of the operation response.
Amazon Aurora
Not applicable. The password for the master user is managed by the DB cluster. For more information, see ModifyDBCluster.
Default: Uses existing setting
MariaDB
Constraints: Must contain from 8 to 41 characters.
Microsoft SQL Server
Constraints: Must contain from 8 to 128 characters.
MySQL
Constraints: Must contain from 8 to 41 characters.
Oracle
Constraints: Must contain from 8 to 30 characters.
PostgreSQL
Constraints: Must contain from 8 to 128 characters.
Amazon RDS API actions never return the password, so this action provides a way to regain access to a primary instance user if the password is lost. This includes restoring privileges that might have been accidentally revoked.
The name of the DB parameter group to apply to the DB instance. Changing this setting does not result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The db instance will NOT be rebooted automatically and the parameter changes will NOT be applied during the next maintenance window.
Default: Uses existing setting
Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance.
" + "documentation":"The name of the DB parameter group to apply to the DB instance. Changing this setting doesn't result in an outage. The parameter group name itself is changed immediately, but the actual parameter changes are not applied until you reboot the instance without failover. The db instance will NOT be rebooted automatically and the parameter changes will NOT be applied during the next maintenance window.
Default: Uses existing setting
Constraints: The DB parameter group must be in the same DB parameter group family as this DB instance.
" }, "BackupRetentionPeriod":{ "shape":"IntegerOptional", @@ -6140,15 +6144,15 @@ }, "PreferredBackupWindow":{ "shape":"String", - "documentation":" The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod
parameter. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.
Amazon Aurora
Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.
Constraints:
Must be in the format hh24:mi-hh24:mi
Must be in Universal Time Coordinated (UTC)
Must not conflict with the preferred maintenance window
Must be at least 30 minutes
The daily time range during which automated backups are created if automated backups are enabled, as determined by the BackupRetentionPeriod
parameter. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible.
Amazon Aurora
Not applicable. The daily time range for creating automated backups is managed by the DB cluster. For more information, see ModifyDBCluster.
Constraints:
Must be in the format hh24:mi-hh24:mi
Must be in Universal Time Coordinated (UTC)
Must not conflict with the preferred maintenance window
Must be at least 30 minutes
The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter does not result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.
Default: Uses existing setting
Format: ddd:hh24:mi-ddd:hh24:mi
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes
" + "documentation":"The weekly time range (in UTC) during which system maintenance can occur, which might result in an outage. Changing this parameter doesn't result in an outage, except in the following situation, and the change is asynchronously applied as soon as possible. If there are pending actions that cause a reboot, and the maintenance window is changed to include the current time, then changing this parameter will cause a reboot of the DB instance. If moving this window to the current time, there must be at least 30 minutes between the current time and end of the window to ensure pending changes are applied.
Default: Uses existing setting
Format: ddd:hh24:mi-ddd:hh24:mi
Valid Days: Mon | Tue | Wed | Thu | Fri | Sat | Sun
Constraints: Must be at least 30 minutes
" }, "MultiAZ":{ "shape":"BooleanOptional", - "documentation":"Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately
parameter is set to true
for this request.
Specifies if the DB instance is a Multi-AZ deployment. Changing this parameter doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately
parameter is set to true
for this request.
Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.
Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version.
" + "documentation":"Indicates that major version upgrades are allowed. Changing this parameter doesn't result in an outage and the change is asynchronously applied as soon as possible.
Constraints: This parameter must be set to true when specifying a value for the EngineVersion parameter that is a different major version than the DB instance's current version.
" }, "AutoMinorVersionUpgrade":{ "shape":"BooleanOptional", - "documentation":" Indicates that minor version upgrades are applied automatically to the DB instance during the maintenance window. Changing this parameter does not result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true
during the maintenance window, and a newer minor version is available, and RDS has enabled auto patching for that engine version.
Indicates that minor version upgrades are applied automatically to the DB instance during the maintenance window. Changing this parameter doesn't result in an outage except in the following case and the change is asynchronously applied as soon as possible. An outage will result if this parameter is set to true
during the maintenance window, and a newer minor version is available, and RDS has enabled auto patching for that engine version.
The new Provisioned IOPS (I/O operations per second) value for the RDS instance.
Changing this setting does not result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately
parameter is set to true
for this request. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect.
If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance.
Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.
Default: Uses existing setting
" + "documentation":"The new Provisioned IOPS (I/O operations per second) value for the RDS instance.
Changing this setting doesn't result in an outage and the change is applied during the next maintenance window unless the ApplyImmediately
parameter is set to true
for this request. If you are migrating from Provisioned IOPS to standard storage, set this value to 0. The DB instance will require a reboot for the change in storage type to take effect.
If you choose to migrate your DB instance from using standard storage to using Provisioned IOPS, or from using Provisioned IOPS to using standard storage, the process can take time. The duration of the migration depends on several factors such as database load, storage size, storage type (standard or Provisioned IOPS), amount of IOPS provisioned (if any), and the number of prior scale storage operations. Typical migration times are under 24 hours, but the process can take up to several days in some cases. During the migration, the DB instance is available for use, but might experience performance degradation. While the migration takes place, nightly backups for the instance are suspended. No other Amazon RDS operations can take place for the instance, including modifying the instance, rebooting the instance, deleting the instance, creating a Read Replica for the instance, and creating a DB snapshot of the instance.
Constraints: For MariaDB, MySQL, Oracle, and PostgreSQL, the value supplied must be at least 10% greater than the current value. Values that are not at least 10% greater than the existing value are rounded up so that they are 10% greater than the current value.
Default: Uses existing setting
" }, "OptionGroupName":{ "shape":"String", - "documentation":" Indicates that the DB instance should be associated with the specified option group. Changing this parameter does not result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately
parameter is set to true
for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.
Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance
" + "documentation":" Indicates that the DB instance should be associated with the specified option group. Changing this parameter doesn't result in an outage except in the following case and the change is applied during the next maintenance window unless the ApplyImmediately
parameter is set to true
for this request. If the parameter change results in an option group that enables OEM, this change can cause a brief (sub-second) period during which new connections are rejected but existing connections are not interrupted.
Permanent options, such as the TDE option for Oracle Advanced Security TDE, can't be removed from an option group, and that option group can't be removed from a DB instance once it is associated with a DB instance
" }, "NewDBInstanceIdentifier":{ "shape":"String", @@ -7692,7 +7696,7 @@ }, "EngineVersion":{ "shape":"String", - "documentation":"The version number of the database engine to use.
Aurora
Example: 5.6.10a
The version number of the database engine to use.
Aurora MySQL
Example: 5.6.10a
Aurora PostgreSQL
Example: 9.6.3
The database engine to use for the new instance.
Default: The same as source
Constraint: Must be compatible with the engine of the source. For example, you can restore a MariaDB 10.1 DB instance from a MySQL 5.6 snapshot.
Valid Values:
aurora
aurora-postgresql
mariadb
mysql
oracle-ee
oracle-se2
oracle-se1
oracle-se
postgres
sqlserver-ee
sqlserver-se
sqlserver-ex
sqlserver-web
The database engine to use for the new instance.
Default: The same as source
Constraint: Must be compatible with the engine of the source. For example, you can restore a MariaDB 10.1 DB instance from a MySQL 5.6 snapshot.
Valid Values:
mariadb
mysql
oracle-ee
oracle-se2
oracle-se1
oracle-se
postgres
sqlserver-ee
sqlserver-se
sqlserver-ex
sqlserver-web
True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.
You can enable IAM database authentication for the following database engines
For MySQL 5.6, minor version 5.6.34 or higher
For MySQL 5.7, minor version 5.7.16 or higher
Aurora 5.6 or higher.
Default: false
True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.
You can enable IAM database authentication for the following database engines
For MySQL 5.6, minor version 5.6.34 or higher
For MySQL 5.7, minor version 5.7.16 or higher
Default: false
The database engine to use for the new instance.
Default: The same as source
Constraint: Must be compatible with the engine of the source
Valid Values:
aurora
aurora-postgresql
mariadb
mysql
oracle-ee
oracle-se2
oracle-se1
oracle-se
postgres
sqlserver-ee
sqlserver-se
sqlserver-ex
sqlserver-web
The database engine to use for the new instance.
Default: The same as source
Constraint: Must be compatible with the engine of the source
Valid Values:
mariadb
mysql
oracle-ee
oracle-se2
oracle-se1
oracle-se
postgres
sqlserver-ee
sqlserver-se
sqlserver-ex
sqlserver-web
True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.
You can enable IAM database authentication for the following database engines
For MySQL 5.6, minor version 5.6.34 or higher
For MySQL 5.7, minor version 5.7.16 or higher
Aurora 5.6 or higher.
Default: false
True to enable mapping of AWS Identity and Access Management (IAM) accounts to database accounts, and otherwise false.
You can enable IAM database authentication for the following database engines
For MySQL 5.6, minor version 5.6.34 or higher
For MySQL 5.7, minor version 5.7.16 or higher
Default: false
Success
" }, "errors" : [ { "shape" : "TooManyRequestsException", - "documentation" : "429 response" + "documentation" : "The client is sending more than the allowed number of requests per unit time.
" }, { "shape" : "BadRequestException", - "documentation" : "400 response" + "documentation" : "One of the parameters in the request is invalid.
" }, { "shape" : "InternalServerErrorException", - "documentation" : "500 response" + "documentation" : "The AWS Serverless Application Repository service encountered an internal error.
" }, { "shape" : "ConflictException", - "documentation" : "409 response" + "documentation" : "The resource already exists.
" }, { "shape" : "ForbiddenException", - "documentation" : "403 response" + "documentation" : "The client is not authenticated.
" } ], - "documentation" : "Creates an application, optionally including an AWS SAM file to create the first application version in the same call." + "documentation" : "Creates an application, optionally including an AWS SAM file to create the first application version in the same call.
" }, "CreateApplicationVersion" : { "name" : "CreateApplicationVersion", @@ -55,25 +55,25 @@ }, "output" : { "shape" : "CreateApplicationVersionResponse", - "documentation" : "201 response" + "documentation" : "Success
" }, "errors" : [ { "shape" : "TooManyRequestsException", - "documentation" : "429 response" + "documentation" : "The client is sending more than the allowed number of requests per unit time.
" }, { "shape" : "BadRequestException", - "documentation" : "400 response" + "documentation" : "One of the parameters in the request is invalid.
" }, { "shape" : "InternalServerErrorException", - "documentation" : "500 response" + "documentation" : "The AWS Serverless Application Repository service encountered an internal error.
" }, { "shape" : "ConflictException", - "documentation" : "409 response" + "documentation" : "The resource already exists.
" }, { "shape" : "ForbiddenException", - "documentation" : "403 response" + "documentation" : "The client is not authenticated.
" } ], - "documentation" : "Creates an application version." + "documentation" : "Creates an application version.
" }, "CreateCloudFormationChangeSet" : { "name" : "CreateCloudFormationChangeSet", @@ -87,22 +87,53 @@ }, "output" : { "shape" : "CreateCloudFormationChangeSetResponse", - "documentation" : "201 response" + "documentation" : "Success
" }, "errors" : [ { "shape" : "TooManyRequestsException", - "documentation" : "429 response" + "documentation" : "The client is sending more than the allowed number of requests per unit time.
" }, { "shape" : "BadRequestException", - "documentation" : "400 response" + "documentation" : "One of the parameters in the request is invalid.
" }, { "shape" : "InternalServerErrorException", - "documentation" : "500 response" + "documentation" : "The AWS Serverless Application Repository service encountered an internal error.
" }, { "shape" : "ForbiddenException", - "documentation" : "403 response" + "documentation" : "The client is not authenticated.
" } ], - "documentation" : "Creates an AWS CloudFormation ChangeSet for the given application." + "documentation" : "Creates an AWS CloudFormation ChangeSet for the given application.
" + }, + "DeleteApplication" : { + "name" : "DeleteApplication", + "http" : { + "method" : "DELETE", + "requestUri" : "/applications/{applicationId}", + "responseCode" : 204 + }, + "input" : { + "shape" : "DeleteApplicationRequest" + }, + "errors" : [ { + "shape" : "BadRequestException", + "documentation" : "One of the parameters in the request is invalid.
" + }, { + "shape" : "InternalServerErrorException", + "documentation" : "The AWS Serverless Application Repository service encountered an internal error.
" + }, { + "shape" : "ForbiddenException", + "documentation" : "The client is not authenticated.
" + }, { + "shape" : "NotFoundException", + "documentation" : "The resource (for example, an access policy statement) specified in the request does not exist.
" + }, { + "shape" : "TooManyRequestsException", + "documentation" : "The client is sending more than the allowed number of requests per unit time.
" + }, { + "shape" : "ConflictException", + "documentation" : "The resource already exists.
" + } ], + "documentation" : "Deletes the specified application.
" }, "GetApplication" : { "name" : "GetApplication", @@ -116,25 +147,25 @@ }, "output" : { "shape" : "GetApplicationResponse", - "documentation" : "Success" + "documentation" : "Success
" }, "errors" : [ { "shape" : "NotFoundException", - "documentation" : "404 response" + "documentation" : "The resource (for example, an access policy statement) specified in the request does not exist.
" }, { "shape" : "TooManyRequestsException", - "documentation" : "429 response" + "documentation" : "The client is sending more than the allowed number of requests per unit time.
" }, { "shape" : "BadRequestException", - "documentation" : "400 response" + "documentation" : "One of the parameters in the request is invalid.
" }, { "shape" : "InternalServerErrorException", - "documentation" : "500 response" + "documentation" : "The AWS Serverless Application Repository service encountered an internal error.
" }, { "shape" : "ForbiddenException", - "documentation" : "403 response" + "documentation" : "The client is not authenticated.
" } ], - "documentation" : "Gets the specified application." + "documentation" : "Gets the specified application.
" }, "GetApplicationPolicy" : { "name" : "GetApplicationPolicy", @@ -148,25 +179,25 @@ }, "output" : { "shape" : "GetApplicationPolicyResponse", - "documentation" : "Success" + "documentation" : "Success
" }, "errors" : [ { "shape" : "NotFoundException", - "documentation" : "404 response" + "documentation" : "The resource (for example, an access policy statement) specified in the request does not exist.
" }, { "shape" : "TooManyRequestsException", - "documentation" : "429 response" + "documentation" : "The client is sending more than the allowed number of requests per unit time.
" }, { "shape" : "BadRequestException", - "documentation" : "400 response" + "documentation" : "One of the parameters in the request is invalid.
" }, { "shape" : "InternalServerErrorException", - "documentation" : "500 response" + "documentation" : "The AWS Serverless Application Repository service encountered an internal error.
" }, { "shape" : "ForbiddenException", - "documentation" : "403 response" + "documentation" : "The client is not authenticated.
" } ], - "documentation" : "Gets the policy for the specified application." + "documentation" : "Gets the policy for the specified application.
" }, "ListApplicationVersions" : { "name" : "ListApplicationVersions", @@ -180,25 +211,25 @@ }, "output" : { "shape" : "ListApplicationVersionsResponse", - "documentation" : "Success" + "documentation" : "Success
" }, "errors" : [ { "shape" : "NotFoundException", - "documentation" : "404 response" + "documentation" : "The resource (for example, an access policy statement) specified in the request does not exist.
" }, { "shape" : "TooManyRequestsException", - "documentation" : "429 response" + "documentation" : "The client is sending more than the allowed number of requests per unit time.
" }, { "shape" : "BadRequestException", - "documentation" : "400 response" + "documentation" : "One of the parameters in the request is invalid.
" }, { "shape" : "InternalServerErrorException", - "documentation" : "500 response" + "documentation" : "The AWS Serverless Application Repository service encountered an internal error.
" }, { "shape" : "ForbiddenException", - "documentation" : "403 response" + "documentation" : "The client is not authenticated.
" } ], - "documentation" : "Lists versions for the specified application." + "documentation" : "Lists versions for the specified application.
" }, "ListApplications" : { "name" : "ListApplications", @@ -212,22 +243,22 @@ }, "output" : { "shape" : "ListApplicationsResponse", - "documentation" : "Success" + "documentation" : "Success
" }, "errors" : [ { "shape" : "NotFoundException", - "documentation" : "404 response" + "documentation" : "The resource (for example, an access policy statement) specified in the request does not exist.
" }, { "shape" : "BadRequestException", - "documentation" : "400 response" + "documentation" : "One of the parameters in the request is invalid.
" }, { "shape" : "InternalServerErrorException", - "documentation" : "500 response" + "documentation" : "The AWS Serverless Application Repository service encountered an internal error.
" }, { "shape" : "ForbiddenException", - "documentation" : "403 response" + "documentation" : "The client is not authenticated.
" } ], - "documentation" : "Lists applications owned by the requester." + "documentation" : "Lists applications owned by the requester.
" }, "PutApplicationPolicy" : { "name" : "PutApplicationPolicy", @@ -241,25 +272,25 @@ }, "output" : { "shape" : "PutApplicationPolicyResponse", - "documentation" : "Success" + "documentation" : "Success
" }, "errors" : [ { "shape" : "NotFoundException", - "documentation" : "404 response" + "documentation" : "The resource (for example, an access policy statement) specified in the request does not exist.
" }, { "shape" : "TooManyRequestsException", - "documentation" : "429 response" + "documentation" : "The client is sending more than the allowed number of requests per unit time.
" }, { "shape" : "BadRequestException", - "documentation" : "400 response" + "documentation" : "One of the parameters in the request is invalid.
" }, { "shape" : "InternalServerErrorException", - "documentation" : "500 response" + "documentation" : "The AWS Serverless Application Repository service encountered an internal error.
" }, { "shape" : "ForbiddenException", - "documentation" : "403 response" + "documentation" : "The client is not authenticated.
" } ], - "documentation" : "Puts the policy for the specified application." + "documentation" : "Puts the policy for the specified application.
" }, "UpdateApplication" : { "name" : "UpdateApplication", @@ -273,28 +304,28 @@ }, "output" : { "shape" : "UpdateApplicationResponse", - "documentation" : "Success" + "documentation" : "Success
" }, "errors" : [ { "shape" : "BadRequestException", - "documentation" : "400 response" + "documentation" : "One of the parameters in the request is invalid.
" }, { "shape" : "InternalServerErrorException", - "documentation" : "500 response" + "documentation" : "The AWS Serverless Application Repository service encountered an internal error.
" }, { "shape" : "ForbiddenException", - "documentation" : "403 response" + "documentation" : "The client is not authenticated.
" }, { "shape" : "NotFoundException", - "documentation" : "404 response" + "documentation" : "The resource (for example, an access policy statement) specified in the request does not exist.
" }, { "shape" : "TooManyRequestsException", - "documentation" : "429 response" + "documentation" : "The client is sending more than the allowed number of requests per unit time.
" }, { "shape" : "ConflictException", - "documentation" : "409 response" + "documentation" : "The resource already exists.
" } ], - "documentation" : "Updates the specified application." + "documentation" : "Updates the specified application.
" } }, "shapes" : { @@ -304,103 +335,112 @@ "ApplicationId" : { "shape" : "__string", "locationName" : "applicationId", - "documentation" : "The application Amazon Resource Name (ARN)." + "documentation" : "The application Amazon Resource Name (ARN).
" }, "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "The name of the author publishing the app.\\nMin Length=1. Max Length=127.\\nPattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";" + "documentation" : "The name of the author publishing the app.
Min Length=1. Max Length=127.
Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";
" }, "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "The date/time this resource was created." + "documentation" : "The date/time this resource was created.
" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "The description of the application.\\nMin Length=1. Max Length=256" + "documentation" : "The description of the application.
Min Length=1. Max Length=256
" + }, + "HomePageUrl" : { + "shape" : "__string", + "locationName" : "homePageUrl", + "documentation" : "A URL with more information about the application, for example\n the location of your GitHub repository for the application.
" }, "Labels" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "Labels to improve discovery of apps in search results.\\nMin Length=1. Max Length=127. Maximum number of labels: 10\\nPattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";" + "documentation" : "Labels to improve discovery of apps in search results.
Min Length=1. Max Length=127. Maximum number of labels: 10
Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";
" }, "LicenseUrl" : { "shape" : "__string", "locationName" : "licenseUrl", - "documentation" : "A link to a license file of the app that matches the spdxLicenseID of your application.\\nMax size 5 MB" + "documentation" : "A link to a license file of the app that matches the spdxLicenseID of your application.
Max size 5 MB
" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "The name of the application.\\nMin Length=1. Max Length=140\\nPattern: \"[a-zA-Z0-9\\\\-]+\";" + "documentation" : "The name of the application.
Min Length=1. Max Length=140
Pattern: \"[a-zA-Z0-9\\\\-]+\";
" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.\\nMax size 5 MB" + "documentation" : "A link to the readme file that contains a more detailed description of the application and how it works in Markdown language.
Max size 5 MB
" }, "SpdxLicenseId" : { "shape" : "__string", "locationName" : "spdxLicenseId", - "documentation" : "A valid identifier from https://spdx.org/licenses/." + "documentation" : "A valid identifier from https://spdx.org/licenses/.
" }, "Version" : { "shape" : "Version", "locationName" : "version", - "documentation" : "Version information about the application." + "documentation" : "Version information about the application.
" } }, - "documentation" : "Details about the application." + "documentation" : "Details about the application.
", + "required" : [ "Description", "Author", "ApplicationId", "Name" ] }, "ApplicationPage" : { "type" : "structure", "members" : { "Applications" : { - "shape" : "ListOfApplicationSummary", + "shape" : "__listOfApplicationSummary", "locationName" : "applications", - "documentation" : "Array of application summaries." + "documentation" : "Array of application summaries.
" }, "NextToken" : { "shape" : "__string", "locationName" : "nextToken", - "documentation" : "The token to request the next page of results." + "documentation" : "The token to request the next page of results.
" } }, - "documentation" : "List of application details." + "documentation" : "List of application details.
", + "required" : [ "Applications" ] }, "ApplicationPolicy" : { "type" : "structure", "members" : { "Statements" : { - "shape" : "ListOfApplicationPolicyStatement", + "shape" : "__listOfApplicationPolicyStatement", "locationName" : "statements", - "documentation" : "Array of policy statements applied to the application." + "documentation" : "Array of policy statements applied to the application.
" } }, - "documentation" : "Policy statements applied to the application." + "documentation" : "Policy statements applied to the application.
", + "required" : [ "Statements" ] }, "ApplicationPolicyStatement" : { "type" : "structure", "members" : { "Actions" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "actions", - "documentation" : "A list of supported actions:\\n\\n GetApplication \\n \\n\\n CreateCloudFormationChangeSet \\n \\n\\n ListApplicationVersions \\n \\n\\n SearchApplications \\n \\n\\n Deploy (Note: This action enables all other actions above.)" + "documentation" : "A list of supported actions:
\n GetApplication\n
\n CreateCloudFormationChangeSet\n
\n ListApplicationVersions\n
\n SearchApplications\n
\n Deploy (Note: This action enables all other actions above.)
" }, "Principals" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "principals", - "documentation" : "An AWS account ID, or * to make the application public." + "documentation" : "An AWS account ID, or * to make the application public.
" }, "StatementId" : { "shape" : "__string", "locationName" : "statementId", - "documentation" : "A unique ID for the statement." + "documentation" : "A unique ID for the statement.
" } }, - "documentation" : "Policy statement applied to the application." + "documentation" : "Policy statement applied to the application.
", + "required" : [ "Principals", "Actions" ] }, "ApplicationSummary" : { "type" : "structure", @@ -408,40 +448,46 @@ "ApplicationId" : { "shape" : "__string", "locationName" : "applicationId", - "documentation" : "The application ARN." + "documentation" : "The application ARN.
" }, "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "The name of the author publishing the app\\nMin Length=1. Max Length=127.\\nPattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";" + "documentation" : "The name of the author publishing the app.
Min Length=1. Max Length=127.
Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";
" }, "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "The date/time this resource was created." + "documentation" : "The date/time this resource was created.
" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "The description of the application.\\nMin Length=1. Max Length=256" + "documentation" : "The description of the application.
Min Length=1. Max Length=256
" + }, + "HomePageUrl" : { + "shape" : "__string", + "locationName" : "homePageUrl", + "documentation" : "A URL with more information about the application, for example\n the location of your GitHub repository for the application.
" }, "Labels" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "Labels to improve discovery of apps in search results.\\nMin Length=1. Max Length=127. Maximum number of labels: 10\\nPattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";" + "documentation" : "Labels to improve discovery of apps in search results.
Min Length=1. Max Length=127. Maximum number of labels: 10
Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";
" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "The name of the application.\\nMin Length=1. Max Length=140\\nPattern: \"[a-zA-Z0-9\\\\-]+\";" + "documentation" : "The name of the application.
Min Length=1. Max Length=140
Pattern: \"[a-zA-Z0-9\\\\-]+\";
" }, "SpdxLicenseId" : { "shape" : "__string", "locationName" : "spdxLicenseId", - "documentation" : "A valid identifier from https://spdx.org/licenses/ ." + "documentation" : "A valid identifier from https://spdx.org/licenses/.
" } }, - "documentation" : "Summary of details about the application." + "documentation" : "Summary of details about the application.
", + "required" : [ "Description", "Author", "ApplicationId", "Name" ] }, "ApplicationVersionPage" : { "type" : "structure", @@ -449,15 +495,16 @@ "NextToken" : { "shape" : "__string", "locationName" : "nextToken", - "documentation" : "The token to request the next page of results." + "documentation" : "The token to request the next page of results.
" }, "Versions" : { - "shape" : "ListOfVersionSummary", + "shape" : "__listOfVersionSummary", "locationName" : "versions", - "documentation" : "Array of version summaries for the application." + "documentation" : "Array of version summaries for the application.
" } }, - "documentation" : "List of version summaries for the application." + "documentation" : "List of version summaries for the application.
", + "required" : [ "Versions" ] }, "BadRequestException" : { "type" : "structure", @@ -465,15 +512,15 @@ "ErrorCode" : { "shape" : "__string", "locationName" : "errorCode", - "documentation" : "400" + "documentation" : "400
" }, "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "One of the parameters in the request is invalid." + "documentation" : "One of the parameters in the request is invalid.
" } }, - "documentation" : "One of the parameters in the request is invalid.", + "documentation" : "One of the parameters in the request is invalid.
", "exception" : true, "error" : { "httpStatusCode" : 400 @@ -485,25 +532,26 @@ "ApplicationId" : { "shape" : "__string", "locationName" : "applicationId", - "documentation" : "The application Amazon Resource Name (ARN)." + "documentation" : "The application Amazon Resource Name (ARN).
" }, "ChangeSetId" : { "shape" : "__string", "locationName" : "changeSetId", - "documentation" : "The ARN of the change set.\\nLength Constraints: Minimum length of 1.\\nPattern: arn:[-a-zA-Z0-9:/]*" + "documentation" : "The ARN of the change set.
Length Constraints: Minimum length of 1.
Pattern: Amazon Resource Name (ARN):[-a-zA-Z0-9:/]*
" }, "SemanticVersion" : { "shape" : "__string", "locationName" : "semanticVersion", - "documentation" : "The semantic version of the application:\\n\\n https://semver.org/" + "documentation" : "The semantic version of the application:
\n https://semver.org/\n
" }, "StackId" : { "shape" : "__string", "locationName" : "stackId", - "documentation" : "The unique ID of the stack." + "documentation" : "The unique ID of the stack.
" } }, - "documentation" : "Details of the change set." + "documentation" : "Details of the change set.
", + "required" : [ "ChangeSetId", "ApplicationId", "StackId", "SemanticVersion" ] }, "ConflictException" : { "type" : "structure", @@ -511,15 +559,15 @@ "ErrorCode" : { "shape" : "__string", "locationName" : "errorCode", - "documentation" : "409" + "documentation" : "409
" }, "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "The resource already exists." + "documentation" : "The resource already exists.
" } }, - "documentation" : "The resource already exists.", + "documentation" : "The resource already exists.
", "exception" : true, "error" : { "httpStatusCode" : 409 @@ -531,70 +579,76 @@ "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "The name of the author publishing the app.\\nMin Length=1. Max Length=127.\\nPattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";" + "documentation" : "The name of the author publishing the app.
Min Length=1. Max Length=127.
Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";
" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "The description of the application.\\nMin Length=1. Max Length=256" + "documentation" : "The description of the application.
Min Length=1. Max Length=256
" + }, + "HomePageUrl" : { + "shape" : "__string", + "locationName" : "homePageUrl", + "documentation" : "A URL with more information about the application, for example\n the location of your GitHub repository for the application.
" }, "Labels" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "Labels to improve discovery of apps in search results.\\nMin Length=1. Max Length=127. Maximum number of labels: 10\\nPattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";" + "documentation" : "Labels to improve discovery of apps in search results.
Min Length=1. Max Length=127. Maximum number of labels: 10
Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";
" }, "LicenseBody" : { "shape" : "__string", "locationName" : "licenseBody", - "documentation" : "A raw text file that contains the license of the app that matches the spdxLicenseID of your application.\\nMax size 5 MB" + "documentation" : "A raw text file that contains the license of the app that matches the spdxLicenseID of your application.
Max size 5 MB
" }, "LicenseUrl" : { "shape" : "__string", "locationName" : "licenseUrl", - "documentation" : "A link to a license file of the app that matches the spdxLicenseID of your application.\\nMax size 5 MB" + "documentation" : "A link to a license file of the app that matches the spdxLicenseID of your application.
Max size 5 MB
" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "The name of the application you want to publish.\\nMin Length=1. Max Length=140\\nPattern: \"[a-zA-Z0-9\\\\-]+\";" + "documentation" : "The name of the application you want to publish.
Min Length=1. Max Length=140
Pattern: \"[a-zA-Z0-9\\\\-]+\";
" }, "ReadmeBody" : { "shape" : "__string", "locationName" : "readmeBody", - "documentation" : "A raw text Readme file that contains a more detailed description of the application and how it works in markdown language.\\nMax size 5 MB" + "documentation" : "A raw text Readme file that contains a more detailed description of the application and how it works in markdown language.
Max size 5 MB
" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.\\nMax size 5 MB" + "documentation" : "A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.
Max size 5 MB
" }, "SemanticVersion" : { "shape" : "__string", "locationName" : "semanticVersion", - "documentation" : "The semantic version of the application:\\n\\n https://semver.org/" + "documentation" : "The semantic version of the application:
\n https://semver.org/\n
" }, "SourceCodeUrl" : { "shape" : "__string", "locationName" : "sourceCodeUrl", - "documentation" : "A link to a public repository for the source code of your application." + "documentation" : "A link to a public repository for the source code of your application.
" }, "SpdxLicenseId" : { "shape" : "__string", "locationName" : "spdxLicenseId", - "documentation" : "A valid identifier from https://spdx.org/licenses/ ." + "documentation" : "A valid identifier from https://spdx.org/licenses/.
" }, "TemplateBody" : { "shape" : "__string", "locationName" : "templateBody", - "documentation" : "The raw packaged SAM template of your application." + "documentation" : "The raw packaged AWS SAM template of your application.
" }, "TemplateUrl" : { "shape" : "__string", "locationName" : "templateUrl", - "documentation" : "A link to the packaged SAM template of your application." + "documentation" : "A link to the packaged AWS SAM template of your application.
" } }, - "documentation" : "Create application request." + "documentation" : "Create application request.
", + "required" : [ "Description", "Name", "Author" ] }, "CreateApplicationRequest" : { "type" : "structure", @@ -602,67 +656,72 @@ "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "The name of the author publishing the app.\\nMin Length=1. Max Length=127.\\nPattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";" + "documentation" : "The name of the author publishing the app.
Min Length=1. Max Length=127.
Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";
" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "The description of the application.\\nMin Length=1. Max Length=256" + "documentation" : "The description of the application.
Min Length=1. Max Length=256
" + }, + "HomePageUrl" : { + "shape" : "__string", + "locationName" : "homePageUrl", + "documentation" : "A URL with more information about the application, for example\n the location of your GitHub repository for the application.
" }, "Labels" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "Labels to improve discovery of apps in search results.\\nMin Length=1. Max Length=127. Maximum number of labels: 10\\nPattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";" + "documentation" : "Labels to improve discovery of apps in search results.
Min Length=1. Max Length=127. Maximum number of labels: 10
Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";
" }, "LicenseBody" : { "shape" : "__string", "locationName" : "licenseBody", - "documentation" : "A raw text file that contains the license of the app that matches the spdxLicenseID of your application.\\nMax size 5 MB" + "documentation" : "A raw text file that contains the license of the app that matches the spdxLicenseID of your application.
Max size 5 MB
" }, "LicenseUrl" : { "shape" : "__string", "locationName" : "licenseUrl", - "documentation" : "A link to a license file of the app that matches the spdxLicenseID of your application.\\nMax size 5 MB" + "documentation" : "A link to a license file of the app that matches the spdxLicenseID of your application.
Max size 5 MB
" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "The name of the application you want to publish.\\nMin Length=1. Max Length=140\\nPattern: \"[a-zA-Z0-9\\\\-]+\";" + "documentation" : "The name of the application you want to publish.
Min Length=1. Max Length=140
Pattern: \"[a-zA-Z0-9\\\\-]+\";
" }, "ReadmeBody" : { "shape" : "__string", "locationName" : "readmeBody", - "documentation" : "A raw text Readme file that contains a more detailed description of the application and how it works in markdown language.\\nMax size 5 MB" + "documentation" : "A raw text Readme file that contains a more detailed description of the application and how it works in markdown language.
Max size 5 MB
" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.\\nMax size 5 MB" + "documentation" : "A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.
Max size 5 MB
" }, "SemanticVersion" : { "shape" : "__string", "locationName" : "semanticVersion", - "documentation" : "The semantic version of the application:\\n\\n https://semver.org/" + "documentation" : "The semantic version of the application:
\n https://semver.org/\n
" }, "SourceCodeUrl" : { "shape" : "__string", "locationName" : "sourceCodeUrl", - "documentation" : "A link to a public repository for the source code of your application." + "documentation" : "A link to a public repository for the source code of your application.
" }, "SpdxLicenseId" : { "shape" : "__string", "locationName" : "spdxLicenseId", - "documentation" : "A valid identifier from https://spdx.org/licenses/ ." + "documentation" : "A valid identifier from https://spdx.org/licenses/.
" }, "TemplateBody" : { "shape" : "__string", "locationName" : "templateBody", - "documentation" : "The raw packaged SAM template of your application." + "documentation" : "The raw packaged AWS SAM template of your application.
" }, "TemplateUrl" : { "shape" : "__string", "locationName" : "templateUrl", - "documentation" : "A link to the packaged SAM template of your application." + "documentation" : "A link to the packaged AWS SAM template of your application.
" } } }, @@ -672,52 +731,57 @@ "ApplicationId" : { "shape" : "__string", "locationName" : "applicationId", - "documentation" : "The application Amazon Resource Name (ARN)." + "documentation" : "The application Amazon Resource Name (ARN).
" }, "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "The name of the author publishing the app.\\nMin Length=1. Max Length=127.\\nPattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";" + "documentation" : "The name of the author publishing the app.
Min Length=1. Max Length=127.
Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";
" }, "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "The date/time this resource was created." + "documentation" : "The date/time this resource was created.
" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "The description of the application.\\nMin Length=1. Max Length=256" + "documentation" : "The description of the application.
Min Length=1. Max Length=256
" + }, + "HomePageUrl" : { + "shape" : "__string", + "locationName" : "homePageUrl", + "documentation" : "A URL with more information about the application, for example\n the location of your GitHub repository for the application.
" }, "Labels" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "Labels to improve discovery of apps in search results.\\nMin Length=1. Max Length=127. Maximum number of labels: 10\\nPattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";" + "documentation" : "Labels to improve discovery of apps in search results.
Min Length=1. Max Length=127. Maximum number of labels: 10
Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";
" }, "LicenseUrl" : { "shape" : "__string", "locationName" : "licenseUrl", - "documentation" : "A link to a license file of the app that matches the spdxLicenseID of your application.\\nMax size 5 MB" + "documentation" : "A link to a license file of the app that matches the spdxLicenseID of your application.
Max size 5 MB
" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "The name of the application.\\nMin Length=1. Max Length=140\\nPattern: \"[a-zA-Z0-9\\\\-]+\";" + "documentation" : "The name of the application.
Min Length=1. Max Length=140
Pattern: \"[a-zA-Z0-9\\\\-]+\";
" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.\\nMax size 5 MB" + "documentation" : "A link to the readme file that contains a more detailed description of the application and how it works in Markdown language.
Max size 5 MB
" }, "SpdxLicenseId" : { "shape" : "__string", "locationName" : "spdxLicenseId", - "documentation" : "A valid identifier from https://spdx.org/licenses/." + "documentation" : "A valid identifier from https://spdx.org/licenses/.
" }, "Version" : { "shape" : "Version", "locationName" : "version", - "documentation" : "Version information about the application." + "documentation" : "Version information about the application.
" } } }, @@ -727,20 +791,20 @@ "SourceCodeUrl" : { "shape" : "__string", "locationName" : "sourceCodeUrl", - "documentation" : "A link to a public repository for the source code of your application." + "documentation" : "A link to a public repository for the source code of your application.
" }, "TemplateBody" : { "shape" : "__string", "locationName" : "templateBody", - "documentation" : "The raw packaged SAM template of your application." + "documentation" : "The raw packaged AWS SAM template of your application.
" }, "TemplateUrl" : { "shape" : "__string", "locationName" : "templateUrl", - "documentation" : "A link to the packaged SAM template of your application." + "documentation" : "A link to the packaged AWS SAM template of your application.
" } }, - "documentation" : "Create version request." + "documentation" : "Create version request.
" }, "CreateApplicationVersionRequest" : { "type" : "structure", @@ -749,28 +813,28 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "The id of the application to create a new version for" + "documentation" : "The ID of the application to get.
" }, "SemanticVersion" : { "shape" : "__string", "location" : "uri", "locationName" : "semanticVersion", - "documentation" : "The semantic version of the new version" + "documentation" : "The semantic version of the new version.
" }, "SourceCodeUrl" : { "shape" : "__string", "locationName" : "sourceCodeUrl", - "documentation" : "A link to a public repository for the source code of your application." + "documentation" : "A link to a public repository for the source code of your application.
" }, "TemplateBody" : { "shape" : "__string", "locationName" : "templateBody", - "documentation" : "The raw packaged SAM template of your application." + "documentation" : "The raw packaged AWS SAM template of your application.
" }, "TemplateUrl" : { "shape" : "__string", "locationName" : "templateUrl", - "documentation" : "A link to the packaged SAM template of your application." + "documentation" : "A link to the packaged AWS SAM template of your application.
" } }, "required" : [ "ApplicationId", "SemanticVersion" ] @@ -781,32 +845,32 @@ "ApplicationId" : { "shape" : "__string", "locationName" : "applicationId", - "documentation" : "The application Amazon Resource Name (ARN)." + "documentation" : "The application Amazon Resource Name (ARN).
" }, "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "The date/time this resource was created." + "documentation" : "The date/time this resource was created.
" }, "ParameterDefinitions" : { - "shape" : "ListOfParameterDefinition", + "shape" : "__listOfParameterDefinition", "locationName" : "parameterDefinitions", - "documentation" : "Array of parameter types supported by the application." + "documentation" : "Array of parameter types supported by the application.
" }, "SemanticVersion" : { "shape" : "__string", "locationName" : "semanticVersion", - "documentation" : "The semantic version of the application:\\n\\n https://semver.org/" + "documentation" : "The semantic version of the application:
\n https://semver.org/\n
" }, "SourceCodeUrl" : { "shape" : "__string", "locationName" : "sourceCodeUrl", - "documentation" : "A link to a public repository for the source code of your application." + "documentation" : "A link to a public repository for the source code of your application.
" }, "TemplateUrl" : { "shape" : "__string", "locationName" : "templateUrl", - "documentation" : "A link to the packaged SAM template of your application." + "documentation" : "A link to the packaged AWS SAM template of your application.
" } } }, @@ -814,22 +878,23 @@ "type" : "structure", "members" : { "ParameterOverrides" : { - "shape" : "ListOfParameterValue", + "shape" : "__listOfParameterValue", "locationName" : "parameterOverrides", - "documentation" : "A list of parameter values for the parameters of the application." + "documentation" : "A list of parameter values for the parameters of the application.
" }, "SemanticVersion" : { "shape" : "__string", "locationName" : "semanticVersion", - "documentation" : "The semantic version of the application:\\n\\n https://semver.org/" + "documentation" : "The semantic version of the application:
\n https://semver.org/\n
" }, "StackName" : { "shape" : "__string", "locationName" : "stackName", - "documentation" : "The name or the unique ID of the stack for which you are creating a change set. AWS CloudFormation generates\\n the change set by comparing this stack's information with the information that you submit, such as a modified\\n template or different parameter input values. \\nConstraints: Minimum length of 1.\\nPattern: ([a-zA-Z][-a-zA-Z0-9]*)|(arn:\\b(aws|aws-us-gov|aws-cn)\\b:[-a-zA-Z0-9:/._+]*)" + "documentation" : "The name or the unique ID of the stack for which you are creating a change set. AWS CloudFormation generates\n the change set by comparing this stack's information with the information that you submit, such as a modified\n template or different parameter input values.
Constraints: Minimum length of 1.
Pattern: ([a-zA-Z][-a-zA-Z0-9]*)|(arn:\\b(aws|aws-us-gov|aws-cn)\\b:[-a-zA-Z0-9:/._+]*)
" } }, - "documentation" : "Create application ChangeSet request." + "documentation" : "Create application ChangeSet request.
", + "required" : [ "StackName" ] }, "CreateCloudFormationChangeSetRequest" : { "type" : "structure", @@ -838,25 +903,24 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "The id of the application to create the ChangeSet for" + "documentation" : "The ID of the application to get.
" }, "ParameterOverrides" : { - "shape" : "ListOfParameterValue", + "shape" : "__listOfParameterValue", "locationName" : "parameterOverrides", - "documentation" : "A list of parameter values for the parameters of the application." + "documentation" : "A list of parameter values for the parameters of the application.
" }, "SemanticVersion" : { "shape" : "__string", "locationName" : "semanticVersion", - "documentation" : "The semantic version of the application:\\n\\n https://semver.org/" + "documentation" : "The semantic version of the application:
\n https://semver.org/\n
" }, "StackName" : { "shape" : "__string", "locationName" : "stackName", - "documentation" : "The name or the unique ID of the stack for which you are creating a change set. AWS CloudFormation generates\\n the change set by comparing this stack's information with the information that you submit, such as a modified\\n template or different parameter input values. \\nConstraints: Minimum length of 1.\\nPattern: ([a-zA-Z][-a-zA-Z0-9]*)|(arn:\\b(aws|aws-us-gov|aws-cn)\\b:[-a-zA-Z0-9:/._+]*)" + "documentation" : "The name or the unique ID of the stack for which you are creating a change set. AWS CloudFormation generates\n the change set by comparing this stack's information with the information that you submit, such as a modified\n template or different parameter input values.
Constraints: Minimum length of 1.
Pattern: ([a-zA-Z][-a-zA-Z0-9]*)|(arn:\\b(aws|aws-us-gov|aws-cn)\\b:[-a-zA-Z0-9:/._+]*)
" } }, - "documentation" : "Create application ChangeSet request", "required" : [ "ApplicationId" ] }, "CreateCloudFormationChangeSetResponse" : { @@ -865,40 +929,52 @@ "ApplicationId" : { "shape" : "__string", "locationName" : "applicationId", - "documentation" : "The application Amazon Resource Name (ARN)." + "documentation" : "The application Amazon Resource Name (ARN).
" }, "ChangeSetId" : { "shape" : "__string", "locationName" : "changeSetId", - "documentation" : "The ARN of the change set.\\nLength Constraints: Minimum length of 1.\\nPattern: arn:[-a-zA-Z0-9:/]*" + "documentation" : "The ARN of the change set.
Length Constraints: Minimum length of 1.
Pattern: Amazon Resource Name (ARN):[-a-zA-Z0-9:/]*
" }, "SemanticVersion" : { "shape" : "__string", "locationName" : "semanticVersion", - "documentation" : "The semantic version of the application:\\n\\n https://semver.org/" + "documentation" : "The semantic version of the application:
\n https://semver.org/\n
" }, "StackId" : { "shape" : "__string", "locationName" : "stackId", - "documentation" : "The unique ID of the stack." + "documentation" : "The unique ID of the stack.
" } } }, + "DeleteApplicationRequest" : { + "type" : "structure", + "members" : { + "ApplicationId" : { + "shape" : "__string", + "location" : "uri", + "locationName" : "applicationId", + "documentation" : "The ID of the application to get.
" + } + }, + "required" : [ "ApplicationId" ] + }, "ForbiddenException" : { "type" : "structure", "members" : { "ErrorCode" : { "shape" : "__string", "locationName" : "errorCode", - "documentation" : "403" + "documentation" : "403
" }, "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "The client is not authenticated." + "documentation" : "The client is not authenticated.
" } }, - "documentation" : "The client is not authenticated.", + "documentation" : "The client is not authenticated.
", "exception" : true, "error" : { "httpStatusCode" : 403 @@ -911,7 +987,7 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "The id of the application to get policy for" + "documentation" : "The ID of the application to get.
" } }, "required" : [ "ApplicationId" ] @@ -920,9 +996,9 @@ "type" : "structure", "members" : { "Statements" : { - "shape" : "ListOfApplicationPolicyStatement", + "shape" : "__listOfApplicationPolicyStatement", "locationName" : "statements", - "documentation" : "Array of policy statements applied to the application." + "documentation" : "Array of policy statements applied to the application.
" } } }, @@ -933,13 +1009,13 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "The id of the application to get" + "documentation" : "The ID of the application to get.
" }, "SemanticVersion" : { "shape" : "__string", "location" : "querystring", "locationName" : "semanticVersion", - "documentation" : "The semantic version of the application to get" + "documentation" : "The semantic version of the application to get.
" } }, "required" : [ "ApplicationId" ] @@ -950,52 +1026,57 @@ "ApplicationId" : { "shape" : "__string", "locationName" : "applicationId", - "documentation" : "The application Amazon Resource Name (ARN)." + "documentation" : "The application Amazon Resource Name (ARN).
" }, "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "The name of the author publishing the app.\\nMin Length=1. Max Length=127.\\nPattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";" + "documentation" : "The name of the author publishing the app.
Min Length=1. Max Length=127.
Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";
" }, "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "The date/time this resource was created." + "documentation" : "The date/time this resource was created.
" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "The description of the application.\\nMin Length=1. Max Length=256" + "documentation" : "The description of the application.
Min Length=1. Max Length=256
" + }, + "HomePageUrl" : { + "shape" : "__string", + "locationName" : "homePageUrl", + "documentation" : "A URL with more information about the application, for example\n the location of your GitHub repository for the application.
" }, "Labels" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "Labels to improve discovery of apps in search results.\\nMin Length=1. Max Length=127. Maximum number of labels: 10\\nPattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";" + "documentation" : "Labels to improve discovery of apps in search results.
Min Length=1. Max Length=127. Maximum number of labels: 10
Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";
" }, "LicenseUrl" : { "shape" : "__string", "locationName" : "licenseUrl", - "documentation" : "A link to a license file of the app that matches the spdxLicenseID of your application.\\nMax size 5 MB" + "documentation" : "A link to a license file of the app that matches the spdxLicenseID of your application.
Max size 5 MB
" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "The name of the application.\\nMin Length=1. Max Length=140\\nPattern: \"[a-zA-Z0-9\\\\-]+\";" + "documentation" : "The name of the application.
Min Length=1. Max Length=140
Pattern: \"[a-zA-Z0-9\\\\-]+\";
" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.\\nMax size 5 MB" + "documentation" : "A link to the readme file that contains a more detailed description of the application and how it works in Markdown language.
Max size 5 MB
" }, "SpdxLicenseId" : { "shape" : "__string", "locationName" : "spdxLicenseId", - "documentation" : "A valid identifier from https://spdx.org/licenses/." + "documentation" : "A valid identifier from https://spdx.org/licenses/.
" }, "Version" : { "shape" : "Version", "locationName" : "version", - "documentation" : "Version information about the application." + "documentation" : "Version information about the application.
" } } }, @@ -1005,15 +1086,15 @@ "ErrorCode" : { "shape" : "__string", "locationName" : "errorCode", - "documentation" : "500" + "documentation" : "500
" }, "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "The AWS Serverless Application Repository service encountered an internal error." + "documentation" : "The AWS Serverless Application Repository service encountered an internal error.
" } }, - "documentation" : "The AWS Serverless Application Repository service encountered an internal error.", + "documentation" : "The AWS Serverless Application Repository service encountered an internal error.
", "exception" : true, "error" : { "httpStatusCode" : 500 @@ -1026,19 +1107,19 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "The id of the application to list" + "documentation" : "The ID of the application to get.
" }, "MaxItems" : { "shape" : "MaxItems", "location" : "querystring", "locationName" : "maxItems", - "documentation" : "The total number of items to return" + "documentation" : "The total number of items to return.
" }, "NextToken" : { "shape" : "__string", "location" : "querystring", "locationName" : "nextToken", - "documentation" : "A token to specify where to start paginating" + "documentation" : "A token to specify where to start paginating.
" } }, "required" : [ "ApplicationId" ] @@ -1049,12 +1130,12 @@ "NextToken" : { "shape" : "__string", "locationName" : "nextToken", - "documentation" : "The token to request the next page of results." + "documentation" : "The token to request the next page of results.
" }, "Versions" : { - "shape" : "ListOfVersionSummary", + "shape" : "__listOfVersionSummary", "locationName" : "versions", - "documentation" : "Array of version summaries for the application." + "documentation" : "Array of version summaries for the application.
" } } }, @@ -1065,13 +1146,13 @@ "shape" : "MaxItems", "location" : "querystring", "locationName" : "maxItems", - "documentation" : "The total number of items to return" + "documentation" : "The total number of items to return.
" }, "NextToken" : { "shape" : "__string", "location" : "querystring", "locationName" : "nextToken", - "documentation" : "A token to specify where to start paginating" + "documentation" : "A token to specify where to start paginating.
" } } }, @@ -1079,53 +1160,17 @@ "type" : "structure", "members" : { "Applications" : { - "shape" : "ListOfApplicationSummary", + "shape" : "__listOfApplicationSummary", "locationName" : "applications", - "documentation" : "Array of application summaries." + "documentation" : "Array of application summaries.
" }, "NextToken" : { "shape" : "__string", "locationName" : "nextToken", - "documentation" : "The token to request the next page of results." + "documentation" : "The token to request the next page of results.
" } } }, - "ListOfApplicationPolicyStatement" : { - "type" : "list", - "member" : { - "shape" : "ApplicationPolicyStatement" - } - }, - "ListOfApplicationSummary" : { - "type" : "list", - "member" : { - "shape" : "ApplicationSummary" - } - }, - "ListOfParameterDefinition" : { - "type" : "list", - "member" : { - "shape" : "ParameterDefinition" - } - }, - "ListOfParameterValue" : { - "type" : "list", - "member" : { - "shape" : "ParameterValue" - } - }, - "ListOfVersionSummary" : { - "type" : "list", - "member" : { - "shape" : "VersionSummary" - } - }, - "ListOf__string" : { - "type" : "list", - "member" : { - "shape" : "__string" - } - }, "MaxItems" : { "type" : "integer", "min" : 1, @@ -1137,15 +1182,15 @@ "ErrorCode" : { "shape" : "__string", "locationName" : "errorCode", - "documentation" : "404" + "documentation" : "404
" }, "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "The resource (for example, an access policy statement) specified in the request does not exist." + "documentation" : "The resource (for example, an access policy statement) specified in the request does not exist.
" } }, - "documentation" : "The resource (for example, an access policy statement) specified in the request does not exist.", + "documentation" : "The resource (for example, an access policy statement) specified in the request does not exist.
", "exception" : true, "error" : { "httpStatusCode" : 404 @@ -1157,70 +1202,71 @@ "AllowedPattern" : { "shape" : "__string", "locationName" : "allowedPattern", - "documentation" : "A regular expression that represents the patterns to allow for String types." + "documentation" : "A regular expression that represents the patterns to allow for String types.
" }, "AllowedValues" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "allowedValues", - "documentation" : "Array containing the list of values allowed for the parameter." + "documentation" : "Array containing the list of values allowed for the parameter.
" }, "ConstraintDescription" : { "shape" : "__string", "locationName" : "constraintDescription", - "documentation" : "A string that explains a constraint when the constraint is violated. For example, without a constraint description,\\n a parameter that has an allowed pattern of [A-Za-z0-9]+ displays the following error message when the user\\n specifies an invalid value:\\n\\n Malformed input-Parameter MyParameter must match pattern [A-Za-z0-9]+ \\n \\nBy adding a constraint description, such as \"must contain only uppercase and lowercase letters, and numbers,\" you can display\\n the following customized error message:\\n\\n Malformed input-Parameter MyParameter must contain only uppercase and lowercase letters and numbers." + "documentation" : "A string that explains a constraint when the constraint is violated. For example, without a constraint description,\n a parameter that has an allowed pattern of [A-Za-z0-9]+ displays the following error message when the user\n specifies an invalid value:
\n Malformed input-Parameter MyParameter must match pattern [A-Za-z0-9]+\n
By adding a constraint description, such as \"must contain only uppercase and lowercase letters, and numbers,\" you can display\n the following customized error message:
\n Malformed input-Parameter MyParameter must contain only uppercase and lowercase letters and numbers.\n
" }, "DefaultValue" : { "shape" : "__string", "locationName" : "defaultValue", - "documentation" : "A value of the appropriate type for the template to use if no value is specified when a stack is created.\\n If you define constraints for the parameter, you must specify a value that adheres to those constraints." + "documentation" : "A value of the appropriate type for the template to use if no value is specified when a stack is created.\n If you define constraints for the parameter, you must specify a value that adheres to those constraints.
" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "A string of up to 4,000 characters that describes the parameter." + "documentation" : "A string of up to 4,000 characters that describes the parameter.
" }, "MaxLength" : { "shape" : "__integer", "locationName" : "maxLength", - "documentation" : "An integer value that determines the largest number of characters you want to allow for String types." + "documentation" : "An integer value that determines the largest number of characters you want to allow for String types.
" }, "MaxValue" : { "shape" : "__integer", "locationName" : "maxValue", - "documentation" : "A numeric value that determines the largest numeric value you want to allow for Number types." + "documentation" : "A numeric value that determines the largest numeric value you want to allow for Number types.
" }, "MinLength" : { "shape" : "__integer", "locationName" : "minLength", - "documentation" : "An integer value that determines the smallest number of characters you want to allow for String types." + "documentation" : "An integer value that determines the smallest number of characters you want to allow for String types.
" }, "MinValue" : { "shape" : "__integer", "locationName" : "minValue", - "documentation" : "A numeric value that determines the smallest numeric value you want to allow for Number types." + "documentation" : "A numeric value that determines the smallest numeric value you want to allow for Number types.
" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "The name of the parameter." + "documentation" : "The name of the parameter.
" }, "NoEcho" : { "shape" : "__boolean", "locationName" : "noEcho", - "documentation" : "Whether to mask the parameter value whenever anyone makes a call that describes the stack. If you set the\\n value to true, the parameter value is masked with asterisks (*****)." + "documentation" : "Whether to mask the parameter value whenever anyone makes a call that describes the stack. If you set the\n value to true, the parameter value is masked with asterisks (*****).
" }, "ReferencedByResources" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "referencedByResources", - "documentation" : "A list of SAM resources that use this parameter." + "documentation" : "A list of AWS SAM resources that use this parameter.
" }, "Type" : { "shape" : "__string", "locationName" : "type", - "documentation" : "The type of the parameter.\\nValid values: String | Number | ListThe type of the parameter.
Valid values: String | Number | List<Number> | CommaDelimitedList\n
\n String: A literal string.
For example, users could specify \"MyUserName\".
\n Number: An integer or float. AWS CloudFormation validates the parameter value as a number; however, when you use the\n parameter elsewhere in your template (for example, by using the Ref intrinsic function), the parameter value becomes a string.
For example, users could specify \"8888\".
\n List<Number>: An array of integers or floats that are separated by commas. AWS CloudFormation validates the parameter value as numbers; however, when\n you use the parameter elsewhere in your template (for example, by using the Ref intrinsic function), the parameter value becomes a list of strings.
For example, users could specify \"80,20\", and a Ref results in [\"80\",\"20\"].
\n CommaDelimitedList: An array of literal strings that are separated by commas. The total number of strings should be one more than the total number of commas.\n Also, each member string is space-trimmed.
For example, users could specify \"test,dev,prod\", and a Ref results in [\"test\",\"dev\",\"prod\"].
" } }, - "documentation" : "Parameters supported by the application." + "documentation" : "Parameters supported by the application.
", + "required" : [ "ReferencedByResources", "Name" ] }, "ParameterValue" : { "type" : "structure", @@ -1228,15 +1274,16 @@ "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation\\n uses the default value that is specified in your template." + "documentation" : "The key associated with the parameter. If you don't specify a key and value for a particular parameter, AWS CloudFormation\n uses the default value that is specified in your template.
" }, "Value" : { "shape" : "__string", "locationName" : "value", - "documentation" : "The input value associated with the parameter." + "documentation" : "The input value associated with the parameter.
" } }, - "documentation" : "Parameter value of the application." + "documentation" : "Parameter value of the application.
", + "required" : [ "Value", "Name" ] }, "PutApplicationPolicyRequest" : { "type" : "structure", @@ -1245,24 +1292,23 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "The id of the application to put policy for" + "documentation" : "The ID of the application to get.
" }, "Statements" : { - "shape" : "ListOfApplicationPolicyStatement", + "shape" : "__listOfApplicationPolicyStatement", "locationName" : "statements", - "documentation" : "Array of policy statements applied to the application." + "documentation" : "Array of policy statements applied to the application.
" } }, - "documentation" : "Put policy request", "required" : [ "ApplicationId" ] }, "PutApplicationPolicyResponse" : { "type" : "structure", "members" : { "Statements" : { - "shape" : "ListOfApplicationPolicyStatement", + "shape" : "__listOfApplicationPolicyStatement", "locationName" : "statements", - "documentation" : "Array of policy statements applied to the application." + "documentation" : "Array of policy statements applied to the application.
" } } }, @@ -1272,15 +1318,15 @@ "ErrorCode" : { "shape" : "__string", "locationName" : "errorCode", - "documentation" : "429" + "documentation" : "429
" }, "Message" : { "shape" : "__string", "locationName" : "message", - "documentation" : "The client is sending more than the allowed number of requests per unit time." + "documentation" : "The client is sending more than the allowed number of requests per unit time.
" } }, - "documentation" : "The client is sending more than the allowed number of requests per unit time.", + "documentation" : "The client is sending more than the allowed number of requests per unit time.
", "exception" : true, "error" : { "httpStatusCode" : 429 @@ -1292,30 +1338,35 @@ "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "The name of the author publishing the app.\\nMin Length=1. Max Length=127.\\nPattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";" + "documentation" : "The name of the author publishing the app.
Min Length=1. Max Length=127.
Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";
" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "The description of the application.\\nMin Length=1. Max Length=256" + "documentation" : "The description of the application.
Min Length=1. Max Length=256
" + }, + "HomePageUrl" : { + "shape" : "__string", + "locationName" : "homePageUrl", + "documentation" : "A URL with more information about the application, for example\n the location of your GitHub repository for the application.
" }, "Labels" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "Labels to improve discovery of apps in search results.\\nMin Length=1. Max Length=127. Maximum number of labels: 10\\nPattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";" + "documentation" : "Labels to improve discovery of apps in search results.
Min Length=1. Max Length=127. Maximum number of labels: 10
Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";
" }, "ReadmeBody" : { "shape" : "__string", "locationName" : "readmeBody", - "documentation" : "A raw text Readme file that contains a more detailed description of the application and how it works in markdown language.\\nMax size 5 MB" + "documentation" : "A raw text Readme file that contains a more detailed description of the application and how it works in markdown language.
Max size 5 MB
" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.\\nMax size 5 MB" + "documentation" : "A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.
Max size 5 MB
" } }, - "documentation" : "Update application request." + "documentation" : "Update application request.
" }, "UpdateApplicationRequest" : { "type" : "structure", @@ -1324,32 +1375,37 @@ "shape" : "__string", "location" : "uri", "locationName" : "applicationId", - "documentation" : "The id of the application to update" + "documentation" : "The ID of the application to get.
" }, "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "The name of the author publishing the app.\\nMin Length=1. Max Length=127.\\nPattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";" + "documentation" : "The name of the author publishing the app.
Min Length=1. Max Length=127.
Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";
" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "The description of the application.\\nMin Length=1. Max Length=256" + "documentation" : "The description of the application.
Min Length=1. Max Length=256
" + }, + "HomePageUrl" : { + "shape" : "__string", + "locationName" : "homePageUrl", + "documentation" : "A URL with more information about the application, for example\n the location of your GitHub repository for the application.
" }, "Labels" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "Labels to improve discovery of apps in search results.\\nMin Length=1. Max Length=127. Maximum number of labels: 10\\nPattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";" + "documentation" : "Labels to improve discovery of apps in search results.
Min Length=1. Max Length=127. Maximum number of labels: 10
Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";
" }, "ReadmeBody" : { "shape" : "__string", "locationName" : "readmeBody", - "documentation" : "A raw text Readme file that contains a more detailed description of the application and how it works in markdown language.\\nMax size 5 MB" + "documentation" : "A raw text Readme file that contains a more detailed description of the application and how it works in markdown language.
Max size 5 MB
" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.\\nMax size 5 MB" + "documentation" : "A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.
Max size 5 MB
" } }, "required" : [ "ApplicationId" ] @@ -1360,52 +1416,57 @@ "ApplicationId" : { "shape" : "__string", "locationName" : "applicationId", - "documentation" : "The application Amazon Resource Name (ARN)." + "documentation" : "The application Amazon Resource Name (ARN).
" }, "Author" : { "shape" : "__string", "locationName" : "author", - "documentation" : "The name of the author publishing the app.\\nMin Length=1. Max Length=127.\\nPattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";" + "documentation" : "The name of the author publishing the app.
Min Length=1. Max Length=127.
Pattern \"^[a-z0-9](([a-z0-9]|-(?!-))*[a-z0-9])?$\";
" }, "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "The date/time this resource was created." + "documentation" : "The date/time this resource was created.
" }, "Description" : { "shape" : "__string", "locationName" : "description", - "documentation" : "The description of the application.\\nMin Length=1. Max Length=256" + "documentation" : "The description of the application.
Min Length=1. Max Length=256
" + }, + "HomePageUrl" : { + "shape" : "__string", + "locationName" : "homePageUrl", + "documentation" : "A URL with more information about the application, for example\n the location of your GitHub repository for the application.
" }, "Labels" : { - "shape" : "ListOf__string", + "shape" : "__listOf__string", "locationName" : "labels", - "documentation" : "Labels to improve discovery of apps in search results.\\nMin Length=1. Max Length=127. Maximum number of labels: 10\\nPattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";" + "documentation" : "Labels to improve discovery of apps in search results.
Min Length=1. Max Length=127. Maximum number of labels: 10
Pattern: \"^[a-zA-Z0-9+\\\\-_:\\\\/@]+$\";
" }, "LicenseUrl" : { "shape" : "__string", "locationName" : "licenseUrl", - "documentation" : "A link to a license file of the app that matches the spdxLicenseID of your application.\\nMax size 5 MB" + "documentation" : "A link to a license file of the app that matches the spdxLicenseID of your application.
Max size 5 MB
" }, "Name" : { "shape" : "__string", "locationName" : "name", - "documentation" : "The name of the application.\\nMin Length=1. Max Length=140\\nPattern: \"[a-zA-Z0-9\\\\-]+\";" + "documentation" : "The name of the application.
Min Length=1. Max Length=140
Pattern: \"[a-zA-Z0-9\\\\-]+\";
" }, "ReadmeUrl" : { "shape" : "__string", "locationName" : "readmeUrl", - "documentation" : "A link to the Readme file that contains a more detailed description of the application and how it works in markdown language.\\nMax size 5 MB" + "documentation" : "A link to the readme file that contains a more detailed description of the application and how it works in Markdown language.
Max size 5 MB
" }, "SpdxLicenseId" : { "shape" : "__string", "locationName" : "spdxLicenseId", - "documentation" : "A valid identifier from https://spdx.org/licenses/." + "documentation" : "A valid identifier from https://spdx.org/licenses/.
" }, "Version" : { "shape" : "Version", "locationName" : "version", - "documentation" : "Version information about the application." + "documentation" : "Version information about the application.
" } } }, @@ -1415,35 +1476,36 @@ "ApplicationId" : { "shape" : "__string", "locationName" : "applicationId", - "documentation" : "The application Amazon Resource Name (ARN)." + "documentation" : "The application Amazon Resource Name (ARN).
" }, "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "The date/time this resource was created." + "documentation" : "The date/time this resource was created.
" }, "ParameterDefinitions" : { - "shape" : "ListOfParameterDefinition", + "shape" : "__listOfParameterDefinition", "locationName" : "parameterDefinitions", - "documentation" : "Array of parameter types supported by the application." + "documentation" : "Array of parameter types supported by the application.
" }, "SemanticVersion" : { "shape" : "__string", "locationName" : "semanticVersion", - "documentation" : "The semantic version of the application:\\n\\n https://semver.org/" + "documentation" : "The semantic version of the application:
\n https://semver.org/\n
" }, "SourceCodeUrl" : { "shape" : "__string", "locationName" : "sourceCodeUrl", - "documentation" : "A link to a public repository for the source code of your application." + "documentation" : "A link to a public repository for the source code of your application.
" }, "TemplateUrl" : { "shape" : "__string", "locationName" : "templateUrl", - "documentation" : "A link to the packaged SAM template of your application." + "documentation" : "A link to the packaged AWS SAM template of your application.
" } }, - "documentation" : "Application version details." + "documentation" : "Application version details.
", + "required" : [ "TemplateUrl", "ParameterDefinitions", "CreationTime", "ApplicationId", "SemanticVersion" ] }, "VersionSummary" : { "type" : "structure", @@ -1451,25 +1513,26 @@ "ApplicationId" : { "shape" : "__string", "locationName" : "applicationId", - "documentation" : "The application Amazon Resource Name (ARN)." + "documentation" : "The application Amazon Resource Name (ARN).
" }, "CreationTime" : { "shape" : "__string", "locationName" : "creationTime", - "documentation" : "The date/time this resource was created." + "documentation" : "The date/time this resource was created.
" }, "SemanticVersion" : { "shape" : "__string", "locationName" : "semanticVersion", - "documentation" : "The semantic version of the application:\\n\\n https://semver.org/" + "documentation" : "The semantic version of the application:
\n https://semver.org/\n
" }, "SourceCodeUrl" : { "shape" : "__string", "locationName" : "sourceCodeUrl", - "documentation" : "A link to a public repository for the source code of your application." + "documentation" : "A link to a public repository for the source code of your application.
" } }, - "documentation" : "Application version summary." + "documentation" : "Application version summary.
", + "required" : [ "CreationTime", "ApplicationId", "SemanticVersion" ] }, "__boolean" : { "type" : "boolean" @@ -1480,22 +1543,48 @@ "__integer" : { "type" : "integer" }, + "__listOfApplicationPolicyStatement" : { + "type" : "list", + "member" : { + "shape" : "ApplicationPolicyStatement" + } + }, + "__listOfApplicationSummary" : { + "type" : "list", + "member" : { + "shape" : "ApplicationSummary" + } + }, + "__listOfParameterDefinition" : { + "type" : "list", + "member" : { + "shape" : "ParameterDefinition" + } + }, + "__listOfParameterValue" : { + "type" : "list", + "member" : { + "shape" : "ParameterValue" + } + }, + "__listOfVersionSummary" : { + "type" : "list", + "member" : { + "shape" : "VersionSummary" + } + }, + "__listOf__string" : { + "type" : "list", + "member" : { + "shape" : "__string" + } + }, + "__long" : { + "type" : "long" + }, "__string" : { "type" : "string" - }, - "__timestamp" : { - "type" : "timestamp" } }, - "authorizers" : { - "authorization_strategy" : { - "name" : "authorization_strategy", - "type" : "provided", - "placement" : { - "location" : "header", - "name" : "Authorization" - } - } - }, - "documentation" : "AWS Serverless Repository" -} \ No newline at end of file + "documentation" : "The AWS Serverless Application Repository makes it easy for developers and enterprises to quickly find\n and deploy serverless applications in the AWS Cloud. For more information about serverless applications,\n see Serverless Computing and Applications on the AWS website.
The AWS Serverless Application Repository is deeply integrated with the AWS Lambda console, so that developers of \n all levels can get started with serverless computing without needing to learn anything new. You can use category \n keywords to browse for applications such as web and mobile backends, data processing applications, or chatbots. \n You can also search for applications by name, publisher, or event source. To use an application, you simply choose it, \n configure any required fields, and deploy it with a few clicks.
You can also easily publish applications, sharing them publicly with the community at large, or privately\n within your team or across your organization. To publish a serverless application (or app), you can use the\n AWS Management Console, AWS Command Line Interface (AWS CLI), or AWS SDKs to upload the code. Along with the\n code, you upload a simple manifest file, also known as the AWS Serverless Application Model (AWS SAM) template.\n For more information about AWS SAM, see AWS Serverless Application Model (AWS SAM) on the AWS Labs\n GitHub repository.
The AWS Serverless Application Repository Developer Guide contains more information about the two developer\n experiences available:
Consuming Applications – Browse for applications and view information about them, including\n source code and readme files. Also install, configure, and deploy applications of your choosing.
\nPublishing Applications – Configure and upload applications to make them available to other\n developers, and publish new versions of applications.
\nPermanently deletes an IPSet. You can't delete an IPSet
if it's still used in any Rules
or if it still includes any IP addresses.
If you just want to remove an IPSet
from a Rule
, use UpdateRule.
To permanently delete an IPSet
from AWS WAF, perform the following steps:
Update the IPSet
to remove IP address ranges, if any. For more information, see UpdateIPSet.
Use GetChangeToken to get the change token that you provide in the ChangeToken
parameter of a DeleteIPSet
request.
Submit a DeleteIPSet
request.
Permanently deletes an IAM policy from the specified RuleGroup.
The user making the request must be the owner of the RuleGroup.
" + }, "DeleteRateBasedRule":{ "name":"DeleteRateBasedRule", "http":{ @@ -541,6 +556,20 @@ ], "documentation":"Returns the IPSet that is specified by IPSetId
.
Returns the IAM policy attached to the RuleGroup.
" + }, "GetRateBasedRule":{ "name":"GetRateBasedRule", "http":{ @@ -933,6 +962,22 @@ ], "documentation":"Returns an array of XssMatchSet objects.
" }, + "PutPermissionPolicy":{ + "name":"PutPermissionPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutPermissionPolicyRequest"}, + "output":{"shape":"PutPermissionPolicyResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidPermissionPolicyException"} + ], + "documentation":"Attaches a IAM policy to the specified resource. The only supported use for this action is to share a RuleGroup across accounts.
The PutPermissionPolicy
is subject to the following restrictions:
You can attach only one policy with each PutPermissionPolicy
request.
The policy must include an Effect
, Action
and Principal
.
Effect
must specify Allow
.
The Action
in the policy must be waf:UpdateWebACL
and waf-regional:UpdateWebACL
. Any extra or wildcard actions in the policy will be rejected.
The policy cannot include a Resource
parameter.
The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.
The user making the request must be the owner of the RuleGroup.
Your policy must be composed using IAM Policy version 2012-10-17.
For more information, see IAM Policies.
An example of a valid policy parameter is shown in the Examples section below.
" + }, "UpdateByteMatchSet":{ "name":"UpdateByteMatchSet", "http":{ @@ -1199,11 +1244,11 @@ }, "Action":{ "shape":"WafAction", - "documentation":"Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the Rule
. Valid values for Action
include the following:
ALLOW
: CloudFront responds with the requested object.
BLOCK
: CloudFront responds with an HTTP 403 (Forbidden) status code.
COUNT
: AWS WAF increments a counter of requests that match the conditions in the rule and then continues to inspect the web request based on the remaining rules in the web ACL.
The Action
data type within ActivatedRule
is used only when submitting an UpdateWebACL
request. ActivatedRule|Action
is not applicable and therefore not available for UpdateRuleGroup
.
Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the Rule
. Valid values for Action
include the following:
ALLOW
: CloudFront responds with the requested object.
BLOCK
: CloudFront responds with an HTTP 403 (Forbidden) status code.
COUNT
: AWS WAF increments a counter of requests that match the conditions in the rule and then continues to inspect the web request based on the remaining rules in the web ACL.
ActivatedRule|OverrideAction
applies only when updating or adding a RuleGroup
to a WebACL
. In this case you do not use ActivatedRule|Action
. For all other update requests, ActivatedRule|Action
is used instead of ActivatedRule|OverrideAction
.
Use the OverrideAction
to test your RuleGroup
.
Any rule in a RuleGroup
can potentially block a request. If you set the OverrideAction
to None
, the RuleGroup
will block a request if any individual rule in the RuleGroup
matches the request and is configured to block that request. However if you first want to test the RuleGroup
, set the OverrideAction
to Count
. The RuleGroup
will then override any block action specified by individual rules contained within the group. Instead of blocking matching requests, those requests will be counted. You can view a record of counted requests using GetSampledRequests.
The OverrideAction
data type within ActivatedRule
is used only when submitting an UpdateRuleGroup
request. ActivatedRule|OverrideAction
is not applicable and therefore not available for UpdateWebACL
.
Use the OverrideAction
to test your RuleGroup
.
Any rule in a RuleGroup
can potentially block a request. If you set the OverrideAction
to None
, the RuleGroup
will block a request if any individual rule in the RuleGroup
matches the request and is configured to block that request. However if you first want to test the RuleGroup
, set the OverrideAction
to Count
. The RuleGroup
will then override any block action specified by individual rules contained within the group. Instead of blocking matching requests, those requests will be counted. You can view a record of counted requests using GetSampledRequests.
ActivatedRule|OverrideAction
applies only when updating or adding a RuleGroup
to a WebACL
. In this case you do not use ActivatedRule|Action
. For all other update requests, ActivatedRule|Action
is used instead of ActivatedRule|OverrideAction
.
The Amazon Resource Name (ARN) of the RuleGroup from which you want to delete the policy.
The user making the request must be the owner of the RuleGroup.
" + } + } + }, + "DeletePermissionPolicyResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteRateBasedRuleRequest":{ "type":"structure", "required":[ @@ -2551,6 +2611,25 @@ } } }, + "GetPermissionPolicyRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the RuleGroup for which you want to get the policy.
" + } + } + }, + "GetPermissionPolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{ + "shape":"PolicyString", + "documentation":"The IAM policy attached to the specified RuleGroup.
" + } + } + }, "GetRateBasedRuleManagedKeysRequest":{ "type":"structure", "required":["RuleId"], @@ -3420,6 +3499,10 @@ "ILLEGAL_COMBINATION" ] }, + "PolicyString":{ + "type":"string", + "min":1 + }, "PopulationSize":{"type":"long"}, "PositionalConstraint":{ "type":"string", @@ -3470,6 +3553,28 @@ "type":"list", "member":{"shape":"Predicate"} }, + "PutPermissionPolicyRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Policy" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the RuleGroup to which you want to attach the policy.
" + }, + "Policy":{ + "shape":"PolicyString", + "documentation":"The policy to attach to the specified RuleGroup.
" + } + } + }, + "PutPermissionPolicyResponse":{ + "type":"structure", + "members":{ + } + }, "RateBasedRule":{ "type":"structure", "required":[ @@ -4328,7 +4433,7 @@ }, "Updates":{ "shape":"RuleGroupUpdates", - "documentation":"An array of RuleGroupUpdate
objects that you want to insert into or delete from a RuleGroup.
You can only insert REGULAR
rules into a rule group.
The Action
data type within ActivatedRule
is used only when submitting an UpdateWebACL
request. ActivatedRule|Action
is not applicable and therefore not available for UpdateRuleGroup
.
An array of RuleGroupUpdate
objects that you want to insert into or delete from a RuleGroup.
You can only insert REGULAR
rules into a rule group.
ActivatedRule|OverrideAction
applies only when updating or adding a RuleGroup
to a WebACL
. In this case you do not use ActivatedRule|Action
. For all other update requests, ActivatedRule|Action
is used instead of ActivatedRule|OverrideAction
.
An array of updates to make to the WebACL.
An array of WebACLUpdate
objects that you want to insert into or delete from a WebACL. For more information, see the applicable data types:
WebACLUpdate: Contains Action
and ActivatedRule
ActivatedRule: Contains Action
, Priority
, RuleId
, and Type
. The OverrideAction
data type within ActivatedRule
is used only when submitting an UpdateRuleGroup
request. ActivatedRule|OverrideAction
is not applicable and therefore not available for UpdateWebACL
.
WafAction: Contains Type
An array of updates to make to the WebACL.
An array of WebACLUpdate
objects that you want to insert into or delete from a WebACL. For more information, see the applicable data types:
WebACLUpdate: Contains Action
and ActivatedRule
ActivatedRule: Contains Action
, OverrideAction
, Priority
, RuleId
, and Type
. ActivatedRule|OverrideAction
applies only when updating or adding a RuleGroup
to a WebACL
. In this case you do not use ActivatedRule|Action
. For all other update requests, ActivatedRule|Action
is used instead of ActivatedRule|OverrideAction
.
WafAction: Contains Type
The operation failed because AWS WAF didn't recognize a parameter in the request. For example:
You specified an invalid parameter name.
You specified an invalid value.
You tried to update an object (ByteMatchSet
, IPSet
, Rule
, or WebACL
) using an action other than INSERT
or DELETE
.
You tried to create a WebACL
with a DefaultAction
Type
other than ALLOW
, BLOCK
, or COUNT
.
You tried to create a RateBasedRule
with a RateKey
value other than IP
.
You tried to update a WebACL
with a WafAction
Type
other than ALLOW
, BLOCK
, or COUNT
.
You tried to update a ByteMatchSet
with a FieldToMatch
Type
other than HEADER, METHOD, QUERY_STRING, URI, or BODY.
You tried to update a ByteMatchSet
with a Field
of HEADER
but no value for Data
.
Your request references an ARN that is malformed, or corresponds to a resource with which a web ACL cannot be associated.
The operation failed because the specified policy is not in the proper format.
The policy is subject to the following restrictions:
You can attach only one policy with each PutPermissionPolicy
request.
The policy must include an Effect
, Action
and Principal
.
Effect
must specify Allow
.
The Action
in the policy must be waf:UpdateWebACL
or waf-regional:UpdateWebACL
. Any extra or wildcard actions in the policy will be rejected.
The policy cannot include a Resource
parameter.
The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.
The user making the request must be the owner of the RuleGroup.
Your policy must be composed using IAM Policy version 2012-10-17.
Permanently deletes an IPSet. You can't delete an IPSet
if it's still used in any Rules
or if it still includes any IP addresses.
If you just want to remove an IPSet
from a Rule
, use UpdateRule.
To permanently delete an IPSet
from AWS WAF, perform the following steps:
Update the IPSet
to remove IP address ranges, if any. For more information, see UpdateIPSet.
Use GetChangeToken to get the change token that you provide in the ChangeToken
parameter of a DeleteIPSet
request.
Submit a DeleteIPSet
request.
Permanently deletes an IAM policy from the specified RuleGroup.
The user making the request must be the owner of the RuleGroup.
" + }, "DeleteRateBasedRule":{ "name":"DeleteRateBasedRule", "http":{ @@ -508,6 +523,20 @@ ], "documentation":"Returns the IPSet that is specified by IPSetId
.
Returns the IAM policy attached to the RuleGroup.
" + }, "GetRateBasedRule":{ "name":"GetRateBasedRule", "http":{ @@ -868,6 +897,22 @@ ], "documentation":"Returns an array of XssMatchSet objects.
" }, + "PutPermissionPolicy":{ + "name":"PutPermissionPolicy", + "http":{ + "method":"POST", + "requestUri":"/" + }, + "input":{"shape":"PutPermissionPolicyRequest"}, + "output":{"shape":"PutPermissionPolicyResponse"}, + "errors":[ + {"shape":"WAFInternalErrorException"}, + {"shape":"WAFStaleDataException"}, + {"shape":"WAFNonexistentItemException"}, + {"shape":"WAFInvalidPermissionPolicyException"} + ], + "documentation":"Attaches a IAM policy to the specified resource. The only supported use for this action is to share a RuleGroup across accounts.
The PutPermissionPolicy
is subject to the following restrictions:
You can attach only one policy with each PutPermissionPolicy
request.
The policy must include an Effect
, Action
and Principal
.
Effect
must specify Allow
.
The Action
in the policy must be waf:UpdateWebACL
and waf-regional:UpdateWebACL
. Any extra or wildcard actions in the policy will be rejected.
The policy cannot include a Resource
parameter.
The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.
The user making the request must be the owner of the RuleGroup.
Your policy must be composed using IAM Policy version 2012-10-17.
For more information, see IAM Policies.
An example of a valid policy parameter is shown in the Examples section below.
" + }, "UpdateByteMatchSet":{ "name":"UpdateByteMatchSet", "http":{ @@ -1134,11 +1179,11 @@ }, "Action":{ "shape":"WafAction", - "documentation":"Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the Rule
. Valid values for Action
include the following:
ALLOW
: CloudFront responds with the requested object.
BLOCK
: CloudFront responds with an HTTP 403 (Forbidden) status code.
COUNT
: AWS WAF increments a counter of requests that match the conditions in the rule and then continues to inspect the web request based on the remaining rules in the web ACL.
The Action
data type within ActivatedRule
is used only when submitting an UpdateWebACL
request. ActivatedRule|Action
is not applicable and therefore not available for UpdateRuleGroup
.
Specifies the action that CloudFront or AWS WAF takes when a web request matches the conditions in the Rule
. Valid values for Action
include the following:
ALLOW
: CloudFront responds with the requested object.
BLOCK
: CloudFront responds with an HTTP 403 (Forbidden) status code.
COUNT
: AWS WAF increments a counter of requests that match the conditions in the rule and then continues to inspect the web request based on the remaining rules in the web ACL.
ActivatedRule|OverrideAction
applies only when updating or adding a RuleGroup
to a WebACL
. In this case you do not use ActivatedRule|Action
. For all other update requests, ActivatedRule|Action
is used instead of ActivatedRule|OverrideAction
.
Use the OverrideAction
to test your RuleGroup
.
Any rule in a RuleGroup
can potentially block a request. If you set the OverrideAction
to None
, the RuleGroup
will block a request if any individual rule in the RuleGroup
matches the request and is configured to block that request. However if you first want to test the RuleGroup
, set the OverrideAction
to Count
. The RuleGroup
will then override any block action specified by individual rules contained within the group. Instead of blocking matching requests, those requests will be counted. You can view a record of counted requests using GetSampledRequests.
The OverrideAction
data type within ActivatedRule
is used only when submitting an UpdateRuleGroup
request. ActivatedRule|OverrideAction
is not applicable and therefore not available for UpdateWebACL
.
Use the OverrideAction
to test your RuleGroup
.
Any rule in a RuleGroup
can potentially block a request. If you set the OverrideAction
to None
, the RuleGroup
will block a request if any individual rule in the RuleGroup
matches the request and is configured to block that request. However if you first want to test the RuleGroup
, set the OverrideAction
to Count
. The RuleGroup
will then override any block action specified by individual rules contained within the group. Instead of blocking matching requests, those requests will be counted. You can view a record of counted requests using GetSampledRequests.
ActivatedRule|OverrideAction
applies only when updating or adding a RuleGroup
to a WebACL
. In this case you do not use ActivatedRule|Action
. For all other update requests, ActivatedRule|Action
is used instead of ActivatedRule|OverrideAction
.
The Amazon Resource Name (ARN) of the RuleGroup from which you want to delete the policy.
The user making the request must be the owner of the RuleGroup.
" + } + } + }, + "DeletePermissionPolicyResponse":{ + "type":"structure", + "members":{ + } + }, "DeleteRateBasedRuleRequest":{ "type":"structure", "required":[ @@ -2449,6 +2509,25 @@ } } }, + "GetPermissionPolicyRequest":{ + "type":"structure", + "required":["ResourceArn"], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the RuleGroup for which you want to get the policy.
" + } + } + }, + "GetPermissionPolicyResponse":{ + "type":"structure", + "members":{ + "Policy":{ + "shape":"PolicyString", + "documentation":"The IAM policy attached to the specified RuleGroup.
" + } + } + }, "GetRateBasedRuleManagedKeysRequest":{ "type":"structure", "required":["RuleId"], @@ -3280,6 +3359,10 @@ "ILLEGAL_COMBINATION" ] }, + "PolicyString":{ + "type":"string", + "min":1 + }, "PopulationSize":{"type":"long"}, "PositionalConstraint":{ "type":"string", @@ -3330,6 +3413,28 @@ "type":"list", "member":{"shape":"Predicate"} }, + "PutPermissionPolicyRequest":{ + "type":"structure", + "required":[ + "ResourceArn", + "Policy" + ], + "members":{ + "ResourceArn":{ + "shape":"ResourceArn", + "documentation":"The Amazon Resource Name (ARN) of the RuleGroup to which you want to attach the policy.
" + }, + "Policy":{ + "shape":"PolicyString", + "documentation":"The policy to attach to the specified RuleGroup.
" + } + } + }, + "PutPermissionPolicyResponse":{ + "type":"structure", + "members":{ + } + }, "RateBasedRule":{ "type":"structure", "required":[ @@ -3540,6 +3645,11 @@ "member":{"shape":"RegexPatternString"}, "max":10 }, + "ResourceArn":{ + "type":"string", + "max":1224, + "min":1 + }, "ResourceId":{ "type":"string", "max":128, @@ -4179,7 +4289,7 @@ }, "Updates":{ "shape":"RuleGroupUpdates", - "documentation":"An array of RuleGroupUpdate
objects that you want to insert into or delete from a RuleGroup.
You can only insert REGULAR
rules into a rule group.
The Action
data type within ActivatedRule
is used only when submitting an UpdateWebACL
request. ActivatedRule|Action
is not applicable and therefore not available for UpdateRuleGroup
.
An array of RuleGroupUpdate
objects that you want to insert into or delete from a RuleGroup.
You can only insert REGULAR
rules into a rule group.
ActivatedRule|OverrideAction
applies only when updating or adding a RuleGroup
to a WebACL
. In this case you do not use ActivatedRule|Action
. For all other update requests, ActivatedRule|Action
is used instead of ActivatedRule|OverrideAction
.
An array of updates to make to the WebACL.
An array of WebACLUpdate
objects that you want to insert into or delete from a WebACL. For more information, see the applicable data types:
WebACLUpdate: Contains Action
and ActivatedRule
ActivatedRule: Contains Action
, Priority
, RuleId
, and Type
. The OverrideAction
data type within ActivatedRule
is used only when submitting an UpdateRuleGroup
request. ActivatedRule|OverrideAction
is not applicable and therefore not available for UpdateWebACL
.
WafAction: Contains Type
An array of updates to make to the WebACL.
An array of WebACLUpdate
objects that you want to insert into or delete from a WebACL. For more information, see the applicable data types:
WebACLUpdate: Contains Action
and ActivatedRule
ActivatedRule: Contains Action
, OverrideAction
, Priority
, RuleId
, and Type
. ActivatedRule|OverrideAction
applies only when updating or adding a RuleGroup
to a WebACL
. In this case you do not use ActivatedRule|Action
. For all other update requests, ActivatedRule|Action
is used instead of ActivatedRule|OverrideAction
.
WafAction: Contains Type
The operation failed because AWS WAF didn't recognize a parameter in the request. For example:
You specified an invalid parameter name.
You specified an invalid value.
You tried to update an object (ByteMatchSet
, IPSet
, Rule
, or WebACL
) using an action other than INSERT
or DELETE
.
You tried to create a WebACL
with a DefaultAction
Type
other than ALLOW
, BLOCK
, or COUNT
.
You tried to create a RateBasedRule
with a RateKey
value other than IP
.
You tried to update a WebACL
with a WafAction
Type
other than ALLOW
, BLOCK
, or COUNT
.
You tried to update a ByteMatchSet
with a FieldToMatch
Type
other than HEADER, METHOD, QUERY_STRING, URI, or BODY.
You tried to update a ByteMatchSet
with a Field
of HEADER
but no value for Data
.
Your request references an ARN that is malformed, or corresponds to a resource with which a web ACL cannot be associated.
The operation failed because the specified policy is not in the proper format.
The policy is subject to the following restrictions:
You can attach only one policy with each PutPermissionPolicy
request.
The policy must include an Effect
, Action
and Principal
.
Effect
must specify Allow
.
The Action
in the policy must be waf:UpdateWebACL
or waf-regional:UpdateWebACL
. Any extra or wildcard actions in the policy will be rejected.
The policy cannot include a Resource
parameter.
The ARN in the request must be a valid WAF RuleGroup ARN and the RuleGroup must exist in the same region.
The user making the request must be the owner of the RuleGroup.
Your policy must be composed using IAM Policy version 2012-10-17.