From 461bff4a19ef4f0491fe09f4800fa6174024c442 Mon Sep 17 00:00:00 2001 From: Trivikram Kamat <16024985+trivikr@users.noreply.github.com> Date: Fri, 17 Sep 2021 19:02:22 +0000 Subject: [PATCH 1/3] chore: update endpoints as of 09/17/2021 --- .../aws/typescript/codegen/endpoints.json | 60 +++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json b/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json index df6b64acae19..732962996d00 100644 --- a/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json +++ b/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json @@ -4641,6 +4641,19 @@ "us-east-1": {} } }, + "models-v2-lex": { + "endpoints": { + "ap-northeast-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "us-east-1": {}, + "us-west-2": {} + } + }, "models.lex": { "defaults": { "credentialScope": { @@ -5691,6 +5704,19 @@ "us-west-2": {} } }, + "runtime-v2-lex": { + "endpoints": { + "ap-northeast-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "us-east-1": {}, + "us-west-2": {} + } + }, "runtime.lex": { "defaults": { "credentialScope": { @@ -7215,6 +7241,30 @@ "eu-west-1": {}, "eu-west-2": {}, "sa-east-1": {}, + "transcribestreaming-fips-ca-central-1": { + "credentialScope": { + "region": "ca-central-1" + }, + "hostname": "transcribestreaming-fips.ca-central-1.amazonaws.com" + }, + "transcribestreaming-fips-us-east-1": { + "credentialScope": { + "region": "us-east-1" + }, + "hostname": "transcribestreaming-fips.us-east-1.amazonaws.com" + }, + "transcribestreaming-fips-us-east-2": { + "credentialScope": { + "region": "us-east-2" + }, + "hostname": "transcribestreaming-fips.us-east-2.amazonaws.com" + }, + "transcribestreaming-fips-us-west-2": { + "credentialScope": { + "region": "us-west-2" + }, + "hostname": "transcribestreaming-fips.us-west-2.amazonaws.com" + }, "us-east-1": {}, "us-east-2": {}, "us-west-2": {} @@ -9781,6 +9831,16 @@ "us-gov-west-1": {} } }, + "oidc": { + "endpoints": { + "us-gov-west-1": { + "credentialScope": { + "region": "us-gov-west-1" + }, + "hostname": "oidc.us-gov-west-1.amazonaws.com" + } + } + }, "organizations": { "endpoints": { "aws-us-gov-global": { From 1566f30e534058a33a7edd0b17852ab79325a694 Mon Sep 17 00:00:00 2001 From: Trivikram Kamat <16024985+trivikr@users.noreply.github.com> Date: Fri, 17 Sep 2021 19:04:54 +0000 Subject: [PATCH 2/3] chore(models): update models as of 09/17/2021 --- .../aws-models/chime.2018-05-01.json | 36 +- .../aws-models/comprehend.2017-11-27.json | 157 +- ...database-migration-service.2016-01-01.json | 22 +- .../aws-models/ec2.2016-11-15.json | 200 +- .../elasticsearch-service.2015-01-01.json | 64 +- .../aws-models/iot.2015-05-28.json | 55 +- .../aws-models/kafkaconnect.2021-09-14.json | 2725 +++++++++++++++++ .../aws-models/macie2.2020-01-01.json | 183 +- .../aws-models/opensearch.2021-01-01.json | 52 +- .../aws-models/pinpoint.2016-12-01.json | 1146 ++++++- .../aws-models/robomaker.2018-06-29.json | 160 +- .../sdk-codegen/aws-models/s3.2006-03-01.json | 453 +-- .../aws-models/sagemaker.2017-07-24.json | 178 +- 
.../transcribe-streaming.2017-10-26.json | 158 +- .../aws-models/transcribe.2017-10-26.json | 73 + .../aws-models/wafv2.2019-07-29.json | 82 +- 16 files changed, 5241 insertions(+), 503 deletions(-) create mode 100644 codegen/sdk-codegen/aws-models/kafkaconnect.2021-09-14.json diff --git a/codegen/sdk-codegen/aws-models/chime.2018-05-01.json b/codegen/sdk-codegen/aws-models/chime.2018-05-01.json index 4282b8be4ef4..62a6061da10e 100644 --- a/codegen/sdk-codegen/aws-models/chime.2018-05-01.json +++ b/codegen/sdk-codegen/aws-models/chime.2018-05-01.json @@ -4063,6 +4063,9 @@ "target": "com.amazonaws.chime#CreateSipMediaApplicationCallResponse" }, "errors": [ + { + "target": "com.amazonaws.chime#AccessDeniedException" + }, { "target": "com.amazonaws.chime#BadRequestException" }, @@ -4118,6 +4121,12 @@ "smithy.api#httpLabel": {}, "smithy.api#required": {} } + }, + "SipHeaders": { + "target": "com.amazonaws.chime#SipHeadersMap", + "traits": { + "smithy.api#documentation": "
The SIP headers added to an outbound call leg.
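For context, a minimal sketch of passing the new `SipHeaders` map when placing an outbound call with the AWS SDK for JavaScript v3 Chime client; all identifiers, phone numbers, and header values below are placeholders:

```ts
import {
  ChimeClient,
  CreateSipMediaApplicationCallCommand,
} from "@aws-sdk/client-chime";

const chime = new ChimeClient({ region: "us-east-1" });

// Place an outbound call leg and attach custom SIP headers to it.
// All identifiers below are placeholders.
const response = await chime.send(
  new CreateSipMediaApplicationCallCommand({
    SipMediaApplicationId: "example-sip-media-application-id",
    FromPhoneNumber: "+12065550100",
    ToPhoneNumber: "+12065550101",
    SipHeaders: { "X-Example-Header": "example-value" },
  })
);
console.log(response.SipMediaApplicationCall?.TransactionId);
```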
" + } } } }, @@ -13031,18 +13040,18 @@ "Routes": { "target": "com.amazonaws.chime#OriginationRouteList", "traits": { - "smithy.api#documentation": "The call distribution properties defined for your SIP hosts. Valid range: Minimum value of 1.\n Maximum value of 20.
" + "smithy.api#documentation": "The call distribution properties defined for your SIP hosts. Valid range: Minimum value of 1.\n Maximum value of 20. This parameter is not required, but you must specify this parameter or Disabled
.
When origination settings are disabled, inbound calls are not enabled for your Amazon Chime\n Voice Connector.
" + "smithy.api#documentation": "When origination settings are disabled, inbound calls are not enabled for your Amazon Chime\n Voice Connector. This parameter is not required, but you must specify this parameter or Routes
.
Origination settings enable your SIP hosts to receive inbound calls using your Amazon Chime\n Voice Connector.
" + "smithy.api#documentation": "Origination settings enable your SIP hosts to receive inbound calls using your Amazon Chime\n Voice Connector.
\nThe parameters listed below are not required, but you must use at least one.
\nOrigination routes define call distribution properties for your SIP hosts to receive inbound\n calls using your Amazon Chime Voice Connector. Limit: Ten origination routes for each\n Amazon Chime Voice Connector.
" + "smithy.api#documentation": "Origination routes define call distribution properties for your SIP hosts to receive inbound\n calls using your Amazon Chime Voice Connector. Limit: Ten origination routes for each\n Amazon Chime Voice Connector.
\nThe parameters listed below are not required, but you must use at least one.
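To illustrate the Routes-or-Disabled requirement described above, a hedged sketch with the v3 Chime client; the voice connector ID, host, and route values are placeholders:

```ts
import {
  ChimeClient,
  PutVoiceConnectorOriginationCommand,
} from "@aws-sdk/client-chime";

const chime = new ChimeClient({ region: "us-east-1" });

// Origination must carry Routes, Disabled, or both; an empty
// Origination object would be rejected. Values are placeholders.
await chime.send(
  new PutVoiceConnectorOriginationCommand({
    VoiceConnectorId: "example-voice-connector-id",
    Origination: {
      Disabled: false,
      Routes: [
        {
          Host: "sip.example.com",
          Port: 5060,
          Protocol: "UDP",
          Priority: 1,
          Weight: 10,
        },
      ],
    },
  })
);
```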
\nStart transcription for the specified meetingId
.
Starts transcription for the specified meetingId
.
The transcription configuration settings passed to Amazon Transcribe.
" + "smithy.api#documentation": "The transcription configuration settings passed to Amazon Transcribe Medical.
" } } }, diff --git a/codegen/sdk-codegen/aws-models/comprehend.2017-11-27.json b/codegen/sdk-codegen/aws-models/comprehend.2017-11-27.json index 98cb4e51438e..cb3663504cba 100644 --- a/codegen/sdk-codegen/aws-models/comprehend.2017-11-27.json +++ b/codegen/sdk-codegen/aws-models/comprehend.2017-11-27.json @@ -45,7 +45,22 @@ "min": 1, "max": 63 }, - "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "smithy.api#pattern": "^[a-zA-Z0-9](-*[a-zA-Z0-9])*$" + } + }, + "com.amazonaws.comprehend#AugmentedManifestsDocumentTypeFormat": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PLAIN_TEXT_DOCUMENT", + "name": "PLAIN_TEXT_DOCUMENT" + }, + { + "value": "SEMI_STRUCTURED_DOCUMENT", + "name": "SEMI_STRUCTURED_DOCUMENT" + } + ] } }, "com.amazonaws.comprehend#AugmentedManifestsListItem": { @@ -64,6 +79,24 @@ "smithy.api#documentation": "The JSON attribute that contains the annotations for your training documents. The number\n of attribute names that you specify depends on whether your augmented manifest file is the\n output of a single labeling job or a chained labeling job.
\nIf your file is the output of a single labeling job, specify the LabelAttributeName key\n that was used when the job was created in Ground Truth.
\nIf your file is the output of a chained labeling job, specify the LabelAttributeName key\n for one or more jobs in the chain. Each LabelAttributeName key provides the annotations from\n an individual job.
", "smithy.api#required": {} } + }, + "AnnotationDataS3Uri": { + "target": "com.amazonaws.comprehend#S3Uri", + "traits": { + "smithy.api#documentation": "The S3 prefix to the annotation files that are referred in the augmented manifest file.
" + } + }, + "SourceDocumentsS3Uri": { + "target": "com.amazonaws.comprehend#S3Uri", + "traits": { + "smithy.api#documentation": "The S3 prefix to the source files (PDFs) that are referred to in the augmented manifest file.
" + } + }, + "DocumentType": { + "target": "com.amazonaws.comprehend#AugmentedManifestsDocumentTypeFormat", + "traits": { + "smithy.api#documentation": "The type of augmented manifest. PlainTextDocument or SemiStructuredDocument. If you don't specify, the default is PlainTextDocument.
\n\n PLAIN_TEXT_DOCUMENT
A document type that represents any Unicode text that is encoded in UTF-8.
\n SEMI_STRUCTURED_DOCUMENT
A document type with positional and structural context, like a PDF. For training with Amazon Comprehend, only PDFs are supported. \n For inference, Amazon Comprehend supports PDFs, DOCX, and TXT.
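A hedged sketch of supplying the new augmented-manifest fields when training a custom entity recognizer with the v3 Comprehend client; the bucket names, role ARN, entity type, and attribute names are all placeholders:

```ts
import {
  ComprehendClient,
  CreateEntityRecognizerCommand,
} from "@aws-sdk/client-comprehend";

const comprehend = new ComprehendClient({ region: "us-east-1" });

// Train from a Ground Truth augmented manifest of annotated PDFs.
// All S3 URIs, names, and the role ARN are placeholders.
await comprehend.send(
  new CreateEntityRecognizerCommand({
    RecognizerName: "example-recognizer",
    LanguageCode: "en",
    DataAccessRoleArn: "arn:aws:iam::123456789012:role/example-comprehend-role",
    InputDataConfig: {
      DataFormat: "AUGMENTED_MANIFEST",
      EntityTypes: [{ Type: "EXAMPLE_ENTITY" }],
      AugmentedManifests: [
        {
          S3Uri: "s3://example-bucket/output.manifest",
          AttributeNames: ["example-labeling-job"],
          AnnotationDataS3Uri: "s3://example-bucket/annotations/",
          SourceDocumentsS3Uri: "s3://example-bucket/source-pdfs/",
          DocumentType: "SEMI_STRUCTURED_DOCUMENT",
        },
      ],
    },
  })
);
```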
Specifies one of the label or labels that categorize the document being analyzed.
" } }, + "com.amazonaws.comprehend#DocumentReadAction": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "TEXTRACT_DETECT_DOCUMENT_TEXT", + "name": "TEXTRACT_DETECT_DOCUMENT_TEXT" + }, + { + "value": "TEXTRACT_ANALYZE_DOCUMENT", + "name": "TEXTRACT_ANALYZE_DOCUMENT" + } + ] + } + }, + "com.amazonaws.comprehend#DocumentReadFeatureTypes": { + "type": "string", + "traits": { + "smithy.api#documentation": "A list of the types of analyses to perform. This field specifies what feature types need to be extracted from the document where entity recognition is \n expected.
\n \n\n TABLES
- Add TABLES to the list to return information about the tables\n that are detected in the input document.
\n FORMS
- Add FORMS to return detected form data.
This enum field will start with two values, which apply to PDFs:
\n\n TEXTRACT_DETECT_DOCUMENT_TEXT
- The service calls DetectDocumentText for PDF documents per page.
\n TEXTRACT_ANALYZE_DOCUMENT
- The service calls AnalyzeDocument for PDF documents per page.
This enum field provides two values:
\n\n SERVICE_DEFAULT
- Use service defaults for document reading. For digital PDFs, this means using an internal parser instead of the Textract APIs.
\n FORCE_DOCUMENT_READ_ACTION
- Always use the specified action for DocumentReadAction, including digital PDFs.\n
Specifies how the text in an input file should be processed:
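Putting the reader settings above together, a sketch of starting a custom entities job with a document reader configuration via the v3 Comprehend client; member names follow this model, and the ARNs and S3 URIs are placeholders:

```ts
import {
  ComprehendClient,
  StartEntitiesDetectionJobCommand,
} from "@aws-sdk/client-comprehend";

const comprehend = new ComprehendClient({ region: "us-east-1" });

// Force Textract AnalyzeDocument (tables + forms) for the PDFs in the job.
// S3 URIs and the role/recognizer ARNs are placeholders.
await comprehend.send(
  new StartEntitiesDetectionJobCommand({
    LanguageCode: "en",
    DataAccessRoleArn: "arn:aws:iam::123456789012:role/example-comprehend-role",
    EntityRecognizerArn:
      "arn:aws:comprehend:us-east-1:123456789012:entity-recognizer/example-recognizer",
    InputDataConfig: {
      S3Uri: "s3://example-bucket/inference-docs/",
      DocumentReaderConfig: {
        DocumentReadAction: "TEXTRACT_ANALYZE_DOCUMENT",
        DocumentReadMode: "FORCE_DOCUMENT_READ_ACTION",
        FeatureTypes: ["TABLES", "FORMS"],
      },
    },
    OutputDataConfig: { S3Uri: "s3://example-bucket/inference-output/" },
  })
);
```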
" + } + } + }, + "traits": { + "smithy.api#documentation": "The input properties for a topic detection job.
" + } + }, "com.amazonaws.comprehend#DominantLanguage": { "type": "structure", "members": { @@ -3319,7 +3425,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:entity-recognizer/[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "smithy.api#pattern": "^arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:entity-recognizer/[a-zA-Z0-9](-*[a-zA-Z0-9])*(/version/[a-zA-Z0-9](-*[a-zA-Z0-9])*)?$" } }, "com.amazonaws.comprehend#EntityRecognizerAugmentedManifestsList": { @@ -3365,7 +3471,7 @@ "min": 0, "max": 256 }, - "smithy.api#pattern": "arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:entity-recognizer-endpoint/[a-zA-Z0-9](-*[a-zA-Z0-9])*" + "smithy.api#pattern": "^arn:aws(-[^:]+)?:comprehend:[a-zA-Z0-9-]*:[0-9]{12}:entity-recognizer-endpoint/[a-zA-Z0-9](-*[a-zA-Z0-9])*$" } }, "com.amazonaws.comprehend#EntityRecognizerEntityList": { @@ -3750,7 +3856,7 @@ "min": 1, "max": 40 }, - "smithy.api#pattern": "[A-Z_]*" + "smithy.api#pattern": "^[A-Z_]*$" } }, "com.amazonaws.comprehend#EventsDetectionJobFilter": { @@ -3884,7 +3990,7 @@ "min": 20, "max": 2048 }, - "smithy.api#pattern": "arn:aws(-[^:]+)?:iam::[0-9]{12}:role/.+" + "smithy.api#pattern": "^arn:aws(-[^:]+)?:iam::[0-9]{12}:role/.+$" } }, "com.amazonaws.comprehend#InferenceUnitsInteger": { @@ -3911,10 +4017,16 @@ "traits": { "smithy.api#documentation": "Specifies how the text in an input file should be processed:
\n\n ONE_DOC_PER_FILE
- Each file is considered a separate document. Use\n this option when you are processing large documents, such as newspaper articles or\n scientific papers.
\n ONE_DOC_PER_LINE
- Each line in a file is considered a separate\n document. Use this option when you are processing many short documents, such as text\n messages.
The document reader config field applies only for InputDataConfig of StartEntitiesDetectionJob.
\nUse DocumentReaderConfig to provide specifications about how you want your inference documents read.\n Currently it applies for PDF documents in StartEntitiesDetectionJob custom inference.
" + } } }, "traits": { - "smithy.api#documentation": "The input properties for a topic detection job.
" + "smithy.api#documentation": "The input properties for an inference job.
" } }, "com.amazonaws.comprehend#InputFormat": { @@ -4203,7 +4315,8 @@ "smithy.api#length": { "min": 0, "max": 2048 - } + }, + "smithy.api#pattern": ".*" } }, "com.amazonaws.comprehend#KmsKeyValidationException": { @@ -4880,6 +4993,18 @@ "target": "com.amazonaws.comprehend#BatchDetectSyntaxItemResult" } }, + "com.amazonaws.comprehend#ListOfDocumentReadFeatureTypes": { + "type": "list", + "member": { + "target": "com.amazonaws.comprehend#DocumentReadFeatureTypes" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2 + } + } + }, "com.amazonaws.comprehend#ListOfDominantLanguages": { "type": "list", "member": { @@ -5195,7 +5320,7 @@ "min": 1, "max": 1 }, - "smithy.api#pattern": "[!@#$%&*]" + "smithy.api#pattern": "^[!@#$%&*]$" } }, "com.amazonaws.comprehend#MaxResultsInteger": { @@ -5764,7 +5889,7 @@ "min": 0, "max": 1024 }, - "smithy.api#pattern": "s3://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9](/.*)?" + "smithy.api#pattern": "^s3://[a-z0-9][\\.\\-a-z0-9]{1,61}[a-z0-9](/.*)?$" } }, "com.amazonaws.comprehend#SecurityGroupId": { @@ -5774,7 +5899,7 @@ "min": 1, "max": 32 }, - "smithy.api#pattern": "[-0-9a-zA-Z]+" + "smithy.api#pattern": "^[-0-9a-zA-Z]+$" } }, "com.amazonaws.comprehend#SecurityGroupIds": { @@ -7313,7 +7438,7 @@ "min": 1, "max": 32 }, - "smithy.api#pattern": "[-0-9a-zA-Z]+" + "smithy.api#pattern": "^[-0-9a-zA-Z]+$" } }, "com.amazonaws.comprehend#Subnets": { diff --git a/codegen/sdk-codegen/aws-models/database-migration-service.2016-01-01.json b/codegen/sdk-codegen/aws-models/database-migration-service.2016-01-01.json index b9b4fc006ea6..299424466707 100644 --- a/codegen/sdk-codegen/aws-models/database-migration-service.2016-01-01.json +++ b/codegen/sdk-codegen/aws-models/database-migration-service.2016-01-01.json @@ -344,7 +344,7 @@ "ApplyAction": { "target": "com.amazonaws.databasemigrationservice#String", "traits": { - "smithy.api#documentation": "The pending maintenance action to apply to this resource.
", + "smithy.api#documentation": "The pending maintenance action to apply to this resource.
\nValid values: os-upgrade
, system-update
, db-upgrade
\n
The settings in JSON format for the DMS transfer type of source endpoint.
\nPossible settings include the following:
\n\n ServiceAccessRoleArn
- The IAM role that has permission to access the\n Amazon S3 bucket. The role must allow the iam:PassRole
action.
\n BucketName
- The name of the S3 bucket to use.
Shorthand syntax for these settings is as follows:\n ServiceAccessRoleArn=string,BucketName=string
\n
JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\":\n \"string\", \"BucketName\": \"string\", }
\n
The settings in JSON format for the DMS transfer type of source endpoint.
\nPossible settings include the following:
\n\n ServiceAccessRoleArn
- The Amazon Resource Name (ARN) used by the service access IAM role.\n The role must allow the iam:PassRole
action.
\n BucketName
- The name of the S3 bucket to use.
Shorthand syntax for these settings is as follows:\n ServiceAccessRoleArn=string,BucketName=string
\n
JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\":\n \"string\", \"BucketName\": \"string\", }
\n
Creates a replication subnet group given a list of the subnet IDs in a VPC.
" + "smithy.api#documentation": "Creates a replication subnet group given a list of the subnet IDs in a VPC.
\nThe VPC needs to have at least one subnet in at least two availability zones in the Amazon Web Services Region, otherwise the\n service will throw a ReplicationSubnetGroupDoesNotCoverEnoughAZs
exception.
The IAM role that has permission to access the Amazon S3 bucket. When specified as part of request syntax,\n such as for the CreateEndpoint
and ModifyEndpoint
actions,\n the role must allow the iam:PassRole
action.
The Amazon Resource Name (ARN) used by the service access IAM role. The role must allow the iam:PassRole
action.
The settings in JSON format for the DMS transfer type of source endpoint.
\nPossible settings include the following:
\n\n ServiceAccessRoleArn
- The IAM role that has permission to access the\n Amazon S3 bucket. The role must allow the iam:PassRole
action.
\n BucketName
- The name of the S3 bucket to use.
Shorthand syntax for these settings is as follows:\n ServiceAccessRoleArn=string,BucketName=string,
\n
JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\":\n \"string\", \"BucketName\": \"string\"}
\n
The settings in JSON format for the DMS transfer type of source endpoint.
\nPossible settings include the following:
\n\n ServiceAccessRoleArn
- The Amazon Resource Name (ARN) used by the service access IAM role. \n The role must allow the iam:PassRole
action.
\n BucketName
- The name of the S3 bucket to use.
Shorthand syntax for these settings is as follows:\n ServiceAccessRoleArn=string,BucketName=string,
\n
JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\":\n \"string\", \"BucketName\": \"string\"}
\n
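For reference, the same settings expressed through the v3 DMS client when creating a DMS-transfer source endpoint; the endpoint identifier, engine name, role ARN, and bucket are illustrative placeholders:

```ts
import {
  DatabaseMigrationServiceClient,
  CreateEndpointCommand,
} from "@aws-sdk/client-database-migration-service";

const dms = new DatabaseMigrationServiceClient({ region: "us-east-1" });

// The shorthand/JSON syntax above maps to this structured field.
// EngineName is illustrative; role ARN and bucket are placeholders.
await dms.send(
  new CreateEndpointCommand({
    EndpointIdentifier: "example-dms-endpoint",
    EndpointType: "source",
    EngineName: "dms-transfer",
    DmsTransferSettings: {
      ServiceAccessRoleArn: "arn:aws:iam::123456789012:role/example-dms-role",
      BucketName: "example-bucket",
    },
  })
);
```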
The settings in JSON format for the DMS transfer type of source endpoint.
\nAttributes include the following:
\nserviceAccessRoleArn - The Identity and Access Management (IAM) role that has\n permission to access the Amazon S3 bucket. The role must allow the iam:PassRole
action.
BucketName - The name of the S3 bucket to use.
\nShorthand syntax for these settings is as follows: ServiceAccessRoleArn=string\n ,BucketName=string
\n
JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\",\n \"BucketName\": \"string\"}
\n
The settings in JSON format for the DMS transfer type of source endpoint.
\nAttributes include the following:
\nserviceAccessRoleArn - The Amazon Resource Name (ARN) used by the service access IAM role. The role must allow the iam:PassRole
action.
BucketName - The name of the S3 bucket to use.
\nShorthand syntax for these settings is as follows: ServiceAccessRoleArn=string\n ,BucketName=string
\n
JSON syntax for these settings is as follows: { \"ServiceAccessRoleArn\": \"string\",\n \"BucketName\": \"string\"}
\n
If this parameter is true
, the reboot is conducted through a Multi-AZ\n failover. (If the instance isn't configured for Multi-AZ, then you can't specify\n true
.)
If this parameter is true
, the reboot is conducted through a Multi-AZ\n failover. If the instance isn't configured for Multi-AZ, then you can't specify\n true
. ( --force-planned-failover
and --force-failover
can't both be set to true
.)
If this parameter is true
, the reboot is conducted through a planned Multi-AZ failover \n where resources are released and cleaned up prior to conducting the failover. \n If the instance isn't configured for Multi-AZ, then you can't specify true
. \n ( --force-planned-failover
and --force-failover
can't both be set to true
.)
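A sketch of requesting the new planned-failover reboot with the v3 DMS client (the instance ARN is a placeholder):

```ts
import {
  DatabaseMigrationServiceClient,
  RebootReplicationInstanceCommand,
} from "@aws-sdk/client-database-migration-service";

const dms = new DatabaseMigrationServiceClient({ region: "us-east-1" });

// Request a planned Multi-AZ failover; ForceFailover and
// ForcePlannedFailover are mutually exclusive. ARN is a placeholder.
await dms.send(
  new RebootReplicationInstanceCommand({
    ReplicationInstanceArn: "arn:aws:dms:us-east-1:123456789012:rep:EXAMPLE",
    ForcePlannedFailover: true,
  })
);
```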
Reloads the target database table with the source data.
" + "smithy.api#documentation": "Reloads the target database table with the source data.
\nYou can only use this operation with a task in the RUNNING
state, otherwise the service\n will throw an InvalidResourceStateFault
exception.
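A minimal sketch of the call whose precondition is documented above, using the v3 DMS client; the task ARN and table names are placeholders:

```ts
import {
  DatabaseMigrationServiceClient,
  ReloadTablesCommand,
} from "@aws-sdk/client-database-migration-service";

const dms = new DatabaseMigrationServiceClient({ region: "us-east-1" });

// The task must be RUNNING, or the call fails with
// InvalidResourceStateFault. ARN and table names are placeholders.
await dms.send(
  new ReloadTablesCommand({
    ReplicationTaskArn: "arn:aws:dms:us-east-1:123456789012:task:EXAMPLE",
    TablesToReload: [
      { SchemaName: "example_schema", TableName: "example_table" },
    ],
  })
);
```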
Initiates the copy of an AMI. You can copy an AMI from one Region to another, or from a\n Region to an AWS Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost\n to another, or within the same Outpost. To copy an AMI to another partition, see CreateStoreImageTask.
\n \t\n \tTo copy an AMI from one Region to another, specify the source Region using the \n \t\tSourceRegion parameter, and specify the \n \t\tdestination Region using its endpoint. Copies of encrypted backing snapshots for\n \t\tthe AMI are encrypted. Copies of unencrypted backing snapshots remain unencrypted, \n \t\tunless you set Encrypted
during the copy operation. You cannot \n \t\tcreate an unencrypted copy of an encrypted backing snapshot.
To copy an AMI from a Region to an Outpost, specify the source Region using the \n \t\tSourceRegion parameter, and specify the \n \t\tARN of the destination Outpost using DestinationOutpostArn. \n \t\tBacking snapshots copied to an Outpost are encrypted by default using the default\n \t\tencryption key for the Region, or a different key that you specify in the request using \n \t\tKmsKeyId. Outposts do not support unencrypted \n \t\tsnapshots. For more information, \n \t\t\tAmazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.
\n \nFor more information about the prerequisites and limits when copying an AMI, see Copying an AMI\n in the Amazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "Initiates the copy of an AMI. You can copy an AMI from one Region to another, or from a\n Region to an Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost\n to another, or within the same Outpost. To copy an AMI to another partition, see CreateStoreImageTask.
\n \t\n \tTo copy an AMI from one Region to another, specify the source Region using the \n \t\tSourceRegion parameter, and specify the \n \t\tdestination Region using its endpoint. Copies of encrypted backing snapshots for\n \t\tthe AMI are encrypted. Copies of unencrypted backing snapshots remain unencrypted, \n \t\tunless you set Encrypted
during the copy operation. You cannot \n \t\tcreate an unencrypted copy of an encrypted backing snapshot.
To copy an AMI from a Region to an Outpost, specify the source Region using the \n \t\tSourceRegion parameter, and specify the \n \t\tARN of the destination Outpost using DestinationOutpostArn. \n \t\tBacking snapshots copied to an Outpost are encrypted by default using the default\n \t\tencryption key for the Region, or a different key that you specify in the request using \n \t\tKmsKeyId. Outposts do not support unencrypted \n \t\tsnapshots. For more information, \n \t\t\tAmazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.
\n \nFor more information about the prerequisites and limits when copying an AMI, see Copying an AMI\n in the Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#CopyImageRequest": { @@ -8431,7 +8435,7 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "Encrypted", - "smithy.api#documentation": "Specifies whether the destination snapshots of the copied image should be encrypted.\n You can encrypt a copy of an unencrypted snapshot, but you cannot create an unencrypted\n copy of an encrypted snapshot. The default CMK for EBS is used unless you specify a non-default \n AWS Key Management Service (AWS KMS) CMK using KmsKeyId
. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.
Specifies whether the destination snapshots of the copied image should be encrypted.\n You can encrypt a copy of an unencrypted snapshot, but you cannot create an unencrypted\n copy of an encrypted snapshot. The default KMS key for Amazon EBS is used unless you specify a non-default \n Key Management Service (KMS) KMS key using KmsKeyId
. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.
The identifier of the symmetric AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating\n encrypted volumes. If this parameter is not specified, your AWS managed CMK for EBS is used. \n If you specify a CMK, you must also set the encrypted state to true
.
You can specify a CMK using any of the following:
\n \tKey ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.
\n \t\tKey alias. For example, alias/ExampleAlias.
\n \tKey ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.
\n \t\tAlias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
\n \t\tAWS authenticates the CMK asynchronously. Therefore, if you specify an identifier that is not valid,\n the action can appear to complete, but eventually fails.
\nThe specified CMK must exist in the destination Region.
\nAmazon EBS does not support asymmetric CMKs.
", + "smithy.api#documentation": "The identifier of the symmetric Key Management Service (KMS) KMS key to use when creating\n \t\tencrypted volumes. If this parameter is not specified, your Amazon Web Services managed KMS key for Amazon EBS is used. \n \t\tIf you specify a KMS key, you must also set the encrypted state to true
.
You can specify a KMS key using any of the following:
\n \tKey ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.
\n \t\tKey alias. For example, alias/ExampleAlias.
\n \tKey ARN. For example, arn:aws:kms:us-east-1:012345678910:key/1234abcd-12ab-34cd-56ef-1234567890ab.
\n \t\tAlias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
\n \t\tAmazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an identifier that is not valid,\n the action can appear to complete, but eventually fails.
\n \tThe specified KMS key must exist in the destination Region.
\n \tAmazon EBS does not support asymmetric KMS keys.
", "smithy.api#xmlName": "kmsKeyId" } }, @@ -8467,7 +8471,7 @@ "DestinationOutpostArn": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Outpost to which to copy the AMI. Only \n \t\tspecify this parameter when copying an AMI from an AWS Region to an Outpost. \n \t\tThe AMI must be in the Region of the destination Outpost. You cannot copy an \n \t\tAMI from an Outpost to a Region, from one Outpost to another, or within the same \n \t\tOutpost.
\n \t\n \tFor more information, see \n \t\tCopying AMIs from an AWS Region to an Outpost in the \n \t\tAmazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Outpost to which to copy the AMI. Only \n \t\tspecify this parameter when copying an AMI from an Amazon Web Services Region to an Outpost. \n \t\tThe AMI must be in the Region of the destination Outpost. You cannot copy an \n \t\tAMI from an Outpost to a Region, from one Outpost to another, or within the same \n \t\tOutpost.
\n \t\n \tFor more information, see \n \t\tCopying AMIs from an Amazon Web Services Region to an Outpost in the \n \t\tAmazon Elastic Compute Cloud User Guide.
" } }, "DryRun": { @@ -9814,7 +9818,7 @@ "target": "com.amazonaws.ec2#CreateImageResult" }, "traits": { - "smithy.api#documentation": "Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance \n \tthat is either running or stopped.
\n \n \n \n \t\n \tIf you customized your instance with instance store volumes or EBS volumes in addition to the root device volume, the \n \tnew AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, \n \tthe instance automatically launches with those additional volumes.
\nFor more information, see Creating Amazon EBS-Backed Linux AMIs \n\t\t\t\tin the Amazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "Creates an Amazon EBS-backed AMI from an Amazon EBS-backed instance \n \tthat is either running or stopped.
\n \n \n \n \t\n \tIf you customized your instance with instance store volumes or Amazon EBS volumes in addition to the root device volume, the \n \tnew AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, \n \tthe instance automatically launches with those additional volumes.
\n \tFor more information, see Creating Amazon EBS-Backed Linux AMIs \n\t\t\t\tin the Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#CreateImageRequest": { @@ -9866,14 +9870,14 @@ "target": "com.amazonaws.ec2#Boolean", "traits": { "aws.protocols#ec2QueryName": "NoReboot", - "smithy.api#documentation": "By default, Amazon EC2 attempts to shut down and reboot the instance before creating the image. If the No Reboot
option is set, Amazon EC2 doesn't shut down the instance before creating the image. When this option is used, file system integrity on the created image can't be guaranteed.
By default, Amazon EC2 attempts to shut down and reboot the instance before creating the image. \n If the No Reboot
option is set, Amazon EC2 doesn't shut down the instance before creating \n the image. Without a reboot, the AMI will be crash consistent (all the volumes are snapshotted \n at the same time), but not application consistent (all the operating system buffers are not flushed \n to disk before the snapshots are created).
The tags to apply to the AMI and snapshots on creation. You can tag the AMI, the\n snapshots, or both.
\nTo tag the AMI, the value for ResourceType
must be\n image
.
To tag the snapshots that are created of the root volume and of other EBS volumes that\n are attached to the instance, the value for ResourceType
must be\n snapshot
. The same tag is applied to all of the snapshots that are\n created.
If you specify other values for ResourceType
, the request fails.
To tag an AMI or snapshot after it has been created, see CreateTags.
", + "smithy.api#documentation": "The tags to apply to the AMI and snapshots on creation. You can tag the AMI, the\n snapshots, or both.
\nTo tag the AMI, the value for ResourceType
must be\n image
.
To tag the snapshots that are created of the root volume and of other Amazon EBS volumes that\n are attached to the instance, the value for ResourceType
must be\n snapshot
. The same tag is applied to all of the snapshots that are\n created.
If you specify other values for ResourceType
, the request fails.
To tag an AMI or snapshot after it has been created, see CreateTags.
", "smithy.api#xmlName": "TagSpecification" } } @@ -10397,7 +10401,7 @@ "target": "com.amazonaws.ec2#CreateManagedPrefixListResult" }, "traits": { - "smithy.api#documentation": "Creates a managed prefix list. You can specify one or more entries for the prefix list. Each entry consists of a CIDR block and an optional description.
\nYou must specify the maximum number of entries for the prefix list. The maximum number of entries cannot be changed later.
" + "smithy.api#documentation": "Creates a managed prefix list. You can specify one or more entries for the prefix list. \n Each entry consists of a CIDR block and an optional description.
" } }, "com.amazonaws.ec2#CreateManagedPrefixListRequest": { @@ -11225,7 +11229,7 @@ "target": "com.amazonaws.ec2#CreateRestoreImageTaskResult" }, "traits": { - "smithy.api#documentation": "Starts a task that restores an AMI from an S3 object that was previously created by using\n CreateStoreImageTask.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the\n Amazon Elastic Compute Cloud User Guide.
\nFor more information, see Store and restore an AMI using\n S3 in the Amazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "Starts a task that restores an AMI from an Amazon S3 object that was previously created by using\n CreateStoreImageTask.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon Elastic Compute Cloud User Guide.
\nFor more information, see Store and restore an AMI using\n \tAmazon S3 in the Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#CreateRestoreImageTaskRequest": { @@ -11234,7 +11238,7 @@ "Bucket": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "The name of the S3 bucket that contains the stored AMI object.
", + "smithy.api#documentation": "The name of the Amazon S3 bucket that contains the stored AMI object.
", "smithy.api#required": {} } }, @@ -11737,7 +11741,7 @@ "target": "com.amazonaws.ec2#CreateStoreImageTaskResult" }, "traits": { - "smithy.api#documentation": "Stores an AMI as a single object in an S3 bucket.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the\n Amazon Elastic Compute Cloud User Guide.
\nFor more information, see Store and restore an AMI using\n S3 in the Amazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "Stores an AMI as a single object in an Amazon S3 bucket.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon Elastic Compute Cloud User Guide.
\nFor more information, see Store and restore an AMI using\n \tAmazon S3 in the Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#CreateStoreImageTaskRequest": { @@ -11753,14 +11757,14 @@ "Bucket": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "The name of the S3 bucket in which the AMI object will be stored. The bucket must be in\n the Region in which the request is being made. The AMI object appears in the bucket only after\n the upload task has completed.
", + "smithy.api#documentation": "The name of the Amazon S3 bucket in which the AMI object will be stored. The bucket must be in\n the Region in which the request is being made. The AMI object appears in the bucket only after\n the upload task has completed.
", "smithy.api#required": {} } }, "S3ObjectTags": { "target": "com.amazonaws.ec2#S3ObjectTagList", "traits": { - "smithy.api#documentation": "The tags to apply to the AMI object that will be stored in the S3 bucket.
", + "smithy.api#documentation": "The tags to apply to the AMI object that will be stored in the Amazon S3 bucket.
", "smithy.api#xmlName": "S3ObjectTag" } }, @@ -13481,6 +13485,13 @@ "com.amazonaws.ec2#CreateVpcRequest": { "type": "structure", "members": { + "CidrBlock": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "The IPv4 network range for the VPC, in CIDR notation. For example,\n\t\t 10.0.0.0/16
. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18
, we modify it to 100.68.0.0/18
.
The tags to assign to the VPC.
", "smithy.api#xmlName": "TagSpecification" } - }, - "CidrBlock": { - "target": "com.amazonaws.ec2#String", - "traits": { - "smithy.api#documentation": "The IPv4 network range for the VPC, in CIDR notation. For example,\n\t\t 10.0.0.0/16
. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18
, we modify it to 100.68.0.0/18
.
Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch\n\t\t\tnew instances; however, it doesn't affect any instances that you've already launched\n\t\t\tfrom the AMI. You'll continue to incur usage costs for those instances until you\n\t\t\tterminate them.
\nWhen you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was\n\t\t\tcreated for the root volume of the instance during the AMI creation process. When you\n\t\t\tderegister an instance store-backed AMI, it doesn't affect the files that you uploaded\n\t\t\tto Amazon S3 when you created the AMI.
" + "smithy.api#documentation": "Deregisters the specified AMI. After you deregister an AMI, it can't be used to launch\n\t\t\tnew instances; however, it doesn't affect any instances that you've already launched\n\t\t\tfrom the AMI. You'll continue to incur usage costs for those instances until you\n\t\t\tterminate them.
\n \tWhen you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was\n\t\t\tcreated for the root volume of the instance during the AMI creation process. When you\n\t\t\tderegister an instance store-backed AMI, it doesn't affect the files that you uploaded\n\t\t\tto Amazon S3 when you created the AMI.
" } }, "com.amazonaws.ec2#DeregisterImageRequest": { @@ -19879,7 +19883,7 @@ "target": "com.amazonaws.ec2#DescribeImagesResult" }, "traits": { - "smithy.api#documentation": "Describes the specified images (AMIs, AKIs, and ARIs) available to you or all of the images available to you.
\nThe images available to you include public images, private images that you own, and private images owned by other AWS accounts for which you have explicit launch permissions.
\nRecently deregistered images appear in the returned results for a short interval and then\n return empty results. After all instances that reference a deregistered AMI are terminated,\n specifying the ID of the image will eventually return an error indicating that the AMI ID\n cannot be found.
", + "smithy.api#documentation": "Describes the specified images (AMIs, AKIs, and ARIs) available to you or all of the images available to you.
\nThe images available to you include public images, private images that you own, and private images owned by other \n Amazon Web Services accounts for which you have explicit launch permissions.
\nRecently deregistered images appear in the returned results for a short interval and then\n return empty results. After all instances that reference a deregistered AMI are terminated,\n specifying the ID of the image will eventually return an error indicating that the AMI ID\n cannot be found.
", "smithy.api#suppress": ["WaitableTraitInvalidErrorType"], "smithy.waiters#waitable": { "ImageAvailable": { @@ -19937,14 +19941,14 @@ "ExecutableUsers": { "target": "com.amazonaws.ec2#ExecutableByStringList", "traits": { - "smithy.api#documentation": "Scopes the images by users with explicit launch permissions. \n\t\t\t\tSpecify an AWS account ID, self
(the sender of the request),\n\t\t\t\tor all
(public AMIs).
Scopes the images by users with explicit launch permissions. \n Specify an Amazon Web Services account ID, self
(the sender of the request),\n\t\t\t\tor all
(public AMIs).
The filters.
\n\n architecture
- The image architecture (i386
|\n x86_64
| arm64
).
\n block-device-mapping.delete-on-termination
- A Boolean value that indicates\n whether the Amazon EBS volume is deleted on instance termination.
\n block-device-mapping.device-name
- The device name specified in the block device mapping (for\n example, /dev/sdh
or xvdh
).
\n block-device-mapping.snapshot-id
- The ID of the snapshot used for the EBS\n volume.
\n block-device-mapping.volume-size
- The volume size of the EBS volume, in GiB.
\n block-device-mapping.volume-type
- The volume type of the EBS volume\n (gp2
| io1
| io2
| st1
| sc1
|\n standard
).
\n \t\t\t block-device-mapping.encrypted
- A Boolean that indicates whether the EBS volume is encrypted.
\n description
- The description of the image (provided during image\n creation).
\n ena-support
- A Boolean that indicates whether enhanced networking\n with ENA is enabled.
\n hypervisor
- The hypervisor type (ovm
|\n xen
).
\n image-id
- The ID of the image.
\n image-type
- The image type (machine
| kernel
|\n ramdisk
).
\n is-public
- A Boolean that indicates whether the image is public.
\n kernel-id
- The kernel ID.
\n manifest-location
- The location of the image manifest.
\n name
- The name of the AMI (provided during image creation).
\n owner-alias
- The owner alias (amazon
| aws-marketplace
). \n \tThe valid aliases are defined in an Amazon-maintained list. This is not the AWS account alias that can be \n \tset using the IAM console. We recommend that you use the Owner \n \trequest parameter instead of this filter.
\n owner-id
- The AWS account ID of the owner. We recommend that you use the \n \t\tOwner request parameter instead of this filter.
\n platform
- The platform. To only list Windows-based AMIs, use\n windows
.
\n product-code
- The product code.
\n product-code.type
- The type of the product code (devpay
|\n marketplace
).
\n ramdisk-id
- The RAM disk ID.
\n root-device-name
- The device name of the root device volume (for example, /dev/sda1
).
\n root-device-type
- The type of the root device volume (ebs
|\n instance-store
).
\n state
- The state of the image (available
| pending
\n | failed
).
\n state-reason-code
- The reason code for the state change.
\n state-reason-message
- The message for the state change.
\n sriov-net-support
- A value of simple
indicates\n that enhanced networking with the Intel 82599 VF interface is enabled.
\n tag
:Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
\n tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
\n virtualization-type
- The virtualization type (paravirtual
|\n hvm
).
The filters.
\n\n architecture
- The image architecture (i386
|\n x86_64
| arm64
).
\n block-device-mapping.delete-on-termination
- A Boolean value that indicates\n \twhether the Amazon EBS volume is deleted on instance termination.
\n block-device-mapping.device-name
- The device name specified in the block device mapping (for\n example, /dev/sdh
or xvdh
).
\n \t block-device-mapping.snapshot-id
- The ID of the snapshot used for the Amazon EBS\n volume.
\n \t block-device-mapping.volume-size
- The volume size of the Amazon EBS volume, in GiB.
\n block-device-mapping.volume-type
- The volume type of the Amazon EBS volume\n (io1
| io2
| gp2
| gp3
| sc1\n
| st1
| standard
).
\n \t\t\t block-device-mapping.encrypted
- A Boolean that indicates whether the Amazon EBS volume is encrypted.
\n description
- The description of the image (provided during image\n creation).
\n ena-support
- A Boolean that indicates whether enhanced networking\n with ENA is enabled.
\n hypervisor
- The hypervisor type (ovm
|\n xen
).
\n image-id
- The ID of the image.
\n image-type
- The image type (machine
| kernel
|\n ramdisk
).
\n is-public
- A Boolean that indicates whether the image is public.
\n kernel-id
- The kernel ID.
\n manifest-location
- The location of the image manifest.
\n name
- The name of the AMI (provided during image creation).
\n owner-alias
- The owner alias (amazon
| aws-marketplace
). \n The valid aliases are defined in an Amazon-maintained list. This is not the Amazon Web Services account alias that can be \n \tset using the IAM console. We recommend that you use the Owner \n \trequest parameter instead of this filter.
\n owner-id
- The Amazon Web Services account ID of the owner. We recommend that you use the \n \t\tOwner request parameter instead of this filter.
\n platform
- The platform. To only list Windows-based AMIs, use\n windows
.
\n product-code
- The product code.
\n product-code.type
- The type of the product code (marketplace
).
\n ramdisk-id
- The RAM disk ID.
\n root-device-name
- The device name of the root device volume (for example, /dev/sda1
).
\n root-device-type
- The type of the root device volume (ebs
|\n instance-store
).
\n state
- The state of the image (available
| pending
\n | failed
).
\n state-reason-code
- The reason code for the state change.
\n state-reason-message
- The message for the state change.
\n sriov-net-support
- A value of simple
indicates\n that enhanced networking with the Intel 82599 VF interface is enabled.
\n tag
:Owner
and the value TeamA
, specify tag:Owner
for the filter name and TeamA
for the filter value.
\n tag-key
- The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.
\n virtualization-type
- The virtualization type (paravirtual
|\n hvm
).
Scopes the results to images with the specified owners. You can specify a combination of \n AWS account IDs, self
, amazon
, and aws-marketplace
. \n If you omit this parameter, the results include all images for which you have launch permissions, \n regardless of ownership.
Scopes the results to images with the specified owners. You can specify a combination of \n Amazon Web Services account IDs, self
, amazon
, and aws-marketplace
. \n If you omit this parameter, the results include all images for which you have launch permissions, \n regardless of ownership.
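A short sketch combining the Owners parameter with the filters documented above, using the v3 EC2 client:

```ts
import { EC2Client, DescribeImagesCommand } from "@aws-sdk/client-ec2";

const ec2 = new EC2Client({ region: "us-east-1" });

// List your own available x86_64 images.
const { Images } = await ec2.send(
  new DescribeImagesCommand({
    Owners: ["self"],
    Filters: [
      { Name: "state", Values: ["available"] },
      { Name: "architecture", Values: ["x86_64"] },
    ],
  })
);
console.log(Images?.map((image) => image.ImageId));
```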
One or more filters. Filter names and values are case-sensitive.
\n\n auto-recovery-supported
- Indicates whether auto recovery is supported\n (true
| false
).
\n bare-metal
- Indicates whether it is a bare metal instance type\n (true
| false
).
\n burstable-performance-supported
- Indicates whether it is a burstable\n performance instance type (true
| false
).
\n current-generation
- Indicates whether this instance type is the latest\n generation instance type of an instance family (true
| false
).
\n ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps
- The baseline\n bandwidth performance for an EBS-optimized instance type, in Mbps.
\n ebs-info.ebs-optimized-info.baseline-iops
- The baseline input/output storage\n operations per second for an EBS-optimized instance type.
\n ebs-info.ebs-optimized-info.baseline-throughput-in-mbps
- The baseline\n throughput performance for an EBS-optimized instance type, in MB/s.
\n ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps
- The maximum bandwidth\n performance for an EBS-optimized instance type, in Mbps.
\n ebs-info.ebs-optimized-info.maximum-iops
- The maximum input/output storage\n operations per second for an EBS-optimized instance type.
\n ebs-info.ebs-optimized-info.maximum-throughput-in-mbps
- The maximum\n throughput performance for an EBS-optimized instance type, in MB/s.
\n ebs-info.ebs-optimized-support
- Indicates whether the instance type is\n EBS-optimized (supported
| unsupported
|\n default
).
\n ebs-info.encryption-support
- Indicates whether EBS encryption is supported\n (supported
| unsupported
).
\n ebs-info.nvme-support
- Indicates whether non-volatile memory express (NVMe)\n is supported for EBS volumes (required
| supported
|\n unsupported
).
\n free-tier-eligible
- Indicates whether the instance type is eligible to use\n in the free tier (true
| false
).
\n hibernation-supported
- Indicates whether On-Demand hibernation is supported\n (true
| false
).
\n hypervisor
- The hypervisor (nitro
| xen
).
\n instance-storage-info.disk.count
- The number of local disks.
\n instance-storage-info.disk.size-in-gb
- The storage size of each instance storage disk, in\n GB.
\n instance-storage-info.disk.type
- The storage technology for the local\n instance storage disks (hdd
| ssd
).
\n instance-storage-info.nvme-support
- Indicates whether non-volatile memory\n express (NVMe) is supported for instance store (required
| supported
)\n | unsupported
).
\n instance-storage-info.total-size-in-gb
- The total amount of storage available from all local\n instance storage, in GB.
\n instance-storage-supported
- Indicates whether the instance type has local\n instance storage (true
| false
).
\n instance-type
- The instance type (for example c5.2xlarge
or\n c5*).
\n memory-info.size-in-mib
- The memory size.
\n network-info.efa-info.maximum-efa-interfaces
- The maximum number of Elastic \n Fabric Adapters (EFAs) per instance.
\n network-info.efa-supported
- Indicates whether the instance type supports\n Elastic Fabric Adapter (EFA) (true
| false
).
\n network-info.ena-support
- Indicates whether Elastic Network Adapter (ENA) is\n supported or required (required
| supported
|\n unsupported
).
\n network-info.encryption-in-transit-supported
- Indicates whether the instance type \n automatically encrypts in-transit traffic between instances.
\n network-info.ipv4-addresses-per-interface
- The maximum number of private IPv4 addresses per\n network interface.
\n network-info.ipv6-addresses-per-interface
- The maximum number of private IPv6 addresses per\n network interface.
\n network-info.ipv6-supported
- Indicates whether the instance type supports\n IPv6 (true
| false
).
\n network-info.maximum-network-interfaces
- The maximum number of network interfaces per instance.
\n network-info.network-performance
- The network performance (for example, \"25\n Gigabit\").
\n processor-info.supported-architecture
- The CPU architecture\n (arm64
| i386
| x86_64
).
\n processor-info.sustained-clock-speed-in-ghz
- The CPU clock speed, in GHz.
\n supported-boot-mode
- The boot mode (legacy-bios
|\n uefi
).
\n supported-root-device-type
- The root device type (ebs
|\n instance-store
).
\n supported-usage-class
- The usage class (on-demand
|\n spot
).
\n supported-virtualization-type
- The virtualization type (hvm
|\n paravirtual
).
\n vcpu-info.default-cores
- The default number of cores for the instance type.
\n vcpu-info.default-threads-per-core
- The default number of threads per core for the instance\n type.
\n vcpu-info.default-vcpus
- The default number of vCPUs for the instance type.
\n vcpu-info.valid-cores
- The number of cores that can be configured for the instance type.
\n vcpu-info.valid-threads-per-core
- The number of threads per core that can be configured for the instance type.\n For example, \"1\" or \"1,2\".
One or more filters. Filter names and values are case-sensitive.
\n\n auto-recovery-supported
- Indicates whether auto recovery is supported (true
| false
).
\n bare-metal
- Indicates whether it is a bare metal instance type (true
| false
).
\n burstable-performance-supported
- Indicates whether it is a burstable\n performance instance type (true
| false
).
\n current-generation
- Indicates whether this instance type is the latest\n generation instance type of an instance family (true
| false
).
\n ebs-info.ebs-optimized-info.baseline-bandwidth-in-mbps
- The baseline\n bandwidth performance for an EBS-optimized instance type, in Mbps.
\n ebs-info.ebs-optimized-info.baseline-iops
- The baseline input/output storage\n operations per second for an EBS-optimized instance type.
\n ebs-info.ebs-optimized-info.baseline-throughput-in-mbps
- The baseline\n throughput performance for an EBS-optimized instance type, in MB/s.
\n ebs-info.ebs-optimized-info.maximum-bandwidth-in-mbps
- The maximum bandwidth\n performance for an EBS-optimized instance type, in Mbps.
\n ebs-info.ebs-optimized-info.maximum-iops
- The maximum input/output storage\n operations per second for an EBS-optimized instance type.
\n ebs-info.ebs-optimized-info.maximum-throughput-in-mbps
- The maximum\n throughput performance for an EBS-optimized instance type, in MB/s.
\n ebs-info.ebs-optimized-support
- Indicates whether the instance type is\n EBS-optimized (supported
| unsupported
|\n default
).
\n ebs-info.encryption-support
- Indicates whether EBS encryption is supported\n (supported
| unsupported
).
\n ebs-info.nvme-support
- Indicates whether non-volatile memory express (NVMe)\n is supported for EBS volumes (required
| supported
| unsupported
).
\n free-tier-eligible
- Indicates whether the instance type is eligible to use\n in the free tier (true
| false
).
\n hibernation-supported
- Indicates whether On-Demand hibernation is supported (true
| false
).
\n hypervisor
- The hypervisor (nitro
| xen
).
\n instance-storage-info.disk.count
- The number of local disks.
\n instance-storage-info.disk.size-in-gb
- The storage size of each instance storage disk, in\n GB.
\n instance-storage-info.disk.type
- The storage technology for the local\n instance storage disks (hdd
| ssd
).
\n instance-storage-info.nvme-support
- Indicates whether non-volatile memory\n express (NVMe) is supported for instance store (required
| supported
)\n | unsupported
).
\n instance-storage-info.total-size-in-gb
- The total amount of storage available from all local\n instance storage, in GB.
\n instance-storage-supported
- Indicates whether the instance type has local\n instance storage (true
| false
).
\n instance-type
- The instance type (for example c5.2xlarge
or\n c5*).
\n memory-info.size-in-mib
- The memory size.
\n network-info.efa-info.maximum-efa-interfaces
- The maximum number of Elastic \n Fabric Adapters (EFAs) per instance.
\n network-info.efa-supported
- Indicates whether the instance type supports\n Elastic Fabric Adapter (EFA) (true
| false
).
\n network-info.ena-support
- Indicates whether Elastic Network Adapter (ENA) is\n supported or required (required
| supported
|\n unsupported
).
\n network-info.encryption-in-transit-supported
- Indicates whether the instance type \n automatically encrypts in-transit traffic between instances (true
| false
).
\n network-info.ipv4-addresses-per-interface
- The maximum number of private IPv4 addresses per\n network interface.
\n network-info.ipv6-addresses-per-interface
- The maximum number of private IPv6 addresses per\n network interface.
\n network-info.ipv6-supported
- Indicates whether the instance type supports IPv6 (true
| false
).
\n network-info.maximum-network-interfaces
- The maximum number of network interfaces per instance.
\n network-info.network-performance
- The network performance (for example, \"25\n Gigabit\").
\n processor-info.supported-architecture
- The CPU architecture\n (arm64
| i386
| x86_64
).
\n processor-info.sustained-clock-speed-in-ghz
- The CPU clock speed, in GHz.
\n supported-boot-mode
- The boot mode (legacy-bios
|\n uefi
).
\n supported-root-device-type
- The root device type (ebs
|\n instance-store
).
\n supported-usage-class
- The usage class (on-demand
|\n spot
).
\n supported-virtualization-type
- The virtualization type (hvm
|\n paravirtual
).
\n vcpu-info.default-cores
- The default number of cores for the instance type.
\n vcpu-info.default-threads-per-core
- The default number of threads per core for the instance\n type.
\n vcpu-info.default-vcpus
- The default number of vCPUs for the instance type.
\n vcpu-info.valid-cores
- The number of cores that can be configured for the instance type.
\n vcpu-info.valid-threads-per-core
- The number of threads per core that can be configured for the instance type.\n For example, \"1\" or \"1,2\".
Describes the progress of the AMI store tasks. You can describe the store tasks for\n specified AMIs. If you don't specify the AMIs, you get a paginated list of store tasks from\n the last 31 days.
\nFor each AMI task, the response indicates if the task is InProgress
,\n Completed
, or Failed
. For tasks InProgress
, the\n response shows the estimated progress as a percentage.
Tasks are listed in reverse chronological order. Currently, only tasks from the past 31\n days can be viewed.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the\n Amazon Elastic Compute Cloud User Guide.
\nFor more information, see Store and restore an AMI using\n S3 in the Amazon Elastic Compute Cloud User Guide.
", + "smithy.api#documentation": "Describes the progress of the AMI store tasks. You can describe the store tasks for\n specified AMIs. If you don't specify the AMIs, you get a paginated list of store tasks from\n the last 31 days.
\nFor each AMI task, the response indicates if the task is InProgress
,\n Completed
, or Failed
. For tasks InProgress
, the\n response shows the estimated progress as a percentage.
Tasks are listed in reverse chronological order. Currently, only tasks from the past 31\n days can be viewed.
\nTo use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the\n Amazon Elastic Compute Cloud User Guide.
\nFor more information, see Store and restore an AMI using\n \tAmazon S3 in the Amazon Elastic Compute Cloud User Guide.
", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -29278,7 +29282,7 @@ } }, "SnapshotId": { - "target": "com.amazonaws.ec2#String", + "target": "com.amazonaws.ec2#SnapshotId", "traits": { "aws.protocols#ec2QueryName": "SnapshotId", "smithy.api#documentation": "The ID of the snapshot.
", @@ -30677,7 +30681,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "EventSubType", - "smithy.api#documentation": "The event.
\n\nThe following are the error
events:
\n iamFleetRoleInvalid
- The EC2 Fleet or Spot Fleet did not have the required\n permissions either to launch or terminate an instance.
\n spotFleetRequestConfigurationInvalid
- The configuration is not\n valid. For more information, see the description of the event.
\n spotInstanceCountLimitExceeded
- You've reached the limit on the\n number of Spot Instances that you can launch.
The following are the fleetRequestChange
events:
\n active
- The EC2 Fleet or Spot Fleet request has been validated and Amazon EC2 is\n attempting to maintain the target number of running Spot Instances.
\n cancelled
- The EC2 Fleet or Spot Fleet request is canceled and has no running\n Spot Instances. The EC2 Fleet or Spot Fleet will be deleted two days after its instances\n were terminated.
\n cancelled_running
- The EC2 Fleet or Spot Fleet request is canceled and does\n not launch additional Spot Instances. Existing Spot Instances continue to run\n until they are interrupted or terminated.
\n cancelled_terminating
- The EC2 Fleet or Spot Fleet request is canceled and\n its Spot Instances are terminating.
\n expired
- The EC2 Fleet or Spot Fleet request has expired. A subsequent event\n indicates that the instances were terminated, if the request was created with\n TerminateInstancesWithExpiration
set.
\n modify_in_progress
- A request to modify the EC2 Fleet or Spot Fleet request\n was accepted and is in progress.
\n modify_succeeded
- The EC2 Fleet or Spot Fleet request was modified.
\n price_update
- The price for a launch configuration was adjusted\n because it was too high. This change is permanent.
\n submitted
- The EC2 Fleet or Spot Fleet request is being evaluated and Amazon EC2\n is preparing to launch the target number of Spot Instances.
The following are the instanceChange
events:
\n launched
- A request was fulfilled and a new instance was\n launched.
\n terminated
- An instance was terminated by the user.
The following are the Information
events:
\n launchSpecTemporarilyBlacklisted
- The configuration is not valid\n and several attempts to launch instances have failed. For more information, see\n the description of the event.
\n launchSpecUnusable
- The price in a launch specification is not\n valid because it is below the Spot price or the Spot price is above the\n On-Demand price.
\n fleetProgressHalted
- The price in every launch specification is\n not valid. A launch specification might become valid if the Spot price\n changes.
The event.
\n\nThe following are the error
events:
\n iamFleetRoleInvalid
- The EC2 Fleet or Spot Fleet did not have the required\n permissions either to launch or terminate an instance.
\n spotFleetRequestConfigurationInvalid
- The configuration is not\n valid. For more information, see the description of the event.
\n spotInstanceCountLimitExceeded
- You've reached the limit on the\n number of Spot Instances that you can launch.
The following are the fleetRequestChange
events:
\n active
- The EC2 Fleet or Spot Fleet request has been validated and Amazon EC2 is\n attempting to maintain the target number of running Spot Instances.
\n cancelled
- The EC2 Fleet or Spot Fleet request is canceled and has no running\n Spot Instances. The EC2 Fleet or Spot Fleet will be deleted two days after its instances\n were terminated.
\n cancelled_running
- The EC2 Fleet or Spot Fleet request is canceled and does\n not launch additional Spot Instances. Existing Spot Instances continue to run\n until they are interrupted or terminated.
\n cancelled_terminating
- The EC2 Fleet or Spot Fleet request is canceled and\n its Spot Instances are terminating.
\n expired
- The EC2 Fleet or Spot Fleet request has expired. A subsequent event\n indicates that the instances were terminated, if the request was created with\n TerminateInstancesWithExpiration
set.
\n modify_in_progress
- A request to modify the EC2 Fleet or Spot Fleet request\n was accepted and is in progress.
\n modify_succeeded
- The EC2 Fleet or Spot Fleet request was modified.
\n submitted
- The EC2 Fleet or Spot Fleet request is being evaluated and Amazon EC2\n is preparing to launch the target number of Spot Instances.
The following are the instanceChange
events:
\n launched
- A request was fulfilled and a new instance was\n launched.
\n terminated
- An instance was terminated by the user.
The following are the Information
events:
\n launchSpecTemporarilyBlacklisted
- The configuration is not valid\n and several attempts to launch instances have failed. For more information, see\n the description of the event.
\n launchSpecUnusable
- The price in a launch specification is not\n valid because it is below the Spot price or the Spot price is above the\n On-Demand price.
\n fleetProgressHalted
- The price in every launch specification is\n not valid. A launch specification might become valid if the Spot price\n changes.
The AWS account ID of the image owner.
", + "smithy.api#documentation": "The ID of the Amazon Web Services account that owns the image.
", "smithy.api#xmlName": "imageOwnerId" } }, @@ -35927,7 +35931,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "PlatformDetails", - "smithy.api#documentation": "The platform details associated with the billing code of the AMI. For more information,\n see Obtaining\n Billing Information in the Amazon Elastic Compute Cloud User Guide.
", + "smithy.api#documentation": "The platform details associated with the billing code of the AMI. For more information,\n see Understanding \n AMI billing in the Amazon Elastic Compute Cloud User Guide.
", "smithy.api#xmlName": "platformDetails" } }, @@ -35935,7 +35939,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "UsageOperation", - "smithy.api#documentation": "The operation of the Amazon EC2 instance and the billing code that is associated with the AMI.\n usageOperation
corresponds to the lineitem/Operation column on your AWS Cost and Usage Report and in the AWS Price\n List API. For the list of UsageOperation
codes, see Platform Details and Usage Operation Billing Codes in the\n Amazon Elastic Compute Cloud User Guide.
The operation of the Amazon EC2 instance and the billing code that is associated with the AMI.\n usageOperation
corresponds to the lineitem/Operation column on your Amazon Web Services Cost and Usage Report and in the Amazon Web Services Price\n \tList API. You can view these fields on the Instances or \n \tAMIs pages in the Amazon EC2 console, or in the responses that are \n \treturned by the DescribeImages \n \tcommand in the Amazon EC2 API, or the describe-images \n \tcommand in the CLI.
The AWS account alias (for example, amazon
, self
) or\n the AWS account ID of the AMI owner.
The Amazon Web Services account alias (for example, amazon
, self
) or\n the Amazon Web Services account ID of the AMI owner.
The type of root device used by the AMI. The AMI can use an EBS volume or an instance store volume.
", + "smithy.api#documentation": "The type of root device used by the AMI. The AMI can use an Amazon EBS volume or an instance store volume.
", "smithy.api#xmlName": "rootDeviceType" } }, @@ -38384,7 +38388,7 @@ "CpuCredits": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "The credit option for CPU usage of the instance. Valid values are\n standard
and unlimited
.
The credit option for CPU usage of the instance. Valid values are\n standard
and unlimited
.
T3 instances with host
tenancy do not support the unlimited
\n CPU credit option.
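Because the credit option can be changed after launch (as the doc notes, via ModifyInstanceCreditSpecification), a minimal sketch of switching a burstable instance to the unlimited option; the instance ID is supplied by the caller:

```ts
import { EC2Client, ModifyInstanceCreditSpecificationCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" }); // region is illustrative

async function setUnlimitedCredits(instanceId: string): Promise<void> {
  const result = await client.send(
    new ModifyInstanceCreditSpecificationCommand({
      InstanceCreditSpecifications: [
        { InstanceId: instanceId, CpuCredits: "unlimited" },
      ],
    })
  );
  // Entries that could not be updated are reported separately.
  console.log(result.UnsuccessfulInstanceCreditSpecifications);
}
```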
The AWS account ID.
\nConstraints: Up to 10 000 account IDs can be specified in a single request.
", + "smithy.api#documentation": "The Amazon Web Services account ID.
\nConstraints: Up to 10,000 account IDs can be specified in a single request.

", "smithy.api#xmlName": "userId" } } @@ -42830,13 +42846,13 @@ "Add": { "target": "com.amazonaws.ec2#LaunchPermissionList", "traits": { - "smithy.api#documentation": "The AWS account ID to add to the list of launch permissions for the AMI.
" + "smithy.api#documentation": "The Amazon Web Services account ID to add to the list of launch permissions for the AMI.
" } }, "Remove": { "target": "com.amazonaws.ec2#LaunchPermissionList", "traits": { - "smithy.api#documentation": "The AWS account ID to remove from the list of launch permissions for the AMI.
" + "smithy.api#documentation": "The Amazon Web Services account ID to remove from the list of launch permissions for the AMI.
" } } }, @@ -45564,7 +45580,7 @@ "target": "com.amazonaws.ec2#PrefixListState", "traits": { "aws.protocols#ec2QueryName": "State", - "smithy.api#documentation": "The state of the prefix list.
", + "smithy.api#documentation": "The current state of the prefix list.
", "smithy.api#xmlName": "state" } }, @@ -45882,7 +45898,7 @@ "InstanceCount": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "The number of instances for which to reserve capacity.
\n\t \tValid range: 1 - 1000
" + "smithy.api#documentation": "The number of instances for which to reserve capacity. The number of instances can't be increased or \n\t\t \tdecreased by more than 1000
in a single request.
Modifies the specified attribute of the specified AMI. You can specify only one attribute at a time.\n You can use the Attribute
parameter to specify the attribute or one of the following parameters: \n Description
, LaunchPermission
, or ProductCode
.
AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace product code cannot be made public.
\nTo enable the SriovNetSupport enhanced networking attribute of an image, enable SriovNetSupport on an instance \n and create an AMI from the instance.
" + "smithy.api#documentation": "Modifies the specified attribute of the specified AMI. You can specify only one attribute at a time.\n You can use the Attribute
parameter to specify the attribute or one of the following parameters: \n Description
or LaunchPermission
.
Images with an Amazon Web Services Marketplace product code cannot be made public.
\nTo enable the SriovNetSupport enhanced networking attribute of an image, enable SriovNetSupport on an instance \n and create an AMI from the instance.
" } }, "com.amazonaws.ec2#ModifyImageAttributeRequest": { @@ -46440,7 +46456,7 @@ "Attribute": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "The name of the attribute to modify. \n The valid values are description
, launchPermission
, and productCodes
.
The name of the attribute to modify. \n The valid values are description
and launchPermission
.
The DevPay product codes. After you add a product code to an AMI, it can't be removed.
", + "smithy.api#documentation": "Not supported.
", "smithy.api#xmlName": "ProductCode" } }, @@ -46485,14 +46501,14 @@ "UserIds": { "target": "com.amazonaws.ec2#UserIdStringList", "traits": { - "smithy.api#documentation": "The AWS account IDs. \n This parameter can be used only when the Attribute
parameter is launchPermission
.
The Amazon Web Services account IDs. \n This parameter can be used only when the Attribute
parameter is launchPermission
.
The value of the attribute being modified. \n This parameter can be used only when the Attribute
parameter is description
or productCodes
.
The value of the attribute being modified. \n This parameter can be used only when the Attribute
parameter is description
.
The tenancy for the instance.
", + "smithy.api#documentation": "The tenancy for the instance.
\n \nFor T3 instances, you can't change the tenancy from dedicated
to \n host
, or from host
to dedicated
. Attempting \n to make one of these unsupported tenancy changes results in the InvalidTenancy
\n error code.
The maximum number of entries for the prefix list. You cannot modify the entries \n of a prefix list and modify the size of a prefix list at the same time.
" + "smithy.api#documentation": "The maximum number of entries for the prefix list. You cannot modify the entries \n of a prefix list and modify the size of a prefix list at the same time.
\nIf any of the resources that reference the prefix list cannot support the new\n maximum size, the modify operation fails. Check the state message for the IDs of \n the first ten resources that do not support the new maximum size.
" } } } @@ -51525,7 +51541,7 @@ "target": "com.amazonaws.ec2#Tenancy", "traits": { "aws.protocols#ec2QueryName": "Tenancy", - "smithy.api#documentation": "The tenancy of the instance (if the instance is running in a VPC). An instance with a\n tenancy of dedicated
runs on single-tenant hardware. The host
\n tenancy is not supported for the ImportInstance command.
This parameter is not supported by CreateFleet.
", + "smithy.api#documentation": "The tenancy of the instance (if the instance is running in a VPC). An instance with a\n tenancy of dedicated
runs on single-tenant hardware. The host
\n tenancy is not supported for the ImportInstance command.
This parameter is not supported by CreateFleet.
\n \nT3 instances that use the unlimited
CPU credit option do not support host
tenancy.
The name of the placement group that the instance is in.
", @@ -53362,7 +53378,7 @@ "target": "com.amazonaws.ec2#RegisterImageResult" }, "traits": { - "smithy.api#documentation": "Registers an AMI. When you're creating an AMI, this is the final step you must complete\n before you can launch an instance from the AMI. For more information about creating AMIs, see\n Creating your\n own AMIs in the Amazon Elastic Compute Cloud User Guide.
\nFor Amazon EBS-backed instances, CreateImage creates and registers \n \tthe AMI in a single request, so you don't have to register the AMI yourself.
\nIf needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. \n If you make changes to an image, deregister the previous image and register the new image.
\n\n\n Register a snapshot of a root device volume\n
\nYou can use RegisterImage
to create an Amazon EBS-backed Linux AMI from\n a snapshot of a root device volume. You specify the snapshot using a block device mapping.\n You can't set the encryption state of the volume using the block device mapping. If the \n snapshot is encrypted, or encryption by default is enabled, the root volume of an instance \n launched from the AMI is encrypted.
For more information, see Create a Linux AMI from a snapshot and Use encryption with EBS-backed AMIs\n in the Amazon Elastic Compute Cloud User Guide.
\n \n\n AWS Marketplace product codes\n
\nIf any snapshots have AWS Marketplace product codes, they are copied to the new\n AMI.
\nWindows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE\n Linux Enterprise Server (SLES), use the EC2 billing product code associated with an AMI to\n verify the subscription status for package updates. To create a new AMI for operating systems\n that require a billing product code, instead of registering the AMI, do the following to\n preserve the billing product code association:
\nLaunch an instance from an existing AMI with that billing product code.
\nCustomize the instance.
\nCreate an AMI from the instance using CreateImage.
\nIf you purchase a Reserved Instance to apply to an On-Demand Instance that was launched\n from an AMI with a billing product code, make sure that the Reserved Instance has the matching\n billing product code. If you purchase a Reserved Instance without the matching billing product\n code, the Reserved Instance will not be applied to the On-Demand Instance. For information\n about how to obtain the platform details and billing information of an AMI, see Obtaining billing\n information in the Amazon Elastic Compute Cloud User Guide.
" + "smithy.api#documentation": "Registers an AMI. When you're creating an AMI, this is the final step you must complete\n before you can launch an instance from the AMI. For more information about creating AMIs, see\n Creating your\n own AMIs in the Amazon Elastic Compute Cloud User Guide.
\nFor Amazon EBS-backed instances, CreateImage creates and registers \n \tthe AMI in a single request, so you don't have to register the AMI yourself.
\nIf needed, you can deregister an AMI at any time. Any modifications you make to an AMI backed by an instance store volume invalidates its registration. \n If you make changes to an image, deregister the previous image and register the new image.
\n\n\n Register a snapshot of a root device volume\n
\n \tYou can use RegisterImage
to create an Amazon EBS-backed Linux AMI from\n a snapshot of a root device volume. You specify the snapshot using a block device mapping.\n You can't set the encryption state of the volume using the block device mapping. If the \n snapshot is encrypted, or encryption by default is enabled, the root volume of an instance \n launched from the AMI is encrypted.
For more information, see Create a Linux AMI from a snapshot and Use encryption with Amazon EBS-backed AMIs\n in the Amazon Elastic Compute Cloud User Guide.
\n \n \t\n Amazon Web Services Marketplace product codes\n
\n \tIf any snapshots have Amazon Web Services Marketplace product codes, they are copied to the new\n AMI.
\nWindows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE\n Linux Enterprise Server (SLES), use the Amazon EC2 billing product code associated with an AMI to\n verify the subscription status for package updates. To create a new AMI for operating systems\n that require a billing product code, instead of registering the AMI, do the following to\n preserve the billing product code association:
\nLaunch an instance from an existing AMI with that billing product code.
\nCustomize the instance.
\nCreate an AMI from the instance using CreateImage.
\nIf you purchase a Reserved Instance to apply to an On-Demand Instance that was launched\n from an AMI with a billing product code, make sure that the Reserved Instance has the matching\n billing product code. If you purchase a Reserved Instance without the matching billing product\n code, the Reserved Instance will not be applied to the On-Demand Instance. For information\n about how to obtain the platform details and billing information of an AMI, see Understanding AMI \n \tbilling in the Amazon Elastic Compute Cloud User Guide.
" } }, "com.amazonaws.ec2#RegisterImageRequest": { @@ -53378,14 +53394,14 @@ "target": "com.amazonaws.ec2#ArchitectureValues", "traits": { "aws.protocols#ec2QueryName": "Architecture", - "smithy.api#documentation": "The architecture of the AMI.
\nDefault: For Amazon EBS-backed AMIs, i386
.\n For instance store-backed AMIs, the architecture specified in the manifest file.
The architecture of the AMI.
\n \tDefault: For Amazon EBS-backed AMIs, i386
.\n For instance store-backed AMIs, the architecture specified in the manifest file.
The block device mapping entries.
\nIf you specify an EBS volume using the ID of an EBS snapshot, you can't specify the encryption state of the volume.
\nIf you create an AMI on an Outpost, then all backing snapshots must be on the same Outpost or in the Region \n \t of that Outpost. AMIs on an Outpost that include local snapshots can be used to launch instances on the same Outpost \n \t only. For more information, \n \t \tAmazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.
", + "smithy.api#documentation": "The block device mapping entries.
\n \tIf you specify an Amazon EBS volume using the ID of an Amazon EBS snapshot, you can't specify the encryption state of the volume.
\nIf you create an AMI on an Outpost, then all backing snapshots must be on the same Outpost or in the Region \n \t of that Outpost. AMIs on an Outpost that include local snapshots can be used to launch instances on the same Outpost \n \t only. For more information, \n \t \tAmazon EBS local snapshots on Outposts in the Amazon Elastic Compute Cloud User Guide.
", "smithy.api#xmlName": "BlockDeviceMapping" } }, @@ -53433,7 +53449,7 @@ "BillingProducts": { "target": "com.amazonaws.ec2#BillingProductList", "traits": { - "smithy.api#documentation": "The billing product codes. Your account must be authorized to specify billing product codes. Otherwise,\n you can use the AWS Marketplace to bill for the use of an AMI.
", + "smithy.api#documentation": "The billing product codes. Your account must be authorized to specify billing product codes. Otherwise,\n \tyou can use the Amazon Web Services Marketplace to bill for the use of an AMI.
", "smithy.api#xmlName": "BillingProduct" } }, @@ -56266,7 +56282,7 @@ "target": "com.amazonaws.ec2#ResetImageAttributeRequest" }, "traits": { - "smithy.api#documentation": "Resets an attribute of an AMI to its default value.
\nThe productCodes attribute can't be reset.
\nResets an attribute of an AMI to its default value.
" } }, "com.amazonaws.ec2#ResetImageAttributeName": { @@ -56456,6 +56472,10 @@ "type": "string", "traits": { "smithy.api#enum": [ + { + "value": "capacity-reservation", + "name": "capacity_reservation" + }, { "value": "client-vpn-endpoint", "name": "client_vpn_endpoint" @@ -56464,6 +56484,10 @@ "value": "customer-gateway", "name": "customer_gateway" }, + { + "value": "carrier-gateway", + "name": "carrier_gateway" + }, { "value": "dedicated-host", "name": "dedicated_host" @@ -56528,6 +56552,14 @@ "value": "internet-gateway", "name": "internet_gateway" }, + { + "value": "ipv4pool-ec2", + "name": "ipv4pool_ec2" + }, + { + "value": "ipv6pool-ec2", + "name": "ipv6pool_ec2" + }, { "value": "key-pair", "name": "key_pair" @@ -56536,10 +56568,30 @@ "value": "launch-template", "name": "launch_template" }, + { + "value": "local-gateway", + "name": "local_gateway" + }, + { + "value": "local-gateway-route-table", + "name": "local_gateway_route_table" + }, + { + "value": "local-gateway-virtual-interface", + "name": "local_gateway_virtual_interface" + }, + { + "value": "local-gateway-virtual-interface-group", + "name": "local_gateway_virtual_interface_group" + }, { "value": "local-gateway-route-table-vpc-association", "name": "local_gateway_route_table_vpc_association" }, + { + "value": "local-gateway-route-table-virtual-interface-group-association", + "name": "local_gateway_route_table_virtual_interface_group_association" + }, { "value": "natgateway", "name": "natgateway" @@ -56564,6 +56616,14 @@ "value": "placement-group", "name": "placement_group" }, + { + "value": "prefix-list", + "name": "prefix_list" + }, + { + "value": "replace-root-volume-task", + "name": "replace_root_volume_task" + }, { "value": "reserved-instances", "name": "reserved_instances" @@ -56636,6 +56696,14 @@ "value": "vpc", "name": "vpc" }, + { + "value": "vpc-endpoint", + "name": "vpc_endpoint" + }, + { + "value": "vpc-endpoint-service", + "name": "vpc_endpoint_service" + }, { "value": "vpc-peering-connection", "name": "vpc_peering_connection" @@ -57994,7 +58062,7 @@ "CreditSpecification": { "target": "com.amazonaws.ec2#CreditSpecificationRequest", "traits": { - "smithy.api#documentation": "The credit option for CPU usage of the burstable performance instance. Valid values\n are standard
and unlimited
. To change this attribute after\n launch, use \n ModifyInstanceCreditSpecification. For more information, see Burstable\n performance instances in the Amazon EC2 User Guide.
Default: standard
(T2 instances) or unlimited
(T3/T3a\n instances)
The credit option for CPU usage of the burstable performance instance. Valid values\n are standard
and unlimited
. To change this attribute after\n launch, use \n ModifyInstanceCreditSpecification. For more information, see Burstable\n performance instances in the Amazon EC2 User Guide.
Default: standard
(T2 instances) or unlimited
(T3/T3a\n instances)
For T3 instances with host
tenancy, only standard
is \n supported.
The tags to apply to the AMI object that will be stored in the S3 bucket. For more\n information, see Categorizing your storage using\n tags in the Amazon Simple Storage Service User Guide.
" + "smithy.api#documentation": "The tags to apply to the AMI object that will be stored in the Amazon S3 bucket. For more\n information, see Categorizing your storage using\n tags in the Amazon Simple Storage Service User Guide.
" } }, "com.amazonaws.ec2#S3ObjectTagList": { @@ -58140,7 +58208,7 @@ "AWSAccessKeyId": { "target": "com.amazonaws.ec2#String", "traits": { - "smithy.api#documentation": "The access key ID of the owner of the bucket. Before you specify a value for your access key ID, review and follow the guidance \n in Best Practices for Managing AWS Access Keys.
" + "smithy.api#documentation": "The access key ID of the owner of the bucket. Before you specify a value for your access key ID, review and follow the guidance \n in Best Practices for Managing Amazon Web Services Access Keys.
" } }, "Bucket": { @@ -58177,7 +58245,7 @@ } }, "traits": { - "smithy.api#documentation": "Describes the storage parameters for S3 and S3 buckets for an instance store-backed AMI.
" + "smithy.api#documentation": "Describes the storage parameters for Amazon S3 and Amazon S3 buckets for an instance store-backed AMI.
" } }, "com.amazonaws.ec2#ScheduledInstance": { @@ -60612,7 +60680,7 @@ } }, "ImageId": { - "target": "com.amazonaws.ec2#String", + "target": "com.amazonaws.ec2#ImageId", "traits": { "aws.protocols#ec2QueryName": "ImageId", "smithy.api#documentation": "The ID of the AMI.
", @@ -60636,7 +60704,7 @@ } }, "KeyName": { - "target": "com.amazonaws.ec2#String", + "target": "com.amazonaws.ec2#KeyPairName", "traits": { "aws.protocols#ec2QueryName": "KeyName", "smithy.api#documentation": "The name of the key pair.
", @@ -60684,7 +60752,7 @@ } }, "SubnetId": { - "target": "com.amazonaws.ec2#String", + "target": "com.amazonaws.ec2#SubnetId", "traits": { "aws.protocols#ec2QueryName": "SubnetId", "smithy.api#documentation": "The IDs of the subnets in which to launch the instances. To specify multiple subnets, separate\n them using commas; for example, \"subnet-1234abcdeexample1, subnet-0987cdef6example2\".
", @@ -61547,7 +61615,7 @@ } }, "GroupName": { - "target": "com.amazonaws.ec2#String", + "target": "com.amazonaws.ec2#PlacementGroupName", "traits": { "aws.protocols#ec2QueryName": "GroupName", "smithy.api#documentation": "The name of the placement group.
", @@ -61763,7 +61831,7 @@ "target": "com.amazonaws.ec2#StartInstancesResult" }, "traits": { - "smithy.api#documentation": "Starts an Amazon EBS-backed instance that you've previously stopped.
\nInstances that use Amazon EBS volumes as their root devices can be quickly stopped and\n started. When an instance is stopped, the compute resources are released and you are not\n billed for instance usage. However, your root partition Amazon EBS volume remains and\n continues to persist your data, and you are charged for Amazon EBS volume usage. You can\n restart your instance at any time. Every time you start your instance, Amazon EC2\n charges a one-minute minimum for instance usage, and thereafter charges per second for\n instance usage.
\nBefore stopping an instance, make sure it is in a state from which it can be\n restarted. Stopping an instance does not preserve data stored in RAM.
\nPerforming this operation on an instance that uses an instance store as its root\n device returns an error.
\nFor more information, see Stopping instances in the\n Amazon EC2 User Guide.
" + "smithy.api#documentation": "Starts an Amazon EBS-backed instance that you've previously stopped.
\nInstances that use Amazon EBS volumes as their root devices can be quickly stopped and\n started. When an instance is stopped, the compute resources are released and you are not\n billed for instance usage. However, your root partition Amazon EBS volume remains and\n continues to persist your data, and you are charged for Amazon EBS volume usage. You can\n restart your instance at any time. Every time you start your instance, Amazon EC2\n charges a one-minute minimum for instance usage, and thereafter charges per second for\n instance usage.
\nBefore stopping an instance, make sure it is in a state from which it can be\n restarted. Stopping an instance does not preserve data stored in RAM.
\nPerforming this operation on an instance that uses an instance store as its root\n device returns an error.
\n \nIf you attempt to start a T3 instance with host
tenancy and the unlimted
\n CPU credit option, the request fails. The unlimited
CPU credit option is not \n supported on Dedicated Hosts. Before you start the instance, either change its CPU credit \n option to standard
, or change its tenancy to default
or dedicated
.
For more information, see Stopping instances in the\n Amazon EC2 User Guide.
" } }, "com.amazonaws.ec2#StartInstancesRequest": { @@ -62165,7 +62233,7 @@ "target": "com.amazonaws.ec2#String", "traits": { "aws.protocols#ec2QueryName": "Bucket", - "smithy.api#documentation": "The name of the S3 bucket that contains the stored AMI object.
", + "smithy.api#documentation": "The name of the Amazon S3 bucket that contains the stored AMI object.
", "smithy.api#xmlName": "bucket" } }, @@ -62796,7 +62864,7 @@ "target": "com.amazonaws.ec2#ResourceType", "traits": { "aws.protocols#ec2QueryName": "ResourceType", - "smithy.api#documentation": "The type of resource to tag. Currently, the resource types that support tagging on\n creation are: capacity-reservation
| carrier-gateway
|\n client-vpn-endpoint
| customer-gateway
|\n \t dedicated-host
| dhcp-options
| egress-only-internet-gateway
| elastic-ip
| elastic-gpu
|\n \t export-image-task
\n | export-instance-task
| fleet
| fpga-image
|\n \t host-reservation
| image
| import-image-task
|\n \t import-snapshot-task
| instance
| instance-event-window
|\n internet-gateway
| ipv4pool-ec2
| ipv6pool-ec2
|\n \t key-pair
| launch-template
| local-gateway-route-table-vpc-association
| placement-group
|\n \t prefix-list
| natgateway
| network-acl
| network-interface
| \n \t reserved-instances
|route-table
| security-group
| snapshot
| spot-fleet-request
\n | spot-instances-request
| snapshot
| subnet
|\n traffic-mirror-filter
| traffic-mirror-session
|\n traffic-mirror-target
| transit-gateway
|\n \t transit-gateway-attachment
| transit-gateway-multicast-domain
| transit-gateway-route-table
|\n volume
|vpc
| vpc-peering-connection
|\n vpc-endpoint
(for interface and gateway endpoints) |\n \t vpc-endpoint-service
(for Amazon Web Services PrivateLink) | vpc-flow-log
|\n vpn-connection
| vpn-gateway
.
To tag a resource after it has been created, see CreateTags.
", + "smithy.api#documentation": "The type of resource to tag on creation. The possible values are: \n \t capacity-reservation
| carrier-gateway
|\n client-vpn-endpoint
| customer-gateway
|\n \t dedicated-host
| dhcp-options
| \n \t egress-only-internet-gateway
| elastic-gpu
| \n \t elastic-ip
| export-image-task
|\n export-instance-task
| fleet
| fpga-image
|\n \t host-reservation
| image
| import-image-task
|\n \t import-snapshot-task
| instance
| instance-event-window
|\n internet-gateway
| ipv4pool-ec2
| ipv6pool-ec2
|\n \t key-pair
| launch-template
| local-gateway-route-table-vpc-association
|\n \t natgateway
| network-acl
| network-insights-analysis
| \n \t network-insights-path
| network-interface
| \n \t placement-group
| prefix-list
| reserved-instances
| \n \t route-table
| security-group
| security-group-rule
| \n \t snapshot
| spot-fleet-request
| spot-instances-request
| subnet
| \n traffic-mirror-filter
| traffic-mirror-session
| traffic-mirror-target
| \n \t transit-gateway
| transit-gateway-attachment
| \n \t transit-gateway-multicast-domain
| transit-gateway-route-table
|\n volume
| vpc
| vpc-endpoint
| vpc-endpoint-service
| \n \t vpc-flow-log
| vpc-peering-connection
|\n \t vpn-connection
| vpn-gateway
.
To tag a resource after it has been created, see CreateTags.
", "smithy.api#xmlName": "resourceType" } }, @@ -62927,7 +62995,7 @@ "InstanceCount": { "target": "com.amazonaws.ec2#Integer", "traits": { - "smithy.api#documentation": "The number of instances the Covertible Reserved Instance offering can be applied to. This parameter is reserved and cannot \n be specified in a request
" + "smithy.api#documentation": "The number of instances the Convertible Reserved Instance offering can be applied to. This parameter is reserved and cannot \n be specified in a request
" } }, "OfferingId": { diff --git a/codegen/sdk-codegen/aws-models/elasticsearch-service.2015-01-01.json b/codegen/sdk-codegen/aws-models/elasticsearch-service.2015-01-01.json index 6ecbf7679f83..1834a7e35a5c 100644 --- a/codegen/sdk-codegen/aws-models/elasticsearch-service.2015-01-01.json +++ b/codegen/sdk-codegen/aws-models/elasticsearch-service.2015-01-01.json @@ -982,13 +982,13 @@ "Enabled": { "target": "com.amazonaws.elasticsearchservice#Boolean", "traits": { - "smithy.api#documentation": "True to enable cold storage for an Elasticsearch domain.
", + "smithy.api#documentation": "Enable cold storage option. Accepted values true or false
", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "Specifies settings for cold storage.
" + "smithy.api#documentation": "Specifies the configuration for cold storage options such as enabled
" } }, "com.amazonaws.elasticsearchservice#CommitMessage": { @@ -2668,6 +2668,12 @@ "traits": { "smithy.api#documentation": " Specifies the DomainName
.
Specifies the EngineType
of the domain.
Specifies the ColdStorageOptions
configuration for an Elasticsearch domain.
Specifies the ColdStorageOptions
config for Elasticsearch Domain
Optional parameter to filter the output by domain engine type. Acceptable values are 'Elasticsearch' and 'OpenSearch'.
", + "smithy.api#httpQuery": "engineType" + } + } + }, + "traits": { + "smithy.api#documentation": " Container for the parameters to the ListDomainNames
operation.
List of Elasticsearch domain names.
" + "smithy.api#documentation": "List of domain names and respective engine types.
" } } }, "traits": { - "smithy.api#documentation": "The result of a ListDomainNames
operation. Contains the names of all Elasticsearch domains owned by this account.
The result of a ListDomainNames
operation. Contains the names of all domains owned by this account and their respective engine types.
Write data to an Amazon Elasticsearch Service domain.
" + "smithy.api#documentation": "Write data to an Amazon Elasticsearch Service domain.
\nThis action is deprecated. Use the OpenSearch action instead.
\nSend messages to an Amazon Managed Streaming for Apache Kafka (Amazon MSK) or self-managed Apache Kafka cluster.
" } + }, + "openSearch": { + "target": "com.amazonaws.iot#OpenSearchAction", + "traits": { + "smithy.api#documentation": "Write data to an Amazon OpenSearch Service domain.
" + } } }, "traits": { @@ -13118,7 +13124,7 @@ } }, "traits": { - "smithy.api#documentation": "Describes an action that writes data to an Amazon Elasticsearch Service\n domain.
" + "smithy.api#documentation": "Describes an action that writes data to an Amazon Elasticsearch Service\n domain.
\nThis action is deprecated. Use the OpenSearch action instead.
\nThe IAM role ARN that has access to OpenSearch.
", + "smithy.api#required": {} + } + }, + "endpoint": { + "target": "com.amazonaws.iot#ElasticsearchEndpoint", + "traits": { + "smithy.api#documentation": "The endpoint of your OpenSearch domain.
", + "smithy.api#required": {} + } + }, + "index": { + "target": "com.amazonaws.iot#ElasticsearchIndex", + "traits": { + "smithy.api#documentation": "The OpenSearch index where you want to store your data.
", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.iot#ElasticsearchType", + "traits": { + "smithy.api#documentation": "The type of document you are storing.
", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.iot#ElasticsearchId", + "traits": { + "smithy.api#documentation": "The unique identifier for the document you are storing.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Describes an action that writes data to an Amazon OpenSearch Service\n domain.
" + } + }, "com.amazonaws.iot#OptionalVersion": { "type": "long", "traits": { @@ -26185,7 +26234,7 @@ "disconnectReason": { "target": "com.amazonaws.iot#DisconnectReason", "traits": { - "smithy.api#documentation": "The reason why the client is disconnected.
" + "smithy.api#documentation": "The reason why the client is disconnected. If the thing has been disconnected for approximately an hour, the disconnectReason
value might be missing.
The bootstrap servers of the cluster.
", + "smithy.api#required": {} + } + }, + "vpc": { + "target": "com.amazonaws.kafkaconnect#Vpc", + "traits": { + "smithy.api#documentation": "Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The details of the Apache Kafka cluster to which the connector is connected.
" + } + }, + "com.amazonaws.kafkaconnect#ApacheKafkaClusterDescription": { + "type": "structure", + "members": { + "bootstrapServers": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The bootstrap servers of the cluster.
" + } + }, + "vpc": { + "target": "com.amazonaws.kafkaconnect#VpcDescription", + "traits": { + "smithy.api#documentation": "Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The description of the Apache Kafka cluster to which the connector is connected.
" + } + }, + "com.amazonaws.kafkaconnect#AutoScaling": { + "type": "structure", + "members": { + "maxWorkerCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "traits": { + "smithy.api#documentation": "The maximum number of workers allocated to the connector.
", + "smithy.api#required": {} + } + }, + "mcuCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max8", + "traits": { + "smithy.api#documentation": "The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
", + "smithy.api#required": {} + } + }, + "minWorkerCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "traits": { + "smithy.api#documentation": "The minimum number of workers allocated to the connector.
", + "smithy.api#required": {} + } + }, + "scaleInPolicy": { + "target": "com.amazonaws.kafkaconnect#ScaleInPolicy", + "traits": { + "smithy.api#documentation": "The sacle-in policy for the connector.
" + } + }, + "scaleOutPolicy": { + "target": "com.amazonaws.kafkaconnect#ScaleOutPolicy", + "traits": { + "smithy.api#documentation": "The sacle-out policy for the connector.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Specifies how the connector scales.
" + } + }, + "com.amazonaws.kafkaconnect#AutoScalingDescription": { + "type": "structure", + "members": { + "maxWorkerCount": { + "target": "com.amazonaws.kafkaconnect#__integer", + "traits": { + "smithy.api#documentation": "The maximum number of workers allocated to the connector.
" + } + }, + "mcuCount": { + "target": "com.amazonaws.kafkaconnect#__integer", + "traits": { + "smithy.api#documentation": "The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
" + } + }, + "minWorkerCount": { + "target": "com.amazonaws.kafkaconnect#__integer", + "traits": { + "smithy.api#documentation": "The minimum number of workers allocated to the connector.
" + } + }, + "scaleInPolicy": { + "target": "com.amazonaws.kafkaconnect#ScaleInPolicyDescription", + "traits": { + "smithy.api#documentation": "The sacle-in policy for the connector.
" + } + }, + "scaleOutPolicy": { + "target": "com.amazonaws.kafkaconnect#ScaleOutPolicyDescription", + "traits": { + "smithy.api#documentation": "The sacle-out policy for the connector.>
" + } + } + }, + "traits": { + "smithy.api#documentation": "Information about the auto scaling parameters for the connector.
" + } + }, + "com.amazonaws.kafkaconnect#AutoScalingUpdate": { + "type": "structure", + "members": { + "maxWorkerCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "traits": { + "smithy.api#documentation": "The target maximum number of workers allocated to the connector.
", + "smithy.api#required": {} + } + }, + "mcuCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max8", + "traits": { + "smithy.api#documentation": "The target number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
", + "smithy.api#required": {} + } + }, + "minWorkerCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "traits": { + "smithy.api#documentation": "The target minimum number of workers allocated to the connector.
", + "smithy.api#required": {} + } + }, + "scaleInPolicy": { + "target": "com.amazonaws.kafkaconnect#ScaleInPolicyUpdate", + "traits": { + "smithy.api#documentation": "The target sacle-in policy for the connector.
", + "smithy.api#required": {} + } + }, + "scaleOutPolicy": { + "target": "com.amazonaws.kafkaconnect#ScaleOutPolicyUpdate", + "traits": { + "smithy.api#documentation": "The target sacle-out policy for the connector.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The updates to the auto scaling parameters for the connector.
" + } + }, + "com.amazonaws.kafkaconnect#BadRequestException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.
", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.kafkaconnect#Capacity": { + "type": "structure", + "members": { + "autoScaling": { + "target": "com.amazonaws.kafkaconnect#AutoScaling", + "traits": { + "smithy.api#documentation": "Information about the auto scaling parameters for the connector.
" + } + }, + "provisionedCapacity": { + "target": "com.amazonaws.kafkaconnect#ProvisionedCapacity", + "traits": { + "smithy.api#documentation": "Details about a fixed capacity allocated to a connector.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Information about the capacity of the connector, whether it is auto scaled or provisioned.
" + } + }, + "com.amazonaws.kafkaconnect#CapacityDescription": { + "type": "structure", + "members": { + "autoScaling": { + "target": "com.amazonaws.kafkaconnect#AutoScalingDescription", + "traits": { + "smithy.api#documentation": "Describes the connector's auto scaling capacity.
" + } + }, + "provisionedCapacity": { + "target": "com.amazonaws.kafkaconnect#ProvisionedCapacityDescription", + "traits": { + "smithy.api#documentation": "Describes a connector's provisioned capacity.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A description of the connector's capacity.
" + } + }, + "com.amazonaws.kafkaconnect#CapacityUpdate": { + "type": "structure", + "members": { + "autoScaling": { + "target": "com.amazonaws.kafkaconnect#AutoScalingUpdate", + "traits": { + "smithy.api#documentation": "The target auto scaling setting.
" + } + }, + "provisionedCapacity": { + "target": "com.amazonaws.kafkaconnect#ProvisionedCapacityUpdate", + "traits": { + "smithy.api#documentation": "The target settings for provisioned capacity.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The target capacity for the connector. The capacity can be auto scaled or provisioned.
" + } + }, + "com.amazonaws.kafkaconnect#CloudWatchLogsLogDelivery": { + "type": "structure", + "members": { + "enabled": { + "target": "com.amazonaws.kafkaconnect#__boolean", + "traits": { + "smithy.api#documentation": "Whether log delivery to Amazon CloudWatch Logs is enabled.
", + "smithy.api#required": {} + } + }, + "logGroup": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the CloudWatch log group that is the destination for log delivery.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The settings for delivering connector logs to Amazon CloudWatch Logs.
" + } + }, + "com.amazonaws.kafkaconnect#CloudWatchLogsLogDeliveryDescription": { + "type": "structure", + "members": { + "enabled": { + "target": "com.amazonaws.kafkaconnect#__boolean", + "traits": { + "smithy.api#documentation": "Whether log delivery to Amazon CloudWatch Logs is enabled.
" + } + }, + "logGroup": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the CloudWatch log group that is the destination for log delivery.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A description of the log delivery settings.
" + } + }, + "com.amazonaws.kafkaconnect#ConflictException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "HTTP Status Code 409: Conflict. A resource with this name already exists. Retry your request with another name.
", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.kafkaconnect#ConnectorState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "RUNNING", + "name": "RUNNING" + }, + { + "value": "CREATING", + "name": "CREATING" + }, + { + "value": "UPDATING", + "name": "UPDATING" + }, + { + "value": "DELETING", + "name": "DELETING" + }, + { + "value": "FAILED", + "name": "FAILED" + } + ] + } + }, + "com.amazonaws.kafkaconnect#ConnectorSummary": { + "type": "structure", + "members": { + "capacity": { + "target": "com.amazonaws.kafkaconnect#CapacityDescription", + "traits": { + "smithy.api#documentation": "The connector's compute capacity settings.
" + } + }, + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the connector.
" + } + }, + "connectorDescription": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The description of the connector.
" + } + }, + "connectorName": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the connector.
" + } + }, + "connectorState": { + "target": "com.amazonaws.kafkaconnect#ConnectorState", + "traits": { + "smithy.api#documentation": "The state of the connector.
" + } + }, + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "The time that the connector was created.
" + } + }, + "currentVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The current version of the connector.
" + } + }, + "kafkaCluster": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterDescription", + "traits": { + "smithy.api#documentation": "The details of the Apache Kafka cluster to which the connector is connected.
" + } + }, + "kafkaClusterClientAuthentication": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationDescription", + "traits": { + "smithy.api#documentation": "The type of client authentication used to connect to the Apache Kafka cluster. The value is NONE when no client authentication is used.
" + } + }, + "kafkaClusterEncryptionInTransit": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransitDescription", + "traits": { + "smithy.api#documentation": "Details of encryption in transit to the Apache Kafka cluster.
" + } + }, + "kafkaConnectVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
" + } + }, + "logDelivery": { + "target": "com.amazonaws.kafkaconnect#LogDeliveryDescription", + "traits": { + "smithy.api#documentation": "The settings for delivering connector logs to Amazon CloudWatch Logs.
" + } + }, + "plugins": { + "target": "com.amazonaws.kafkaconnect#__listOfPluginDescription", + "traits": { + "smithy.api#documentation": "Specifies which plugins were used for this connector.
" + } + }, + "serviceExecutionRoleArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon Web Services resources.
" + } + }, + "workerConfiguration": { + "target": "com.amazonaws.kafkaconnect#WorkerConfigurationDescription", + "traits": { + "smithy.api#documentation": "The worker configurations that are in use with the connector.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Summary of a connector.
" + } + }, + "com.amazonaws.kafkaconnect#CreateConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#CreateConnectorRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#CreateConnectorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ConflictException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "Creates a connector using the specified properties.
", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/connectors", + "code": 200 + } + } + }, + "com.amazonaws.kafkaconnect#CreateConnectorRequest": { + "type": "structure", + "members": { + "capacity": { + "target": "com.amazonaws.kafkaconnect#Capacity", + "traits": { + "smithy.api#documentation": "Information about the capacity allocated to the connector. Exactly one of the two properties must be specified.
", + "smithy.api#required": {} + } + }, + "connectorConfiguration": { + "target": "com.amazonaws.kafkaconnect#__mapOf__string", + "traits": { + "smithy.api#documentation": "A map of keys to values that represent the configuration for the connector.
", + "smithy.api#required": {} + } + }, + "connectorDescription": { + "target": "com.amazonaws.kafkaconnect#__stringMax1024", + "traits": { + "smithy.api#documentation": "A summary description of the connector.
" + } + }, + "connectorName": { + "target": "com.amazonaws.kafkaconnect#__stringMin1Max128", + "traits": { + "smithy.api#documentation": "The name of the connector.
", + "smithy.api#required": {} + } + }, + "kafkaCluster": { + "target": "com.amazonaws.kafkaconnect#KafkaCluster", + "traits": { + "smithy.api#documentation": "Specifies which Apache Kafka cluster to connect to.
", + "smithy.api#required": {} + } + }, + "kafkaClusterClientAuthentication": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterClientAuthentication", + "traits": { + "smithy.api#documentation": "Details of the client authentication used by the Apache Kafka cluster.
", + "smithy.api#required": {} + } + }, + "kafkaClusterEncryptionInTransit": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransit", + "traits": { + "smithy.api#documentation": "Details of encryption in transit to the Apache Kafka cluster.
", + "smithy.api#required": {} + } + }, + "kafkaConnectVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
", + "smithy.api#required": {} + } + }, + "logDelivery": { + "target": "com.amazonaws.kafkaconnect#LogDelivery", + "traits": { + "smithy.api#documentation": "Details about log delivery.
" + } + }, + "plugins": { + "target": "com.amazonaws.kafkaconnect#__listOfPlugin", + "traits": { + "smithy.api#documentation": "Specifies which plugins to use for the connector.
", + "smithy.api#required": {} + } + }, + "serviceExecutionRoleArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depends on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
", + "smithy.api#required": {} + } + }, + "workerConfiguration": { + "target": "com.amazonaws.kafkaconnect#WorkerConfiguration", + "traits": { + "smithy.api#documentation": "Specifies which worker configuration to use with the connector.
" + } + } + } + }, + "com.amazonaws.kafkaconnect#CreateConnectorResponse": { + "type": "structure", + "members": { + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) that Amazon assigned to the connector.
" + } + }, + "connectorName": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the connector.
" + } + }, + "connectorState": { + "target": "com.amazonaws.kafkaconnect#ConnectorState", + "traits": { + "smithy.api#documentation": "The state of the connector.
" + } + } + } + }, + "com.amazonaws.kafkaconnect#CreateCustomPlugin": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#CreateCustomPluginRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#CreateCustomPluginResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ConflictException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "Creates a custom plugin using the specified properties.
", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/custom-plugins", + "code": 200 + } + } + }, + "com.amazonaws.kafkaconnect#CreateCustomPluginRequest": { + "type": "structure", + "members": { + "contentType": { + "target": "com.amazonaws.kafkaconnect#CustomPluginContentType", + "traits": { + "smithy.api#documentation": "The type of the plugin file.
", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__stringMax1024", + "traits": { + "smithy.api#documentation": "A summary description of the custom plugin.
" + } + }, + "location": { + "target": "com.amazonaws.kafkaconnect#CustomPluginLocation", + "traits": { + "smithy.api#documentation": "Information about the location of a custom plugin.
", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__stringMin1Max128", + "traits": { + "smithy.api#documentation": "The name of the custom plugin.
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kafkaconnect#CreateCustomPluginResponse": { + "type": "structure", + "members": { + "customPluginArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) that Amazon assigned to the custom plugin.
" + } + }, + "customPluginState": { + "target": "com.amazonaws.kafkaconnect#CustomPluginState", + "traits": { + "smithy.api#documentation": "The state of the custom plugin.
" + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the custom plugin.
" + } + }, + "revision": { + "target": "com.amazonaws.kafkaconnect#__long", + "traits": { + "smithy.api#documentation": "The revision of the custom plugin.
" + } + } + } + }, + "com.amazonaws.kafkaconnect#CreateWorkerConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#CreateWorkerConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#CreateWorkerConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ConflictException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "Creates a worker configuration using the specified properties.
", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/worker-configurations", + "code": 200 + } + } + }, + "com.amazonaws.kafkaconnect#CreateWorkerConfigurationRequest": { + "type": "structure", + "members": { + "description": { + "target": "com.amazonaws.kafkaconnect#__stringMax1024", + "traits": { + "smithy.api#documentation": "A summary description of the worker configuration.
" + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__stringMin1Max128", + "traits": { + "smithy.api#documentation": "The name of the worker configuration.
", + "smithy.api#required": {} + } + }, + "propertiesFileContent": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "Base64 encoded contents of connect-distributed.properties file.
", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kafkaconnect#CreateWorkerConfigurationResponse": { + "type": "structure", + "members": { + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "The time that the worker configuration was created.
" + } + }, + "latestRevision": { + "target": "com.amazonaws.kafkaconnect#WorkerConfigurationRevisionSummary", + "traits": { + "smithy.api#documentation": "The latest revision of the worker configuration.
" + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the worker configuration.
" + } + }, + "workerConfigurationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) that Amazon assigned to the worker configuration.
" + } + } + } + }, + "com.amazonaws.kafkaconnect#CustomPlugin": { + "type": "structure", + "members": { + "customPluginArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the custom plugin.
", + "smithy.api#required": {} + } + }, + "revision": { + "target": "com.amazonaws.kafkaconnect#__longMin1", + "traits": { + "smithy.api#documentation": "The revision of the custom plugin.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "A plugin is an AWS resource that contains the code that defines a connector's logic.
" + } + }, + "com.amazonaws.kafkaconnect#CustomPluginContentType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "JAR", + "name": "JAR" + }, + { + "value": "ZIP", + "name": "ZIP" + } + ] + } + }, + "com.amazonaws.kafkaconnect#CustomPluginDescription": { + "type": "structure", + "members": { + "customPluginArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the custom plugin.
" + } + }, + "revision": { + "target": "com.amazonaws.kafkaconnect#__long", + "traits": { + "smithy.api#documentation": "The revision of the custom plugin.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Details about a custom plugin.
" + } + }, + "com.amazonaws.kafkaconnect#CustomPluginFileDescription": { + "type": "structure", + "members": { + "fileMd5": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The hex-encoded MD5 checksum of the custom plugin file. You can use it to validate the file.
" + } + }, + "fileSize": { + "target": "com.amazonaws.kafkaconnect#__long", + "traits": { + "smithy.api#documentation": "The size in bytes of the custom plugin file. You can use it to validate the file.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Details about a custom plugin file.
" + } + }, + "com.amazonaws.kafkaconnect#CustomPluginLocation": { + "type": "structure", + "members": { + "s3Location": { + "target": "com.amazonaws.kafkaconnect#S3Location", + "traits": { + "smithy.api#documentation": "The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Information about the location of a custom plugin.
" + } + }, + "com.amazonaws.kafkaconnect#CustomPluginLocationDescription": { + "type": "structure", + "members": { + "s3Location": { + "target": "com.amazonaws.kafkaconnect#S3LocationDescription", + "traits": { + "smithy.api#documentation": "The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Information about the location of a custom plugin.
" + } + }, + "com.amazonaws.kafkaconnect#CustomPluginRevisionSummary": { + "type": "structure", + "members": { + "contentType": { + "target": "com.amazonaws.kafkaconnect#CustomPluginContentType", + "traits": { + "smithy.api#documentation": "The format of the plugin file.
" + } + }, + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "The time that the custom plugin was created.
" + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The description of the custom plugin.
" + } + }, + "fileDescription": { + "target": "com.amazonaws.kafkaconnect#CustomPluginFileDescription", + "traits": { + "smithy.api#documentation": "Details about the custom plugin file.
" + } + }, + "location": { + "target": "com.amazonaws.kafkaconnect#CustomPluginLocationDescription", + "traits": { + "smithy.api#documentation": "Information about the location of the custom plugin.
" + } + }, + "revision": { + "target": "com.amazonaws.kafkaconnect#__long", + "traits": { + "smithy.api#documentation": "The revision of the custom plugin.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Details about the revision of a custom plugin.
" + } + }, + "com.amazonaws.kafkaconnect#CustomPluginState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATING", + "name": "CREATING" + }, + { + "value": "CREATE_FAILED", + "name": "CREATE_FAILED" + }, + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "UPDATING", + "name": "UPDATING" + }, + { + "value": "UPDATE_FAILED", + "name": "UPDATE_FAILED" + }, + { + "value": "DELETING", + "name": "DELETING" + } + ] + } + }, + "com.amazonaws.kafkaconnect#CustomPluginSummary": { + "type": "structure", + "members": { + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "The time that the custom plugin was created.
" + } + }, + "customPluginArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the custom plugin.
" + } + }, + "customPluginState": { + "target": "com.amazonaws.kafkaconnect#CustomPluginState", + "traits": { + "smithy.api#documentation": "The state of the custom plugin.
" + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "A description of the custom plugin.
" + } + }, + "latestRevision": { + "target": "com.amazonaws.kafkaconnect#CustomPluginRevisionSummary", + "traits": { + "smithy.api#documentation": "The latest revision of the custom plugin.
" + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the custom plugin.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A summary of the custom plugin.
" + } + }, + "com.amazonaws.kafkaconnect#DeleteConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#DeleteConnectorRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#DeleteConnectorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "Deletes the specified connector.
", + "smithy.api#http": { + "method": "DELETE", + "uri": "/v1/connectors/{connectorArn}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.kafkaconnect#DeleteConnectorRequest": { + "type": "structure", + "members": { + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the connector that you want to delete.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "currentVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The current version of the connector that you want to delete.
", + "smithy.api#httpQuery": "currentVersion" + } + } + } + }, + "com.amazonaws.kafkaconnect#DeleteConnectorResponse": { + "type": "structure", + "members": { + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the connector that you requested to delete.
" + } + }, + "connectorState": { + "target": "com.amazonaws.kafkaconnect#ConnectorState", + "traits": { + "smithy.api#documentation": "The state of the connector that you requested to delete.
" + } + } + } + }, + "com.amazonaws.kafkaconnect#DescribeConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#DescribeConnectorRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#DescribeConnectorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "Returns summary information about the connector.
", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/connectors/{connectorArn}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.kafkaconnect#DescribeConnectorRequest": { + "type": "structure", + "members": { + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the connector that you want to describe.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kafkaconnect#DescribeConnectorResponse": { + "type": "structure", + "members": { + "capacity": { + "target": "com.amazonaws.kafkaconnect#CapacityDescription", + "traits": { + "smithy.api#documentation": "Information about the capacity of the connector, whether it is auto scaled or provisioned.
" + } + }, + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the connector.
" + } + }, + "connectorConfiguration": { + "target": "com.amazonaws.kafkaconnect#__mapOf__string", + "traits": { + "smithy.api#documentation": "A map of keys to values that represent the configuration for the connector.
" + } + }, + "connectorDescription": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "A summary description of the connector.
" + } + }, + "connectorName": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the connector.
" + } + }, + "connectorState": { + "target": "com.amazonaws.kafkaconnect#ConnectorState", + "traits": { + "smithy.api#documentation": "The state of the connector.
" + } + }, + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "The time the connector was created.
" + } + }, + "currentVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The current version of the connector.
" + } + }, + "kafkaCluster": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterDescription", + "traits": { + "smithy.api#documentation": "The Apache Kafka cluster that the connector is connected to.
" + } + }, + "kafkaClusterClientAuthentication": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationDescription", + "traits": { + "smithy.api#documentation": "The type of client authentication used to connect to the Apache Kafka cluster. The value is NONE when no client authentication is used.
" + } + }, + "kafkaClusterEncryptionInTransit": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransitDescription", + "traits": { + "smithy.api#documentation": "Details of encryption in transit to the Apache Kafka cluster.
" + } + }, + "kafkaConnectVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
" + } + }, + "logDelivery": { + "target": "com.amazonaws.kafkaconnect#LogDeliveryDescription", + "traits": { + "smithy.api#documentation": "Details about delivering logs to Amazon CloudWatch Logs.
" + } + }, + "plugins": { + "target": "com.amazonaws.kafkaconnect#__listOfPluginDescription", + "traits": { + "smithy.api#documentation": "Specifies which plugins were used for this connector.
" + } + }, + "serviceExecutionRoleArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon Web Services resources.
" + } + }, + "workerConfiguration": { + "target": "com.amazonaws.kafkaconnect#WorkerConfigurationDescription", + "traits": { + "smithy.api#documentation": "Specifies which worker configuration was used for the connector.
" + } + } + } + }, + "com.amazonaws.kafkaconnect#DescribeCustomPlugin": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#DescribeCustomPluginRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#DescribeCustomPluginResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "A summary description of the custom plugin.
", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/custom-plugins/{customPluginArn}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.kafkaconnect#DescribeCustomPluginRequest": { + "type": "structure", + "members": { + "customPluginArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "Returns information about a custom plugin.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kafkaconnect#DescribeCustomPluginResponse": { + "type": "structure", + "members": { + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "The time that the custom plugin was created.
" + } + }, + "customPluginArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the custom plugin.
" + } + }, + "customPluginState": { + "target": "com.amazonaws.kafkaconnect#CustomPluginState", + "traits": { + "smithy.api#documentation": "The state of the custom plugin.
" + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The description of the custom plugin.
" + } + }, + "latestRevision": { + "target": "com.amazonaws.kafkaconnect#CustomPluginRevisionSummary", + "traits": { + "smithy.api#documentation": "The latest successfully created revision of the custom plugin. If there are no successfully created revisions, this field will be absent.
" + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the custom plugin.
" + } + } + } + }, + "com.amazonaws.kafkaconnect#DescribeWorkerConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#DescribeWorkerConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#DescribeWorkerConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "Returns information about a worker configuration.
", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/worker-configurations/{workerConfigurationArn}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.kafkaconnect#DescribeWorkerConfigurationRequest": { + "type": "structure", + "members": { + "workerConfigurationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the worker configuration that you want to get information about.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kafkaconnect#DescribeWorkerConfigurationResponse": { + "type": "structure", + "members": { + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "The time that the worker configuration was created.
" + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The description of the worker configuration.
" + } + }, + "latestRevision": { + "target": "com.amazonaws.kafkaconnect#WorkerConfigurationRevisionDescription", + "traits": { + "smithy.api#documentation": "The latest revision of the custom configuration.
" + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the worker configuration.
" + } + }, + "workerConfigurationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the custom configuration.
" + } + } + } + }, + "com.amazonaws.kafkaconnect#FirehoseLogDelivery": { + "type": "structure", + "members": { + "deliveryStream": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery.
" + } + }, + "enabled": { + "target": "com.amazonaws.kafkaconnect#__boolean", + "traits": { + "smithy.api#documentation": "Specifies whether connector logs get delivered to Amazon Kinesis Data Firehose.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The settings for delivering logs to Amazon Kinesis Data Firehose.
" + } + }, + "com.amazonaws.kafkaconnect#FirehoseLogDeliveryDescription": { + "type": "structure", + "members": { + "deliveryStream": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery.
" + } + }, + "enabled": { + "target": "com.amazonaws.kafkaconnect#__boolean", + "traits": { + "smithy.api#documentation": "Specifies whether connector logs get delivered to Amazon Kinesis Data Firehose.
" + } + } + }, + "traits": { + "smithy.api#documentation": "A description of the settings for delivering logs to Amazon Kinesis Data Firehose.
" + } + }, + "com.amazonaws.kafkaconnect#ForbiddenException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.
", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.kafkaconnect#InternalServerErrorException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.
", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, + "com.amazonaws.kafkaconnect#KafkaCluster": { + "type": "structure", + "members": { + "apacheKafkaCluster": { + "target": "com.amazonaws.kafkaconnect#ApacheKafkaCluster", + "traits": { + "smithy.api#documentation": "The Apache Kafka cluster to which the connector is connected.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The details of the Apache Kafka cluster to which the connector is connected.
" + } + }, + "com.amazonaws.kafkaconnect#KafkaClusterClientAuthentication": { + "type": "structure", + "members": { + "authenticationType": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationType", + "traits": { + "smithy.api#documentation": "The type of client authentication used to connect to the Apache Kafka cluster. Value NONE means that no client authentication is used.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The client authentication information used in order to authenticate with the Apache Kafka cluster.
" + } + }, + "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationDescription": { + "type": "structure", + "members": { + "authenticationType": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationType", + "traits": { + "smithy.api#documentation": "The type of client authentication used to connect to the Apache Kafka cluster. Value NONE means that no client authentication is used.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The client authentication information used in order to authenticate with the Apache Kafka cluster.
" + } + }, + "com.amazonaws.kafkaconnect#KafkaClusterClientAuthenticationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "NONE", + "name": "NONE" + }, + { + "value": "IAM", + "name": "IAM" + } + ] + } + }, + "com.amazonaws.kafkaconnect#KafkaClusterDescription": { + "type": "structure", + "members": { + "apacheKafkaCluster": { + "target": "com.amazonaws.kafkaconnect#ApacheKafkaClusterDescription", + "traits": { + "smithy.api#documentation": "The Apache Kafka cluster to which the connector is connected.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Details of how to connect to the Apache Kafka cluster.
" + } + }, + "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransit": { + "type": "structure", + "members": { + "encryptionType": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransitType", + "traits": { + "smithy.api#documentation": "The type of encryption in transit to the Apache Kafka cluster.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Details of encryption in transit to the Apache Kafka cluster.
" + } + }, + "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransitDescription": { + "type": "structure", + "members": { + "encryptionType": { + "target": "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransitType", + "traits": { + "smithy.api#documentation": "The type of encryption in transit to the Apache Kafka cluster.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The description of the encryption in transit to the Apache Kafka cluster.
" + } + }, + "com.amazonaws.kafkaconnect#KafkaClusterEncryptionInTransitType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PLAINTEXT", + "name": "PLAINTEXT" + }, + { + "value": "TLS", + "name": "TLS" + } + ] + } + }, + "com.amazonaws.kafkaconnect#KafkaConnect": { + "type": "service", + "version": "2021-09-14", + "operations": [ + { + "target": "com.amazonaws.kafkaconnect#CreateConnector" + }, + { + "target": "com.amazonaws.kafkaconnect#CreateCustomPlugin" + }, + { + "target": "com.amazonaws.kafkaconnect#CreateWorkerConfiguration" + }, + { + "target": "com.amazonaws.kafkaconnect#DeleteConnector" + }, + { + "target": "com.amazonaws.kafkaconnect#DescribeConnector" + }, + { + "target": "com.amazonaws.kafkaconnect#DescribeCustomPlugin" + }, + { + "target": "com.amazonaws.kafkaconnect#DescribeWorkerConfiguration" + }, + { + "target": "com.amazonaws.kafkaconnect#ListConnectors" + }, + { + "target": "com.amazonaws.kafkaconnect#ListCustomPlugins" + }, + { + "target": "com.amazonaws.kafkaconnect#ListWorkerConfigurations" + }, + { + "target": "com.amazonaws.kafkaconnect#UpdateConnector" + } + ], + "traits": { + "aws.api#service": { + "sdkId": "KafkaConnect", + "arnNamespace": "kafkaconnect", + "cloudFormationName": "KafkaConnect", + "cloudTrailEventSource": "kafkaconnect.amazonaws.com", + "endpointPrefix": "kafkaconnect" + }, + "aws.auth#sigv4": { + "name": "kafkaconnect" + }, + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "additionalAllowedHeaders": ["X-Api-Key"] + }, + "smithy.api#documentation": "", + "smithy.api#title": "Managed Streaming for Kafka Connect" + } + }, + "com.amazonaws.kafkaconnect#ListConnectors": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#ListConnectorsRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#ListConnectorsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "Returns a list of all the connectors in this account and Region. The list is limited to connectors whose name starts with the specified prefix. The response also includes a description of each of the listed connectors.
", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/connectors", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "connectors", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.kafkaconnect#ListConnectorsRequest": { + "type": "structure", + "members": { + "connectorNamePrefix": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name prefix that you want to use to search for and list connectors.
", + "smithy.api#httpQuery": "connectorNamePrefix" + } + }, + "maxResults": { + "target": "com.amazonaws.kafkaconnect#MaxResults", + "traits": { + "smithy.api#documentation": "The maximum number of connectors to list in one response.
", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "If the response of a ListConnectors operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
", + "smithy.api#httpQuery": "nextToken" + } + } + } + }, + "com.amazonaws.kafkaconnect#ListConnectorsResponse": { + "type": "structure", + "members": { + "connectors": { + "target": "com.amazonaws.kafkaconnect#__listOfConnectorSummary", + "traits": { + "smithy.api#documentation": "An array of connector descriptions.
" + } + }, + "nextToken": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "If the response of a ListConnectors operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where it left off.
" + } + } + } + }, + "com.amazonaws.kafkaconnect#ListCustomPlugins": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#ListCustomPluginsRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#ListCustomPluginsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "Returns a list of all of the custom plugins in this account and Region.
", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/custom-plugins", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "customPlugins", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.kafkaconnect#ListCustomPluginsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.kafkaconnect#MaxResults", + "traits": { + "smithy.api#documentation": "The maximum number of custom plugins to list in one response.
", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
", + "smithy.api#httpQuery": "nextToken" + } + } + } + }, + "com.amazonaws.kafkaconnect#ListCustomPluginsResponse": { + "type": "structure", + "members": { + "customPlugins": { + "target": "com.amazonaws.kafkaconnect#__listOfCustomPluginSummary", + "traits": { + "smithy.api#documentation": "An array of custom plugin descriptions.
" + } + }, + "nextToken": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
" + } + } + } + }, + "com.amazonaws.kafkaconnect#ListWorkerConfigurations": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#ListWorkerConfigurationsRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#ListWorkerConfigurationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "Returns a list of all of the worker configurations in this account and Region.
", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/worker-configurations", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "workerConfigurations", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.kafkaconnect#ListWorkerConfigurationsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.kafkaconnect#MaxResults", + "traits": { + "smithy.api#documentation": "The maximum number of worker configurations to list in one response.
", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
", + "smithy.api#httpQuery": "nextToken" + } + } + } + }, + "com.amazonaws.kafkaconnect#ListWorkerConfigurationsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
" + } + }, + "workerConfigurations": { + "target": "com.amazonaws.kafkaconnect#__listOfWorkerConfigurationSummary", + "traits": { + "smithy.api#documentation": "An array of worker configuration descriptions.
" + } + } + } + }, + "com.amazonaws.kafkaconnect#LogDelivery": { + "type": "structure", + "members": { + "workerLogDelivery": { + "target": "com.amazonaws.kafkaconnect#WorkerLogDelivery", + "traits": { + "smithy.api#documentation": "The workers can send worker logs to different destination types. This configuration specifies the details of these destinations.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Details about log delivery.
" + } + }, + "com.amazonaws.kafkaconnect#LogDeliveryDescription": { + "type": "structure", + "members": { + "workerLogDelivery": { + "target": "com.amazonaws.kafkaconnect#WorkerLogDeliveryDescription", + "traits": { + "smithy.api#documentation": "The workers can send worker logs to different destination types. This configuration specifies the details of these destinations.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The description of the log delivery settings.
" + } + }, + "com.amazonaws.kafkaconnect#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.kafkaconnect#NotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.
", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.kafkaconnect#Plugin": { + "type": "structure", + "members": { + "customPlugin": { + "target": "com.amazonaws.kafkaconnect#CustomPlugin", + "traits": { + "smithy.api#documentation": "Details about a custom plugin.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "A plugin is an AWS resource that contains the code that defines your connector logic.
" + } + }, + "com.amazonaws.kafkaconnect#PluginDescription": { + "type": "structure", + "members": { + "customPlugin": { + "target": "com.amazonaws.kafkaconnect#CustomPluginDescription", + "traits": { + "smithy.api#documentation": "Details about a custom plugin.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The description of the plugin.
" + } + }, + "com.amazonaws.kafkaconnect#ProvisionedCapacity": { + "type": "structure", + "members": { + "mcuCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max8", + "traits": { + "smithy.api#documentation": "The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
", + "smithy.api#required": {} + } + }, + "workerCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "traits": { + "smithy.api#documentation": "The number of workers that are allocated to the connector.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Details about a connector's provisioned capacity.
" + } + }, + "com.amazonaws.kafkaconnect#ProvisionedCapacityDescription": { + "type": "structure", + "members": { + "mcuCount": { + "target": "com.amazonaws.kafkaconnect#__integer", + "traits": { + "smithy.api#documentation": "The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
" + } + }, + "workerCount": { + "target": "com.amazonaws.kafkaconnect#__integer", + "traits": { + "smithy.api#documentation": "The number of workers that are allocated to the connector.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The description of a connector's provisioned capacity.
" + } + }, + "com.amazonaws.kafkaconnect#ProvisionedCapacityUpdate": { + "type": "structure", + "members": { + "mcuCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max8", + "traits": { + "smithy.api#documentation": "The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
", + "smithy.api#required": {} + } + }, + "workerCount": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max10", + "traits": { + "smithy.api#documentation": "The number of workers that are allocated to the connector.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "An update to a connector's fixed capacity.
" + } + }, + "com.amazonaws.kafkaconnect#S3Location": { + "type": "structure", + "members": { + "bucketArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of an S3 bucket.
", + "smithy.api#required": {} + } + }, + "fileKey": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The file key for an object in an S3 bucket.
", + "smithy.api#required": {} + } + }, + "objectVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The version of an object in an S3 bucket.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The location of an object in Amazon S3.
" + } + }, + "com.amazonaws.kafkaconnect#S3LocationDescription": { + "type": "structure", + "members": { + "bucketArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of an S3 bucket.
" + } + }, + "fileKey": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The file key for an object in an S3 bucket.
" + } + }, + "objectVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The version of an object in an S3 bucket.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The description of the location of an object in Amazon S3.
" + } + }, + "com.amazonaws.kafkaconnect#S3LogDelivery": { + "type": "structure", + "members": { + "bucket": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the S3 bucket that is the destination for log delivery.
" + } + }, + "enabled": { + "target": "com.amazonaws.kafkaconnect#__boolean", + "traits": { + "smithy.api#documentation": "Specifies whether connector logs get sent to the specified Amazon S3 destination.
", + "smithy.api#required": {} + } + }, + "prefix": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The S3 prefix that is the destination for log delivery.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Details about delivering logs to Amazon S3.
" + } + }, + "com.amazonaws.kafkaconnect#S3LogDeliveryDescription": { + "type": "structure", + "members": { + "bucket": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the S3 bucket that is the destination for log delivery.
" + } + }, + "enabled": { + "target": "com.amazonaws.kafkaconnect#__boolean", + "traits": { + "smithy.api#documentation": "Specifies whether connector logs get sent to the specified Amazon S3 destination.
" + } + }, + "prefix": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The S3 prefix that is the destination for log delivery.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The description of the details about delivering logs to Amazon S3.
" + } + }, + "com.amazonaws.kafkaconnect#ScaleInPolicy": { + "type": "structure", + "members": { + "cpuUtilizationPercentage": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max100", + "traits": { + "smithy.api#documentation": "Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The scale-in policy for the connector.
" + } + }, + "com.amazonaws.kafkaconnect#ScaleInPolicyDescription": { + "type": "structure", + "members": { + "cpuUtilizationPercentage": { + "target": "com.amazonaws.kafkaconnect#__integer", + "traits": { + "smithy.api#documentation": "Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The description of the scale-in policy for the connector.
" + } + }, + "com.amazonaws.kafkaconnect#ScaleInPolicyUpdate": { + "type": "structure", + "members": { + "cpuUtilizationPercentage": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max100", + "traits": { + "smithy.api#documentation": "The target CPU utilization percentage threshold at which you want connector scale in to be triggered.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "An update to the connector's scale-in policy.
" + } + }, + "com.amazonaws.kafkaconnect#ScaleOutPolicy": { + "type": "structure", + "members": { + "cpuUtilizationPercentage": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max100", + "traits": { + "smithy.api#documentation": "The CPU utilization percentage threshold at which you want connector scale out to be triggered.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The scale-out policy for the connector.
" + } + }, + "com.amazonaws.kafkaconnect#ScaleOutPolicyDescription": { + "type": "structure", + "members": { + "cpuUtilizationPercentage": { + "target": "com.amazonaws.kafkaconnect#__integer", + "traits": { + "smithy.api#documentation": "The CPU utilization percentage threshold at which you want connector scale out to be triggered.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The description of the scale-out policy for the connector.
" + } + }, + "com.amazonaws.kafkaconnect#ScaleOutPolicyUpdate": { + "type": "structure", + "members": { + "cpuUtilizationPercentage": { + "target": "com.amazonaws.kafkaconnect#__integerMin1Max100", + "traits": { + "smithy.api#documentation": "The target CPU utilization percentage threshold at which you want connector scale out to be triggered.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "An update to the connector's scale-out policy.
" + } + }, + "com.amazonaws.kafkaconnect#ServiceUnavailableException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "HTTP Status Code 503: Service Unavailable. Retrying your request in some time might resolve the issue.
", + "smithy.api#error": "server", + "smithy.api#httpError": 503 + } + }, + "com.amazonaws.kafkaconnect#TooManyRequestsException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "HTTP Status Code 429: Limit exceeded. Resource limit reached.
", + "smithy.api#error": "client", + "smithy.api#httpError": 429 + } + }, + "com.amazonaws.kafkaconnect#UnauthorizedException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "traits": { + "smithy.api#documentation": "HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.
", + "smithy.api#error": "client", + "smithy.api#httpError": 401 + } + }, + "com.amazonaws.kafkaconnect#UpdateConnector": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafkaconnect#UpdateConnectorRequest" + }, + "output": { + "target": "com.amazonaws.kafkaconnect#UpdateConnectorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafkaconnect#BadRequestException" + }, + { + "target": "com.amazonaws.kafkaconnect#ForbiddenException" + }, + { + "target": "com.amazonaws.kafkaconnect#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafkaconnect#NotFoundException" + }, + { + "target": "com.amazonaws.kafkaconnect#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafkaconnect#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafkaconnect#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "Updates the specified connector.
", + "smithy.api#http": { + "method": "PUT", + "uri": "/v1/connectors/{connectorArn}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.kafkaconnect#UpdateConnectorRequest": { + "type": "structure", + "members": { + "capacity": { + "target": "com.amazonaws.kafkaconnect#CapacityUpdate", + "traits": { + "smithy.api#documentation": "The target capacity.
", + "smithy.api#required": {} + } + }, + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the connector that you want to update.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "currentVersion": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The current version of the connector that you want to update.
", + "smithy.api#httpQuery": "currentVersion", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kafkaconnect#UpdateConnectorResponse": { + "type": "structure", + "members": { + "connectorArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the connector.
" + } + }, + "connectorState": { + "target": "com.amazonaws.kafkaconnect#ConnectorState", + "traits": { + "smithy.api#documentation": "The state of the connector.
" + } + } + } + }, + "com.amazonaws.kafkaconnect#Vpc": { + "type": "structure", + "members": { + "securityGroups": { + "target": "com.amazonaws.kafkaconnect#__listOf__string", + "traits": { + "smithy.api#documentation": "The security groups for the connector.
" + } + }, + "subnets": { + "target": "com.amazonaws.kafkaconnect#__listOf__string", + "traits": { + "smithy.api#documentation": "The subnets for the connector.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Information about the VPC in which the connector resides.
" + } + }, + "com.amazonaws.kafkaconnect#VpcDescription": { + "type": "structure", + "members": { + "securityGroups": { + "target": "com.amazonaws.kafkaconnect#__listOf__string", + "traits": { + "smithy.api#documentation": "The security groups for the connector.
" + } + }, + "subnets": { + "target": "com.amazonaws.kafkaconnect#__listOf__string", + "traits": { + "smithy.api#documentation": "The subnets for the connector.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The description of the VPC in which the connector resides.
" + } + }, + "com.amazonaws.kafkaconnect#WorkerConfiguration": { + "type": "structure", + "members": { + "revision": { + "target": "com.amazonaws.kafkaconnect#__longMin1", + "traits": { + "smithy.api#documentation": "The revision of the worker configuration.
", + "smithy.api#required": {} + } + }, + "workerConfigurationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the worker configuration.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "The configuration of the workers, which are the processes that run the connector logic.
" + } + }, + "com.amazonaws.kafkaconnect#WorkerConfigurationDescription": { + "type": "structure", + "members": { + "revision": { + "target": "com.amazonaws.kafkaconnect#__long", + "traits": { + "smithy.api#documentation": "The revision of the worker configuration.
" + } + }, + "workerConfigurationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the worker configuration.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The description of the worker configuration.
" + } + }, + "com.amazonaws.kafkaconnect#WorkerConfigurationRevisionDescription": { + "type": "structure", + "members": { + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "The time that the worker configuration was created.
" + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The description of the worker configuration revision.
" + } + }, + "propertiesFileContent": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "Base64 encoded contents of the connect-distributed.properties file.
" + } + }, + "revision": { + "target": "com.amazonaws.kafkaconnect#__long", + "traits": { + "smithy.api#documentation": "The description of a revision of the worker configuration.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The description of the worker configuration revision.
" + } + }, + "com.amazonaws.kafkaconnect#WorkerConfigurationRevisionSummary": { + "type": "structure", + "members": { + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "The time that a worker configuration revision was created.
" + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The description of a worker configuration revision.
" + } + }, + "revision": { + "target": "com.amazonaws.kafkaconnect#__long", + "traits": { + "smithy.api#documentation": "The revision of a worker configuration.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The summary of a worker configuration revision.
" + } + }, + "com.amazonaws.kafkaconnect#WorkerConfigurationSummary": { + "type": "structure", + "members": { + "creationTime": { + "target": "com.amazonaws.kafkaconnect#__timestampIso8601", + "traits": { + "smithy.api#documentation": "The time that a worker configuration was created.
" + } + }, + "description": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The description of a worker configuration.
" + } + }, + "latestRevision": { + "target": "com.amazonaws.kafkaconnect#WorkerConfigurationRevisionSummary", + "traits": { + "smithy.api#documentation": "The latest revision of a worker configuration.
" + } + }, + "name": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The name of the worker configuration.
" + } + }, + "workerConfigurationArn": { + "target": "com.amazonaws.kafkaconnect#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the worker configuration.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The summary of a worker configuration.
" + } + }, + "com.amazonaws.kafkaconnect#WorkerLogDelivery": { + "type": "structure", + "members": { + "cloudWatchLogs": { + "target": "com.amazonaws.kafkaconnect#CloudWatchLogsLogDelivery", + "traits": { + "smithy.api#documentation": "Details about delivering logs to Amazon CloudWatch Logs.
" + } + }, + "firehose": { + "target": "com.amazonaws.kafkaconnect#FirehoseLogDelivery", + "traits": { + "smithy.api#documentation": "Details about delivering logs to Amazon Kinesis Data Firehose.
" + } + }, + "s3": { + "target": "com.amazonaws.kafkaconnect#S3LogDelivery", + "traits": { + "smithy.api#documentation": "Details about delivering logs to Amazon S3.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Workers can send worker logs to different destination types. This configuration specifies the details of these destinations.
" + } + }, + "com.amazonaws.kafkaconnect#WorkerLogDeliveryDescription": { + "type": "structure", + "members": { + "cloudWatchLogs": { + "target": "com.amazonaws.kafkaconnect#CloudWatchLogsLogDeliveryDescription", + "traits": { + "smithy.api#documentation": "Details about delivering logs to Amazon CloudWatch Logs.
" + } + }, + "firehose": { + "target": "com.amazonaws.kafkaconnect#FirehoseLogDeliveryDescription", + "traits": { + "smithy.api#documentation": "Details about delivering logs to Amazon Kinesis Data Firehose.
" + } + }, + "s3": { + "target": "com.amazonaws.kafkaconnect#S3LogDeliveryDescription", + "traits": { + "smithy.api#documentation": "Details about delivering logs to Amazon S3.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Workers can send worker logs to different destination types. This configuration specifies the details of these destinations.
" + } + }, + "com.amazonaws.kafkaconnect#__boolean": { + "type": "boolean" + }, + "com.amazonaws.kafkaconnect#__integer": { + "type": "integer" + }, + "com.amazonaws.kafkaconnect#__integerMin1Max10": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.kafkaconnect#__integerMin1Max100": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.kafkaconnect#__integerMin1Max8": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 8 + } + } + }, + "com.amazonaws.kafkaconnect#__listOfConnectorSummary": { + "type": "list", + "member": { + "target": "com.amazonaws.kafkaconnect#ConnectorSummary" + } + }, + "com.amazonaws.kafkaconnect#__listOfCustomPluginSummary": { + "type": "list", + "member": { + "target": "com.amazonaws.kafkaconnect#CustomPluginSummary" + } + }, + "com.amazonaws.kafkaconnect#__listOfPlugin": { + "type": "list", + "member": { + "target": "com.amazonaws.kafkaconnect#Plugin" + } + }, + "com.amazonaws.kafkaconnect#__listOfPluginDescription": { + "type": "list", + "member": { + "target": "com.amazonaws.kafkaconnect#PluginDescription" + } + }, + "com.amazonaws.kafkaconnect#__listOfWorkerConfigurationSummary": { + "type": "list", + "member": { + "target": "com.amazonaws.kafkaconnect#WorkerConfigurationSummary" + } + }, + "com.amazonaws.kafkaconnect#__listOf__string": { + "type": "list", + "member": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "com.amazonaws.kafkaconnect#__long": { + "type": "long" + }, + "com.amazonaws.kafkaconnect#__longMin1": { + "type": "long", + "traits": { + "smithy.api#range": { + "min": 1, + "max": 9223372036854775807 + } + } + }, + "com.amazonaws.kafkaconnect#__mapOf__string": { + "type": "map", + "key": { + "target": "com.amazonaws.kafkaconnect#__string" + }, + "value": { + "target": "com.amazonaws.kafkaconnect#__string" + } + }, + "com.amazonaws.kafkaconnect#__string": { + "type": "string" + }, + "com.amazonaws.kafkaconnect#__stringMax1024": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, + "com.amazonaws.kafkaconnect#__stringMin1Max128": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.kafkaconnect#__timestampIso8601": { + "type": "timestamp", + "traits": { + "smithy.api#timestampFormat": "date-time" + } + } + } +} diff --git a/codegen/sdk-codegen/aws-models/macie2.2020-01-01.json b/codegen/sdk-codegen/aws-models/macie2.2020-01-01.json index f81b1f9ebe12..bbf0ee5a2b7a 100644 --- a/codegen/sdk-codegen/aws-models/macie2.2020-01-01.json +++ b/codegen/sdk-codegen/aws-models/macie2.2020-01-01.json @@ -447,7 +447,7 @@ "ids": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "An array of strings that lists the unique identifiers for the custom data identifiers to retrieve information about.
", + "smithy.api#documentation": "An array of custom data identifier IDs, one for each custom data identifier to retrieve information about.
", "smithy.api#jsonName": "ids" } } @@ -466,7 +466,7 @@ "notFoundIdentifierIds": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "An array of identifiers, one for each identifier that was specified in the request, but doesn't correlate to an existing custom data identifier.
", + "smithy.api#documentation": "An array of custom data identifier IDs, one for each custom data identifier that was specified in the request but doesn't correlate to an existing custom data identifier.
", "smithy.api#jsonName": "notFoundIdentifierIds" } } @@ -1255,7 +1255,7 @@ "customDataIdentifierIds": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "The custom data identifiers to use for data analysis and classification.
", + "smithy.api#documentation": "An array of unique identifiers, one for each custom data identifier for the job to use when it analyzes data. To use only managed data identifiers, don't specify a value for this property and specify a value other than NONE for the managedDataIdentifierSelector property.
", "smithy.api#jsonName": "customDataIdentifierIds" } }, @@ -1269,7 +1269,7 @@ "initialRun": { "target": "com.amazonaws.macie2#__boolean", "traits": { - "smithy.api#documentation": "Specifies whether to analyze all existing, eligible objects immediately after the job is created.
", + "smithy.api#documentation": "For a recurring job, specifies whether to analyze all existing, eligible objects immediately after the job is created (true). To analyze only those objects that are created or changed after you create the job and before the job's first scheduled run, set this value to false.
If you configure the job to run only once, don't specify a value for this property.
", "smithy.api#jsonName": "initialRun" } }, @@ -1281,6 +1281,20 @@ "smithy.api#required": {} } }, + "managedDataIdentifierIds": { + "target": "com.amazonaws.macie2#__listOf__string", + "traits": { + "smithy.api#documentation": "An array of unique identifiers, one for each managed data identifier for the job to include (use) or exclude (not use) when it analyzes data. Inclusion or exclusion depends on the managed data identifier selection type that you specify for the job (managedDataIdentifierSelector).
To retrieve a list of valid values for this property, use the ListManagedDataIdentifiers operation.
", + "smithy.api#jsonName": "managedDataIdentifierIds" + } + }, + "managedDataIdentifierSelector": { + "target": "com.amazonaws.macie2#ManagedDataIdentifierSelector", + "traits": { + "smithy.api#documentation": "The selection type to apply when determining which managed data identifiers the job uses to analyze data. Valid values are:
ALL - Use all the managed data identifiers that Amazon Macie provides. If you specify this value, don't specify any values for the managedDataIdentifierIds property.
EXCLUDE - Use all the managed data identifiers that Macie provides except the managed data identifiers specified by the managedDataIdentifierIds property.
INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.
NONE - Don't use any managed data identifiers. If you specify this value, specify at least one custom data identifier for the job (customDataIdentifierIds) and don't specify any values for the managedDataIdentifierIds property.
If you don't specify a value for this property, the job uses all managed data identifiers. If you don't specify a value for this property or you specify ALL or EXCLUDE for a recurring job, the job also uses new managed data identifiers as they are released.
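Editor's note: a minimal sketch of the selector semantics above, assuming the generated @aws-sdk/client-macie2 client; the job name, custom identifier ID, account ID, and bucket name are hypothetical.

```ts
import { Macie2Client, CreateClassificationJobCommand } from "@aws-sdk/client-macie2";

const client = new Macie2Client({});

// EXCLUDE: use every managed data identifier except the ones listed, plus one custom identifier.
await client.send(
  new CreateClassificationJobCommand({
    jobType: "ONE_TIME",
    name: "example-job", // hypothetical
    managedDataIdentifierSelector: "EXCLUDE",
    managedDataIdentifierIds: ["CREDIT_CARD_NUMBER", "USA_PASSPORT_NUMBER"],
    customDataIdentifierIds: ["example-custom-id"], // hypothetical
    s3JobDefinition: {
      bucketDefinitions: [{ accountId: "111122223333", buckets: ["example-bucket"] }],
    },
  })
);
```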
", + "smithy.api#jsonName": "managedDataIdentifierSelector" + } + }, "name": { "target": "com.amazonaws.macie2#__string", "traits": { @@ -1300,7 +1314,7 @@ "samplingPercentage": { "target": "com.amazonaws.macie2#__integer", "traits": { - "smithy.api#documentation": "The sampling depth, as a percentage, to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. If this value is less than 100, Amazon Macie selects the objects to analyze at random, up to the specified percentage, and analyzes all the data in those objects.
", + "smithy.api#documentation": "The sampling depth, as a percentage, for the job to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. If this value is less than 100, Amazon Macie selects the objects to analyze at random, up to the specified percentage, and analyzes all the data in those objects.
", "smithy.api#jsonName": "samplingPercentage" } }, @@ -1400,21 +1414,21 @@ "ignoreWords": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 characters. Ignore words are case sensitive.
", + "smithy.api#documentation": "An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 UTF-8 characters. Ignore words are case sensitive.
", "smithy.api#jsonName": "ignoreWords" } }, "keywords": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 characters. Keywords aren't case sensitive.
", + "smithy.api#documentation": "An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 UTF-8 characters. Keywords aren't case sensitive.
", "smithy.api#jsonName": "keywords" } }, "maximumMatchDistance": { "target": "com.amazonaws.macie2#__integer", "traits": { - "smithy.api#documentation": "The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1-300 characters. The default value is 50.
", + "smithy.api#documentation": "The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1-300 characters. The default value is 50.
", "smithy.api#jsonName": "maximumMatchDistance" } }, @@ -2138,7 +2152,7 @@ } }, "traits": { - "smithy.api#documentation": "Provides information about a type of sensitive data that was detected by managed data identifiers and produced a sensitive data finding.
" + "smithy.api#documentation": "Provides information about a type of sensitive data that was detected by a managed data identifier and produced a sensitive data finding.
" } }, "com.amazonaws.macie2#DefaultDetections": { @@ -2558,7 +2572,7 @@ "customDataIdentifierIds": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "The custom data identifiers that the job uses to analyze data.
", + "smithy.api#documentation": "An array of unique identifiers, one for each custom data identifier that the job uses to analyze data. This value is null if the job uses only managed data identifiers to analyze data.
", "smithy.api#jsonName": "customDataIdentifierIds" } }, @@ -2572,7 +2586,7 @@ "initialRun": { "target": "com.amazonaws.macie2#__boolean", "traits": { - "smithy.api#documentation": "Specifies whether the job is configured to analyze all existing, eligible objects immediately after it's created.
", + "smithy.api#documentation": "For a recurring job, specifies whether you configured the job to analyze all existing, eligible objects immediately after the job was created (true). If you configured the job to analyze only those objects that were created or changed after the job was created and before the job's first scheduled run, this value is false. This value is also false for a one-time job.
", "smithy.api#jsonName": "initialRun" } }, @@ -2593,7 +2607,7 @@ "jobStatus": { "target": "com.amazonaws.macie2#JobStatus", "traits": { - "smithy.api#documentation": "The current status of the job. Possible values are:
CANCELLED - You cancelled the job or, if it's a one-time job, you paused the job and didn't resume it within 30 days.
COMPLETE - For a one-time job, Amazon Macie finished processing the data specified for the job. This value doesn't apply to recurring jobs.
IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to one-time jobs.
PAUSED - Amazon Macie started running the job but additional processing would exceed the monthly sensitive data discovery quota for your account or one or more member accounts that the job analyzes data for.
RUNNING - For a one-time job, the job is in progress. For a recurring job, a scheduled run is in progress.
USER_PAUSED - You paused the job. If you paused the job while it had a status of RUNNING and you don't resume it within 30 days of pausing it, the job or job run will expire and be cancelled, depending on the job's type. To check the expiration date, refer to the UserPausedDetails.jobExpiresAt property.
The current status of the job. Possible values are:
CANCELLED - You cancelled the job or, if it's a one-time job, you paused the job and didn't resume it within 30 days.
COMPLETE - For a one-time job, Amazon Macie finished processing the data specified for the job. This value doesn't apply to recurring jobs.
IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to one-time jobs.
PAUSED - Macie started running the job but additional processing would exceed the monthly sensitive data discovery quota for your account or one or more member accounts that the job analyzes data for.
RUNNING - For a one-time job, the job is in progress. For a recurring job, a scheduled run is in progress.
USER_PAUSED - You paused the job. If you paused the job while it had a status of RUNNING and you don't resume it within 30 days of pausing it, the job or job run will expire and be cancelled, depending on the job's type. To check the expiration date, refer to the UserPausedDetails.jobExpiresAt property.
An array of unique identifiers, one for each managed data identifier that the job is explicitly configured to include (use) or exclude (not use) when it analyzes data. Inclusion or exclusion depends on the managed data identifier selection type specified for the job (managedDataIdentifierSelector). This value is null if the job's managed data identifier selection type is ALL or the job uses only custom data identifiers (customDataIdentifierIds) to analyze data.
", + "smithy.api#jsonName": "managedDataIdentifierIds" + } + }, + "managedDataIdentifierSelector": { + "target": "com.amazonaws.macie2#ManagedDataIdentifierSelector", + "traits": { + "smithy.api#documentation": "The selection type that determines which managed data identifiers the job uses to analyze data. Possible values are:
ALL - Use all the managed data identifiers that Amazon Macie provides.
EXCLUDE - Use all the managed data identifiers that Macie provides except the managed data identifiers specified by the managedDataIdentifierIds property.
INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.
NONE - Don't use any managed data identifiers.
If this value is null, the job uses all managed data identifiers. If this value is null, ALL, or EXCLUDE for a recurring job, the job also uses new managed data identifiers as they are released.
", + "smithy.api#jsonName": "managedDataIdentifierSelector" + } + }, "name": { "target": "com.amazonaws.macie2#__string", "traits": { @@ -2642,7 +2670,7 @@ "scheduleFrequency": { "target": "com.amazonaws.macie2#JobScheduleFrequency", "traits": { - "smithy.api#documentation": "The recurrence pattern for running the job. If the job is configured to run only once, this value is null.
", + "smithy.api#documentation": "The recurrence pattern for running the job. This value is null if the job is configured to run only once.
", "smithy.api#jsonName": "scheduleFrequency" } }, @@ -4004,7 +4032,7 @@ "maximumMatchDistance": { "target": "com.amazonaws.macie2#__integer", "traits": { - "smithy.api#documentation": "The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern.
", + "smithy.api#documentation": "The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern.
", "smithy.api#jsonName": "maximumMatchDistance" } }, @@ -5370,7 +5398,7 @@ "jobStatus": { "target": "com.amazonaws.macie2#JobStatus", "traits": { - "smithy.api#documentation": "The current status of the job. Possible values are:
CANCELLED - You cancelled the job or, if it's a one-time job, you paused the job and didn't resume it within 30 days.
COMPLETE - For a one-time job, Amazon Macie finished processing the data specified for the job. This value doesn't apply to recurring jobs.
IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to one-time jobs.
PAUSED - Amazon Macie started running the job but additional processing would exceed the monthly sensitive data discovery quota for your account or one or more member accounts that the job analyzes data for.
RUNNING - For a one-time job, the job is in progress. For a recurring job, a scheduled run is in progress.
USER_PAUSED - You paused the job. If you paused the job while it had a status of RUNNING and you don't resume it within 30 days of pausing it, the job or job run will expire and be cancelled, depending on the job's type. To check the expiration date, refer to the UserPausedDetails.jobExpiresAt property.
The current status of the job. Possible values are:
CANCELLED - You cancelled the job or, if it's a one-time job, you paused the job and didn't resume it within 30 days.
COMPLETE - For a one-time job, Amazon Macie finished processing the data specified for the job. This value doesn't apply to recurring jobs.
IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to one-time jobs.
PAUSED - Macie started running the job but additional processing would exceed the monthly sensitive data discovery quota for your account or one or more member accounts that the job analyzes data for.
RUNNING - For a one-time job, the job is in progress. For a recurring job, a scheduled run is in progress.
USER_PAUSED - You paused the job. If you paused the job while it had a status of RUNNING and you don't resume it within 30 days of pausing it, the job or job run will expire and be cancelled, depending on the job's type. To check the expiration date, refer to the UserPausedDetails.jobExpiresAt property.
Retrieves information about all the Amazon Macie membership invitations that were received by an account.
", + "smithy.api#documentation": "Retrieves information about the Amazon Macie membership invitations that were received by an account.
", "smithy.api#http": { "method": "GET", "uri": "/invitations", @@ -6061,6 +6089,54 @@ "smithy.api#documentation": "Specifies criteria for sorting the results of a request for information about classification jobs.
" } }, + "com.amazonaws.macie2#ListManagedDataIdentifiers": { + "type": "operation", + "input": { + "target": "com.amazonaws.macie2#ListManagedDataIdentifiersRequest" + }, + "output": { + "target": "com.amazonaws.macie2#ListManagedDataIdentifiersResponse" + }, + "traits": { + "smithy.api#documentation": "Retrieves information about all the managed data identifiers that Amazon Macie currently provides.
", + "smithy.api#http": { + "method": "POST", + "uri": "/managed-data-identifiers/list", + "code": 200 + } + } + }, + "com.amazonaws.macie2#ListManagedDataIdentifiersRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.macie2#__string", + "traits": { + "smithy.api#documentation": "The nextToken string that specifies which page of results to return in a paginated response.
", + "smithy.api#jsonName": "nextToken" + } + } + } + }, + "com.amazonaws.macie2#ListManagedDataIdentifiersResponse": { + "type": "structure", + "members": { + "items": { + "target": "com.amazonaws.macie2#__listOfManagedDataIdentifierSummary", + "traits": { + "smithy.api#documentation": "An array of objects, one for each managed data identifier.
", + "smithy.api#jsonName": "items" + } + }, + "nextToken": { + "target": "com.amazonaws.macie2#__string", + "traits": { + "smithy.api#documentation": "The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.
", + "smithy.api#jsonName": "nextToken" + } + } + } + }, "com.amazonaws.macie2#ListMembers": { "type": "operation", "input": { @@ -6408,6 +6484,9 @@ { "target": "com.amazonaws.macie2#ListInvitations" }, + { + "target": "com.amazonaws.macie2#ListManagedDataIdentifiers" + }, { "target": "com.amazonaws.macie2#ListMembers" }, @@ -6483,6 +6562,52 @@ ] } }, + "com.amazonaws.macie2#ManagedDataIdentifierSelector": { + "type": "string", + "traits": { + "smithy.api#documentation": "The selection type that determines which managed data identifiers a classification job uses to analyze data. Valid values are:
", + "smithy.api#enum": [ + { + "value": "ALL", + "name": "ALL" + }, + { + "value": "EXCLUDE", + "name": "EXCLUDE" + }, + { + "value": "INCLUDE", + "name": "INCLUDE" + }, + { + "value": "NONE", + "name": "NONE" + } + ] + } + }, + "com.amazonaws.macie2#ManagedDataIdentifierSummary": { + "type": "structure", + "members": { + "category": { + "target": "com.amazonaws.macie2#SensitiveDataItemCategory", + "traits": { + "smithy.api#documentation": "The category of sensitive data that the managed data identifier detects: CREDENTIALS, for credentials data such as private keys or Amazon Web Services secret keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as passport numbers.
", + "smithy.api#jsonName": "category" + } + }, + "id": { + "target": "com.amazonaws.macie2#__string", + "traits": { + "smithy.api#documentation": "The unique identifier for the managed data identifier. This is a string that describes the type of sensitive data that the managed data identifier detects. For example: OPENSSH_PRIVATE_KEY for OpenSSH private keys, CREDIT_CARD_NUMBER for credit card numbers, or USA_PASSPORT_NUMBER for US passport numbers.
", + "smithy.api#jsonName": "id" + } + } + }, + "traits": { + "smithy.api#documentation": "Provides information about a managed data identifier. For additional information, see Using managed data identifiers in the Amazon Macie User Guide.
" + } + }, "com.amazonaws.macie2#MatchingBucket": { "type": "structure", "members": { @@ -6790,7 +6915,7 @@ } }, "traits": { - "smithy.api#documentation": "Specifies the location of 1-15 occurrences of sensitive data that was detected by managed data identifiers or a custom data identifier and produced a sensitive data finding.
" + "smithy.api#documentation": "Specifies the location of 1-15 occurrences of sensitive data that was detected by a managed data identifier or a custom data identifier and produced a sensitive data finding.
" } }, "com.amazonaws.macie2#OrderBy": { @@ -7217,7 +7342,7 @@ "owner": { "target": "com.amazonaws.macie2#S3BucketOwner", "traits": { - "smithy.api#documentation": "The display name and Amazon Web Services account ID for the user who owns the bucket.
", + "smithy.api#documentation": "The display name and canonical user ID for the Amazon Web Services account that owns the bucket.
", "smithy.api#jsonName": "owner" } }, @@ -7292,20 +7417,20 @@ "displayName": { "target": "com.amazonaws.macie2#__string", "traits": { - "smithy.api#documentation": "The display name of the user who owns the bucket.
", + "smithy.api#documentation": "The display name of the account that owns the bucket.
", "smithy.api#jsonName": "displayName" } }, "id": { "target": "com.amazonaws.macie2#__string", "traits": { - "smithy.api#documentation": "The Amazon Web Services account ID for the user who owns the bucket.
", + "smithy.api#documentation": "The canonical user ID for the account that owns the bucket.
", "smithy.api#jsonName": "id" } } }, "traits": { - "smithy.api#documentation": "Provides information about the user who owns an S3 bucket.
" + "smithy.api#documentation": "Provides information about the Amazon Web Services account that owns an S3 bucket.
" } }, "com.amazonaws.macie2#S3Destination": { @@ -7861,7 +7986,7 @@ "category": { "target": "com.amazonaws.macie2#SensitiveDataItemCategory", "traits": { - "smithy.api#documentation": "The category of sensitive data that was detected. For example: CREDENTIALS, for credentials data such as private keys or Amazon Web Services secret keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as driver's license identification numbers.
", + "smithy.api#documentation": "The category of sensitive data that was detected. For example: CREDENTIALS, for credentials data such as private keys or Amazon Web Services secret keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as passport numbers.
", "smithy.api#jsonName": "category" } }, @@ -7887,7 +8012,7 @@ "com.amazonaws.macie2#SensitiveDataItemCategory": { "type": "string", "traits": { - "smithy.api#documentation": "The category of sensitive data that was detected and produced the finding. Possible values are:
", + "smithy.api#documentation": "For a finding, the category of sensitive data that was detected and produced the finding. For a managed data identifier, the category of sensitive data that the managed data identifier detects. Possible values are:
", "smithy.api#enum": [ { "value": "FINANCIAL_INFORMATION", @@ -8504,21 +8629,21 @@ "ignoreWords": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 characters. Ignore words are case sensitive.
", + "smithy.api#documentation": "An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 UTF-8 characters. Ignore words are case sensitive.
", "smithy.api#jsonName": "ignoreWords" } }, "keywords": { "target": "com.amazonaws.macie2#__listOf__string", "traits": { - "smithy.api#documentation": "An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 characters. Keywords aren't case sensitive.
", + "smithy.api#documentation": "An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 UTF-8 characters. Keywords aren't case sensitive.
", "smithy.api#jsonName": "keywords" } }, "maximumMatchDistance": { "target": "com.amazonaws.macie2#__integer", "traits": { - "smithy.api#documentation": "The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1-300 characters. The default value is 50.
", + "smithy.api#documentation": "The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1-300 characters. The default value is 50.
", "smithy.api#jsonName": "maximumMatchDistance" } }, @@ -9568,6 +9693,12 @@ "target": "com.amazonaws.macie2#ListJobsFilterTerm" } }, + "com.amazonaws.macie2#__listOfManagedDataIdentifierSummary": { + "type": "list", + "member": { + "target": "com.amazonaws.macie2#ManagedDataIdentifierSummary" + } + }, "com.amazonaws.macie2#__listOfMatchingResource": { "type": "list", "member": { diff --git a/codegen/sdk-codegen/aws-models/opensearch.2021-01-01.json b/codegen/sdk-codegen/aws-models/opensearch.2021-01-01.json index 98dee42f15a9..1c34bd53cd18 100644 --- a/codegen/sdk-codegen/aws-models/opensearch.2021-01-01.json +++ b/codegen/sdk-codegen/aws-models/opensearch.2021-01-01.json @@ -1010,7 +1010,10 @@ } }, "ColdStorageOptions": { - "target": "com.amazonaws.opensearch#ColdStorageOptions" + "target": "com.amazonaws.opensearch#ColdStorageOptions", + "traits": { + "smithy.api#documentation": "Specifies the ColdStorageOptions
configuration for a Domain.
Enable cold storage option. Accepted values true or false
", "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#documentation": "Specifies the configuration for cold storage options such as enabled
" } }, "com.amazonaws.opensearch#CommitMessage": { @@ -2859,6 +2866,12 @@ "traits": { "smithy.api#documentation": "The DomainName
.\n
Specifies the EngineType
of the domain.
Optional parameter to filter the output by domain engine type. Acceptable values are 'Elasticsearch' and 'OpenSearch'.
", + "smithy.api#httpQuery": "engineType" + } + } + }, + "traits": { + "smithy.api#documentation": " Container for the parameters to the ListDomainNames
operation.
List of domain names.
" + "smithy.api#documentation": "List of domain names and respective engine types.
" } } }, "traits": { - "smithy.api#documentation": "The result of a ListDomainNames
operation. Contains the names of all domains owned by\n this account.\n
The result of a ListDomainNames
operation. Contains the names of all domains owned by this account and their respective engine types.
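Editor's note: a minimal sketch of the engine-type filter described above, assuming the generated @aws-sdk/client-opensearch client.

```ts
import { OpenSearchClient, ListDomainNamesCommand } from "@aws-sdk/client-opensearch";

const client = new OpenSearchClient({});

// Acceptable EngineType values per the documentation: 'Elasticsearch' and 'OpenSearch'.
const { DomainNames } = await client.send(
  new ListDomainNamesCommand({ EngineType: "OpenSearch" })
);
for (const info of DomainNames ?? []) {
  console.log(info.DomainName, info.EngineType);
}
```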
Specifies address-based configuration settings for a message that's sent directly to an endpoint.
" } }, + "com.amazonaws.pinpoint#Alignment": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "LEFT", + "name": "LEFT" + }, + { + "value": "CENTER", + "name": "CENTER" + }, + { + "value": "RIGHT", + "name": "RIGHT" + } + ] + } + }, "com.amazonaws.pinpoint#AndroidPushNotificationTemplate": { "type": "structure", "members": { @@ -1708,6 +1727,25 @@ "smithy.api#documentation": "Provides the results of a query that retrieved the data for a standard metric that applies to an application, campaign, or journey.
" } }, + "com.amazonaws.pinpoint#ButtonAction": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "LINK", + "name": "LINK" + }, + { + "value": "DEEP_LINK", + "name": "DEEP_LINK" + }, + { + "value": "CLOSE", + "name": "CLOSE" + } + ] + } + }, "com.amazonaws.pinpoint#CampaignCustomMessage": { "type": "structure", "members": { @@ -1858,6 +1896,38 @@ "smithy.api#documentation": "Specifies settings for invoking an AWS Lambda function that customizes a segment for a campaign.
" } }, + "com.amazonaws.pinpoint#CampaignInAppMessage": { + "type": "structure", + "members": { + "Body": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The message body of the notification, the email body or the text message.
" + } + }, + "Content": { + "target": "com.amazonaws.pinpoint#ListOfInAppMessageContent", + "traits": { + "smithy.api#documentation": "In-app message content.
" + } + }, + "CustomConfig": { + "target": "com.amazonaws.pinpoint#MapOf__string", + "traits": { + "smithy.api#documentation": "Custom config to be sent to client.
" + } + }, + "Layout": { + "target": "com.amazonaws.pinpoint#Layout", + "traits": { + "smithy.api#documentation": "In-app message layout.
" + } + } + }, + "traits": { + "smithy.api#documentation": "In-app message configuration.
" + } + }, "com.amazonaws.pinpoint#CampaignLimits": { "type": "structure", "members": { @@ -1884,6 +1954,12 @@ "traits": { "smithy.api#documentation": "The maximum number of messages that a campaign can send to a single endpoint during the course of the campaign. If a campaign recurs, this setting applies to all runs of the campaign. The maximum value is 100.
" } + }, + "Session": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "The maximum total number of messages that the campaign can send per user session.
" + } } }, "traits": { @@ -2044,6 +2120,12 @@ "traits": { "smithy.api#documentation": "The version number of the campaign.
" } + }, + "Priority": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "Defines the priority of the campaign, used to decide the order of messages displayed to user if there are multiple messages scheduled to be displayed at the same moment.
" + } } }, "traits": { @@ -2277,6 +2359,10 @@ { "value": "CUSTOM", "name": "CUSTOM" + }, + { + "value": "IN_APP", + "name": "IN_APP" } ] } @@ -2738,6 +2824,72 @@ } } }, + "com.amazonaws.pinpoint#CreateInAppTemplate": { + "type": "operation", + "input": { + "target": "com.amazonaws.pinpoint#CreateInAppTemplateRequest" + }, + "output": { + "target": "com.amazonaws.pinpoint#CreateInAppTemplateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pinpoint#BadRequestException" + }, + { + "target": "com.amazonaws.pinpoint#ForbiddenException" + }, + { + "target": "com.amazonaws.pinpoint#InternalServerErrorException" + }, + { + "target": "com.amazonaws.pinpoint#MethodNotAllowedException" + }, + { + "target": "com.amazonaws.pinpoint#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "Creates a new message template for messages using the in-app message channel.
", + "smithy.api#http": { + "method": "POST", + "uri": "/v1/templates/{TemplateName}/inapp", + "code": 201 + } + } + }, + "com.amazonaws.pinpoint#CreateInAppTemplateRequest": { + "type": "structure", + "members": { + "InAppTemplateRequest": { + "target": "com.amazonaws.pinpoint#InAppTemplateRequest", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + }, + "TemplateName": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.pinpoint#CreateInAppTemplateResponse": { + "type": "structure", + "members": { + "TemplateCreateMessageBody": { + "target": "com.amazonaws.pinpoint#TemplateCreateMessageBody", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + } + } + }, "com.amazonaws.pinpoint#CreateJourney": { "type": "operation", "input": { @@ -3299,6 +3451,52 @@ "smithy.api#documentation": "The settings for a custom message activity. This type of activity calls an AWS Lambda function or web hook that sends messages to participants.
" } }, + "com.amazonaws.pinpoint#DefaultButtonConfiguration": { + "type": "structure", + "members": { + "BackgroundColor": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The background color of the button.
" + } + }, + "BorderRadius": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "The border radius of the button.
" + } + }, + "ButtonAction": { + "target": "com.amazonaws.pinpoint#ButtonAction", + "traits": { + "smithy.api#documentation": "Action triggered by the button.
", + "smithy.api#required": {} + } + }, + "Link": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "Button destination.
" + } + }, + "Text": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "Button text.
", + "smithy.api#required": {} + } + }, + "TextColor": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The text color of the button.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Default button configuration.
" + } + }, "com.amazonaws.pinpoint#DefaultMessage": { "type": "structure", "members": { @@ -4275,6 +4473,78 @@ } } }, + "com.amazonaws.pinpoint#DeleteInAppTemplate": { + "type": "operation", + "input": { + "target": "com.amazonaws.pinpoint#DeleteInAppTemplateRequest" + }, + "output": { + "target": "com.amazonaws.pinpoint#DeleteInAppTemplateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pinpoint#BadRequestException" + }, + { + "target": "com.amazonaws.pinpoint#ForbiddenException" + }, + { + "target": "com.amazonaws.pinpoint#InternalServerErrorException" + }, + { + "target": "com.amazonaws.pinpoint#MethodNotAllowedException" + }, + { + "target": "com.amazonaws.pinpoint#NotFoundException" + }, + { + "target": "com.amazonaws.pinpoint#PayloadTooLargeException" + }, + { + "target": "com.amazonaws.pinpoint#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "Deletes a message template for messages sent using the in-app message channel.
", + "smithy.api#http": { + "method": "DELETE", + "uri": "/v1/templates/{TemplateName}/inapp", + "code": 202 + } + } + }, + "com.amazonaws.pinpoint#DeleteInAppTemplateRequest": { + "type": "structure", + "members": { + "TemplateName": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Version": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.
If specified, this value must match the identifier for an existing template version. If specified for an update operation, this value must match the identifier for the latest existing version of the template. This restriction helps ensure that race conditions don't occur.
If you don't specify a value for this parameter, Amazon Pinpoint does the following:
For a get operation, retrieves information about the active version of the template.
For an update operation, saves the updates to (overwrites) the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.
For a delete operation, deletes the template, including all versions of the template.
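Editor's note: the version rules above apply uniformly to the in-app template operations; a minimal sketch, assuming the generated @aws-sdk/client-pinpoint client, with a hypothetical template name.

```ts
import {
  PinpointClient,
  GetInAppTemplateCommand,
  DeleteInAppTemplateCommand,
} from "@aws-sdk/client-pinpoint";

const client = new PinpointClient({});

// No Version: the get operation returns the active version of the template.
const { InAppTemplateResponse } = await client.send(
  new GetInAppTemplateCommand({ TemplateName: "welcome-banner" })
);

// No Version: the delete operation removes the template and all of its versions.
await client.send(new DeleteInAppTemplateCommand({ TemplateName: "welcome-banner" }));
```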
Retrieves information about the status, configuration, and other settings for a journey.
", + "smithy.api#documentation": "Retrieves the in-app messages targeted for the provided endpoint ID.
", "smithy.api#http": { "method": "GET", - "uri": "/v1/apps/{ApplicationId}/journeys/{JourneyId}", + "uri": "/v1/apps/{ApplicationId}/endpoints/{EndpointId}/inappmessages", "code": 200 } } }, - "com.amazonaws.pinpoint#GetJourneyDateRangeKpi": { + "com.amazonaws.pinpoint#GetInAppMessagesRequest": { + "type": "structure", + "members": { + "ApplicationId": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "EndpointId": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The unique identifier for the endpoint.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.pinpoint#GetInAppMessagesResponse": { + "type": "structure", + "members": { + "InAppMessagesResponse": { + "target": "com.amazonaws.pinpoint#InAppMessagesResponse", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.pinpoint#GetInAppTemplate": { "type": "operation", "input": { - "target": "com.amazonaws.pinpoint#GetJourneyDateRangeKpiRequest" + "target": "com.amazonaws.pinpoint#GetInAppTemplateRequest" }, "output": { - "target": "com.amazonaws.pinpoint#GetJourneyDateRangeKpiResponse" + "target": "com.amazonaws.pinpoint#GetInAppTemplateResponse" }, "errors": [ { @@ -8682,21 +8989,133 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves (queries) pre-aggregated data for a standard engagement metric that applies to a journey.
", + "smithy.api#documentation": "Retrieves the content and settings of a message template for messages sent through the in-app channel.
", "smithy.api#http": { "method": "GET", - "uri": "/v1/apps/{ApplicationId}/journeys/{JourneyId}/kpis/daterange/{KpiName}", + "uri": "/v1/templates/{TemplateName}/inapp", "code": 200 } } }, - "com.amazonaws.pinpoint#GetJourneyDateRangeKpiRequest": { + "com.amazonaws.pinpoint#GetInAppTemplateRequest": { "type": "structure", "members": { - "ApplicationId": { + "TemplateName": { "target": "com.amazonaws.pinpoint#__string", "traits": { - "smithy.api#documentation": "The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
", + "smithy.api#documentation": "The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Version": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.
If specified, this value must match the identifier for an existing template version. If specified for an update operation, this value must match the identifier for the latest existing version of the template. This restriction helps ensure that race conditions don't occur.
If you don't specify a value for this parameter, Amazon Pinpoint does the following:
For a get operation, retrieves information about the active version of the template.
For an update operation, saves the updates to (overwrites) the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.
For a delete operation, deletes the template, including all versions of the template.
Retrieves information about the status, configuration, and other settings for a journey.
", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/apps/{ApplicationId}/journeys/{JourneyId}", + "code": 200 + } + } + }, + "com.amazonaws.pinpoint#GetJourneyDateRangeKpi": { + "type": "operation", + "input": { + "target": "com.amazonaws.pinpoint#GetJourneyDateRangeKpiRequest" + }, + "output": { + "target": "com.amazonaws.pinpoint#GetJourneyDateRangeKpiResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pinpoint#BadRequestException" + }, + { + "target": "com.amazonaws.pinpoint#ForbiddenException" + }, + { + "target": "com.amazonaws.pinpoint#InternalServerErrorException" + }, + { + "target": "com.amazonaws.pinpoint#MethodNotAllowedException" + }, + { + "target": "com.amazonaws.pinpoint#NotFoundException" + }, + { + "target": "com.amazonaws.pinpoint#PayloadTooLargeException" + }, + { + "target": "com.amazonaws.pinpoint#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "Retrieves (queries) pre-aggregated data for a standard engagement metric that applies to a journey.
", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/apps/{ApplicationId}/journeys/{JourneyId}/kpis/daterange/{KpiName}", + "code": 200 + } + } + }, + "com.amazonaws.pinpoint#GetJourneyDateRangeKpiRequest": { + "type": "structure", + "members": { + "ApplicationId": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -10159,120 +10578,494 @@ } }, "traits": { - "smithy.api#documentation": "Provides information about the resource settings for a job that imports endpoint definitions from one or more files. The files can be stored in an Amazon Simple Storage Service (Amazon S3) bucket or uploaded directly from a computer by using the Amazon Pinpoint console.
" + "smithy.api#documentation": "Provides information about the resource settings for a job that imports endpoint definitions from one or more files. The files can be stored in an Amazon Simple Storage Service (Amazon S3) bucket or uploaded directly from a computer by using the Amazon Pinpoint console.
" + } + }, + "com.amazonaws.pinpoint#ImportJobResponse": { + "type": "structure", + "members": { + "ApplicationId": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The unique identifier for the application that's associated with the import job.
", + "smithy.api#required": {} + } + }, + "CompletedPieces": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "The number of pieces that were processed successfully (completed) by the import job, as of the time of the request.
" + } + }, + "CompletionDate": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The date, in ISO 8601 format, when the import job was completed.
" + } + }, + "CreationDate": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The date, in ISO 8601 format, when the import job was created.
", + "smithy.api#required": {} + } + }, + "Definition": { + "target": "com.amazonaws.pinpoint#ImportJobResource", + "traits": { + "smithy.api#documentation": "The resource settings that apply to the import job.
", + "smithy.api#required": {} + } + }, + "FailedPieces": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "The number of pieces that weren't processed successfully (failed) by the import job, as of the time of the request.
" + } + }, + "Failures": { + "target": "com.amazonaws.pinpoint#ListOf__string", + "traits": { + "smithy.api#documentation": "An array of entries, one for each of the first 100 entries that weren't processed successfully (failed) by the import job, if any.
" + } + }, + "Id": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The unique identifier for the import job.
", + "smithy.api#required": {} + } + }, + "JobStatus": { + "target": "com.amazonaws.pinpoint#JobStatus", + "traits": { + "smithy.api#documentation": "The status of the import job. The job status is FAILED if Amazon Pinpoint wasn't able to process one or more pieces in the job.
", + "smithy.api#required": {} + } + }, + "TotalFailures": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "The total number of endpoint definitions that weren't processed successfully (failed) by the import job, typically because an error, such as a syntax error, occurred.
" + } + }, + "TotalPieces": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "The total number of pieces that must be processed to complete the import job. Each piece consists of an approximately equal portion of the endpoint definitions that are part of the import job.
" + } + }, + "TotalProcessed": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "The total number of endpoint definitions that were processed by the import job.
" + } + }, + "Type": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The job type. This value is IMPORT for import jobs.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Provides information about the status and settings of a job that imports endpoint definitions from one or more files. The files can be stored in an Amazon Simple Storage Service (Amazon S3) bucket or uploaded directly from a computer by using the Amazon Pinpoint console.
" + } + }, + "com.amazonaws.pinpoint#ImportJobsResponse": { + "type": "structure", + "members": { + "Item": { + "target": "com.amazonaws.pinpoint#ListOfImportJobResponse", + "traits": { + "smithy.api#documentation": "An array of responses, one for each import job that's associated with the application (Import Jobs resource) or segment (Segment Import Jobs resource).
", + "smithy.api#required": {} + } + }, + "NextToken": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Provides information about the status and settings of all the import jobs that are associated with an application or segment. An import job is a job that imports endpoint definitions from one or more files.
" + } + }, + "com.amazonaws.pinpoint#InAppCampaignSchedule": { + "type": "structure", + "members": { + "EndDate": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The scheduled time after which the in-app message should not be shown. Timestamp is in ISO 8601 format.
" + } + }, + "EventFilter": { + "target": "com.amazonaws.pinpoint#CampaignEventFilter", + "traits": { + "smithy.api#documentation": "The event filter the SDK has to use to show the in-app message in the application.
" + } + }, + "QuietTime": { + "target": "com.amazonaws.pinpoint#QuietTime", + "traits": { + "smithy.api#documentation": "Time during which the in-app message should not be shown to the user.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Schedule of the campaign.
" + } + }, + "com.amazonaws.pinpoint#InAppMessage": { + "type": "structure", + "members": { + "Content": { + "target": "com.amazonaws.pinpoint#ListOfInAppMessageContent", + "traits": { + "smithy.api#documentation": "In-app message content.
" + } + }, + "CustomConfig": { + "target": "com.amazonaws.pinpoint#MapOf__string", + "traits": { + "smithy.api#documentation": "Custom config to be sent to SDK.
" + } + }, + "Layout": { + "target": "com.amazonaws.pinpoint#Layout", + "traits": { + "smithy.api#documentation": "The layout of the message.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Provides all fields required for building an in-app message.
" + } + }, + "com.amazonaws.pinpoint#InAppMessageBodyConfig": { + "type": "structure", + "members": { + "Alignment": { + "target": "com.amazonaws.pinpoint#Alignment", + "traits": { + "smithy.api#documentation": "The alignment of the text. Valid values: LEFT, CENTER, RIGHT.
", + "smithy.api#required": {} + } + }, + "Body": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "Message Body.
", + "smithy.api#required": {} + } + }, + "TextColor": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The text color.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Text config for Message Body.
" + } + }, + "com.amazonaws.pinpoint#InAppMessageButton": { + "type": "structure", + "members": { + "Android": { + "target": "com.amazonaws.pinpoint#OverrideButtonConfiguration", + "traits": { + "smithy.api#documentation": "Default button content.
" + } + }, + "DefaultConfig": { + "target": "com.amazonaws.pinpoint#DefaultButtonConfiguration", + "traits": { + "smithy.api#documentation": "Default button content.
" + } + }, + "IOS": { + "target": "com.amazonaws.pinpoint#OverrideButtonConfiguration", + "traits": { + "smithy.api#documentation": "Default button content.
" + } + }, + "Web": { + "target": "com.amazonaws.pinpoint#OverrideButtonConfiguration", + "traits": { + "smithy.api#documentation": "Default button content.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Button Config for an in-app message.
" + } + }, + "com.amazonaws.pinpoint#InAppMessageCampaign": { + "type": "structure", + "members": { + "CampaignId": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "Campaign id of the corresponding campaign.
" + } + }, + "DailyCap": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "Daily cap which controls the number of times any in-app messages can be shown to the endpoint during a day.
" + } + }, + "InAppMessage": { + "target": "com.amazonaws.pinpoint#InAppMessage", + "traits": { + "smithy.api#documentation": "In-app message content with all fields required for rendering an in-app message.
" + } + }, + "Priority": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "Priority of the in-app message.
" + } + }, + "Schedule": { + "target": "com.amazonaws.pinpoint#InAppCampaignSchedule", + "traits": { + "smithy.api#documentation": "Schedule of the campaign.
" + } + }, + "SessionCap": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "Session cap which controls the number of times an in-app message can be shown to the endpoint during an application session.
" + } + }, + "TotalCap": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "Total cap which controls the number of times an in-app message can be shown to the endpoint.
" + } + }, + "TreatmentId": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "Treatment id of the campaign.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Targeted in-app message campaign.
" + } + }, + "com.amazonaws.pinpoint#InAppMessageContent": { + "type": "structure", + "members": { + "BackgroundColor": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The background color for the message.
" + } + }, + "BodyConfig": { + "target": "com.amazonaws.pinpoint#InAppMessageBodyConfig", + "traits": { + "smithy.api#documentation": "The configuration for the message body.
" + } + }, + "HeaderConfig": { + "target": "com.amazonaws.pinpoint#InAppMessageHeaderConfig", + "traits": { + "smithy.api#documentation": "The configuration for the message header.
" + } + }, + "ImageUrl": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The image url for the background of message.
" + } + }, + "PrimaryBtn": { + "target": "com.amazonaws.pinpoint#InAppMessageButton", + "traits": { + "smithy.api#documentation": "The first button inside the message.
" + } + }, + "SecondaryBtn": { + "target": "com.amazonaws.pinpoint#InAppMessageButton", + "traits": { + "smithy.api#documentation": "The second button inside message.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The configuration for the message content.
" + } + }, + "com.amazonaws.pinpoint#InAppMessageHeaderConfig": { + "type": "structure", + "members": { + "Alignment": { + "target": "com.amazonaws.pinpoint#Alignment", + "traits": { + "smithy.api#documentation": "The alignment of the text. Valid values: LEFT, CENTER, RIGHT.
", + "smithy.api#required": {} + } + }, + "Header": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "Message Header.
", + "smithy.api#required": {} + } + }, + "TextColor": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The text color.
", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "Text config for Message Header.
" + } + }, + "com.amazonaws.pinpoint#InAppMessagesResponse": { + "type": "structure", + "members": { + "InAppMessageCampaigns": { + "target": "com.amazonaws.pinpoint#ListOfInAppMessageCampaign", + "traits": { + "smithy.api#documentation": "List of targeted in-app message campaigns.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Get in-app messages response object.
" } }, - "com.amazonaws.pinpoint#ImportJobResponse": { + "com.amazonaws.pinpoint#InAppTemplateRequest": { "type": "structure", "members": { - "ApplicationId": { - "target": "com.amazonaws.pinpoint#__string", + "Content": { + "target": "com.amazonaws.pinpoint#ListOfInAppMessageContent", "traits": { - "smithy.api#documentation": "The unique identifier for the application that's associated with the import job.
", - "smithy.api#required": {} + "smithy.api#documentation": "The content of the message, can include up to 5 modals. Each modal must contain a message, a header, and background color. ImageUrl and buttons are optional.
" } }, - "CompletedPieces": { - "target": "com.amazonaws.pinpoint#__integer", + "CustomConfig": { + "target": "com.amazonaws.pinpoint#MapOf__string", "traits": { - "smithy.api#documentation": "The number of pieces that were processed successfully (completed) by the import job, as of the time of the request.
" + "smithy.api#documentation": "Custom config to be sent to client.
" } }, - "CompletionDate": { - "target": "com.amazonaws.pinpoint#__string", + "Layout": { + "target": "com.amazonaws.pinpoint#Layout", "traits": { - "smithy.api#documentation": "The date, in ISO 8601 format, when the import job was completed.
" + "smithy.api#documentation": "The layout of the message.
" } }, - "CreationDate": { - "target": "com.amazonaws.pinpoint#__string", + "tags": { + "target": "com.amazonaws.pinpoint#MapOf__string", "traits": { - "smithy.api#documentation": "The date, in ISO 8601 format, when the import job was created.
", - "smithy.api#required": {} + "smithy.api#documentation": "A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.
", + "smithy.api#jsonName": "tags" } }, - "Definition": { - "target": "com.amazonaws.pinpoint#ImportJobResource", + "TemplateDescription": { + "target": "com.amazonaws.pinpoint#__string", "traits": { - "smithy.api#documentation": "The resource settings that apply to the import job.
", - "smithy.api#required": {} + "smithy.api#documentation": "The description of the template.
" } - }, - "FailedPieces": { - "target": "com.amazonaws.pinpoint#__integer", + } + }, + "traits": { + "smithy.api#documentation": "InApp Template Request.
" + } + }, + "com.amazonaws.pinpoint#InAppTemplateResponse": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.pinpoint#__string", "traits": { - "smithy.api#documentation": "The number of pieces that weren't processed successfully (failed) by the import job, as of the time of the request.
" + "smithy.api#documentation": "The resource arn of the template.
" } }, - "Failures": { - "target": "com.amazonaws.pinpoint#ListOf__string", + "Content": { + "target": "com.amazonaws.pinpoint#ListOfInAppMessageContent", "traits": { - "smithy.api#documentation": "An array of entries, one for each of the first 100 entries that weren't processed successfully (failed) by the import job, if any.
" + "smithy.api#documentation": "The content of the message, can include up to 5 modals. Each modal must contain a message, a header, and background color. ImageUrl and buttons are optional.
" } }, - "Id": { + "CreationDate": { "target": "com.amazonaws.pinpoint#__string", "traits": { - "smithy.api#documentation": "The unique identifier for the import job.
", + "smithy.api#documentation": "The creation date of the template.
", "smithy.api#required": {} } }, - "JobStatus": { - "target": "com.amazonaws.pinpoint#JobStatus", + "CustomConfig": { + "target": "com.amazonaws.pinpoint#MapOf__string", "traits": { - "smithy.api#documentation": "The status of the import job. The job status is FAILED if Amazon Pinpoint wasn't able to process one or more pieces in the job.
", + "smithy.api#documentation": "Custom config to be sent to client.
" + } + }, + "LastModifiedDate": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The last modified date of the template.
", "smithy.api#required": {} } }, - "TotalFailures": { - "target": "com.amazonaws.pinpoint#__integer", + "Layout": { + "target": "com.amazonaws.pinpoint#Layout", "traits": { - "smithy.api#documentation": "The total number of endpoint definitions that weren't processed successfully (failed) by the import job, typically because an error, such as a syntax error, occurred.
" + "smithy.api#documentation": "The layout of the message.
" } }, - "TotalPieces": { - "target": "com.amazonaws.pinpoint#__integer", + "tags": { + "target": "com.amazonaws.pinpoint#MapOf__string", "traits": { - "smithy.api#documentation": "The total number of pieces that must be processed to complete the import job. Each piece consists of an approximately equal portion of the endpoint definitions that are part of the import job.
" + "smithy.api#documentation": "A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.
", + "smithy.api#jsonName": "tags" } }, - "TotalProcessed": { - "target": "com.amazonaws.pinpoint#__integer", + "TemplateDescription": { + "target": "com.amazonaws.pinpoint#__string", "traits": { - "smithy.api#documentation": "The total number of endpoint definitions that were processed by the import job.
" + "smithy.api#documentation": "The description of the template.
" } }, - "Type": { + "TemplateName": { "target": "com.amazonaws.pinpoint#__string", "traits": { - "smithy.api#documentation": "The job type. This value is IMPORT for import jobs.
", + "smithy.api#documentation": "The name of the template.
", "smithy.api#required": {} } - } - }, - "traits": { - "smithy.api#documentation": "Provides information about the status and settings of a job that imports endpoint definitions from one or more files. The files can be stored in an Amazon Simple Storage Service (Amazon S3) bucket or uploaded directly from a computer by using the Amazon Pinpoint console.
" - } - }, - "com.amazonaws.pinpoint#ImportJobsResponse": { - "type": "structure", - "members": { - "Item": { - "target": "com.amazonaws.pinpoint#ListOfImportJobResponse", + }, + "TemplateType": { + "target": "com.amazonaws.pinpoint#TemplateType", "traits": { - "smithy.api#documentation": "An array of responses, one for each import job that's associated with the application (Import Jobs resource) or segment (Segment Import Jobs resource).
", + "smithy.api#documentation": "The type of the template.
", "smithy.api#required": {} } }, - "NextToken": { + "Version": { "target": "com.amazonaws.pinpoint#__string", "traits": { - "smithy.api#documentation": "The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.
" + "smithy.api#documentation": "The version id of the template.
" } } }, "traits": { - "smithy.api#documentation": "Provides information about the status and settings of all the import jobs that are associated with an application or segment. An import job is a job that imports endpoint definitions from one or more files.
" + "smithy.api#documentation": "In-App Template Response.
" } }, "com.amazonaws.pinpoint#Include": { @@ -10808,6 +11601,37 @@ "smithy.api#documentation": "Provides information about the status, configuration, and other settings for all the journeys that are associated with an application.
" } }, + "com.amazonaws.pinpoint#Layout": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "BOTTOM_BANNER", + "name": "BOTTOM_BANNER" + }, + { + "value": "TOP_BANNER", + "name": "TOP_BANNER" + }, + { + "value": "OVERLAYS", + "name": "OVERLAYS" + }, + { + "value": "MOBILE_FEED", + "name": "MOBILE_FEED" + }, + { + "value": "MIDDLE_BANNER", + "name": "MIDDLE_BANNER" + }, + { + "value": "CAROUSEL", + "name": "CAROUSEL" + } + ] + } + }, "com.amazonaws.pinpoint#ListJourneys": { "type": "operation", "input": { @@ -10929,6 +11753,18 @@ "target": "com.amazonaws.pinpoint#ImportJobResponse" } }, + "com.amazonaws.pinpoint#ListOfInAppMessageCampaign": { + "type": "list", + "member": { + "target": "com.amazonaws.pinpoint#InAppMessageCampaign" + } + }, + "com.amazonaws.pinpoint#ListOfInAppMessageContent": { + "type": "list", + "member": { + "target": "com.amazonaws.pinpoint#InAppMessageContent" + } + }, "com.amazonaws.pinpoint#ListOfJourneyResponse": { "type": "list", "member": { @@ -11564,6 +12400,12 @@ "traits": { "smithy.api#documentation": "The message that the campaign sends through the SMS channel. If specified, this message overrides the default message.
" } + }, + "InAppMessage": { + "target": "com.amazonaws.pinpoint#CampaignInAppMessage", + "traits": { + "smithy.api#documentation": "The in-app message configuration.
" + } } }, "traits": { @@ -11957,6 +12799,27 @@ ] } }, + "com.amazonaws.pinpoint#OverrideButtonConfiguration": { + "type": "structure", + "members": { + "ButtonAction": { + "target": "com.amazonaws.pinpoint#ButtonAction", + "traits": { + "smithy.api#documentation": "Action triggered by the button.
", + "smithy.api#required": {} + } + }, + "Link": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "Button destination.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Override button configuration.
" + } + }, "com.amazonaws.pinpoint#PayloadTooLargeException": { "type": "structure", "members": { @@ -12062,6 +12925,9 @@ { "target": "com.amazonaws.pinpoint#CreateImportJob" }, + { + "target": "com.amazonaws.pinpoint#CreateInAppTemplate" + }, { "target": "com.amazonaws.pinpoint#CreateJourney" }, @@ -12119,6 +12985,9 @@ { "target": "com.amazonaws.pinpoint#DeleteGcmChannel" }, + { + "target": "com.amazonaws.pinpoint#DeleteInAppTemplate" + }, { "target": "com.amazonaws.pinpoint#DeleteJourney" }, @@ -12224,6 +13093,12 @@ { "target": "com.amazonaws.pinpoint#GetImportJobs" }, + { + "target": "com.amazonaws.pinpoint#GetInAppMessages" + }, + { + "target": "com.amazonaws.pinpoint#GetInAppTemplate" + }, { "target": "com.amazonaws.pinpoint#GetJourney" }, @@ -12353,6 +13228,9 @@ { "target": "com.amazonaws.pinpoint#UpdateGcmChannel" }, + { + "target": "com.amazonaws.pinpoint#UpdateInAppTemplate" + }, { "target": "com.amazonaws.pinpoint#UpdateJourney" }, @@ -14428,6 +15306,32 @@ "smithy.api#documentation": "Specifies the message template to use for the message, for each type of channel.
" } }, + "com.amazonaws.pinpoint#TemplateCreateMessageBody": { + "type": "structure", + "members": { + "Arn": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the message template that was created.
" + } + }, + "Message": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The message that's returned from the API for the request to create the message template.
" + } + }, + "RequestID": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The unique identifier for the request to create the message template.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Provides information about a request to create a message template.
" + } + }, "com.amazonaws.pinpoint#TemplateResponse": { "type": "structure", "members": { @@ -14514,6 +15418,10 @@ { "value": "PUSH", "name": "PUSH" + }, + { + "value": "INAPP", + "name": "INAPP" } ] } @@ -15746,6 +16654,92 @@ } } }, + "com.amazonaws.pinpoint#UpdateInAppTemplate": { + "type": "operation", + "input": { + "target": "com.amazonaws.pinpoint#UpdateInAppTemplateRequest" + }, + "output": { + "target": "com.amazonaws.pinpoint#UpdateInAppTemplateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.pinpoint#BadRequestException" + }, + { + "target": "com.amazonaws.pinpoint#ForbiddenException" + }, + { + "target": "com.amazonaws.pinpoint#InternalServerErrorException" + }, + { + "target": "com.amazonaws.pinpoint#MethodNotAllowedException" + }, + { + "target": "com.amazonaws.pinpoint#NotFoundException" + }, + { + "target": "com.amazonaws.pinpoint#PayloadTooLargeException" + }, + { + "target": "com.amazonaws.pinpoint#TooManyRequestsException" + } + ], + "traits": { + "smithy.api#documentation": "Updates an existing message template for messages sent through the in-app message channel.
", + "smithy.api#http": { + "method": "PUT", + "uri": "/v1/templates/{TemplateName}/inapp", + "code": 202 + } + } + }, + "com.amazonaws.pinpoint#UpdateInAppTemplateRequest": { + "type": "structure", + "members": { + "CreateNewVersion": { + "target": "com.amazonaws.pinpoint#__boolean", + "traits": { + "smithy.api#documentation": "Specifies whether to save the updates as a new version of the message template. Valid values are: true, save the updates as a new version; and, false, save the updates to (overwrite) the latest existing version of the template.
If you don't specify a value for this parameter, Amazon Pinpoint saves the updates to (overwrites) the latest existing version of the template. If you specify a value of true for this parameter, don't specify a value for the version parameter. Otherwise, an error will occur.
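A minimal sketch of the overwrite-versus-new-version semantics described here, assuming a generated `UpdateInAppTemplateCommand`; the template name and body are placeholders.

```ts
import {
  PinpointClient,
  UpdateInAppTemplateCommand,
} from "@aws-sdk/client-pinpoint";

const client = new PinpointClient({ region: "us-east-1" });

// Sketch: overwrite the latest version of an in-app template.
// Setting CreateNewVersion to true (and omitting Version) would instead
// save the changes as a new version, per the semantics described above.
await client.send(
  new UpdateInAppTemplateCommand({
    TemplateName: "WelcomeModal",
    CreateNewVersion: false, // maps to the create-new-version query parameter
    InAppTemplateRequest: {
      Layout: "TOP_BANNER",
      TemplateDescription: "Updated first-run greeting",
    },
  })
);
```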
", + "smithy.api#httpQuery": "create-new-version" + } + }, + "InAppTemplateRequest": { + "target": "com.amazonaws.pinpoint#InAppTemplateRequest", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#required": {} + } + }, + "TemplateName": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.
", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Version": { + "target": "com.amazonaws.pinpoint#__string", + "traits": { + "smithy.api#documentation": "The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.
If specified, this value must match the identifier for an existing template version. If specified for an update operation, this value must match the identifier for the latest existing version of the template. This restriction helps ensure that race conditions don't occur.
If you don't specify a value for this parameter, Amazon Pinpoint does the following:
For a get operation, retrieves information about the active version of the template.
For an update operation, saves the updates to (overwrites) the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.
For a delete operation, deletes the template, including all versions of the template.
A custom name of the default treatment for the campaign, if the campaign has multiple treatments. A treatment is a variation of a campaign that's used for A/B testing.
" } + }, + "Priority": { + "target": "com.amazonaws.pinpoint#__integer", + "traits": { + "smithy.api#documentation": "Defines the priority of the campaign, used to decide the order of messages displayed to user if there are multiple messages scheduled to be displayed at the same moment.
" + } } }, "traits": { @@ -17289,6 +18289,10 @@ { "value": "CUSTOM", "name": "CUSTOM" + }, + { + "value": "IN_APP", + "name": "IN_APP" } ] } diff --git a/codegen/sdk-codegen/aws-models/robomaker.2018-06-29.json b/codegen/sdk-codegen/aws-models/robomaker.2018-06-29.json index 273cdfd7c3ff..b2250d89a9a5 100644 --- a/codegen/sdk-codegen/aws-models/robomaker.2018-06-29.json +++ b/codegen/sdk-codegen/aws-models/robomaker.2018-06-29.json @@ -812,8 +812,7 @@ "sources": { "target": "com.amazonaws.robomaker#SourceConfigs", "traits": { - "smithy.api#documentation": "The sources of the robot application.
", - "smithy.api#required": {} + "smithy.api#documentation": "The sources of the robot application.
" } }, "robotSoftwareSuite": { @@ -828,6 +827,12 @@ "traits": { "smithy.api#documentation": "A map that contains tag keys and tag values that are attached to the robot\n application.
" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "The object that contains that URI of the Docker image that you use for your robot application.
" + } } } }, @@ -881,6 +886,12 @@ "traits": { "smithy.api#documentation": "The list of all tags added to the robot application.
" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "An object that contains the Docker image URI used to a create your robot application.
" + } } } }, @@ -933,6 +944,18 @@ "traits": { "smithy.api#documentation": "The current revision id for the robot application. If you provide a value and it matches\n the latest revision ID, a new version will be created.
" } + }, + "s3Etags": { + "target": "com.amazonaws.robomaker#S3Etags", + "traits": { + "smithy.api#documentation": "The Amazon S3 identifier for the zip file bundle that you use for your robot application.
" + } + }, + "imageDigest": { + "target": "com.amazonaws.robomaker#ImageDigest", + "traits": { + "smithy.api#documentation": "A SHA256 identifier for the Docker image that you use for your robot application.
" + } } } }, @@ -980,6 +1003,12 @@ "traits": { "smithy.api#documentation": "The revision id of the robot application.
" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "The object that contains the Docker image URI used to create your robot application.
" + } } } }, @@ -1106,8 +1135,7 @@ "sources": { "target": "com.amazonaws.robomaker#SourceConfigs", "traits": { - "smithy.api#documentation": "The sources of the simulation application.
", - "smithy.api#required": {} + "smithy.api#documentation": "The sources of the simulation application.
" } }, "simulationSoftwareSuite": { @@ -1135,6 +1163,12 @@ "traits": { "smithy.api#documentation": "A map that contains tag keys and tag values that are attached to the simulation\n application.
" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "The object that contains the Docker image URI used to create your simulation application.
" + } } } }, @@ -1200,6 +1234,12 @@ "traits": { "smithy.api#documentation": "The list of all tags added to the simulation application.
" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "The object that contains the Docker image URI that you used to create your simulation application.
" + } } } }, @@ -1252,6 +1292,18 @@ "traits": { "smithy.api#documentation": "The current revision id for the simulation application. If you provide a value and it\n matches the latest revision ID, a new version will be created.
" } + }, + "s3Etags": { + "target": "com.amazonaws.robomaker#S3Etags", + "traits": { + "smithy.api#documentation": "The Amazon S3 eTag identifier for the zip file bundle that you use to create the simulation application.
" + } + }, + "imageDigest": { + "target": "com.amazonaws.robomaker#ImageDigest", + "traits": { + "smithy.api#documentation": "The SHA256 digest used to identify the Docker image URI used to created the simulation application.
" + } } } }, @@ -1311,6 +1363,12 @@ "traits": { "smithy.api#documentation": "The revision ID of the simulation application.
" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "The object that contains the Docker image URI used to create the simulation application.
" + } } } }, @@ -2977,6 +3035,18 @@ "traits": { "smithy.api#documentation": "The list of all tags added to the specified robot application.
" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "The object that contains the Docker image URI used to create the robot application.
" + } + }, + "imageDigest": { + "target": "com.amazonaws.robomaker#ImageDigest", + "traits": { + "smithy.api#documentation": "A SHA256 identifier for the Docker image that you use for your robot application.
" + } } } }, @@ -3168,6 +3238,18 @@ "traits": { "smithy.api#documentation": "The list of all tags added to the specified simulation application.
" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "The object that contains the Docker image URI used to create the simulation application.
" + } + }, + "imageDigest": { + "target": "com.amazonaws.robomaker#ImageDigest", + "traits": { + "smithy.api#documentation": "A SHA256 identifier for the Docker image that you use for your simulation application.
" + } } } }, @@ -3855,6 +3937,20 @@ } } }, + "com.amazonaws.robomaker#Environment": { + "type": "structure", + "members": { + "uri": { + "target": "com.amazonaws.robomaker#RepositoryUrl", + "traits": { + "smithy.api#documentation": "The Docker image URI for either your robot or simulation applications.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The object that contains the Docker image URI for either your robot or simulation applications.
" + } + }, "com.amazonaws.robomaker#EnvironmentVariableKey": { "type": "string", "traits": { @@ -4221,6 +4317,16 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.robomaker#ImageDigest": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 72 + }, + "smithy.api#pattern": "^[Ss][Hh][Aa]256:[0-9a-fA-F]{64}$" + } + }, "com.amazonaws.robomaker#Integer": { "type": "integer" }, @@ -5549,6 +5655,16 @@ "smithy.api#pattern": "^1.x$" } }, + "com.amazonaws.robomaker#RepositoryUrl": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^.+$" + } + }, "com.amazonaws.robomaker#ResourceAlreadyExistsException": { "type": "structure", "members": { @@ -6026,6 +6142,12 @@ "com.amazonaws.robomaker#S3Etag": { "type": "string" }, + "com.amazonaws.robomaker#S3Etags": { + "type": "list", + "member": { + "target": "com.amazonaws.robomaker#S3Etag" + } + }, "com.amazonaws.robomaker#S3Key": { "type": "string", "traits": { @@ -7560,8 +7682,7 @@ "sources": { "target": "com.amazonaws.robomaker#SourceConfigs", "traits": { - "smithy.api#documentation": "The sources of the robot application.
", - "smithy.api#required": {} + "smithy.api#documentation": "The sources of the robot application.
" } }, "robotSoftwareSuite": { @@ -7576,6 +7697,12 @@ "traits": { "smithy.api#documentation": "The revision id for the robot application.
" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "The object that contains the Docker image URI for your robot application.
" + } } } }, @@ -7623,6 +7750,12 @@ "traits": { "smithy.api#documentation": "The revision id of the robot application.
" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "The object that contains the Docker image URI for your robot application.
" + } } } }, @@ -7673,8 +7806,7 @@ "sources": { "target": "com.amazonaws.robomaker#SourceConfigs", "traits": { - "smithy.api#documentation": "The sources of the simulation application.
", - "smithy.api#required": {} + "smithy.api#documentation": "The sources of the simulation application.
" } }, "simulationSoftwareSuite": { @@ -7702,6 +7834,12 @@ "traits": { "smithy.api#documentation": "The revision id for the robot application.
" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "The object that contains the Docker image URI for your simulation application.
" + } } } }, @@ -7761,6 +7899,12 @@ "traits": { "smithy.api#documentation": "The revision id of the simulation application.
" } + }, + "environment": { + "target": "com.amazonaws.robomaker#Environment", + "traits": { + "smithy.api#documentation": "The object that contains the Docker image URI used for your simulation application.
" + } } } }, diff --git a/codegen/sdk-codegen/aws-models/s3.2006-03-01.json b/codegen/sdk-codegen/aws-models/s3.2006-03-01.json index a35258152bf0..45c3dcaccc0f 100644 --- a/codegen/sdk-codegen/aws-models/s3.2006-03-01.json +++ b/codegen/sdk-codegen/aws-models/s3.2006-03-01.json @@ -177,6 +177,9 @@ "smithy.api#documentation": "A container for information about access control for replicas.
" } }, + "com.amazonaws.s3#AccessPointArn": { + "type": "string" + }, "com.amazonaws.s3#AccountId": { "type": "string" }, @@ -1193,7 +1196,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "If you specified server-side encryption either with an Amazon S3-managed encryption key or an\n Amazon Web Services KMS customer master key (CMK) in your initiate multipart upload request, the response\n includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the\n object.
", + "smithy.api#documentation": "If you specified server-side encryption either with an Amazon S3-managed encryption key or an\n Amazon Web Services KMS key in your initiate multipart upload request, the response\n includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the\n object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -1207,7 +1210,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed key that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -1468,7 +1471,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed key that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -1926,6 +1929,14 @@ "smithy.api#required": {} } }, + "CreateBucketConfiguration": { + "target": "com.amazonaws.s3#CreateBucketConfiguration", + "traits": { + "smithy.api#documentation": "The configuration information for the bucket.
", + "smithy.api#httpPayload": {}, + "smithy.api#xmlName": "CreateBucketConfiguration" + } + }, "GrantFullControl": { "target": "com.amazonaws.s3#GrantFullControl", "traits": { @@ -1967,14 +1978,6 @@ "smithy.api#documentation": "Specifies whether you want S3 Object Lock to be enabled for the new bucket.
", "smithy.api#httpHeader": "x-amz-bucket-object-lock-enabled" } - }, - "CreateBucketConfiguration": { - "target": "com.amazonaws.s3#CreateBucketConfiguration", - "traits": { - "smithy.api#documentation": "The configuration information for the bucket.
", - "smithy.api#httpPayload": {}, - "smithy.api#xmlName": "CreateBucketConfiguration" - } } } }, @@ -1987,7 +1990,7 @@ "target": "com.amazonaws.s3#CreateMultipartUploadOutput" }, "traits": { - "smithy.api#documentation": "This action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request.
\n\nFor more information about multipart uploads, see Multipart Upload Overview.
\n\nIf you have configured a lifecycle rule to abort incomplete multipart uploads, the\n upload must complete within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting\n Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
\n\nFor information about the permissions required to use the multipart upload API, see\n Multipart Upload and\n Permissions.
\n\nFor request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating\n Requests (Amazon Web Services Signature Version 4).
\n\nAfter you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stop charging you for\n storing them only after you either complete or abort a multipart upload.
\nYou can optionally request server-side encryption. For server-side encryption, Amazon S3\n encrypts your data as it writes it to disks in its data centers and decrypts it when you\n access it. You can provide your own encryption key, or use Amazon Web Services Key Management Service (Amazon Web Services\n KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide\n your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to\n initiate the upload by using CreateMultipartUpload
.
To perform a multipart upload with encryption using an Amazon Web Services KMS CMK, the requester must\n have permission to the kms:Decrypt
and kms:GenerateDataKey*
\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions in the Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account\n as the Amazon Web Services KMS CMK, then you must have these permissions on the key policy. If your IAM\n user or role belongs to a different account than the key, then you must have the\n permissions on both the key policy and your IAM user or role.
\n\n\nFor more information, see Protecting\n Data Using Server-Side Encryption.
\n\nWhen copying an object, you can optionally specify the accounts or groups that\n should be granted specific permissions on the new object. There are two ways to\n grant the permissions using the request headers:
\nSpecify a canned ACL with the x-amz-acl
request header. For\n more information, see Canned ACL.
Specify access permissions explicitly with the\n x-amz-grant-read
, x-amz-grant-read-acp
,\n x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. These parameters map to\n the set of permissions that Amazon S3 supports in an ACL. For more information,\n see Access Control List (ACL)\n Overview.
You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.
\nYou can optionally tell Amazon S3 to encrypt data at rest using server-side\n encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts\n your data as it writes it to disks in its data centers and decrypts it when you\n access it. The option you use depends on whether you want to use Amazon Web Services managed\n encryption keys or provide your own encryption key.
\nUse encryption keys managed by Amazon S3 or customer master keys (CMKs) stored\n in Amazon Web Services Key Management Service (Amazon Web Services KMS) – If you want Amazon Web Services to manage the keys\n used to encrypt data, specify the following headers in the request.
\nx-amz-server-side-encryption
\nx-amz-server-side-encryption-aws-kms-key-id
\nx-amz-server-side-encryption-context
\nIf you specify x-amz-server-side-encryption:aws:kms
, but\n don't provide x-amz-server-side-encryption-aws-kms-key-id
,\n Amazon S3 uses the Amazon Web Services managed CMK in Amazon Web Services KMS to protect the data.
All GET and PUT requests for an object protected by Amazon Web Services KMS fail if\n you don't make them with SSL or by using SigV4.
\nFor more information about server-side encryption with CMKs stored in Amazon Web Services\n KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon Web Services\n KMS.
\nUse customer-provided encryption keys – If you want to manage your own\n encryption keys, provide all the following headers in the request.
\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\nFor more information about server-side encryption with CMKs stored in Amazon Web Services\n KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon Web Services\n KMS.
\nYou also can use the following access control–related headers with this\n operation. By default, all objects are private. Only the owner has full access\n control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added\n to the access control list (ACL) on the object. For more information, see Using ACLs. With this\n operation, you can grant access permissions using one of the following two\n methods:
\nSpecify a canned ACL (x-amz-acl
) — Amazon S3 supports a set of\n predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. For more information, see\n Canned\n ACL.
Specify access permissions explicitly — To explicitly grant access\n permissions to specific Amazon Web Services accounts or groups, use the following headers.\n Each header maps to specific permissions that Amazon S3 supports in an ACL. For\n more information, see Access\n Control List (ACL) Overview. In the header, you specify a list of\n grantees who get the specific permission. To grant permissions explicitly,\n use:
\nx-amz-grant-read
\nx-amz-grant-write
\nx-amz-grant-read-acp
\nx-amz-grant-write-acp
\nx-amz-grant-full-control
\nYou specify each grantee as a type=value pair, where the type is one of\n the following:
\n\n id
– if the value specified is the canonical user ID\n of an Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email\n address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-read
header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:
\n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"
\n
The following operations are related to CreateMultipartUpload
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nThis action initiates a multipart upload and returns an upload ID. This upload ID is\n used to associate all of the parts in the specific multipart upload. You specify this\n upload ID in each of your subsequent upload part requests (see UploadPart). You also include this\n upload ID in the final request to either complete or abort the multipart upload\n request.
\n\nFor more information about multipart uploads, see Multipart Upload Overview.
\n\nIf you have configured a lifecycle rule to abort incomplete multipart uploads, the\n upload must complete within the number of days specified in the bucket lifecycle\n configuration. Otherwise, the incomplete multipart upload becomes eligible for an abort\n action and Amazon S3 aborts the multipart upload. For more information, see Aborting\n Incomplete Multipart Uploads Using a Bucket Lifecycle Policy.
\n\nFor information about the permissions required to use the multipart upload API, see\n Multipart Upload and\n Permissions.
\n\nFor request signing, multipart upload is just a series of regular requests. You initiate\n a multipart upload, send one or more requests to upload parts, and then complete the\n multipart upload process. You sign each request individually. There is nothing special\n about signing multipart upload requests. For more information about signing, see Authenticating\n Requests (Amazon Web Services Signature Version 4).
\n\nAfter you initiate a multipart upload and upload one or more parts, to stop being\n charged for storing the uploaded parts, you must either complete or abort the multipart\n upload. Amazon S3 frees up the space used to store the parts and stops charging you for\n storing them only after you either complete or abort a multipart upload.
\nYou can optionally request server-side encryption. For server-side encryption, Amazon S3\n encrypts your data as it writes it to disks in its data centers and decrypts it when you\n access it. You can provide your own encryption key, or use Amazon Web Services KMS keys or Amazon S3-managed encryption keys. If you choose to provide\n your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to\n initiate the upload by using CreateMultipartUpload
.
To perform a multipart upload with encryption using an Amazon Web Services KMS key, the requester must\n have permission to the kms:Decrypt
and kms:GenerateDataKey*
\n actions on the key. These permissions are required because Amazon S3 must decrypt and read data\n from the encrypted file parts before it completes the multipart upload. For more\n information, see Multipart upload API\n and permissions in the Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account\n as the KMS key, then you must have these permissions on the key policy. If your IAM\n user or role belongs to a different account than the key, then you must have the\n permissions on both the key policy and your IAM user or role.
\n\n\nFor more information, see Protecting\n Data Using Server-Side Encryption.
\n\nWhen copying an object, you can optionally specify the accounts or groups that\n should be granted specific permissions on the new object. There are two ways to\n grant the permissions using the request headers:
\nSpecify a canned ACL with the x-amz-acl
request header. For\n more information, see Canned ACL.
Specify access permissions explicitly with the\n x-amz-grant-read
, x-amz-grant-read-acp
,\n x-amz-grant-write-acp
, and\n x-amz-grant-full-control
headers. These parameters map to\n the set of permissions that Amazon S3 supports in an ACL. For more information,\n see Access Control List (ACL)\n Overview.
You can use either a canned ACL or specify access permissions explicitly. You\n cannot do both.
\nYou can optionally tell Amazon S3 to encrypt data at rest using server-side\n encryption. Server-side encryption is for data encryption at rest. Amazon S3 encrypts\n your data as it writes it to disks in its data centers and decrypts it when you\n access it. The option you use depends on whether you want to use Amazon Web Services managed\n encryption keys or provide your own encryption key.
\nUse encryption keys managed by Amazon S3 or customer managed key stored\n in Amazon Web Services Key Management Service (Amazon Web Services KMS) – If you want Amazon Web Services to manage the keys\n used to encrypt data, specify the following headers in the request.
\nx-amz-server-side-encryption
\nx-amz-server-side-encryption-aws-kms-key-id
\nx-amz-server-side-encryption-context
\nIf you specify x-amz-server-side-encryption:aws:kms
, but\n don't provide x-amz-server-side-encryption-aws-kms-key-id
,\n Amazon S3 uses the Amazon Web Services managed key in Amazon Web Services KMS to protect the data.
All GET and PUT requests for an object protected by Amazon Web Services KMS fail if\n you don't make them with SSL or by using SigV4.
\nFor more information about server-side encryption with KMS key (SSE-KMS),\n see Protecting Data Using Server-Side Encryption with KMS keys.
\nUse customer-provided encryption keys – If you want to manage your own\n encryption keys, provide all the following headers in the request.
\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\nFor more information about server-side encryption with KMS keys (SSE-KMS),\n see Protecting Data Using Server-Side Encryption with KMS keys.
\nYou also can use the following access control–related headers with this\n operation. By default, all objects are private. Only the owner has full access\n control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added\n to the access control list (ACL) on the object. For more information, see Using ACLs. With this\n operation, you can grant access permissions using one of the following two\n methods:
\nSpecify a canned ACL (x-amz-acl
) — Amazon S3 supports a set of\n predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. For more information, see\n Canned\n ACL.
Specify access permissions explicitly — To explicitly grant access\n permissions to specific Amazon Web Services accounts or groups, use the following headers.\n Each header maps to specific permissions that Amazon S3 supports in an ACL. For\n more information, see Access\n Control List (ACL) Overview. In the header, you specify a list of\n grantees who get the specific permission. To grant permissions explicitly,\n use:
\nx-amz-grant-read
\nx-amz-grant-write
\nx-amz-grant-read-acp
\nx-amz-grant-write-acp
\nx-amz-grant-full-control
\nYou specify each grantee as a type=value pair, where the type is one of\n the following:
\n\n id
– if the value specified is the canonical user ID\n of an Amazon Web Services account
\n uri
– if you are granting permissions to a predefined\n group
\n emailAddress
– if the value specified is the email\n address of an Amazon Web Services account
Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:
\nUS East (N. Virginia)
\nUS West (N. California)
\nUS West (Oregon)
\nAsia Pacific (Singapore)
\nAsia Pacific (Sydney)
\nAsia Pacific (Tokyo)
\nEurope (Ireland)
\nSouth America (São Paulo)
\nFor a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.
\nFor example, the following x-amz-grant-read
header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata:
\n x-amz-grant-read: id=\"11112222333\", id=\"444455556666\"
\n
The following operations are related to CreateMultipartUpload
:
\n UploadPart\n
\n\n AbortMultipartUpload\n
\n\n ListParts\n
\n\n ListMultipartUploads\n
\nIf present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed key that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -2232,7 +2235,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "Specifies the ID of the symmetric customer managed Amazon Web Services KMS CMK to use for object\n encryption. All GET and PUT requests for an object protected by Amazon Web Services KMS will fail if not\n made via SSL or using SigV4. For information about configuring using any of the officially\n supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.
", + "smithy.api#documentation": "Specifies the ID of the symmetric customer managed key to use for object\n encryption. All GET and PUT requests for an object protected by Amazon Web Services KMS will fail if not\n made via SSL or using SigV4. For information about configuring using any of the officially\n supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication\n in the Amazon S3 User Guide.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -2487,7 +2490,7 @@ "target": "com.amazonaws.s3#DeleteBucketIntelligentTieringConfigurationRequest" }, "traits": { - "smithy.api#documentation": "Deletes the S3 Intelligent-Tiering configuration from the specified bucket.
\nThe S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.
\nThe S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
\nIf you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
\nOperations related to\n DeleteBucketIntelligentTieringConfiguration
include:
Deletes the S3 Intelligent-Tiering configuration from the specified bucket.
\nThe S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
\nThe S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
\nFor more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
\nOperations related to\n DeleteBucketIntelligentTieringConfiguration
include:
Container for the request.
", + "smithy.api#httpPayload": {}, + "smithy.api#required": {}, + "smithy.api#xmlName": "Delete" + } + }, "MFA": { "target": "com.amazonaws.s3#MFA", "traits": { @@ -3148,15 +3160,6 @@ "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container for the request.
", - "smithy.api#httpPayload": {}, - "smithy.api#required": {}, - "smithy.api#xmlName": "Delete" - } } } }, @@ -3326,7 +3329,7 @@ "KMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If the encryption type is aws:kms
, this optional value specifies the ID of\n the symmetric customer managed Amazon Web Services KMS CMK to use for encryption of job results. Amazon S3 only\n supports symmetric CMKs. For more information, see Using symmetric and\n asymmetric keys in the Amazon Web Services Key Management Service Developer Guide.
If the encryption type is aws:kms
, this optional value specifies the ID of\n the symmetric customer managed key to use for encryption of job results. Amazon S3 only\n supports symmetric keys. For more information, see Using symmetric and\n asymmetric keys in the Amazon Web Services Key Management Service Developer Guide.
Gets the S3 Intelligent-Tiering configuration from the specified bucket.
\nThe S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.
\nThe S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
\nIf you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
\nOperations related to\n GetBucketIntelligentTieringConfiguration
include:
Gets the S3 Intelligent-Tiering configuration from the specified bucket.
\nThe S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
\nThe S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
\nFor more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
\nOperations related to\n GetBucketIntelligentTieringConfiguration
include:
Retrieves objects from Amazon S3. To use GET
, you must have READ
\n access to the object. If you grant READ
access to the anonymous user, you can\n return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer\n file system. You can, however, create a logical hierarchy by using object key names that\n imply a folder structure. For example, instead of naming an object sample.jpg
,\n you can name it photos/2006/February/sample.jpg
.
To get an object from such a logical hierarchy, specify the full key name for the object\n in the GET
operation. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg
, specify the resource as\n /photos/2006/February/sample.jpg
. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg
in the bucket named\n examplebucket
, specify the resource as\n /examplebucket/photos/2006/February/sample.jpg
. For more information about\n request types, see HTTP Host Header Bucket Specification.
To distribute large files to many people, you can save bandwidth costs by using\n BitTorrent. For more information, see Amazon S3\n Torrent. For more information about returning the ACL of an object, see GetObjectAcl.
\n\nIf the object you are retrieving is stored in the S3 Glacier or\n S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or\n S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a\n copy using RestoreObject. Otherwise, this action returns an\n InvalidObjectStateError
error. For information about restoring archived\n objects, see Restoring Archived\n Objects.
Encryption request headers, like x-amz-server-side-encryption
, should not\n be sent for GET requests if your object uses server-side encryption with CMKs stored in Amazon Web Services\n KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your\n object does use these types of keys, you’ll get an HTTP 400 BadRequest error.
If you encrypt an object by using server-side encryption with customer-provided\n encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object,\n you must use the following headers:
\nx-amz-server-side-encryption-customer-algorithm
\nx-amz-server-side-encryption-customer-key
\nx-amz-server-side-encryption-customer-key-MD5
\nFor more information about SSE-C, see Server-Side Encryption (Using\n Customer-Provided Encryption Keys).
\n\nAssuming you have the relevant permission to read object tags, the response also returns the\n x-amz-tagging-count
header that provides the count of number of tags\n associated with the object. You can use GetObjectTagging to retrieve\n the tag set associated with an object.
\n Permissions\n
\nYou need the relevant read object (or version) permission for this operation. For more\n information, see Specifying Permissions\n in a Policy. If the object you request does not exist, the error Amazon S3 returns\n depends on whether you also have the s3:ListBucket
permission.
If you have the s3:ListBucket
permission on the bucket, Amazon S3 will\n return an HTTP status code 404 (\"no such key\") error.
If you don’t have the s3:ListBucket
permission, Amazon S3 will return an\n HTTP status code 403 (\"access denied\") error.
\n Versioning\n
\nBy default, the GET action returns the current version of an object. To return a\n different version, use the versionId
subresource.
You need the s3:GetObjectVersion
permission to access a specific version of an object.\n
If the current version of the object is a delete marker, Amazon S3 behaves as if the\n object was deleted and includes x-amz-delete-marker: true
in the\n response.
For more information about versioning, see PutBucketVersioning.
\n\n\n Overriding Response Header Values\n
\nThere are times when you want to override certain response header values in a GET\n response. For example, you might override the Content-Disposition response header value in\n your GET request.
\n\nYou can override values for a set of response headers using the following query\n parameters. These response header values are sent only on a successful request, that is,\n when status code 200 OK is returned. The set of headers you can override using these\n parameters is a subset of the headers that Amazon S3 accepts when you create an object. The\n response headers that you can override for the GET response are Content-Type
,\n Content-Language
, Expires
, Cache-Control
,\n Content-Disposition
, and Content-Encoding
. To override these\n header values in the GET response, you use the following request parameters.
You must sign the request, either using an Authorization header or a presigned URL,\n when using these parameters. They cannot be used with an unsigned (anonymous)\n request.
\n\n response-content-type
\n
\n response-content-language
\n
\n response-expires
\n
\n response-cache-control
\n
\n response-content-disposition
\n
\n response-content-encoding
\n
\n Additional Considerations about Request Headers\n
\n\nIf both of the If-Match
and If-Unmodified-Since
headers are\n present in the request as follows: If-Match
condition evaluates to\n true
, and; If-Unmodified-Since
condition evaluates to\n false
; then, S3 returns 200 OK and the data requested.
If both of the If-None-Match
and If-Modified-Since
headers are\n present in the request as follows: If-None-Match
condition evaluates to\n false
, and; If-Modified-Since
condition evaluates to\n true
; then, S3 returns 304 Not Modified response code.
For more information about conditional requests, see RFC 7232.
\n\nThe following operations are related to GetObject
:
\n ListBuckets\n
\n\n GetObjectAcl\n
\nRetrieves objects from Amazon S3. To use GET
, you must have READ
\n access to the object. If you grant READ
access to the anonymous user, you can\n return the object without using an authorization header.
An Amazon S3 bucket has no directory hierarchy such as you would find in a typical computer\n file system. You can, however, create a logical hierarchy by using object key names that\n imply a folder structure. For example, instead of naming an object sample.jpg
,\n you can name it photos/2006/February/sample.jpg
.
To get an object from such a logical hierarchy, specify the full key name for the object\n in the GET
operation. For a virtual hosted-style request example, if you have\n the object photos/2006/February/sample.jpg
, specify the resource as\n /photos/2006/February/sample.jpg
. For a path-style request example, if you\n have the object photos/2006/February/sample.jpg
in the bucket named\n examplebucket
, specify the resource as\n /examplebucket/photos/2006/February/sample.jpg
. For more information about\n request types, see HTTP Host Header Bucket Specification.
To distribute large files to many people, you can save bandwidth costs by using BitTorrent. For more information, see Amazon S3 Torrent. For more information about returning the ACL of an object, see GetObjectAcl.

If the object you are retrieving is stored in the S3 Glacier or S3 Glacier Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you must first restore a copy using RestoreObject. Otherwise, this action returns an InvalidObjectStateError error. For information about restoring archived objects, see Restoring Archived Objects.

Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, you must use the following headers:

  x-amz-server-side-encryption-customer-algorithm
  x-amz-server-side-encryption-customer-key
  x-amz-server-side-encryption-customer-key-MD5

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
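A sketch of supplying the three SSE-C headers through this SDK's inputs (bucket, key, and key material are placeholders; the key is assumed to be passed base64-encoded, with the MD5 being the base64 digest of the raw key bytes):

    import { createHash } from "crypto";
    import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";

    const client = new S3Client({ region: "us-west-2" });

    const customerKey = Buffer.alloc(32, "k"); // placeholder 256-bit key
    const response = await client.send(
      new GetObjectCommand({
        Bucket: "examplebucket", // placeholder
        Key: "photos/2006/February/sample.jpg",
        SSECustomerAlgorithm: "AES256",
        SSECustomerKey: customerKey.toString("base64"),
        SSECustomerKeyMD5: createHash("md5").update(customerKey).digest("base64"),
      })
    );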
Assuming you have the relevant permission to read object tags, the response also returns the x-amz-tagging-count header that provides the count of the number of tags associated with the object. You can use GetObjectTagging to retrieve the tag set associated with an object.

Permissions

You need the relevant read object (or version) permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

If you have the s3:ListBucket permission on the bucket, Amazon S3 will return an HTTP status code 404 ("no such key") error.

If you don’t have the s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 ("access denied") error.
Versioning

By default, the GET action returns the current version of an object. To return a different version, use the versionId subresource.

You need the s3:GetObjectVersion permission to access a specific version of an object. If the current version of the object is a delete marker, Amazon S3 behaves as if the object was deleted and includes x-amz-delete-marker: true in the response.

For more information about versioning, see PutBucketVersioning.
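A sketch of reading a specific version with this SDK (bucket, key, and version ID are placeholders; VersionId maps to the versionId subresource and requires s3:GetObjectVersion):

    import { S3Client, GetObjectCommand } from "@aws-sdk/client-s3";

    const client = new S3Client({ region: "us-west-2" });

    const response = await client.send(
      new GetObjectCommand({
        Bucket: "examplebucket",                // placeholder
        Key: "photos/2006/February/sample.jpg",
        VersionId: "EXAMPLEVERSIONID",          // placeholder version ID
      })
    );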
Overriding Response Header Values

There are times when you want to override certain response header values in a GET response. For example, you might override the Content-Disposition response header value in your GET request.

You can override values for a set of response headers using the following query parameters. These response header values are sent only on a successful request, that is, when status code 200 OK is returned. The set of headers you can override using these parameters is a subset of the headers that Amazon S3 accepts when you create an object. The response headers that you can override for the GET response are Content-Type, Content-Language, Expires, Cache-Control, Content-Disposition, and Content-Encoding. To override these header values in the GET response, you use the following request parameters.

You must sign the request, either using an Authorization header or a presigned URL, when using these parameters. They cannot be used with an unsigned (anonymous) request.

  response-content-type
  response-content-language
  response-expires
  response-cache-control
  response-content-disposition
  response-content-encoding
Additional Considerations about Request Headers

If both of the If-Match and If-Unmodified-Since headers are present in the request as follows: If-Match condition evaluates to true, and If-Unmodified-Since condition evaluates to false; then S3 returns 200 OK and the data requested.

If both of the If-None-Match and If-Modified-Since headers are present in the request as follows: If-None-Match condition evaluates to false, and If-Modified-Since condition evaluates to true; then S3 returns the 304 Not Modified response code.

For more information about conditional requests, see RFC 7232.
The following operations are related to GetObject:

  ListBuckets
  GetObjectAcl
\nIf present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed key that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -5151,7 +5154,7 @@ "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { - "smithy.api#documentation": "The bucket name containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", + "smithy.api#documentation": "The bucket name containing the object.
When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.

When using an Object Lambda access point the hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.

When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -5749,7 +5752,7 @@ } ], "traits": { - "smithy.api#documentation": "The HEAD action retrieves metadata from an object without returning the object\n itself. This action is useful if you're only interested in an object's metadata. To use\n HEAD, you must have READ access to the object.
A HEAD request has the same options as a GET action on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic 404 Not Found or 403 Forbidden code. It is not possible to retrieve the exact exception beyond these error codes.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

  x-amz-server-side-encryption-customer-algorithm
  x-amz-server-side-encryption-customer-key
  x-amz-server-side-encryption-customer-key-MD5

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with CMKs stored in Amazon Web Services KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

The last modified property in this case is the creation date of the object.

Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

Consider the following when using request headers:

Consideration 1 – If both of the If-Match and If-Unmodified-Since headers are present in the request as follows:

  If-Match condition evaluates to true, and;
  If-Unmodified-Since condition evaluates to false;

Then Amazon S3 returns 200 OK and the data requested.

Consideration 2 – If both of the If-None-Match and If-Modified-Since headers are present in the request as follows:

  If-None-Match condition evaluates to false, and;
  If-Modified-Since condition evaluates to true;

Then Amazon S3 returns the 304 Not Modified response code.

For more information about conditional requests, see RFC 7232.
Permissions

You need the relevant read object (or version) permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 ("no such key") error.

If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 ("access denied") error.

The following action is related to HeadObject:

  GetObject
\nThe HEAD action retrieves metadata from an object without returning the object\n itself. This action is useful if you're only interested in an object's metadata. To use\n HEAD, you must have READ access to the object.
A HEAD request has the same options as a GET action on an object. The response is identical to the GET response except that there is no response body. Because of this, if the HEAD request generates an error, it returns a generic 404 Not Found or 403 Forbidden code. It is not possible to retrieve the exact exception beyond these error codes.

If you encrypt an object by using server-side encryption with customer-provided encryption keys (SSE-C) when you store the object in Amazon S3, then when you retrieve the metadata from the object, you must use the following headers:

  x-amz-server-side-encryption-customer-algorithm
  x-amz-server-side-encryption-customer-key
  x-amz-server-side-encryption-customer-key-MD5

For more information about SSE-C, see Server-Side Encryption (Using Customer-Provided Encryption Keys).
Encryption request headers, like x-amz-server-side-encryption, should not be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest error.

The last modified property in this case is the creation date of the object.

Request headers are limited to 8 KB in size. For more information, see Common Request Headers.

Consider the following when using request headers:

Consideration 1 – If both of the If-Match and If-Unmodified-Since headers are present in the request as follows:

  If-Match condition evaluates to true, and;
  If-Unmodified-Since condition evaluates to false;

Then Amazon S3 returns 200 OK and the data requested.

Consideration 2 – If both of the If-None-Match and If-Modified-Since headers are present in the request as follows:

  If-None-Match condition evaluates to false, and;
  If-Modified-Since condition evaluates to true;

Then Amazon S3 returns the 304 Not Modified response code.

For more information about conditional requests, see RFC 7232.
Permissions

You need the relevant read object (or version) permission for this operation. For more information, see Specifying Permissions in a Policy. If the object you request does not exist, the error Amazon S3 returns depends on whether you also have the s3:ListBucket permission.

If you have the s3:ListBucket permission on the bucket, Amazon S3 returns an HTTP status code 404 ("no such key") error.

If you don’t have the s3:ListBucket permission, Amazon S3 returns an HTTP status code 403 ("access denied") error.

The following action is related to HeadObject:

  GetObject
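A minimal HeadObjectCommand sketch with this SDK (names are placeholders); only headers come back, and a failed request surfaces as the generic 404 or 403 described above:

    import { S3Client, HeadObjectCommand } from "@aws-sdk/client-s3";

    const client = new S3Client({ region: "us-west-2" });

    const { ContentLength, ContentType, LastModified, ETag } = await client.send(
      new HeadObjectCommand({
        Bucket: "examplebucket", // placeholder
        Key: "photos/2006/February/sample.jpg",
      })
    );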
\nIf the object is stored using server-side encryption either with an Amazon Web Services KMS customer\n master key (CMK) or an Amazon S3-managed encryption key, the response includes this header with\n the value of the server-side encryption algorithm used when storing this object in Amazon\n S3 (for example, AES256, aws:kms).
", + "smithy.api#documentation": "If the object is stored using server-side encryption either with an Amazon Web Services KMS key or \n an Amazon S3-managed encryption key, the response includes this header with\n the value of the server-side encryption algorithm used when storing this object in Amazon\n S3 (for example, AES256, aws:kms).
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -5940,7 +5943,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed key that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -6958,7 +6961,7 @@ "target": "com.amazonaws.s3#ListBucketIntelligentTieringConfigurationsOutput" }, "traits": { - "smithy.api#documentation": "Lists the S3 Intelligent-Tiering configuration from the specified bucket.
\nThe S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.
\nThe S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
\nIf you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
\nOperations related to\n ListBucketIntelligentTieringConfigurations
include:
Lists the S3 Intelligent-Tiering configuration from the specified bucket.
\nThe S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
\nThe S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
\nFor more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
\nOperations related to\n ListBucketIntelligentTieringConfigurations
include:
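A sketch of listing the configurations with this SDK (the bucket name is a placeholder); results are paginated via ContinuationToken:

    import {
      S3Client,
      ListBucketIntelligentTieringConfigurationsCommand,
    } from "@aws-sdk/client-s3";

    const client = new S3Client({ region: "us-west-2" });

    const output = await client.send(
      new ListBucketIntelligentTieringConfigurationsCommand({
        Bucket: "examplebucket", // placeholder
      })
    );
    // Re-issue the command with ContinuationToken when IsTruncated is true.
    console.log(output.IntelligentTieringConfigurationList, output.IsTruncated);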
The access point ARN used when evaluating an AND predicate.
" + } } }, "traits": { @@ -8241,12 +8250,12 @@ "Filter": { "target": "com.amazonaws.s3#MetricsFilter", "traits": { - "smithy.api#documentation": "Specifies a metrics configuration filter. The metrics configuration will only include\n objects that meet the filter's criteria. A filter must be a prefix, a tag, or a conjunction\n (MetricsAndOperator).
" + "smithy.api#documentation": "Specifies a metrics configuration filter. The metrics configuration will only include\n objects that meet the filter's criteria. A filter must be a prefix, an object tag, an access point ARN, or a conjunction\n (MetricsAndOperator).
" } } }, "traits": { - "smithy.api#documentation": "Specifies a metrics configuration for the CloudWatch request metrics (specified by the\n metrics configuration ID) from an Amazon S3 bucket. If you're updating an existing metrics\n configuration, note that this is a full replacement of the existing metrics configuration.\n If you don't include the elements you want to keep, they are erased. For more information,\n see PUT Bucket\n metrics in the Amazon S3 API Reference.
" + "smithy.api#documentation": "Specifies a metrics configuration for the CloudWatch request metrics (specified by the\n metrics configuration ID) from an Amazon S3 bucket. If you're updating an existing metrics\n configuration, note that this is a full replacement of the existing metrics configuration.\n If you don't include the elements you want to keep, they are erased. For more information,\n see PutBucketMetricsConfiguration.
" } }, "com.amazonaws.s3#MetricsConfigurationList": { @@ -8270,6 +8279,12 @@ "smithy.api#documentation": "The tag used when evaluating a metrics filter.
" } }, + "AccessPointArn": { + "target": "com.amazonaws.s3#AccessPointArn", + "traits": { + "smithy.api#documentation": "The access point ARN used when evaluating a metrics filter.
" + } + }, "And": { "target": "com.amazonaws.s3#MetricsAndOperator", "traits": { @@ -8278,7 +8293,7 @@ } }, "traits": { - "smithy.api#documentation": "Specifies a metrics configuration filter. The metrics configuration only includes\n objects that meet the filter's criteria. A filter must be a prefix, a tag, or a conjunction\n (MetricsAndOperator).
" + "smithy.api#documentation": "Specifies a metrics configuration filter. The metrics configuration only includes\n objects that meet the filter's criteria. A filter must be a prefix, an object tag, an access point ARN, or a conjunction\n (MetricsAndOperator). For more information, see PutBucketMetricsConfiguration.
" } }, "com.amazonaws.s3#MetricsId": { @@ -9242,13 +9257,6 @@ "smithy.api#required": {} } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Contains the elements that set the ACL permissions for an object per grantee.
", + "smithy.api#httpPayload": {}, + "smithy.api#xmlName": "AccessControlPolicy" + } + }, "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { @@ -9341,14 +9364,6 @@ "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Contains the elements that set the ACL permissions for an object per grantee.
", - "smithy.api#httpPayload": {}, - "smithy.api#xmlName": "AccessControlPolicy" - } } } }, @@ -9385,13 +9400,6 @@ "smithy.api#required": {} } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more\n information, see Enabling Cross-Origin Resource\n Sharing in the Amazon S3 User Guide.
", + "smithy.api#httpPayload": {}, + "smithy.api#required": {}, + "smithy.api#xmlName": "CORSConfiguration" + } + }, "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { @@ -9442,15 +9466,6 @@ "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more\n information, see Enabling Cross-Origin Resource\n Sharing in the Amazon S3 User Guide.
", - "smithy.api#httpPayload": {}, - "smithy.api#required": {}, - "smithy.api#xmlName": "CORSConfiguration" - } } } }, @@ -9460,7 +9475,7 @@ "target": "com.amazonaws.s3#PutBucketEncryptionRequest" }, "traits": { - "smithy.api#documentation": "This action uses the encryption
subresource to configure default\n encryption and Amazon S3 Bucket Key for an existing bucket.
Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys\n (SSE-S3) or Amazon Web Services KMS customer master keys (SSE-KMS). If you specify default encryption\n using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default\n encryption, see Amazon S3 default bucket encryption\n in the Amazon S3 User Guide. For more information about S3 Bucket Keys,\n see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
\nThis action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature\n Version 4).
\nTo use this operation, you must have permissions to perform the\n s3:PutEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources in the Amazon S3 User Guide.
\n Related Resources\n
\n\n GetBucketEncryption\n
\nThis action uses the encryption
subresource to configure default\n encryption and Amazon S3 Bucket Key for an existing bucket.
Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys\n (SSE-S3) or customer managed keys (SSE-KMS). If you specify default encryption\n using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default\n encryption, see Amazon S3 default bucket encryption\n in the Amazon S3 User Guide. For more information about S3 Bucket Keys,\n see Amazon S3 Bucket Keys in the Amazon S3 User Guide.
\nThis action requires Amazon Web Services Signature Version 4. For more information, see Authenticating Requests (Amazon Web Services Signature\n Version 4).
\nTo use this operation, you must have permissions to perform the\n s3:PutEncryptionConfiguration
action. The bucket owner has this permission\n by default. The bucket owner can grant this permission to others. For more information\n about permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources in the Amazon S3 User Guide.
\n Related Resources\n
\n\n GetBucketEncryption\n
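A sketch of configuring SSE-KMS default encryption with an S3 Bucket Key through this SDK (bucket name and KMS key ARN are placeholders):

    import { S3Client, PutBucketEncryptionCommand } from "@aws-sdk/client-s3";

    const client = new S3Client({ region: "us-west-2" });

    await client.send(
      new PutBucketEncryptionCommand({
        Bucket: "examplebucket", // placeholder
        ServerSideEncryptionConfiguration: {
          Rules: [
            {
              ApplyServerSideEncryptionByDefault: {
                SSEAlgorithm: "aws:kms",
                // Placeholder customer managed key ARN.
                KMSMasterKeyID: "arn:aws:kms:us-west-2:111122223333:key/EXAMPLE",
              },
              BucketKeyEnabled: true,
            },
          ],
        },
      })
    );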
\nSpecifies default encryption for a bucket using server-side encryption with Amazon S3-managed\n keys (SSE-S3) or customer master keys stored in Amazon Web Services KMS (SSE-KMS). For information about\n the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption\n in the Amazon S3 User Guide.
", + "smithy.api#documentation": "Specifies default encryption for a bucket using server-side encryption with Amazon S3-managed\n keys (SSE-S3) or customer managed keys (SSE-KMS). For information about\n the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption\n in the Amazon S3 User Guide.
", "smithy.api#httpLabel": {}, "smithy.api#required": {} } @@ -9487,13 +9502,6 @@ "smithy.api#httpHeader": "Content-MD5" } }, - "ExpectedBucketOwner": { - "target": "com.amazonaws.s3#AccountId", - "traits": { - "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Puts a S3 Intelligent-Tiering configuration to the specified bucket.\n You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.
\nThe S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.
\nThe S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
\nIf you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
\nOperations related to\n PutBucketIntelligentTieringConfiguration
include:
You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically\n move objects stored in the S3 Intelligent-Tiering storage class to the\n Archive Access or Deep Archive Access tier.
Special Errors

  HTTP 400 Bad Request Error
    Code: InvalidArgument
    Cause: Invalid Argument

  HTTP 400 Bad Request Error
    Code: TooManyConfigurations
    Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

  HTTP 403 Forbidden Error
    Code: AccessDenied
    Cause: You are not the owner of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration bucket permission to set the configuration on the bucket.
Puts a S3 Intelligent-Tiering configuration to the specified bucket.\n You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.
\nThe S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
\nThe S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
\nFor more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
\nOperations related to\n PutBucketIntelligentTieringConfiguration
include:
You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically\n move objects stored in the S3 Intelligent-Tiering storage class to the\n Archive Access or Deep Archive Access tier.
Special Errors

  HTTP 400 Bad Request Error
    Code: InvalidArgument
    Cause: Invalid Argument

  HTTP 400 Bad Request Error
    Code: TooManyConfigurations
    Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.

  HTTP 403 Forbidden Error
    Code: AccessDenied
    Cause: You are not the owner of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration bucket permission to set the configuration on the bucket.
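A sketch of one such configuration through this SDK (bucket, configuration ID, prefix, and day counts are placeholders):

    import {
      S3Client,
      PutBucketIntelligentTieringConfigurationCommand,
    } from "@aws-sdk/client-s3";

    const client = new S3Client({ region: "us-west-2" });

    // Archive objects under "logs/" after the configured days without access.
    await client.send(
      new PutBucketIntelligentTieringConfigurationCommand({
        Bucket: "examplebucket", // placeholder
        Id: "ArchiveLogs",       // placeholder configuration ID
        IntelligentTieringConfiguration: {
          Id: "ArchiveLogs",
          Status: "Enabled",
          Filter: { Prefix: "logs/" },
          Tierings: [
            { Days: 90, AccessTier: "ARCHIVE_ACCESS" },
            { Days: 180, AccessTier: "DEEP_ARCHIVE_ACCESS" },
          ],
        },
      })
    );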
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container for logging status information.
", + "smithy.api#httpPayload": {}, + "smithy.api#required": {}, + "smithy.api#xmlName": "BucketLoggingStatus" + } + }, "ContentMD5": { "target": "com.amazonaws.s3#ContentMD5", "traits": { @@ -9681,15 +9705,6 @@ "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container for logging status information.
", - "smithy.api#httpPayload": {}, - "smithy.api#required": {}, - "smithy.api#xmlName": "BucketLoggingStatus" - } } } }, @@ -9699,7 +9714,7 @@ "target": "com.amazonaws.s3#PutBucketMetricsConfigurationRequest" }, "traits": { - "smithy.api#documentation": "Sets a metrics configuration (specified by the metrics configuration ID) for the bucket.\n You can have up to 1,000 metrics configurations per bucket. If you're updating an existing\n metrics configuration, note that this is a full replacement of the existing metrics\n configuration. If you don't include the elements you want to keep, they are erased.
\n\nTo use this operation, you must have permissions to perform the\n s3:PutMetricsConfiguration
action. The bucket owner has this permission by\n default. The bucket owner can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon\n CloudWatch.
\n\nThe following operations are related to\n PutBucketMetricsConfiguration
:
\n GetBucketLifecycle
has the following special error:
Error code: TooManyConfigurations
\n
Description: You are attempting to create a new configuration but have\n already reached the 1,000-configuration limit.
\nHTTP Status Code: HTTP 400 Bad Request
\nSets a metrics configuration (specified by the metrics configuration ID) for the bucket.\n You can have up to 1,000 metrics configurations per bucket. If you're updating an existing\n metrics configuration, note that this is a full replacement of the existing metrics\n configuration. If you don't include the elements you want to keep, they are erased.
\n\nTo use this operation, you must have permissions to perform the\n s3:PutMetricsConfiguration
action. The bucket owner has this permission by\n default. The bucket owner can grant this permission to others. For more information about\n permissions, see Permissions Related to Bucket Subresource Operations and Managing Access Permissions to Your Amazon S3\n Resources.
For information about CloudWatch request metrics for Amazon S3, see Monitoring Metrics with Amazon\n CloudWatch.
\n\nThe following operations are related to\n PutBucketMetricsConfiguration
:
\n GetBucketLifecycle
has the following special error:
Error code: TooManyConfigurations
\n
Description: You are attempting to create a new configuration but have\n already reached the 1,000-configuration limit.
\nHTTP Status Code: HTTP 400 Bad Request
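A sketch of a full-replacement metrics configuration through this SDK, using the MetricsAndOperator form of the filter (bucket, ID, prefix, and tag are placeholders):

    import {
      S3Client,
      PutBucketMetricsConfigurationCommand,
    } from "@aws-sdk/client-s3";

    const client = new S3Client({ region: "us-west-2" });

    // Request metrics are emitted only for objects matching both the prefix
    // and the tag (the MetricsAndOperator case).
    await client.send(
      new PutBucketMetricsConfigurationCommand({
        Bucket: "examplebucket", // placeholder
        Id: "PhotosMetrics",     // placeholder configuration ID
        MetricsConfiguration: {
          Id: "PhotosMetrics",
          Filter: {
            And: {
              Prefix: "photos/",
              Tags: [{ Key: "priority", Value: "high" }], // placeholder tag
            },
          },
        },
      })
    );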
\nThe account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Creates a replication configuration or replaces an existing one. For more information,\n see Replication in the Amazon S3 User Guide.
\n \nSpecify the replication configuration in the request body. In the replication\n configuration, you provide the name of the destination bucket or buckets where you want\n Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your\n behalf, and other relevant information.
\n\n\nA replication configuration must include at least one rule, and can contain a maximum of\n 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in\n the source bucket. To choose additional subsets of objects to replicate, add a rule for\n each subset.
\n\nTo specify a subset of the objects in the source bucket to apply a replication rule to,\n add the Filter element as a child of the Rule element. You can filter objects based on an\n object key prefix, one or more object tags, or both. When you add the Filter element in the\n configuration, you must also add the following elements:\n DeleteMarkerReplication
, Status
, and\n Priority
.
If you are using an earlier version of the replication configuration, Amazon S3 handles\n replication of delete markers differently. For more information, see Backward Compatibility.
\nFor information about enabling versioning on a bucket, see Using Versioning.
\n\n\n Handling Replication of Encrypted Objects\n
\nBy default, Amazon S3 doesn't replicate objects that are stored at rest using server-side\n encryption with CMKs stored in Amazon Web Services KMS. To replicate Amazon Web Services KMS-encrypted objects, add the\n following: SourceSelectionCriteria
, SseKmsEncryptedObjects
,\n Status
, EncryptionConfiguration
, and\n ReplicaKmsKeyID
. For information about replication configuration, see\n Replicating Objects\n Created with SSE Using CMKs stored in Amazon Web Services KMS.
For information on PutBucketReplication
errors, see List of\n replication-related error codes\n
\n Permissions\n
\nTo create a PutBucketReplication
request, you must have s3:PutReplicationConfiguration
\n permissions for the bucket. \n
By default, a resource owner, in this case the Amazon Web Services account that created the bucket, can\n perform this operation. The resource owner can also grant others permissions to perform the\n operation. For more information about permissions, see Specifying Permissions in a Policy\n and Managing Access Permissions to Your\n Amazon S3 Resources.
\nTo perform this operation, the user or role performing the action must have the\n iam:PassRole permission.
\nThe following operations are related to PutBucketReplication
:
\n GetBucketReplication\n
\nCreates a replication configuration or replaces an existing one. For more information,\n see Replication in the Amazon S3 User Guide.
\n \nSpecify the replication configuration in the request body. In the replication\n configuration, you provide the name of the destination bucket or buckets where you want\n Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to replicate objects on your\n behalf, and other relevant information.
\n\n\nA replication configuration must include at least one rule, and can contain a maximum of\n 1,000. Each rule identifies a subset of objects to replicate by filtering the objects in\n the source bucket. To choose additional subsets of objects to replicate, add a rule for\n each subset.
\n\nTo specify a subset of the objects in the source bucket to apply a replication rule to,\n add the Filter element as a child of the Rule element. You can filter objects based on an\n object key prefix, one or more object tags, or both. When you add the Filter element in the\n configuration, you must also add the following elements:\n DeleteMarkerReplication
, Status
, and\n Priority
.
If you are using an earlier version of the replication configuration, Amazon S3 handles\n replication of delete markers differently. For more information, see Backward Compatibility.
\nFor information about enabling versioning on a bucket, see Using Versioning.
\n\n\n Handling Replication of Encrypted Objects\n
\nBy default, Amazon S3 doesn't replicate objects that are stored at rest using server-side\n encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted objects, add the\n following: SourceSelectionCriteria
, SseKmsEncryptedObjects
,\n Status
, EncryptionConfiguration
, and\n ReplicaKmsKeyID
. For information about replication configuration, see\n Replicating Objects\n Created with SSE Using KMS keys.
For information on PutBucketReplication
errors, see List of\n replication-related error codes\n
\n Permissions\n
\nTo create a PutBucketReplication
request, you must have s3:PutReplicationConfiguration
\n permissions for the bucket. \n
By default, a resource owner, in this case the Amazon Web Services account that created the bucket, can\n perform this operation. The resource owner can also grant others permissions to perform the\n operation. For more information about permissions, see Specifying Permissions in a Policy\n and Managing Access Permissions to Your\n Amazon S3 Resources.
\nTo perform this operation, the user or role performing the action must have the\n iam:PassRole permission.
\nThe following operations are related to PutBucketReplication
:
\n GetBucketReplication\n
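A sketch of a single-rule replication configuration through this SDK, including the DeleteMarkerReplication, Status, and Priority elements that accompany a Filter (role ARN, buckets, and prefix are placeholders):

    import { S3Client, PutBucketReplicationCommand } from "@aws-sdk/client-s3";

    const client = new S3Client({ region: "us-west-2" });

    await client.send(
      new PutBucketReplicationCommand({
        Bucket: "examplebucket", // placeholder source bucket
        ReplicationConfiguration: {
          // Placeholder IAM role that S3 assumes to replicate on your behalf.
          Role: "arn:aws:iam::111122223333:role/replication-role",
          Rules: [
            {
              Priority: 1,
              Status: "Enabled",
              Filter: { Prefix: "docs/" },
              DeleteMarkerReplication: { Status: "Disabled" },
              // Placeholder destination bucket ARN.
              Destination: { Bucket: "arn:aws:s3:::examplebucket-replica" },
            },
          ],
        },
      })
    );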
\nThe account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Contains the elements that set the ACL permissions for an object per grantee.
", + "smithy.api#httpPayload": {}, + "smithy.api#xmlName": "AccessControlPolicy" + } + }, "Bucket": { "target": "com.amazonaws.s3#BucketName", "traits": { @@ -10300,14 +10323,6 @@ "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Contains the elements that set the ACL permissions for an object per grantee.
", - "smithy.api#httpPayload": {}, - "smithy.api#xmlName": "AccessControlPolicy" - } } } }, @@ -10359,6 +10374,14 @@ "smithy.api#required": {} } }, + "LegalHold": { + "target": "com.amazonaws.s3#ObjectLockLegalHold", + "traits": { + "smithy.api#documentation": "Container element for the Legal Hold configuration you want to apply to the specified\n object.
", + "smithy.api#httpPayload": {}, + "smithy.api#xmlName": "LegalHold" + } + }, "RequestPayer": { "target": "com.amazonaws.s3#RequestPayer", "traits": { @@ -10385,14 +10408,6 @@ "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container element for the Legal Hold configuration you want to apply to the specified\n object.
", - "smithy.api#httpPayload": {}, - "smithy.api#xmlName": "LegalHold" - } } } }, @@ -10436,6 +10451,14 @@ "smithy.api#required": {} } }, + "ObjectLockConfiguration": { + "target": "com.amazonaws.s3#ObjectLockConfiguration", + "traits": { + "smithy.api#documentation": "The Object Lock configuration that you want to apply to the specified bucket.
", + "smithy.api#httpPayload": {}, + "smithy.api#xmlName": "ObjectLockConfiguration" + } + }, "RequestPayer": { "target": "com.amazonaws.s3#RequestPayer", "traits": { @@ -10462,14 +10485,6 @@ "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The Object Lock configuration that you want to apply to the specified bucket.
", - "smithy.api#httpPayload": {}, - "smithy.api#xmlName": "ObjectLockConfiguration" - } } } }, @@ -10493,7 +10508,7 @@ "ServerSideEncryption": { "target": "com.amazonaws.s3#ServerSideEncryption", "traits": { - "smithy.api#documentation": "If you specified server-side encryption either with an Amazon Web Services KMS customer master key (CMK)\n or Amazon S3-managed encryption key in your PUT request, the response includes this header. It\n confirms the encryption algorithm that Amazon S3 used to encrypt the object.
", + "smithy.api#documentation": "If you specified server-side encryption either with an Amazon Web Services KMS key\n or Amazon S3-managed encryption key in your PUT request, the response includes this header. It\n confirms the encryption algorithm that Amazon S3 used to encrypt the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption" } }, @@ -10521,7 +10536,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If x-amz-server-side-encryption
is present and has the value of\n aws:kms
, this header specifies the ID of the Amazon Web Services Key Management Service\n (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) that was used for the\n object.
If x-amz-server-side-encryption
is present and has the value of\n aws:kms
, this header specifies the ID of the Amazon Web Services Key Management Service\n (Amazon Web Services KMS) symmetric customer managed key that was used for the\n object.
If x-amz-server-side-encryption
is present and has the value of\n aws:kms
, this header specifies the ID of the Amazon Web Services Key Management Service\n (Amazon Web Services KMS) symmetrical customer managed customer master key (CMK) that was used for the\n object. If you specify x-amz-server-side-encryption:aws:kms
, but do not\n provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services\n managed CMK in Amazon Web Services to protect the data. If the KMS key does not exist in the same account\n issuing the command, you must use the full ARN and not just the ID.\n
If x-amz-server-side-encryption
is present and has the value of\n aws:kms
, this header specifies the ID of the Amazon Web Services Key Management Service\n (Amazon Web Services KMS) symmetrical customer managed key that was used for the\n object. If you specify x-amz-server-side-encryption:aws:kms
, but do not\n provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services\n managed key to protect the data. If the KMS key does not exist in the same account\n issuing the command, you must use the full ARN and not just the ID.\n
The container element for the Object Retention configuration.
", + "smithy.api#httpPayload": {}, + "smithy.api#xmlName": "Retention" + } + }, "RequestPayer": { "target": "com.amazonaws.s3#RequestPayer", "traits": { @@ -10858,14 +10881,6 @@ "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The container element for the Object Retention configuration.
", - "smithy.api#httpPayload": {}, - "smithy.api#xmlName": "Retention" - } } } }, @@ -10932,6 +10947,15 @@ "smithy.api#httpHeader": "Content-MD5" } }, + "Tagging": { + "target": "com.amazonaws.s3#Tagging", + "traits": { + "smithy.api#documentation": "Container for the TagSet
and Tag
elements
Container for the TagSet
and Tag
elements
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
A container that describes additional filters for identifying the source objects that\n you want to replicate. You can choose to enable or disable the replication of these\n objects. Currently, Amazon S3 supports only the filter that you can specify for objects created\n with server-side encryption using a customer master key (CMK) stored in Amazon Web Services Key Management\n Service (SSE-KMS).
" + "smithy.api#documentation": "A container that describes additional filters for identifying the source objects that\n you want to replicate. You can choose to enable or disable the replication of these\n objects. Currently, Amazon S3 supports only the filter that you can specify for objects created\n with server-side encryption using a customer managed key stored in Amazon Web Services Key Management\n Service (SSE-KMS).
" } }, "ExistingObjectReplication": { @@ -11565,6 +11580,13 @@ "smithy.api#httpQuery": "versionId" } }, + "RestoreRequest": { + "target": "com.amazonaws.s3#RestoreRequest", + "traits": { + "smithy.api#httpPayload": {}, + "smithy.api#xmlName": "RestoreRequest" + } + }, "RequestPayer": { "target": "com.amazonaws.s3#RequestPayer", "traits": { @@ -11577,13 +11599,6 @@ "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed\n customer master key (CMK) to use for encrypting inventory reports.
", + "smithy.api#documentation": "Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed key\n to use for encrypting inventory reports.
", "smithy.api#required": {} } } @@ -11831,7 +11846,7 @@ "target": "com.amazonaws.s3#SelectObjectContentOutput" }, "traits": { - "smithy.api#documentation": "This action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.
\nThis action is not supported by Amazon S3 on Outposts.
\nFor more information about Amazon S3 Select,\n see Selecting Content from\n Objects in the Amazon S3 User Guide.
\nFor more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select\n and S3 Glacier Select in the Amazon S3 User Guide.
\n \n\n Permissions\n
\nYou must have s3:GetObject
permission for this operation. Amazon S3 Select does\n not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy\n in the Amazon S3 User Guide.
\n Object Data Formats\n
\nYou can use Amazon S3 Select to query objects that have the following format\n properties:
\n\n CSV, JSON, and Parquet - Objects must be in CSV, JSON, or\n Parquet format.
\n\n UTF-8 - UTF-8 is the only encoding type Amazon S3 Select\n supports.
\n\n GZIP or BZIP2 - CSV and JSON files can be compressed using\n GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select\n supports for CSV and JSON files. Amazon S3 Select supports columnar compression for\n Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression\n for Parquet objects.
\n\n Server-side encryption - Amazon S3 Select supports querying\n objects that are protected with server-side encryption.
\nFor objects that are encrypted with customer-provided encryption keys (SSE-C), you\n must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the\n Amazon S3 User Guide.
\nFor objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and\n customer master keys (CMKs) stored in Amazon Web Services Key Management Service (SSE-KMS),\n server-side encryption is handled transparently, so you don't need to specify\n anything. For more information about server-side encryption, including SSE-S3 and\n SSE-KMS, see Protecting Data Using\n Server-Side Encryption in the Amazon S3 User Guide.
\n\n Working with the Response Body\n
\nGiven the response size is unknown, Amazon S3 Select streams the response as a series of\n messages and includes a Transfer-Encoding
header with chunked
as\n its value in the response. For more information, see Appendix: SelectObjectContent\n Response.
\n GetObject Support\n
\nThe SelectObjectContent
action does not support the following\n GetObject
functionality. For more information, see GetObject.
\n Range
: Although you can specify a scan range for an Amazon S3 Select request\n (see SelectObjectContentRequest - ScanRange in the request parameters),\n you cannot specify the range of bytes of an object to return.
GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot specify\n the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY
storage classes. For\n more information, about storage classes see Storage Classes\n in the Amazon S3 User Guide.
\n Special Errors\n
\n\nFor a list of special errors for this operation, see List of\n SELECT Object Content Error Codes\n
\n\n Related Resources\n
\n\n GetObject\n
\nThis action filters the contents of an Amazon S3 object based on a simple structured query\n language (SQL) statement. In the request, along with the SQL expression, you must also\n specify a data serialization format (JSON, CSV, or Apache Parquet) of the object. Amazon S3 uses\n this format to parse object data into records, and returns only records that match the\n specified SQL expression. You must also specify the data serialization format for the\n response.
\nThis action is not supported by Amazon S3 on Outposts.
\nFor more information about Amazon S3 Select,\n see Selecting Content from\n Objects in the Amazon S3 User Guide.
\nFor more information about using SQL with Amazon S3 Select, see SQL Reference for Amazon S3 Select\n and S3 Glacier Select in the Amazon S3 User Guide.
\n \n\n Permissions\n
\nYou must have s3:GetObject
permission for this operation. Amazon S3 Select does\n not support anonymous access. For more information about permissions, see Specifying Permissions in a Policy\n in the Amazon S3 User Guide.
\n Object Data Formats\n
\nYou can use Amazon S3 Select to query objects that have the following format\n properties:
\n\n CSV, JSON, and Parquet - Objects must be in CSV, JSON, or\n Parquet format.
\n\n UTF-8 - UTF-8 is the only encoding type Amazon S3 Select\n supports.
\n\n GZIP or BZIP2 - CSV and JSON files can be compressed using\n GZIP or BZIP2. GZIP and BZIP2 are the only compression formats that Amazon S3 Select\n supports for CSV and JSON files. Amazon S3 Select supports columnar compression for\n Parquet using GZIP or Snappy. Amazon S3 Select does not support whole-object compression\n for Parquet objects.
\n\n Server-side encryption - Amazon S3 Select supports querying\n objects that are protected with server-side encryption.
\nFor objects that are encrypted with customer-provided encryption keys (SSE-C), you\n must use HTTPS, and you must use the headers that are documented in the GetObject. For more information about SSE-C, see Server-Side Encryption\n (Using Customer-Provided Encryption Keys) in the\n Amazon S3 User Guide.
\nFor objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and\n Amazon Web Services KMS keys (SSE-KMS),\n server-side encryption is handled transparently, so you don't need to specify\n anything. For more information about server-side encryption, including SSE-S3 and\n SSE-KMS, see Protecting Data Using\n Server-Side Encryption in the Amazon S3 User Guide.
\n\n Working with the Response Body\n
\nGiven the response size is unknown, Amazon S3 Select streams the response as a series of\n messages and includes a Transfer-Encoding
header with chunked
as\n its value in the response. For more information, see Appendix: SelectObjectContent\n Response.
\n GetObject Support\n
\nThe SelectObjectContent
action does not support the following\n GetObject
functionality. For more information, see GetObject.
\n Range
: Although you can specify a scan range for an Amazon S3 Select request\n (see SelectObjectContentRequest - ScanRange in the request parameters),\n you cannot specify the range of bytes of an object to return.
GLACIER, DEEP_ARCHIVE and REDUCED_REDUNDANCY storage classes: You cannot specify\n the GLACIER, DEEP_ARCHIVE, or REDUCED_REDUNDANCY
storage classes. For\n more information, about storage classes see Storage Classes\n in the Amazon S3 User Guide.
\n Special Errors\n
\n\nFor a list of special errors for this operation, see List of\n SELECT Object Content Error Codes\n
\n\n Related Resources\n
\n\n GetObject\n
\nA container that describes additional filters for identifying the source objects that\n you want to replicate. You can choose to enable or disable the replication of these\n objects. Currently, Amazon S3 supports only the filter that you can specify for objects created\n with server-side encryption using a customer master key (CMK) stored in Amazon Web Services Key Management\n Service (SSE-KMS).
" + "smithy.api#documentation": "A container that describes additional filters for identifying the source objects that\n you want to replicate. You can choose to enable or disable the replication of these\n objects. Currently, Amazon S3 supports only the filter that you can specify for objects created\n with server-side encryption using a customer managed key stored in Amazon Web Services Key Management\n Service (SSE-KMS).
" } }, "com.amazonaws.s3#SseKmsEncryptedObjects": { @@ -12650,7 +12665,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) that was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed key that was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -12845,7 +12860,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed customer master key (CMK) was used for the object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric\n customer managed key was used for the object.
", "smithy.api#httpHeader": "x-amz-server-side-encryption-aws-kms-key-id" } }, @@ -13035,7 +13050,7 @@ "traits": { "aws.auth#unsignedPayload": {}, "smithy.api#auth": ["aws.auth#sigv4"], - "smithy.api#documentation": "Passes transformed\n objects to a GetObject
operation when using Object Lambda Access Points. For information about\n Object Lambda Access Points, see Transforming objects with\n Object Lambda Access Points in the Amazon S3 User Guide.
This operation supports metadata that can be returned by GetObject, in addition to\n RequestRoute
, RequestToken
, StatusCode
,\n ErrorCode
, and ErrorMessage
. The GetObject
\n response metadata is supported so that the WriteGetObjectResponse
caller,\n typically an Lambda function, can provide the same metadata when it internally invokes\n GetObject
. When WriteGetObjectResponse
is called by a\n customer-owned Lambda function, the metadata returned to the end user\n GetObject
call might differ from what Amazon S3 would normally return.
You can include any number of metadata headers. When including a metadata header, it should be\n prefaced with x-amz-meta
. For example, x-amz-meta-my-custom-header: MyCustomValue
.\n The primary use case for this is to forward GetObject
metadata.
Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to detect and redact\n personally identifiable information (PII) and decompress S3 objects. These Lambda functions\n are available in the Amazon Web Services Serverless Application Repository, and can be selected through the Amazon Web Services Management Console when you create your\n Object Lambda Access Point.
\nExample 1: PII Access Control - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically detects personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
\nExample 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically redacts personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
\nExample 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is equipped to decompress objects stored in S3 in one of six compressed file formats including bzip2, gzip, snappy, zlib, zstandard and ZIP.
\nFor information on how to view and use these functions, see Using Amazon Web Services built Lambda functions in the Amazon S3 User Guide.
", + "smithy.api#documentation": "Passes transformed\n objects to a GetObject
operation when using Object Lambda access points. For information about\n Object Lambda access points, see Transforming objects with\n Object Lambda access points in the Amazon S3 User Guide.
This operation supports metadata that can be returned by GetObject, in addition to\n RequestRoute
, RequestToken
, StatusCode
,\n ErrorCode
, and ErrorMessage
. The GetObject
\n response metadata is supported so that the WriteGetObjectResponse
caller,\n typically a Lambda function, can provide the same metadata when it internally invokes\n GetObject
. When WriteGetObjectResponse
is called by a\n customer-owned Lambda function, the metadata returned to the end user\n GetObject
call might differ from what Amazon S3 would normally return.
You can include any number of metadata headers. When including a metadata header, it should be\n prefaced with x-amz-meta
. For example, x-amz-meta-my-custom-header: MyCustomValue
.\n The primary use case for this is to forward GetObject
metadata.
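As a hedged sketch of that flow, a customer-owned Lambda function might forward metadata like this (the event shape follows S3 Object Lambda's getObjectContext; the transform and the Node 18+ global fetch are assumptions):

import { S3Client, WriteGetObjectResponseCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

export const handler = async (event: any) => {
  const { outputRoute, outputToken, inputS3Url } = event.getObjectContext;

  // Fetch the original object through the presigned URL, then transform it.
  const original = await (await fetch(inputS3Url)).text();
  const transformed = original.toUpperCase(); // placeholder transform

  // Return the transformed object and metadata to the GetObject caller.
  await s3.send(
    new WriteGetObjectResponseCommand({
      RequestRoute: outputRoute,
      RequestToken: outputToken,
      Body: transformed,
      StatusCode: 200,
      Metadata: { "my-custom-header": "MyCustomValue" }, // surfaced as x-amz-meta-my-custom-header
    })
  );
  return { statusCode: 200 };
};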
Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to detect and redact\n personally identifiable information (PII) and decompress S3 objects. These Lambda functions\n are available in the Amazon Web Services Serverless Application Repository, and can be selected through the Amazon Web Services Management Console when you create your\n Object Lambda access point.
\nExample 1: PII Access Control - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically detects personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
\nExample 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically redacts personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
\nExample 3: Decompression - The Lambda function S3ObjectLambdaDecompression is equipped to decompress objects stored in S3 in one of six compressed file formats, including bzip2, gzip, snappy, zlib, zstandard, and ZIP.
\nFor information on how to view and use these functions, see Using Amazon Web Services built Lambda functions in the Amazon S3 User Guide.
", "smithy.api#endpoint": { "hostPrefix": "{RequestRoute}." }, @@ -13264,7 +13279,7 @@ "SSEKMSKeyId": { "target": "com.amazonaws.s3#SSEKMSKeyId", "traits": { - "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) that was used for stored in Amazon S3 object.
", + "smithy.api#documentation": "If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed key that was used for stored in Amazon S3 object.
", "smithy.api#httpHeader": "x-amz-fwd-header-x-amz-server-side-encryption-aws-kms-key-id" } }, diff --git a/codegen/sdk-codegen/aws-models/sagemaker.2017-07-24.json b/codegen/sdk-codegen/aws-models/sagemaker.2017-07-24.json index 91003b738d30..345aab661ec1 100644 --- a/codegen/sdk-codegen/aws-models/sagemaker.2017-07-24.json +++ b/codegen/sdk-codegen/aws-models/sagemaker.2017-07-24.json @@ -4729,7 +4729,7 @@ } ], "traits": { - "smithy.api#documentation": "Creates a Domain
used by Amazon SageMaker Studio. A domain consists of an associated\n Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application,\n policy, and Amazon Virtual Private Cloud (VPC) configurations. An Amazon Web Services account is limited to one domain per region.\n Users within a domain can share notebook files and other artifacts with each other.
\n EFS storage\n
\nWhen a domain is created, an EFS volume is created for use by all of the users within the\n domain. Each user receives a private home directory within the EFS volume for notebooks,\n Git repositories, and data files.
\nSageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with\n an Amazon Web Services managed customer master key (CMK) by default. For more control, you can specify a\n customer managed CMK. For more information, see\n Protect Data at\n Rest Using Encryption.
\n\n\n VPC configuration\n
\nAll SageMaker Studio traffic between the domain and the EFS volume is through the specified\n VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType
\n parameter. AppNetworkAccessType
corresponds to the network access type that you\n choose when you onboard to Studio. The following options are available:
\n PublicInternetOnly
- Non-EFS traffic goes through a VPC managed by\n Amazon SageMaker, which allows internet access. This is the default value.
\n VpcOnly
- All Studio traffic is through the specified VPC and subnets.\n Internet access is disabled by default. To allow internet access, you must specify a\n NAT gateway.
When internet access is disabled, you won't be able to run a Studio notebook or to\n train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime\n or a NAT gateway and your security groups allow outbound connections.
\nNFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules\n in order to launch a SageMaker Studio app successfully.
\nFor more information, see\n Connect\n SageMaker Studio Notebooks to Resources in a VPC.
" + "smithy.api#documentation": "Creates a Domain
used by Amazon SageMaker Studio. A domain consists of an associated\n Amazon Elastic File System (EFS) volume, a list of authorized users, and a variety of security, application,\n policy, and Amazon Virtual Private Cloud (VPC) configurations. An Amazon Web Services account is limited to one domain per region.\n Users within a domain can share notebook files and other artifacts with each other.
\n EFS storage\n
\nWhen a domain is created, an EFS volume is created for use by all of the users within the\n domain. Each user receives a private home directory within the EFS volume for notebooks,\n Git repositories, and data files.
\nSageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with\n an Amazon Web Services managed key by default. For more control, you can specify a\n customer managed key. For more information, see\n Protect Data at\n Rest Using Encryption.
\n\n\n VPC configuration\n
\nAll SageMaker Studio traffic between the domain and the EFS volume is through the specified\n VPC and subnets. For other Studio traffic, you can specify the AppNetworkAccessType
\n parameter. AppNetworkAccessType
corresponds to the network access type that you\n choose when you onboard to Studio. The following options are available:
\n PublicInternetOnly
- Non-EFS traffic goes through a VPC managed by\n Amazon SageMaker, which allows internet access. This is the default value.
\n VpcOnly
- All Studio traffic is through the specified VPC and subnets.\n Internet access is disabled by default. To allow internet access, you must specify a\n NAT gateway.
When internet access is disabled, you won't be able to run a Studio notebook or to\n train or host models unless your VPC has an interface endpoint to the SageMaker API and runtime\n or a NAT gateway and your security groups allow outbound connections.
\nNFS traffic over TCP on port 2049 needs to be allowed in both inbound and outbound rules\n in order to launch a SageMaker Studio app successfully.
\nFor more information, see\n Connect\n SageMaker Studio Notebooks to Resources in a VPC.
" } }, "com.amazonaws.sagemaker#CreateDomainRequest": { @@ -4794,7 +4794,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "SageMaker uses Amazon Web Services KMS to encrypt the EFS volume attached to the domain with an Amazon Web Services managed\n customer master key (CMK) by default. For more control, specify a customer managed CMK.
" + "smithy.api#documentation": "SageMaker uses Amazon Web Services KMS to encrypt the EFS volume attached to the domain with an Amazon Web Services managed\n key by default. For more control, specify a customer managed key.
" } } } @@ -4878,7 +4878,7 @@ "ResourceKey": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "The CMK to use when encrypting the EBS volume the edge packaging job runs on.
" + "smithy.api#documentation": "The Amazon Web Services KMS key to use when encrypting the EBS volume the edge packaging job runs on.
" } }, "Tags": { @@ -10645,7 +10645,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "The Amazon Web Services KMS customer managed CMK used to encrypt\n the EFS volume attached to the domain.
" + "smithy.api#documentation": "The Amazon Web Services KMS customer managed key used to encrypt\n the EFS volume attached to the domain.
" } } } @@ -10729,7 +10729,7 @@ "ResourceKey": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "The CMK to use when encrypting the EBS volume the job run on.
" + "smithy.api#documentation": "The Amazon Web Services KMS key to use when encrypting the EBS volume the job run on.
" } }, "EdgePackagingJobStatus": { @@ -14256,7 +14256,7 @@ "CreatedBy": { "target": "com.amazonaws.sagemaker#UserContext", "traits": { - "smithy.api#documentation": "Who created the component.
" + "smithy.api#documentation": "Who created the trial component.
" } }, "LastModifiedTime": { @@ -15996,7 +15996,10 @@ } }, "CreatedBy": { - "target": "com.amazonaws.sagemaker#UserContext" + "target": "com.amazonaws.sagemaker#UserContext", + "traits": { + "smithy.api#documentation": "Who created the experiment.
" + } }, "LastModifiedTime": { "target": "com.amazonaws.sagemaker#Timestamp", @@ -19184,7 +19187,7 @@ "LifecycleConfigArns": { "target": "com.amazonaws.sagemaker#LifecycleConfigArns", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the KernelGatewayApp.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the the user profile or domain.
" } } }, @@ -19566,7 +19569,7 @@ "VolumeKmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume\n attached to the ML compute instance(s) that run the training and inference jobs used for\n automated data labeling.
\nYou can only specify a VolumeKmsKeyId
when you create a labeling job with\n automated data labeling enabled using the API operation CreateLabelingJob
.\n You cannot specify an Amazon Web Services KMS customer managed CMK to encrypt the storage volume used for\n automated data labeling model training and inference when you create a labeling job\n using the console. To learn more, see Output Data and Storage Volume\n Encryption.
The VolumeKmsKeyId
can be any of the following formats:
KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt data on the storage volume\n attached to the ML compute instance(s) that run the training and inference jobs used for\n automated data labeling.
\nYou can only specify a VolumeKmsKeyId
when you create a labeling job with\n automated data labeling enabled using the API operation CreateLabelingJob
.\n You cannot specify an Amazon Web Services KMS key to encrypt the storage volume used for\n automated data labeling model training and inference when you create a labeling job\n using the console. To learn more, see Output Data and Storage Volume\n Encryption.
The VolumeKmsKeyId
can be any of the following formats:
KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
A string in the training job name. This filter returns only models in the training\n job whose name contains the specified string.
" + "smithy.api#documentation": "A string in the model name. This filter returns only models whose \n name contains the specified string.
" } }, "CreationTimeBefore": { @@ -28044,7 +28047,7 @@ "KmsKeyId": { "target": "com.amazonaws.sagemaker#KmsKeyId", "traits": { - "smithy.api#documentation": "The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using\n Amazon S3 server-side encryption. The KmsKeyId
can be any of the following\n formats:
// KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// KMS Key Alias
\n\n \"alias/ExampleAlias\"
\n
// Amazon Resource Name (ARN) of a KMS Key Alias
\n\n \"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"
\n
If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must\n include permissions to call kms:Encrypt
. If you don't provide a KMS key ID,\n Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side\n encryption with KMS-managed keys for OutputDataConfig
. If you use a bucket\n policy with an s3:PutObject
permission that only allows objects with\n server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption
to \"aws:kms\"
. For more\n information, see KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer Guide.\n
The KMS key policy must grant permission to the IAM role that you specify in your\n CreateTrainingJob
, CreateTransformJob
, or\n CreateHyperParameterTuningJob
requests. For more information, see\n Using\n Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer\n Guide.
The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the model artifacts at rest using\n Amazon S3 server-side encryption. The KmsKeyId
can be any of the following\n formats:
// KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// KMS Key Alias
\n\n \"alias/ExampleAlias\"
\n
// Amazon Resource Name (ARN) of a KMS Key Alias
\n\n \"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"
\n
If you use a KMS key ID or an alias of your KMS key, the Amazon SageMaker execution role must\n include permissions to call kms:Encrypt
. If you don't provide a KMS key ID,\n Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side\n encryption with KMS-managed keys for OutputDataConfig
. If you use a bucket\n policy with an s3:PutObject
permission that only allows objects with\n server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption
to \"aws:kms\"
. For more\n information, see KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer Guide.\n
The KMS key policy must grant permission to the IAM role that you specify in your\n CreateTrainingJob
, CreateTransformJob
, or\n CreateHyperParameterTuningJob
requests. For more information, see\n Using\n Key Policies in Amazon Web Services KMS in the Amazon Web Services Key Management Service Developer\n Guide.
The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the core dump data at rest using\n Amazon S3 server-side encryption. The KmsKeyId
can be any of the following\n formats:
// KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// KMS Key Alias
\n\n \"alias/ExampleAlias\"
\n
// Amazon Resource Name (ARN) of a KMS Key Alias
\n\n \"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"
\n
If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must\n include permissions to call kms:Encrypt
. If you don't provide a KMS key ID,\n Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side\n encryption with KMS-managed keys for OutputDataConfig
. If you use a bucket\n policy with an s3:PutObject
permission that only allows objects with\n server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption
to \"aws:kms\"
. For more\n information, see KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer Guide.\n
The KMS key policy must grant permission to the IAM role that you specify in your\n CreateEndpoint
and UpdateEndpoint
requests. For more\n information, see Using Key Policies in Amazon Web Services\n KMS in the Amazon Web Services Key Management Service Developer Guide.
The Amazon Web Services Key Management Service (Amazon Web Services KMS) key that Amazon SageMaker uses to encrypt the core dump data at rest using\n Amazon S3 server-side encryption. The KmsKeyId
can be any of the following\n formats:
// KMS Key ID
\n\n \"1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// Amazon Resource Name (ARN) of a KMS Key
\n\n \"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab\"
\n
// KMS Key Alias
\n\n \"alias/ExampleAlias\"
\n
// Amazon Resource Name (ARN) of a KMS Key Alias
\n\n \"arn:aws:kms:us-west-2:111122223333:alias/ExampleAlias\"
\n
If you use a KMS key ID or an alias of your KMS key, the Amazon SageMaker execution role must\n include permissions to call kms:Encrypt
. If you don't provide a KMS key ID,\n Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side\n encryption with KMS-managed keys for OutputDataConfig
. If you use a bucket\n policy with an s3:PutObject
permission that only allows objects with\n server-side encryption, set the condition key of\n s3:x-amz-server-side-encryption
to \"aws:kms\"
. For more\n information, see KMS-Managed Encryption\n Keys in the Amazon Simple Storage Service Developer Guide.\n
The KMS key policy must grant permission to the IAM role that you specify in your\n CreateEndpoint
and UpdateEndpoint
requests. For more\n information, see Using Key Policies in Amazon Web Services\n KMS in the Amazon Web Services Key Management Service Developer Guide.
The Amazon Resource Name (ARN) of the project.
" + } + }, + "ProjectName": { + "target": "com.amazonaws.sagemaker#ProjectEntityName", + "traits": { + "smithy.api#documentation": "The name of the project.
" + } + }, + "ProjectId": { + "target": "com.amazonaws.sagemaker#ProjectId", + "traits": { + "smithy.api#documentation": "The ID of the project.
" + } + }, + "ProjectDescription": { + "target": "com.amazonaws.sagemaker#EntityDescription", + "traits": { + "smithy.api#documentation": "The description of the project.
" + } + }, + "ServiceCatalogProvisioningDetails": { + "target": "com.amazonaws.sagemaker#ServiceCatalogProvisioningDetails" + }, + "ServiceCatalogProvisionedProductDetails": { + "target": "com.amazonaws.sagemaker#ServiceCatalogProvisionedProductDetails" + }, + "ProjectStatus": { + "target": "com.amazonaws.sagemaker#ProjectStatus", + "traits": { + "smithy.api#documentation": "The status of the project.
" + } + }, + "CreatedBy": { + "target": "com.amazonaws.sagemaker#UserContext", + "traits": { + "smithy.api#documentation": "Who created the project.
" + } + }, + "CreationTime": { + "target": "com.amazonaws.sagemaker#Timestamp", + "traits": { + "smithy.api#documentation": "A timestamp specifying when the project was created.
" + } + }, + "Tags": { + "target": "com.amazonaws.sagemaker#TagList", + "traits": { + "smithy.api#documentation": "An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in\n different ways, for example, by purpose, owner, or environment. For more information,\n see Tagging Amazon Web Services\n Resources.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The properties of a project as returned by the Search API.
" + } + }, "com.amazonaws.sagemaker#ProjectArn": { "type": "string", "traits": { @@ -31249,7 +31314,7 @@ "LifecycleConfigArn": { "target": "com.amazonaws.sagemaker#StudioLifecycleConfigArn", "traits": { - "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the Resource.
" + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.
" } } }, @@ -31300,6 +31365,10 @@ { "value": "FeatureGroup", "name": "FEATURE_GROUP" + }, + { + "value": "Project", + "name": "PROJECT" } ] } @@ -31349,6 +31418,60 @@ ] } }, + "com.amazonaws.sagemaker#RetryPipelineExecution": { + "type": "operation", + "input": { + "target": "com.amazonaws.sagemaker#RetryPipelineExecutionRequest" + }, + "output": { + "target": "com.amazonaws.sagemaker#RetryPipelineExecutionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.sagemaker#ConflictException" + }, + { + "target": "com.amazonaws.sagemaker#ResourceLimitExceeded" + }, + { + "target": "com.amazonaws.sagemaker#ResourceNotFound" + } + ], + "traits": { + "smithy.api#documentation": "Retry the execution of the pipeline.
" + } + }, + "com.amazonaws.sagemaker#RetryPipelineExecutionRequest": { + "type": "structure", + "members": { + "PipelineExecutionArn": { + "target": "com.amazonaws.sagemaker#PipelineExecutionArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the pipeline execution.
", + "smithy.api#required": {} + } + }, + "ClientRequestToken": { + "target": "com.amazonaws.sagemaker#IdempotencyToken", + "traits": { + "smithy.api#documentation": "A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n operation. An idempotent operation completes no more than once.
", + "smithy.api#idempotencyToken": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.sagemaker#RetryPipelineExecutionResponse": { + "type": "structure", + "members": { + "PipelineExecutionArn": { + "target": "com.amazonaws.sagemaker#PipelineExecutionArn", + "traits": { + "smithy.api#documentation": "The Amazon Resource Name (ARN) of the pipeline execution.
" + } + } + } + }, "com.amazonaws.sagemaker#RetryStrategy": { "type": "structure", "members": { @@ -32148,6 +32271,9 @@ { "target": "com.amazonaws.sagemaker#RenderUiTemplate" }, + { + "target": "com.amazonaws.sagemaker#RetryPipelineExecution" + }, { "target": "com.amazonaws.sagemaker#Search" }, @@ -32468,6 +32594,12 @@ }, "FeatureGroup": { "target": "com.amazonaws.sagemaker#FeatureGroup" + }, + "Project": { + "target": "com.amazonaws.sagemaker#Project", + "traits": { + "smithy.api#documentation": "The properties of a project.
" + } } }, "traits": { @@ -32874,7 +33006,7 @@ } }, "traits": { - "smithy.api#documentation": "Details that you specify to provision a service catalog product. For information about\n service catalog, see .What is Amazon Web Services Service\n Catalog.
" + "smithy.api#documentation": "Details that you specify to provision a service catalog product. For information about\n service catalog, see What is Amazon Web Services Service\n Catalog.
" } }, "com.amazonaws.sagemaker#SessionExpirationDurationInSeconds": { @@ -33329,7 +33461,7 @@ "ClientRequestToken": { "target": "com.amazonaws.sagemaker#IdempotencyToken", "traits": { - "smithy.api#documentation": "A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n operation. An idempotent operation completes no more than one time.
", + "smithy.api#documentation": "A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n operation. An idempotent operation completes no more than once.
", "smithy.api#idempotencyToken": {}, "smithy.api#required": {} } @@ -33603,7 +33735,7 @@ "ClientRequestToken": { "target": "com.amazonaws.sagemaker#IdempotencyToken", "traits": { - "smithy.api#documentation": "A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n operation. An idempotent operation completes no more than one time.
", + "smithy.api#documentation": "A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n operation. An idempotent operation completes no more than once.
", "smithy.api#idempotencyToken": {}, "smithy.api#required": {} } @@ -35916,7 +36048,10 @@ } }, "CreatedBy": { - "target": "com.amazonaws.sagemaker#UserContext" + "target": "com.amazonaws.sagemaker#UserContext", + "traits": { + "smithy.api#documentation": "Who created the trial.
" + } }, "LastModifiedTime": { "target": "com.amazonaws.sagemaker#Timestamp", @@ -36006,7 +36141,10 @@ } }, "CreatedBy": { - "target": "com.amazonaws.sagemaker#UserContext" + "target": "com.amazonaws.sagemaker#UserContext", + "traits": { + "smithy.api#documentation": "Who created the trial component.
" + } }, "LastModifiedTime": { "target": "com.amazonaws.sagemaker#Timestamp", @@ -36461,7 +36599,7 @@ "CreatedBy": { "target": "com.amazonaws.sagemaker#UserContext", "traits": { - "smithy.api#documentation": "Who created the component.
" + "smithy.api#documentation": "Who created the trial component.
" } }, "LastModifiedTime": { @@ -38083,7 +38221,7 @@ } }, "traits": { - "smithy.api#documentation": "Information about the user who created or modified an experiment, trial, or trial\n component.
" + "smithy.api#documentation": "Information about the user who created or modified an experiment, trial, trial\n component, or project.
" } }, "com.amazonaws.sagemaker#UserProfileArn": { diff --git a/codegen/sdk-codegen/aws-models/transcribe-streaming.2017-10-26.json b/codegen/sdk-codegen/aws-models/transcribe-streaming.2017-10-26.json index 7e229e655432..88adb442a70e 100644 --- a/codegen/sdk-codegen/aws-models/transcribe-streaming.2017-10-26.json +++ b/codegen/sdk-codegen/aws-models/transcribe-streaming.2017-10-26.json @@ -43,6 +43,12 @@ "traits": { "smithy.api#documentation": "One or more alternative interpretations of the input audio.
" } + }, + "Entities": { + "target": "com.amazonaws.transcribestreaming#EntityList", + "traits": { + "smithy.api#documentation": "Contains the entities identified as personally identifiable information (PII) in the transcription output.
" + } } }, "traits": { @@ -70,7 +76,7 @@ } }, "traits": { - "smithy.api#documentation": "Provides a wrapper for the audio chunks that you are sending.
\nFor information on audio encoding in Amazon Transcribe, see input. For information\n on audio encoding formats in Amazon Transcribe Medical, see input-med.
" + "smithy.api#documentation": "Provides a wrapper for the audio chunks that you are sending.
\nFor information on audio encoding in Amazon Transcribe, see \n Speech input. For information\n on audio encoding formats in Amazon Transcribe Medical, see \n Speech input.
" } }, "com.amazonaws.transcribestreaming#AudioStream": { @@ -79,7 +85,7 @@ "AudioEvent": { "target": "com.amazonaws.transcribestreaming#AudioEvent", "traits": { - "smithy.api#documentation": "A blob of audio from your application. You audio stream consists of one or more audio\n events.
\nFor information on audio encoding formats in Amazon Transcribe, see input. For\n information on audio encoding formats in Amazon Transcribe Medical, see input-med.
\nFor more information on stream encoding in Amazon Transcribe, see event-stream. For\n information on stream encoding in Amazon Transcribe Medical, see event-stream-med.
" + "smithy.api#documentation": "A blob of audio from your application. You audio stream consists of one or more audio\n events.
\nFor information on audio encoding formats in Amazon Transcribe, see Speech input. For\n information on audio encoding formats in Amazon Transcribe Medical, see Speech input.
\nFor more information on stream encoding in Amazon Transcribe, see Event stream encoding. For\n information on stream encoding in Amazon Transcribe Medical, see Event stream encoding.
" } } }, @@ -123,9 +129,81 @@ "smithy.api#httpError": 409 } }, + "com.amazonaws.transcribestreaming#ContentIdentificationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PII", + "name": "PII" + } + ] + } + }, + "com.amazonaws.transcribestreaming#ContentRedactionType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PII", + "name": "PII" + } + ] + } + }, "com.amazonaws.transcribestreaming#Double": { "type": "double" }, + "com.amazonaws.transcribestreaming#Entity": { + "type": "structure", + "members": { + "StartTime": { + "target": "com.amazonaws.transcribestreaming#Double", + "traits": { + "smithy.api#documentation": "The start time of speech that was identified as PII.
" + } + }, + "EndTime": { + "target": "com.amazonaws.transcribestreaming#Double", + "traits": { + "smithy.api#documentation": "The end time of speech that was identified as PII.
" + } + }, + "Category": { + "target": "com.amazonaws.transcribestreaming#String", + "traits": { + "smithy.api#documentation": "The category of of information identified in this entity; for example, PII.
" + } + }, + "Type": { + "target": "com.amazonaws.transcribestreaming#String", + "traits": { + "smithy.api#documentation": "The type of PII identified in this entity; for example, name or credit card number.
" + } + }, + "Content": { + "target": "com.amazonaws.transcribestreaming#String", + "traits": { + "smithy.api#documentation": "The words in the transcription output that have been identified as a PII entity.
" + } + }, + "Confidence": { + "target": "com.amazonaws.transcribestreaming#Confidence", + "traits": { + "smithy.api#documentation": "A value between zero and one that Amazon Transcribe assigns to PII identified in the source audio. Larger values indicate a higher confidence in PII identification.
" + } + } + }, + "traits": { + "smithy.api#documentation": "The entity identified as personally identifiable information (PII).
" + } + }, + "com.amazonaws.transcribestreaming#EntityList": { + "type": "list", + "member": { + "target": "com.amazonaws.transcribestreaming#Entity" + } + }, "com.amazonaws.transcribestreaming#InternalFailureException": { "type": "structure", "members": { @@ -586,6 +664,16 @@ ] } }, + "com.amazonaws.transcribestreaming#PiiEntityTypes": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 300 + }, + "smithy.api#pattern": "^[A-Z_, ]+$" + } + }, "com.amazonaws.transcribestreaming#RequestId": { "type": "string" }, @@ -659,7 +747,7 @@ "min": 36, "max": 36 }, - "smithy.api#pattern": "[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}" + "smithy.api#pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$" } }, "com.amazonaws.transcribestreaming#Specialty": { @@ -747,7 +835,7 @@ "MediaSampleRateHertz": { "target": "com.amazonaws.transcribestreaming#MediaSampleRateHertz", "traits": { - "smithy.api#documentation": "The sample rate of the input audio in Hertz. Sample rates of 16000 Hz or higher are\n accepted.
", + "smithy.api#documentation": "The sample rate of the input audio in Hertz.
", "smithy.api#httpHeader": "x-amzn-transcribe-sample-rate", "smithy.api#required": {} } @@ -847,7 +935,7 @@ "MediaSampleRateHertz": { "target": "com.amazonaws.transcribestreaming#MediaSampleRateHertz", "traits": { - "smithy.api#documentation": "The sample rate of the input audio in Hertz. Valid value: 16000 Hz.
", + "smithy.api#documentation": "The sample rate of the input audio in Hertz.
", "smithy.api#httpHeader": "x-amzn-transcribe-sample-rate" } }, @@ -949,7 +1037,7 @@ } ], "traits": { - "smithy.api#documentation": "Starts a bidirectional HTTP2 stream where audio is streamed to Amazon Transcribe and the transcription\n results are streamed to your application.
\nThe following are encoded as HTTP2 headers:
\nx-amzn-transcribe-language-code
\nx-amzn-transcribe-media-encoding
\nx-amzn-transcribe-sample-rate
\nx-amzn-transcribe-session-id
\nStarts a bidirectional HTTP/2 stream where audio is streamed to Amazon Transcribe and the transcription\n results are streamed to your application.
\nThe following are encoded as HTTP/2 headers:
\nx-amzn-transcribe-language-code
\nx-amzn-transcribe-media-encoding
\nx-amzn-transcribe-sample-rate
\nx-amzn-transcribe-session-id
\nSee the SDK for Go API Reference for more detail.
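A minimal sketch with the generated TypeScript client, inside an async function; the headers above map onto the parameters below, and the PCM chunks are assumed to come from your own capture code:

import {
  TranscribeStreamingClient,
  StartStreamTranscriptionCommand,
} from "@aws-sdk/client-transcribe-streaming";

// Assumed source of raw PCM chunks; each one is wrapped in an AudioEvent.
async function* audioStream(chunks: Uint8Array[]) {
  for (const chunk of chunks) yield { AudioEvent: { AudioChunk: chunk } };
}

const client = new TranscribeStreamingClient({});
const response = await client.send(
  new StartStreamTranscriptionCommand({
    LanguageCode: "en-US", // x-amzn-transcribe-language-code
    MediaEncoding: "pcm", // x-amzn-transcribe-media-encoding
    MediaSampleRateHertz: 16000, // x-amzn-transcribe-sample-rate
    AudioStream: audioStream(myPcmChunks), // myPcmChunks: assumed Uint8Array[]
  })
);

for await (const event of response.TranscriptResultStream ?? []) {
  for (const result of event.TranscriptEvent?.Transcript?.Results ?? []) {
    if (!result.IsPartial) console.log(result.Alternatives?.[0]?.Transcript);
  }
}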
", "smithy.api#http": { "method": "POST", "uri": "/stream-transcription", @@ -971,7 +1059,7 @@ "MediaSampleRateHertz": { "target": "com.amazonaws.transcribestreaming#MediaSampleRateHertz", "traits": { - "smithy.api#documentation": "The sample rate, in Hertz, of the input audio. We suggest that you use 8000 Hz for low\n quality audio and 16000 Hz for high quality audio.
", + "smithy.api#documentation": "The sample rate, in Hertz, of the input audio. We suggest that you use 8,000 Hz for low\n quality audio and 16,000 Hz for high quality audio.
", "smithy.api#httpHeader": "x-amzn-transcribe-sample-rate", "smithy.api#required": {} } @@ -1001,7 +1089,7 @@ "AudioStream": { "target": "com.amazonaws.transcribestreaming#AudioStream", "traits": { - "smithy.api#documentation": "PCM-encoded stream of audio blobs. The audio stream is encoded as an HTTP2 data\n frame.
", + "smithy.api#documentation": "PCM-encoded stream of audio blobs. The audio stream is encoded as an HTTP/2 data\n frame.
", "smithy.api#httpPayload": {}, "smithy.api#required": {} } @@ -1009,14 +1097,14 @@ "VocabularyFilterName": { "target": "com.amazonaws.transcribestreaming#VocabularyFilterName", "traits": { - "smithy.api#documentation": "The name of the vocabulary filter you've created that is unique to your AWS account.\n Provide the name in this field to successfully use it in a stream.
", + "smithy.api#documentation": "The name of the vocabulary filter you've created that is unique to your account.\n Provide the name in this field to successfully use it in a stream.
", "smithy.api#httpHeader": "x-amzn-transcribe-vocabulary-filter-name" } }, "VocabularyFilterMethod": { "target": "com.amazonaws.transcribestreaming#VocabularyFilterMethod", "traits": { - "smithy.api#documentation": "The manner in which you use your vocabulary filter to filter words in your transcript.\n Remove
removes filtered words from your transcription results.\n Mask
masks those words with a ***
in your transcription results.\n Tag
keeps the filtered words in your transcription results and tags them. The\n tag appears as VocabularyFilterMatch
equal to True
\n
The manner in which you use your vocabulary filter to filter words in your transcript.\n Remove
removes filtered words from your transcription results.\n Mask
masks filtered words with a ***
in your transcription results.\n Tag
keeps the filtered words in your transcription results and tags them. The\n tag appears as VocabularyFilterMatch
equal to True
\n
You can use this field to set the stability level of the transcription results. A higher\n stability level means that the transcription results are less likely to change. Higher\n stability levels can come with lower overall transcription accuracy.
", "smithy.api#httpHeader": "x-amzn-transcribe-partial-results-stability" } + }, + "ContentIdentificationType": { + "target": "com.amazonaws.transcribestreaming#ContentIdentificationType", + "traits": { + "smithy.api#documentation": "Set this field to PII to identify personally identifiable information (PII) in the transcription output. Content identification is performed only upon complete transcription of the audio segments.
\nYou can’t set both ContentIdentificationType
and ContentRedactionType
in the same request. If you set both, your request returns a BadRequestException
.
Set this field to PII to redact personally identifiable information (PII) in the transcription output. Content redaction is performed only upon complete transcription of the audio segments.
\nYou can’t set both ContentRedactionType
and ContentIdentificationType
in the same request. If you set both, your request returns a BadRequestException
.
List the PII entity types you want to identify or redact. In order to specify entity types, you must have \n either ContentIdentificationType
or ContentRedactionType
enabled.
\n PiiEntityTypes
must be comma-separated; the available values are:\n BANK_ACCOUNT_NUMBER
, BANK_ROUTING
,\n CREDIT_DEBIT_NUMBER
, CREDIT_DEBIT_CVV
, \n CREDIT_DEBIT_EXPIRY
, PIN
, EMAIL
, \n ADDRESS
, NAME
, PHONE
, \n SSN
, and ALL
.
\n PiiEntityTypes
is an optional parameter with a default value of ALL
.
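For example, extending the streaming request sketched earlier to redact two entity types (set either redaction or identification, never both):

const command = new StartStreamTranscriptionCommand({
  LanguageCode: "en-US",
  MediaEncoding: "pcm",
  MediaSampleRateHertz: 16000,
  AudioStream: audioStream(myPcmChunks), // assumed, as in the earlier sketch
  ContentRedactionType: "PII", // or ContentIdentificationType: "PII", not both
  PiiEntityTypes: "SSN,CREDIT_DEBIT_NUMBER", // comma-separated; omit to default to ALL
});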
The sample rate for the input audio stream. Use 8000 Hz for low quality audio and 16000 Hz\n for high quality audio.
", + "smithy.api#documentation": "The sample rate for the input audio stream. Use 8,000 Hz for low quality audio and 16,000 Hz\n for high quality audio.
", "smithy.api#httpHeader": "x-amzn-transcribe-sample-rate" } }, @@ -1157,6 +1266,27 @@ "smithy.api#documentation": "If partial results stabilization has been enabled in the stream, shows the stability\n level.
", "smithy.api#httpHeader": "x-amzn-transcribe-partial-results-stability" } + }, + "ContentIdentificationType": { + "target": "com.amazonaws.transcribestreaming#ContentIdentificationType", + "traits": { + "smithy.api#documentation": "Shows whether content identification was enabled in this stream.
", + "smithy.api#httpHeader": "x-amzn-transcribe-content-identification-type" + } + }, + "ContentRedactionType": { + "target": "com.amazonaws.transcribestreaming#ContentRedactionType", + "traits": { + "smithy.api#documentation": "Shows whether content redaction was enabled in this stream.
", + "smithy.api#httpHeader": "x-amzn-transcribe-content-redaction-type" + } + }, + "PiiEntityTypes": { + "target": "com.amazonaws.transcribestreaming#PiiEntityTypes", + "traits": { + "smithy.api#documentation": "Lists the PII entity types you specified in your request.
", + "smithy.api#httpHeader": "x-amzn-transcribe-pii-entity-types" + } } } }, @@ -1224,7 +1354,7 @@ "TranscriptEvent": { "target": "com.amazonaws.transcribestreaming#TranscriptEvent", "traits": { - "smithy.api#documentation": "A portion of the transcription of the audio stream. Events are sent periodically from\n Amazon Transcribe to your application. The event can be a partial transcription of a section of the audio\n stream, or it can be the entire transcription of that portion of the audio stream.\n
" + "smithy.api#documentation": "A portion of the transcription of the audio stream. Events are sent periodically from\n Amazon Transcribe to your application. The event can be a partial transcription of a section of the audio\n stream, or it can be the entire transcription of that portion of the audio stream.
" } }, "BadRequestException": { @@ -1304,7 +1434,7 @@ "min": 1, "max": 200 }, - "smithy.api#pattern": "^[0-9a-zA-Z._-]+" + "smithy.api#pattern": "^[0-9a-zA-Z._-]+$" } }, "com.amazonaws.transcribestreaming#VocabularyName": { @@ -1314,7 +1444,7 @@ "min": 1, "max": 200 }, - "smithy.api#pattern": "^[0-9a-zA-Z._-]+" + "smithy.api#pattern": "^[0-9a-zA-Z._-]+$" } } } diff --git a/codegen/sdk-codegen/aws-models/transcribe.2017-10-26.json b/codegen/sdk-codegen/aws-models/transcribe.2017-10-26.json index 72f39f0c92ab..d4e7628d5857 100644 --- a/codegen/sdk-codegen/aws-models/transcribe.2017-10-26.json +++ b/codegen/sdk-codegen/aws-models/transcribe.2017-10-26.json @@ -4045,6 +4045,12 @@ "smithy.api#documentation": "An object containing a list of languages that might be present in your collection of audio files. Automatic language\n identification chooses a language that best matches the source audio from that list.
\nTo transcribe speech in Modern Standard Arabic (ar-SA), your audio or video file must be encoded at a sample \n rate of 16,000 Hz or higher.
" } }, + "Subtitles": { + "target": "com.amazonaws.transcribe#Subtitles", + "traits": { + "smithy.api#documentation": "Add subtitles to your batch transcription job.
" + } + }, "Tags": { "target": "com.amazonaws.transcribe#TagList", "traits": { @@ -4078,6 +4084,67 @@ } } }, + "com.amazonaws.transcribe#SubtitleFileUris": { + "type": "list", + "member": { + "target": "com.amazonaws.transcribe#Uri" + } + }, + "com.amazonaws.transcribe#SubtitleFormat": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "vtt", + "name": "VTT" + }, + { + "value": "srt", + "name": "SRT" + } + ] + } + }, + "com.amazonaws.transcribe#SubtitleFormats": { + "type": "list", + "member": { + "target": "com.amazonaws.transcribe#SubtitleFormat" + } + }, + "com.amazonaws.transcribe#Subtitles": { + "type": "structure", + "members": { + "Formats": { + "target": "com.amazonaws.transcribe#SubtitleFormats", + "traits": { + "smithy.api#documentation": "Specify the output format for your subtitle file.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Generate subtitles for your batch transcription job.
" + } + }, + "com.amazonaws.transcribe#SubtitlesOutput": { + "type": "structure", + "members": { + "Formats": { + "target": "com.amazonaws.transcribe#SubtitleFormats", + "traits": { + "smithy.api#documentation": "Specify the output format for your subtitle file; if you select both SRT and VTT formats, two output files are genereated.
" + } + }, + "SubtitleFileUris": { + "target": "com.amazonaws.transcribe#SubtitleFileUris", + "traits": { + "smithy.api#documentation": "Choose the output location for your subtitle file. This location must be an S3 bucket.
" + } + } + }, + "traits": { + "smithy.api#documentation": "Specify the output format for your subtitle file.
" + } + }, "com.amazonaws.transcribe#Tag": { "type": "structure", "members": { @@ -4551,6 +4618,12 @@ "traits": { "smithy.api#documentation": "A key:value pair assigned to a given transcription job.
" } + }, + "Subtitles": { + "target": "com.amazonaws.transcribe#SubtitlesOutput", + "traits": { + "smithy.api#documentation": "Generate subtitles for your batch transcription job.
" + } } }, "traits": { diff --git a/codegen/sdk-codegen/aws-models/wafv2.2019-07-29.json b/codegen/sdk-codegen/aws-models/wafv2.2019-07-29.json index b47df468416c..a445c0e7106a 100644 --- a/codegen/sdk-codegen/aws-models/wafv2.2019-07-29.json +++ b/codegen/sdk-codegen/aws-models/wafv2.2019-07-29.json @@ -229,14 +229,14 @@ "type": "structure", "members": {}, "traits": { - "smithy.api#documentation": "Inspect all of the elements that WAF has parsed and extracted from the web request\n JSON body that are within the JsonBody\n MatchScope
. This is used with the FieldToMatch option\n JsonBody
.\n \n
This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
" + "smithy.api#documentation": "Inspect all of the elements that WAF has parsed and extracted from the web request\n JSON body that are within the JsonBody\n MatchScope
. This is used with the FieldToMatch option\n JsonBody
.\n \n
This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
\nJSON specification: \"All\": {}
\n
All query arguments of a web request.
\nThis is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
" + "smithy.api#documentation": "All query arguments of a web request.
\nThis is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
\nJSON specification: \"AllQueryArguments\": {}
\n
The body of a web request. This immediately follows the request headers.
\nThis is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
" + "smithy.api#documentation": "The body of a web request. This immediately follows the request headers.
\nThis is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
\nJSON specification: \"Body\": {}
\n
The part of a web request that you want WAF to inspect. Include the single\n FieldToMatch
type that you want to inspect, with additional specifications\n as needed, according to the type. You specify a single request component in\n FieldToMatch
for each rule statement that requires it. To inspect more than\n one component of a web request, create a separate rule statement for each component.
The part of a web request that you want WAF to inspect. Include the single FieldToMatch
type that you want to inspect, with additional specifications as needed, according to the type. You specify a single request component in FieldToMatch
for each rule statement that requires it. To inspect more than one component of a web request, create a separate rule statement for each component.
JSON specification for a QueryString
field to match:
\n \"FieldToMatch\": { \"QueryString\": {} }
\n
Example JSON for a Method
field to match specification:
\n \"FieldToMatch\": { \"Method\": { \"Name\": \"DELETE\" } }
\n
A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
\nYou cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. It can only be referenced as a top-level statement within a rule.
A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
\nYou cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. You \n can only use a rule group reference statement at the top level inside a web ACL.
Retrieves the specified managed rule set.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Retrieves the specified managed rule set.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Retrieves the keys that are currently blocked by a rate-based rule. The maximum number\n of managed keys that can be blocked for a single rate-based rule is 10,000. If more than\n 10,000 addresses exceed the rate limit, those with the highest rates are blocked.
" + "smithy.api#documentation": "Retrieves the keys that are currently blocked by a rate-based rule instance. The maximum number of managed keys that can be blocked for a single rate-based rule instance is 10,000. If more than 10,000 addresses exceed the rate limit, those with the highest rates are blocked.
\nFor a rate-based rule that you've defined inside a rule group, provide the name of the rule group reference statement in your request, in addition to the rate-based rule name and the web ACL name.
\nWAF monitors web requests and manages keys independently for each unique combination of web ACL, optional rule group, and rate-based rule. For example, if you define a rate-based rule inside a rule group, and then use the rule group in a web ACL, WAF monitors web requests and manages keys for that web ACL, rule group reference statement, and rate-based rule instance. If you use the same rule group in a second web ACL, WAF monitors web requests and manages keys for this second usage completely independent of your first.
" } }, "com.amazonaws.wafv2#GetRateBasedStatementManagedKeysRequest": { @@ -3368,10 +3368,16 @@ "smithy.api#required": {} } }, + "RuleGroupRuleName": { + "target": "com.amazonaws.wafv2#EntityName", + "traits": { + "smithy.api#documentation": "The name of the rule group reference statement in your web ACL. This is required only when you have the rate-based rule nested \ninside a rule group.
" + } + }, "RuleName": { "target": "com.amazonaws.wafv2#EntityName", "traits": { - "smithy.api#documentation": "The name of the rate-based rule to get the keys for.
", + "smithy.api#documentation": "The name of the rate-based rule to get the keys for. If you have the rule defined inside a rule group that you're using in your web ACL, also provide the name of the rule group reference statement in the request parameter RuleGroupRuleName
.
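A sketch of the call for a rate-based rule nested inside a rule group; all names and the ID are placeholders:

import {
  WAFV2Client,
  GetRateBasedStatementManagedKeysCommand,
} from "@aws-sdk/client-wafv2";

const wafv2 = new WAFV2Client({});
const keys = await wafv2.send(
  new GetRateBasedStatementManagedKeysCommand({
    Scope: "REGIONAL",
    WebACLName: "my-web-acl", // placeholder
    WebACLId: "a1b2c3d4-5678-90ab-cdef-EXAMPLE11111", // placeholder
    RuleGroupRuleName: "my-rule-group-reference", // required only for nested rate-based rules
    RuleName: "my-rate-based-rule",
  })
);
console.log(keys.ManagedKeysIPV4?.Addresses);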
The body of a web request, inspected as JSON. The body immediately follows the request\n headers. This is used in the FieldToMatch specification.
\nUse the specifications in this object to indicate which parts of the JSON body to\n inspect using the rule's inspection criteria. WAF inspects only the parts of the JSON\n that result from the matches that you\n indicate.\n
" + "smithy.api#documentation": "The body of a web request, inspected as JSON. The body immediately follows the request\n headers. This is used in the FieldToMatch specification.
\nUse the specifications in this object to indicate which parts of the JSON body to\n inspect using the rule's inspection criteria. WAF inspects only the parts of the JSON\n that result from the matches that you\n indicate.\n
\nExample JSON: \"JsonBody\": { \"MatchPattern\": { \"All\": {} }, \"MatchScope\": \"ALL\" }
\n
Retrieves an array of managed rule groups that are available for you to use. This list\n includes all Amazon Web Services Managed Rules rule groups and all of the Marketplace managed rule groups that you're\n subscribed to.
" + "smithy.api#documentation": "Retrieves an array of managed rule groups that are available for you to use. This list\n includes all Amazon Web Services Managed Rules rule groups and all of the Amazon Web Services Marketplace managed rule groups that you're\n subscribed to.
" } }, "com.amazonaws.wafv2#ListAvailableManagedRuleGroupsRequest": { @@ -4491,7 +4497,7 @@ } ], "traits": { - "smithy.api#documentation": "Retrieves the managed rule sets that you own.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Retrieves the managed rule sets that you own.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
The parts of the request that you want to keep out of the logs. For example, if you\n redact the HEADER
field, the HEADER
field in the firehose will be\n xxx
.
You must use one of the following values: URI
,\n QUERY_STRING
, HEADER
, or METHOD
.
The parts of the request that you want to keep out of the logs. For\n example, if you redact the SingleHeader
field, the HEADER
field in the firehose will be xxx
.
You can specify only the following fields for redaction: UriPath
, QueryString
, SingleHeader
, Method
, and JsonBody
.
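For instance, a logging configuration that redacts one header and the query string might look like this sketch (both ARNs are placeholders):

import { WAFV2Client, PutLoggingConfigurationCommand } from "@aws-sdk/client-wafv2";

const wafv2 = new WAFV2Client({});
await wafv2.send(
  new PutLoggingConfigurationCommand({
    LoggingConfiguration: {
      ResourceArn:
        "arn:aws:wafv2:us-west-2:111122223333:regional/webacl/my-web-acl/a1b2c3d4-5678-90ab-cdef-EXAMPLE11111", // placeholder
      LogDestinationConfigs: [
        "arn:aws:firehose:us-west-2:111122223333:deliverystream/aws-waf-logs-example", // placeholder
      ],
      // Only UriPath, QueryString, SingleHeader, Method, and JsonBody may be redacted.
      RedactedFields: [
        { SingleHeader: { Name: "authorization" } },
        { QueryString: {} },
      ],
    },
  })
);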
The description of the managed rule group, provided by Amazon Web Services Managed Rules or the Marketplace seller who manages it.
" + "smithy.api#documentation": "The description of the managed rule group, provided by Amazon Web Services Managed Rules or the Amazon Web Services Marketplace seller who manages it.
" } } }, "traits": { - "smithy.api#documentation": "High-level information about a managed rule group, returned by ListAvailableManagedRuleGroups. This provides information like the name and vendor name, that you provide when you add a ManagedRuleGroupStatement to a web ACL. Managed rule groups include Amazon Web Services Managed Rules rule groups, which are free of charge to WAF customers, and Marketplace managed rule groups, which you can subscribe to through Marketplace.
" + "smithy.api#documentation": "High-level information about a managed rule group, returned by ListAvailableManagedRuleGroups. This provides information like the name and vendor name, that you provide when you add a ManagedRuleGroupStatement to a web ACL. Managed rule groups include Amazon Web Services Managed Rules rule groups, which are free of charge to WAF customers, and Amazon Web Services Marketplace managed rule groups, which you can subscribe to through Amazon Web Services Marketplace.
" } }, "com.amazonaws.wafv2#ManagedRuleGroupVersion": { @@ -5102,7 +5108,7 @@ } }, "traits": { - "smithy.api#documentation": "A set of rules that is managed by Amazon Web Services and Marketplace sellers to provide versioned managed\n rule groups for customers of WAF.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
A set of rules that is managed by Amazon Web Services and Amazon Web Services Marketplace sellers to provide versioned managed\n rule groups for customers of WAF.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
High-level information for a managed rule set.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
High-level information for a managed rule set.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Information for a single version of a managed rule set.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Information for a single version of a managed rule set.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
The HTTP method of a web request. The method indicates the type of operation that the\n request is asking the origin to perform.
\nThis is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
" + "smithy.api#documentation": "The HTTP method of a web request. The method indicates the type of operation that the request is asking the origin to perform.
\nThis is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
\nJSON specification: \"Method\": {}
\n
Specifies that WAF should do nothing. This is generally used to try out a rule\n without performing any actions. You set the OverrideAction
on the Rule.
This is used in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.
" + "smithy.api#documentation": "Specifies that WAF should do nothing. This is generally used to try out a rule\n without performing any actions. You set the OverrideAction
on the Rule.
This is used in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.
\nJSON specification: \"None\": {}
\n
Defines the versions of your managed rule set that you are offering to the customers.\n Customers see your offerings as managed rule groups with versioning.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Customers retrieve their managed rule group list by calling ListAvailableManagedRuleGroups. The name that you provide here for your\n managed rule set is the name the customer sees for the corresponding managed rule group.\n Customers can retrieve the available versions for a managed rule group by calling ListAvailableManagedRuleGroupVersions. You provide a rule group\n specification for each version. For each managed rule set, you must specify a version that\n you recommend using.
\nTo initiate the expiration of a managed rule group version, use UpdateManagedRuleSetVersionExpiryDate.
" + "smithy.api#documentation": "Defines the versions of your managed rule set that you are offering to the customers.\n Customers see your offerings as managed rule groups with versioning.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Customers retrieve their managed rule group list by calling ListAvailableManagedRuleGroups. The name that you provide here for your\n managed rule set is the name the customer sees for the corresponding managed rule group.\n Customers can retrieve the available versions for a managed rule group by calling ListAvailableManagedRuleGroupVersions. You provide a rule group\n specification for each version. For each managed rule set, you must specify a version that\n you recommend using.
\nTo initiate the expiration of a managed rule group version, use UpdateManagedRuleSetVersionExpiryDate.
" } }, "com.amazonaws.wafv2#PutManagedRuleSetVersionsRequest": { @@ -5765,7 +5783,7 @@ "type": "structure", "members": {}, "traits": { - "smithy.api#documentation": "The query string of a web request. This is the part of a URL that appears after a\n ?
character, if any.
This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
" + "smithy.api#documentation": "The query string of a web request. This is the part of a URL that appears after a ?
character, if any.
This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
\nJSON specification: \"QueryString\": {}
\n
A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.
\nWhen the rule action triggers, WAF blocks additional requests from the IP address until the request rate falls below the limit.
\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:
\nAn IP match statement with an IP set that specified the address 192.0.2.44.
\nA string match statement that searches in the User-Agent header for the string BadBot.
\nIn this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.
\nYou cannot nest a RateBasedStatement
, for example for use inside a NotStatement
or OrStatement
. It can only be referenced as a top-level statement within a rule.
A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.
\nWAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by WAF. If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by WAF.
\nWhen the rule action triggers, WAF blocks additional requests from the IP address until the request rate falls below the limit.
\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:
\nAn IP match statement with an IP set that specified the address 192.0.2.44.
\nA string match statement that searches in the User-Agent header for the string BadBot.
\nIn this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.
\nYou cannot nest a RateBasedStatement
inside another statement, for example inside a NotStatement
or OrStatement
. You can define a RateBasedStatement
inside a web ACL and inside a rule group.
The set of IP addresses that are currently blocked for a rate-based statement.
" + "smithy.api#documentation": "The set of IP addresses that are currently blocked for a RateBasedStatement.
" } }, "com.amazonaws.wafv2#RateLimit": { @@ -6250,7 +6268,7 @@ } }, "traits": { - "smithy.api#documentation": "A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
\nYou cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. It can only be referenced as a top-level statement within a rule.
A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
\nYou cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. You \n can only use a rule group reference statement at the top level inside a web ACL.
One of the headers in a web request, identified by name, for example,\n User-Agent
or Referer
. This setting isn't case\n sensitive.
This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
" + "smithy.api#documentation": "One of the headers in a web request, identified by name, for example,\n User-Agent
or Referer
. This setting isn't case\n sensitive.
This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
\nExample JSON: \"SingleHeader\": { \"Name\": \"haystack\" }
\n
One query argument in a web request, identified by name, for example\n UserName or SalesRegion. The name can be up to\n 30 characters long and isn't case sensitive.
" + "smithy.api#documentation": "One query argument in a web request, identified by name, for example\n UserName or SalesRegion. The name can be up to\n 30 characters long and isn't case sensitive.
\nExample JSON: \"SingleQueryArgument\": { \"Name\": \"myArgument\" }
\n
A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
\nYou cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. It can only be referenced as a top-level statement within a rule.
A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
\nYou cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. You \n can only use a rule group reference statement at the top level inside a web ACL.
A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.
\nWhen the rule action triggers, WAF blocks additional requests from the IP address until the request rate falls below the limit.
\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:
\nAn IP match statement with an IP set that specified the address 192.0.2.44.
\nA string match statement that searches in the User-Agent header for the string BadBot.
\nIn this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.
\nYou cannot nest a RateBasedStatement
, for example for use inside a NotStatement
or OrStatement
. It can only be referenced as a top-level statement within a rule.
A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.
\nWAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by WAF. If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by WAF.
\nWhen the rule action triggers, WAF blocks additional requests from the IP address until the request rate falls below the limit.
\nYou can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:
\nAn IP match statement with an IP set that specified the address 192.0.2.44.
\nA string match statement that searches in the User-Agent header for the string BadBot.
\nIn this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.
\nYou cannot nest a RateBasedStatement
inside another statement, for example inside a NotStatement
or OrStatement
. You can define a RateBasedStatement
inside a web ACL and inside a rule group.
The processing guidance for a Rule, used by WAF to determine\n whether a web request matches the rule.
" + "smithy.api#documentation": "The processing guidance for a Rule, used by WAF to determine whether a web request matches the rule.
" } }, "com.amazonaws.wafv2#Statements": { @@ -7106,7 +7124,7 @@ } ], "traits": { - "smithy.api#documentation": "Updates the expiration information for your managed rule set. Use this to initiate the\n expiration of a managed rule group version. After you initiate expiration for a version,\n WAF excludes it from the reponse to ListAvailableManagedRuleGroupVersions for the managed rule group.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Updates the expiration information for your managed rule set. Use this to initiate the\n expiration of a managed rule group version. After you initiate expiration for a version,\n WAF excludes it from the reponse to ListAvailableManagedRuleGroupVersions for the managed rule group.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
The path component of the URI of a web request. This is the part of a web request that identifies a resource. For example, /images/daily-ad.jpg
.
This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
" + "smithy.api#documentation": "The path component of the URI of a web request. This is the part of a web request that identifies a resource. For example, /images/daily-ad.jpg
.
This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
\nJSON specification: \"UriPath\": {}
\n
A version of the named managed rule group, that the rule group's vendor publishes for\n use by customers.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
A version of the named managed rule group, that the rule group's vendor publishes for\n use by customers.
\nThis is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
\nVendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Start transcription for the specified meetingId
.
Starts transcription for the specified meetingId
.
Start transcription for the specified meetingId
.
Starts transcription for the specified meetingId
.
The ID of the SIP media application.
*/ SipMediaApplicationId: string | undefined; + + /** + *The SIP headers added to an outbound call leg.
+ */ + SipHeaders?: { [key: string]: string }; } export namespace CreateSipMediaApplicationCallRequest { @@ -3920,6 +3925,7 @@ export namespace CreateSipMediaApplicationCallRequest { ...obj, ...(obj.FromPhoneNumber && { FromPhoneNumber: SENSITIVE_STRING }), ...(obj.ToPhoneNumber && { ToPhoneNumber: SENSITIVE_STRING }), + ...(obj.SipHeaders && { SipHeaders: SENSITIVE_STRING }), }); } diff --git a/clients/client-chime/models/models_1.ts b/clients/client-chime/models/models_1.ts index 08dab1bd3beb..7b4c5636c243 100644 --- a/clients/client-chime/models/models_1.ts +++ b/clients/client-chime/models/models_1.ts @@ -255,6 +255,9 @@ export enum OriginationRouteProtocol { *Origination routes define call distribution properties for your SIP hosts to receive inbound * calls using your Amazon Chime Voice Connector. Limit: Ten origination routes for each * Amazon Chime Voice Connector.
+ *The parameters listed below are not required, but you must use at least one.
+ *Origination settings enable your SIP hosts to receive inbound calls using your Amazon Chime * Voice Connector.
+ *The parameters listed below are not required, but you must use at least one.
+ *The call distribution properties defined for your SIP hosts. Valid range: Minimum value of 1. - * Maximum value of 20.
+ * Maximum value of 20. This parameter is not required, but you must specify this parameter or Disabled
.
*/
Routes?: OriginationRoute[];
/**
* When origination settings are disabled, inbound calls are not enabled for your Amazon Chime - * Voice Connector.
+ * Voice Connector. This parameter is not required, but you must specify this parameter or Routes
.
*/
Disabled?: boolean;
}
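
Taken together, the constraints above mean an Origination object must carry Routes, Disabled, or both. Below is a minimal sketch of the two Chime additions in this change, with hypothetical IDs and phone numbers; note that SipHeaders, like the phone numbers, is masked by filterSensitiveLog:

```ts
import {
  ChimeClient,
  PutVoiceConnectorOriginationCommand,
  CreateSipMediaApplicationCallCommand,
} from "@aws-sdk/client-chime";

const chime = new ChimeClient({ region: "us-east-1" });

(async () => {
  // Origination: neither Routes nor Disabled is individually required,
  // but at least one of them must be present.
  await chime.send(
    new PutVoiceConnectorOriginationCommand({
      VoiceConnectorId: "abcdef1ghij2klmno3pqr4", // placeholder
      Origination: {
        Routes: [{ Host: "203.0.113.10", Port: 5060, Protocol: "UDP", Priority: 1, Weight: 10 }],
        Disabled: false,
      },
    })
  );

  // The new SipHeaders map is passed through to the outbound call leg.
  await chime.send(
    new CreateSipMediaApplicationCallCommand({
      SipMediaApplicationId: "11111111-2222-3333-4444-555555555555", // placeholder
      FromPhoneNumber: "+12065550100",
      ToPhoneNumber: "+12065550101",
      SipHeaders: { "X-Example-Tenant": "example" },
    })
  );
})();
```
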
@@ -3162,7 +3168,7 @@ export interface TranscriptionConfiguration {
EngineTranscribeSettings?: EngineTranscribeSettings;
/**
- * The transcription configuration settings passed to Amazon Transcribe.
+ *The transcription configuration settings passed to Amazon Transcribe Medical.
*/ EngineTranscribeMedicalSettings?: EngineTranscribeMedicalSettings; } diff --git a/clients/client-chime/protocols/Aws_restJson1.ts b/clients/client-chime/protocols/Aws_restJson1.ts index ddbaff6f4cf2..23a5c3d145b4 100644 --- a/clients/client-chime/protocols/Aws_restJson1.ts +++ b/clients/client-chime/protocols/Aws_restJson1.ts @@ -1900,6 +1900,8 @@ export const serializeAws_restJson1CreateSipMediaApplicationCallCommand = async body = JSON.stringify({ ...(input.FromPhoneNumber !== undefined && input.FromPhoneNumber !== null && { FromPhoneNumber: input.FromPhoneNumber }), + ...(input.SipHeaders !== undefined && + input.SipHeaders !== null && { SipHeaders: serializeAws_restJson1SipHeadersMap(input.SipHeaders, context) }), ...(input.ToPhoneNumber !== undefined && input.ToPhoneNumber !== null && { ToPhoneNumber: input.ToPhoneNumber }), }); return new __HttpRequest({ @@ -11068,6 +11070,14 @@ const deserializeAws_restJson1CreateSipMediaApplicationCallCommandError = async let errorCode: string = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.chime#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "BadRequestException": case "com.amazonaws.chime#BadRequestException": response = { @@ -28028,6 +28038,18 @@ const serializeAws_restJson1SigninDelegateGroupList = (input: SigninDelegateGrou }); }; +const serializeAws_restJson1SipHeadersMap = (input: { [key: string]: string }, context: __SerdeContext): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: value, + }; + }, {}); +}; + const serializeAws_restJson1SipMediaApplicationEndpoint = ( input: SipMediaApplicationEndpoint, context: __SerdeContext diff --git a/clients/client-comprehend/models/models_0.ts b/clients/client-comprehend/models/models_0.ts index 09034177e73e..e221c4a5fce5 100644 --- a/clients/client-comprehend/models/models_0.ts +++ b/clients/client-comprehend/models/models_0.ts @@ -1,6 +1,11 @@ import { SENSITIVE_STRING } from "@aws-sdk/smithy-client"; import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; +export enum AugmentedManifestsDocumentTypeFormat { + PLAIN_TEXT_DOCUMENT = "PLAIN_TEXT_DOCUMENT", + SEMI_STRUCTURED_DOCUMENT = "SEMI_STRUCTURED_DOCUMENT", +} + /** *An augmented manifest file that provides training data for your custom model. An augmented * manifest file is a labeled dataset that is produced by Amazon SageMaker Ground Truth.
@@ -22,6 +27,32 @@ export interface AugmentedManifestsListItem { * an individual job. */ AttributeNames: string[] | undefined; + + /** + *The S3 prefix to the annotation files that are referred in the augmented manifest file.
+ */ + AnnotationDataS3Uri?: string; + + /** + *The S3 prefix to the source files (PDFs) that are referred to in the augmented manifest file.
+ */ + SourceDocumentsS3Uri?: string; + + /** + *The type of augmented manifest. PlainTextDocument or SemiStructuredDocument. If you don't specify, the default is PlainTextDocument.
+ *
+ * PLAIN_TEXT_DOCUMENT
A document type that represents any unicode text that is encoded in UTF-8.
+ * SEMI_STRUCTURED_DOCUMENT
A document type with positional and structural context, like a PDF. For training with Amazon Comprehend, only PDFs are supported.
+ * For inference, Amazon Comprehend support PDFs, DOCX and TXT.
The input properties for a topic detection job.
+ */ +export interface DocumentReaderConfig { + /** + *This enum field will start with two values which will apply to PDFs:
+ *
+ * TEXTRACT_DETECT_DOCUMENT_TEXT
- The service calls DetectDocumentText for PDF documents per page.
+ * TEXTRACT_ANALYZE_DOCUMENT
- The service calls AnalyzeDocument for PDF documents per page.
This enum field provides two values:
+ *
+ * SERVICE_DEFAULT
- use service defaults for Document reading. For Digital PDF it would mean using an internal parser instead of Textract APIs
+ * FORCE_DOCUMENT_READ_ACTION
- Always use specified action for DocumentReadAction, including Digital PDF.
+ *
Specifies how the text in an input file should be processed:
+ */ + FeatureTypes?: (DocumentReadFeatureTypes | string)[]; +} + +export namespace DocumentReaderConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DocumentReaderConfig): any => ({ + ...obj, + }); +} + export enum InputFormat { ONE_DOC_PER_FILE = "ONE_DOC_PER_FILE", ONE_DOC_PER_LINE = "ONE_DOC_PER_LINE", } /** - *The input properties for a topic detection job.
+ *The input properties for an inference job.
*/ export interface InputDataConfig { /** @@ -2073,6 +2169,13 @@ export interface InputDataConfig { * */ InputFormat?: InputFormat | string; + + /** + *The document reader config field applies only for InputDataConfig of StartEntitiesDetectionJob.
+ *Use DocumentReaderConfig to provide specifications about how you want your inference documents read. + * Currently it applies for PDF documents in StartEntitiesDetectionJob custom inference.
+ */ + DocumentReaderConfig?: DocumentReaderConfig; } export namespace InputDataConfig { diff --git a/clients/client-comprehend/protocols/Aws_json1_1.ts b/clients/client-comprehend/protocols/Aws_json1_1.ts index bae2e009a581..d4da350c37f3 100644 --- a/clients/client-comprehend/protocols/Aws_json1_1.ts +++ b/clients/client-comprehend/protocols/Aws_json1_1.ts @@ -280,6 +280,8 @@ import { DocumentClassifierOutputDataConfig, DocumentClassifierProperties, DocumentLabel, + DocumentReadFeatureTypes, + DocumentReaderConfig, DominantLanguage, DominantLanguageDetectionJobFilter, DominantLanguageDetectionJobProperties, @@ -6466,11 +6468,16 @@ const serializeAws_json1_1AugmentedManifestsListItem = ( context: __SerdeContext ): any => { return { + ...(input.AnnotationDataS3Uri !== undefined && + input.AnnotationDataS3Uri !== null && { AnnotationDataS3Uri: input.AnnotationDataS3Uri }), ...(input.AttributeNames !== undefined && input.AttributeNames !== null && { AttributeNames: serializeAws_json1_1AttributeNamesList(input.AttributeNames, context), }), + ...(input.DocumentType !== undefined && input.DocumentType !== null && { DocumentType: input.DocumentType }), ...(input.S3Uri !== undefined && input.S3Uri !== null && { S3Uri: input.S3Uri }), + ...(input.SourceDocumentsS3Uri !== undefined && + input.SourceDocumentsS3Uri !== null && { SourceDocumentsS3Uri: input.SourceDocumentsS3Uri }), }; }; @@ -6864,6 +6871,19 @@ const serializeAws_json1_1DocumentClassifierOutputDataConfig = ( }; }; +const serializeAws_json1_1DocumentReaderConfig = (input: DocumentReaderConfig, context: __SerdeContext): any => { + return { + ...(input.DocumentReadAction !== undefined && + input.DocumentReadAction !== null && { DocumentReadAction: input.DocumentReadAction }), + ...(input.DocumentReadMode !== undefined && + input.DocumentReadMode !== null && { DocumentReadMode: input.DocumentReadMode }), + ...(input.FeatureTypes !== undefined && + input.FeatureTypes !== null && { + FeatureTypes: serializeAws_json1_1ListOfDocumentReadFeatureTypes(input.FeatureTypes, context), + }), + }; +}; + const serializeAws_json1_1DominantLanguageDetectionJobFilter = ( input: DominantLanguageDetectionJobFilter, context: __SerdeContext @@ -7019,6 +7039,10 @@ const serializeAws_json1_1EventsDetectionJobFilter = ( const serializeAws_json1_1InputDataConfig = (input: InputDataConfig, context: __SerdeContext): any => { return { + ...(input.DocumentReaderConfig !== undefined && + input.DocumentReaderConfig !== null && { + DocumentReaderConfig: serializeAws_json1_1DocumentReaderConfig(input.DocumentReaderConfig, context), + }), ...(input.InputFormat !== undefined && input.InputFormat !== null && { InputFormat: input.InputFormat }), ...(input.S3Uri !== undefined && input.S3Uri !== null && { S3Uri: input.S3Uri }), }; @@ -7133,6 +7157,20 @@ const serializeAws_json1_1ListKeyPhrasesDetectionJobsRequest = ( }; }; +const serializeAws_json1_1ListOfDocumentReadFeatureTypes = ( + input: (DocumentReadFeatureTypes | string)[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_json1_1ListOfPiiEntityTypes = (input: (PiiEntityType | string)[], context: __SerdeContext): any => { return input .filter((e: any) => e != null) @@ -7647,11 +7685,14 @@ const deserializeAws_json1_1AugmentedManifestsListItem = ( context: __SerdeContext ): AugmentedManifestsListItem => { return { + AnnotationDataS3Uri: 
__expectString(output.AnnotationDataS3Uri), AttributeNames: output.AttributeNames !== undefined && output.AttributeNames !== null ? deserializeAws_json1_1AttributeNamesList(output.AttributeNames, context) : undefined, + DocumentType: __expectString(output.DocumentType), S3Uri: __expectString(output.S3Uri), + SourceDocumentsS3Uri: __expectString(output.SourceDocumentsS3Uri), } as any; }; @@ -8304,6 +8345,17 @@ const deserializeAws_json1_1DocumentLabel = (output: any, context: __SerdeContex } as any; }; +const deserializeAws_json1_1DocumentReaderConfig = (output: any, context: __SerdeContext): DocumentReaderConfig => { + return { + DocumentReadAction: __expectString(output.DocumentReadAction), + DocumentReadMode: __expectString(output.DocumentReadMode), + FeatureTypes: + output.FeatureTypes !== undefined && output.FeatureTypes !== null + ? deserializeAws_json1_1ListOfDocumentReadFeatureTypes(output.FeatureTypes, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1DominantLanguage = (output: any, context: __SerdeContext): DominantLanguage => { return { LanguageCode: __expectString(output.LanguageCode), @@ -8722,6 +8774,10 @@ const deserializeAws_json1_1EventsDetectionJobPropertiesList = ( const deserializeAws_json1_1InputDataConfig = (output: any, context: __SerdeContext): InputDataConfig => { return { + DocumentReaderConfig: + output.DocumentReaderConfig !== undefined && output.DocumentReaderConfig !== null + ? deserializeAws_json1_1DocumentReaderConfig(output.DocumentReaderConfig, context) + : undefined, InputFormat: __expectString(output.InputFormat), S3Uri: __expectString(output.S3Uri), } as any; @@ -9018,6 +9074,20 @@ const deserializeAws_json1_1ListOfDetectSyntaxResult = ( }); }; +const deserializeAws_json1_1ListOfDocumentReadFeatureTypes = ( + output: any, + context: __SerdeContext +): (DocumentReadFeatureTypes | string)[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + const deserializeAws_json1_1ListOfDominantLanguages = (output: any, context: __SerdeContext): DominantLanguage[] => { return (output || []) .filter((e: any) => e != null) diff --git a/clients/client-database-migration-service/DatabaseMigrationService.ts b/clients/client-database-migration-service/DatabaseMigrationService.ts index 942555bbb2df..2710c2b4ffe8 100644 --- a/clients/client-database-migration-service/DatabaseMigrationService.ts +++ b/clients/client-database-migration-service/DatabaseMigrationService.ts @@ -518,6 +518,8 @@ export class DatabaseMigrationService extends DatabaseMigrationServiceClient { /** *Creates a replication subnet group given a list of the subnet IDs in a VPC.
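
A minimal sketch of the new DocumentReaderConfig in use with StartEntitiesDetectionJob, which per the documentation above is currently the only operation that honors it; all ARNs and bucket names are placeholders:

```ts
import { ComprehendClient, StartEntitiesDetectionJobCommand } from "@aws-sdk/client-comprehend";

const comprehend = new ComprehendClient({ region: "us-east-1" });

(async () => {
  await comprehend.send(
    new StartEntitiesDetectionJobCommand({
      JobName: "pdf-entities-example",
      LanguageCode: "en",
      EntityRecognizerArn: "arn:aws:comprehend:us-east-1:123456789012:entity-recognizer/example",
      DataAccessRoleArn: "arn:aws:iam::123456789012:role/ComprehendDataAccess",
      InputDataConfig: {
        S3Uri: "s3://example-bucket/input/",
        InputFormat: "ONE_DOC_PER_FILE",
        // Applies to PDF inputs for custom entity recognition.
        DocumentReaderConfig: {
          DocumentReadAction: "TEXTRACT_ANALYZE_DOCUMENT",
          DocumentReadMode: "FORCE_DOCUMENT_READ_ACTION",
          FeatureTypes: ["TABLES", "FORMS"],
        },
      },
      OutputDataConfig: { S3Uri: "s3://example-bucket/output/" },
    })
  );
})();
```
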
+ *The VPC needs to have at least one subnet in at least two availability zones in the Amazon Web Services Region, otherwise the
+ * service will throw a ReplicationSubnetGroupDoesNotCoverEnoughAZs
exception.
Reloads the target database table with the source data.
+ *You can only use this operation with a task in the RUNNING
state, otherwise the service
+ * will throw an InvalidResourceStateFault
exception.
Creates a replication subnet group given a list of the subnet IDs in a VPC.
+ *The VPC needs to have at least one subnet in at least two availability zones in the Amazon Web Services Region, otherwise the
+ * service will throw a ReplicationSubnetGroupDoesNotCoverEnoughAZs
exception.
Reloads the target database table with the source data.
+ *You can only use this operation with a task in the RUNNING
state, otherwise the service
+ * will throw an InvalidResourceStateFault
exception.
The pending maintenance action to apply to this resource.
+ *Valid values: os-upgrade
, system-update
, db-upgrade
+ *
The IAM role that has permission to access the Amazon S3 bucket. When specified as part of request syntax,
- * such as for the CreateEndpoint
and ModifyEndpoint
actions,
- * the role must allow the iam:PassRole
action.
The Amazon Resource Name (ARN) used by the service access IAM role. The role must allow the iam:PassRole
action.
- * ServiceAccessRoleArn
- The IAM role that has permission to access the
- * Amazon S3 bucket. The role must allow the iam:PassRole
action.
ServiceAccessRoleArn
- The Amazon Resource Name (ARN) used by the service access IAM role.
+ * The role must allow the iam:PassRole
action.
* @@ -3511,8 +3511,8 @@ export interface Endpoint { *
- * ServiceAccessRoleArn
- The IAM role that has permission to access the
- * Amazon S3 bucket. The role must allow the iam:PassRole
action.
ServiceAccessRoleArn
- - The Amazon Resource Name (ARN) used by the service access IAM role.
+ * The role must allow the iam:PassRole
action.
* @@ -7936,8 +7936,7 @@ export interface ModifyEndpointMessage { *
Attributes include the following:
*serviceAccessRoleArn - The Identity and Access Management (IAM) role that has
- * permission to access the Amazon S3 bucket. The role must allow the iam:PassRole
action.
serviceAccessRoleArn - The Amazon Resource Name (ARN) used by the service access IAM role. The role must allow the iam:PassRole
action.
BucketName - The name of the S3 bucket to use.
@@ -8591,10 +8590,18 @@ export interface RebootReplicationInstanceMessage { /** *If this parameter is true
, the reboot is conducted through a Multi-AZ
- * failover. (If the instance isn't configured for Multi-AZ, then you can't specify
- * true
.)
true
. ( --force-planned-failover
and --force-failover
can't both be set to true
.)
*/
ForceFailover?: boolean;
+
+ /**
+ * If this parameter is true
, the reboot is conducted through a planned Multi-AZ failover
+ * where resources are released and cleaned up prior to conducting the failover.
+ * If the instance isn''t configured for Multi-AZ, then you can't specify true
.
+ * ( --force-planned-failover
and --force-failover
can't both be set to true
.)
Initiates the copy of an AMI. You can copy an AMI from one Region to another, or from a - * Region to an AWS Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost + * Region to an Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost * to another, or within the same Outpost. To copy an AMI to another partition, see CreateStoreImageTask.
* *To copy an AMI from one Region to another, specify the source Region using the @@ -4525,10 +4525,10 @@ export class EC2 extends EC2Client { * * * - *
If you customized your instance with instance store volumes or EBS volumes in addition to the root device volume, the + *
If you customized your instance with instance store volumes or Amazon EBS volumes in addition to the root device volume, the * new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, * the instance automatically launches with those additional volumes.
- *For more information, see Creating Amazon EBS-Backed Linux AMIs + *
For more information, see Creating Amazon EBS-Backed Linux AMIs * in the Amazon Elastic Compute Cloud User Guide.
*/ public createImage(args: CreateImageCommandInput, options?: __HttpHandlerOptions): PromiseCreates a managed prefix list. You can specify one or more entries for the prefix list. Each entry consists of a CIDR block and an optional description.
- *You must specify the maximum number of entries for the prefix list. The maximum number of entries cannot be changed later.
+ *Creates a managed prefix list. You can specify one or more entries for the prefix list. + * Each entry consists of a CIDR block and an optional description.
*/ public createManagedPrefixList( args: CreateManagedPrefixListCommandInput, @@ -5234,12 +5234,12 @@ export class EC2 extends EC2Client { } /** - *Starts a task that restores an AMI from an S3 object that was previously created by using + *
Starts a task that restores an AMI from an Amazon S3 object that was previously created by using * CreateStoreImageTask.
- *To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the + *
To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the * Amazon Elastic Compute Cloud User Guide.
*For more information, see Store and restore an AMI using - * S3 in the Amazon Elastic Compute Cloud User Guide.
+ * Amazon S3 in the Amazon Elastic Compute Cloud User Guide. */ public createRestoreImageTask( args: CreateRestoreImageTaskCommandInput, @@ -5532,11 +5532,11 @@ export class EC2 extends EC2Client { } /** - *Stores an AMI as a single object in an S3 bucket.
- *To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the + *
Stores an AMI as a single object in an Amazon S3 bucket.
+ *To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the * Amazon Elastic Compute Cloud User Guide.
*For more information, see Store and restore an AMI using - * S3 in the Amazon Elastic Compute Cloud User Guide.
+ * Amazon S3 in the Amazon Elastic Compute Cloud User Guide. */ public createStoreImageTask( args: CreateStoreImageTaskCommandInput, @@ -8432,7 +8432,7 @@ export class EC2 extends EC2Client { * new instances; however, it doesn't affect any instances that you've already launched * from the AMI. You'll continue to incur usage costs for those instances until you * terminate them. - *When you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was + *
When you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was * created for the root volume of the instance during the AMI creation process. When you * deregister an instance store-backed AMI, it doesn't affect the files that you uploaded * to Amazon S3 when you created the AMI.
@@ -9888,7 +9888,8 @@ export class EC2 extends EC2Client { /** *Describes the specified images (AMIs, AKIs, and ARIs) available to you or all of the images available to you.
- *The images available to you include public images, private images that you own, and private images owned by other AWS accounts for which you have explicit launch permissions.
+ *The images available to you include public images, private images that you own, and private images owned by other + * Amazon Web Services accounts for which you have explicit launch permissions.
*Recently deregistered images appear in the returned results for a short interval and then * return empty results. After all instances that reference a deregistered AMI are terminated, * specifying the ID of the image will eventually return an error indicating that the AMI ID @@ -11915,10 +11916,10 @@ export class EC2 extends EC2Client { * response shows the estimated progress as a percentage.
*Tasks are listed in reverse chronological order. Currently, only tasks from the past 31 * days can be viewed.
- *To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the + *
To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the * Amazon Elastic Compute Cloud User Guide.
*For more information, see Store and restore an AMI using - * S3 in the Amazon Elastic Compute Cloud User Guide.
+ * Amazon S3 in the Amazon Elastic Compute Cloud User Guide. */ public describeStoreImageTasks( args: DescribeStoreImageTasksCommandInput, @@ -15816,8 +15817,8 @@ export class EC2 extends EC2Client { /** *Modifies the specified attribute of the specified AMI. You can specify only one attribute at a time.
* You can use the Attribute
parameter to specify the attribute or one of the following parameters:
- * Description
, LaunchPermission
, or ProductCode
.
AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace product code cannot be made public.
+ *Description
or LaunchPermission
.
+ * Images with an Amazon Web Services Marketplace product code cannot be made public.
*To enable the SriovNetSupport enhanced networking attribute of an image, enable SriovNetSupport on an instance * and create an AMI from the instance.
*/ @@ -17425,7 +17426,7 @@ export class EC2 extends EC2Client { * Creating your * own AMIs in the Amazon Elastic Compute Cloud User Guide. *For Amazon EBS-backed instances, CreateImage creates and registers + *
For Amazon EBS-backed instances, CreateImage creates and registers * the AMI in a single request, so you don't have to register the AMI yourself.
** Register a snapshot of a root device volume *
- *You can use RegisterImage
to create an Amazon EBS-backed Linux AMI from
+ *
You can use RegisterImage
to create an Amazon EBS-backed Linux AMI from
* a snapshot of a root device volume. You specify the snapshot using a block device mapping.
* You can't set the encryption state of the volume using the block device mapping. If the
* snapshot is encrypted, or encryption by default is enabled, the root volume of an instance
* launched from the AMI is encrypted.
For more information, see Create a Linux AMI from a snapshot and Use encryption with EBS-backed AMIs + *
For more information, see Create a Linux AMI from a snapshot and Use encryption with Amazon EBS-backed AMIs * in the Amazon Elastic Compute Cloud User Guide.
* - *- * AWS Marketplace product codes + *
+ * Amazon Web Services Marketplace product codes *
- *If any snapshots have AWS Marketplace product codes, they are copied to the new + *
If any snapshots have Amazon Web Services Marketplace product codes, they are copied to the new * AMI.
*Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE - * Linux Enterprise Server (SLES), use the EC2 billing product code associated with an AMI to + * Linux Enterprise Server (SLES), use the Amazon EC2 billing product code associated with an AMI to * verify the subscription status for package updates. To create a new AMI for operating systems * that require a billing product code, instead of registering the AMI, do the following to * preserve the billing product code association:
@@ -17468,8 +17469,8 @@ export class EC2 extends EC2Client { * from an AMI with a billing product code, make sure that the Reserved Instance has the matching * billing product code. If you purchase a Reserved Instance without the matching billing product * code, the Reserved Instance will not be applied to the On-Demand Instance. For information - * about how to obtain the platform details and billing information of an AMI, see Obtaining billing - * information in the Amazon Elastic Compute Cloud User Guide. + * about how to obtain the platform details and billing information of an AMI, see Understanding AMI + * billing in the Amazon Elastic Compute Cloud User Guide. */ public registerImage( args: RegisterImageCommandInput, @@ -18297,9 +18298,6 @@ export class EC2 extends EC2Client { /** *Resets an attribute of an AMI to its default value.
- *The productCodes attribute can't be reset.
- *Performing this operation on an instance that uses an instance store as its root * device returns an error.
+ * + *If you attempt to start a T3 instance with host
tenancy and the unlimited
+ * CPU credit option, the request fails. The unlimited
CPU credit option is not
+ * supported on Dedicated Hosts. Before you start the instance, either change its CPU credit
+ * option to standard
, or change its tenancy to default
or dedicated
.
For more information, see Stopping instances in the * Amazon EC2 User Guide.
*/ diff --git a/clients/client-ec2/commands/CopyImageCommand.ts b/clients/client-ec2/commands/CopyImageCommand.ts index ca5e9c57a64c..ada35fafee9d 100644 --- a/clients/client-ec2/commands/CopyImageCommand.ts +++ b/clients/client-ec2/commands/CopyImageCommand.ts @@ -19,7 +19,7 @@ export interface CopyImageCommandOutput extends CopyImageResult, __MetadataBeare /** *Initiates the copy of an AMI. You can copy an AMI from one Region to another, or from a - * Region to an AWS Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost + * Region to an Outpost. You can't copy an AMI from an Outpost to a Region, from one Outpost * to another, or within the same Outpost. To copy an AMI to another partition, see CreateStoreImageTask.
* *To copy an AMI from one Region to another, specify the source Region using the diff --git a/clients/client-ec2/commands/CreateImageCommand.ts b/clients/client-ec2/commands/CreateImageCommand.ts index f554811b38c1..fa5d3a011234 100644 --- a/clients/client-ec2/commands/CreateImageCommand.ts +++ b/clients/client-ec2/commands/CreateImageCommand.ts @@ -24,10 +24,10 @@ export interface CreateImageCommandOutput extends CreateImageResult, __MetadataB * * * - *
If you customized your instance with instance store volumes or EBS volumes in addition to the root device volume, the + *
If you customized your instance with instance store volumes or Amazon EBS volumes in addition to the root device volume, the * new AMI contains block device mapping information for those volumes. When you launch an instance from this new AMI, * the instance automatically launches with those additional volumes.
- *For more information, see Creating Amazon EBS-Backed Linux AMIs + *
For more information, see Creating Amazon EBS-Backed Linux AMIs * in the Amazon Elastic Compute Cloud User Guide.
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-ec2/commands/CreateManagedPrefixListCommand.ts b/clients/client-ec2/commands/CreateManagedPrefixListCommand.ts index 7ac8c199f875..de31c43e335c 100644 --- a/clients/client-ec2/commands/CreateManagedPrefixListCommand.ts +++ b/clients/client-ec2/commands/CreateManagedPrefixListCommand.ts @@ -21,8 +21,8 @@ export interface CreateManagedPrefixListCommandInput extends CreateManagedPrefix export interface CreateManagedPrefixListCommandOutput extends CreateManagedPrefixListResult, __MetadataBearer {} /** - *Creates a managed prefix list. You can specify one or more entries for the prefix list. Each entry consists of a CIDR block and an optional description.
- *You must specify the maximum number of entries for the prefix list. The maximum number of entries cannot be changed later.
+ *Creates a managed prefix list. You can specify one or more entries for the prefix list. + * Each entry consists of a CIDR block and an optional description.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/commands/CreateRestoreImageTaskCommand.ts b/clients/client-ec2/commands/CreateRestoreImageTaskCommand.ts index be8943dd231a..715c91d03a35 100644 --- a/clients/client-ec2/commands/CreateRestoreImageTaskCommand.ts +++ b/clients/client-ec2/commands/CreateRestoreImageTaskCommand.ts @@ -21,12 +21,12 @@ export interface CreateRestoreImageTaskCommandInput extends CreateRestoreImageTa export interface CreateRestoreImageTaskCommandOutput extends CreateRestoreImageTaskResult, __MetadataBearer {} /** - *Starts a task that restores an AMI from an S3 object that was previously created by using + *
Starts a task that restores an AMI from an Amazon S3 object that was previously created by using * CreateStoreImageTask.
- *To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the + *
To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the * Amazon Elastic Compute Cloud User Guide.
*For more information, see Store and restore an AMI using - * S3 in the Amazon Elastic Compute Cloud User Guide.
+ * Amazon S3 in the Amazon Elastic Compute Cloud User Guide. * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/commands/CreateStoreImageTaskCommand.ts b/clients/client-ec2/commands/CreateStoreImageTaskCommand.ts index 5995b4a4fadd..46be30469fec 100644 --- a/clients/client-ec2/commands/CreateStoreImageTaskCommand.ts +++ b/clients/client-ec2/commands/CreateStoreImageTaskCommand.ts @@ -21,11 +21,11 @@ export interface CreateStoreImageTaskCommandInput extends CreateStoreImageTaskRe export interface CreateStoreImageTaskCommandOutput extends CreateStoreImageTaskResult, __MetadataBearer {} /** - *Stores an AMI as a single object in an S3 bucket.
- *To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the + *
Stores an AMI as a single object in an Amazon S3 bucket.
+ *To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the * Amazon Elastic Compute Cloud User Guide.
*For more information, see Store and restore an AMI using - * S3 in the Amazon Elastic Compute Cloud User Guide.
+ * Amazon S3 in the Amazon Elastic Compute Cloud User Guide. * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/commands/DeregisterImageCommand.ts b/clients/client-ec2/commands/DeregisterImageCommand.ts index 7142abe88deb..2d6f42e389b5 100644 --- a/clients/client-ec2/commands/DeregisterImageCommand.ts +++ b/clients/client-ec2/commands/DeregisterImageCommand.ts @@ -22,7 +22,7 @@ export interface DeregisterImageCommandOutput extends __MetadataBearer {} * new instances; however, it doesn't affect any instances that you've already launched * from the AMI. You'll continue to incur usage costs for those instances until you * terminate them. - *When you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was + *
When you deregister an Amazon EBS-backed AMI, it doesn't affect the snapshot that was * created for the root volume of the instance during the AMI creation process. When you * deregister an instance store-backed AMI, it doesn't affect the files that you uploaded * to Amazon S3 when you created the AMI.
diff --git a/clients/client-ec2/commands/DescribeImagesCommand.ts b/clients/client-ec2/commands/DescribeImagesCommand.ts index 893f83ca5cd9..da1483f84185 100644 --- a/clients/client-ec2/commands/DescribeImagesCommand.ts +++ b/clients/client-ec2/commands/DescribeImagesCommand.ts @@ -19,7 +19,8 @@ export interface DescribeImagesCommandOutput extends DescribeImagesResult, __Met /** *Describes the specified images (AMIs, AKIs, and ARIs) available to you or all of the images available to you.
- *The images available to you include public images, private images that you own, and private images owned by other AWS accounts for which you have explicit launch permissions.
+ *The images available to you include public images, private images that you own, and private images owned by other + * Amazon Web Services accounts for which you have explicit launch permissions.
*Recently deregistered images appear in the returned results for a short interval and then * return empty results. After all instances that reference a deregistered AMI are terminated, * specifying the ID of the image will eventually return an error indicating that the AMI ID diff --git a/clients/client-ec2/commands/DescribeStoreImageTasksCommand.ts b/clients/client-ec2/commands/DescribeStoreImageTasksCommand.ts index 91b5d134943a..a87bb83425b6 100644 --- a/clients/client-ec2/commands/DescribeStoreImageTasksCommand.ts +++ b/clients/client-ec2/commands/DescribeStoreImageTasksCommand.ts @@ -29,10 +29,10 @@ export interface DescribeStoreImageTasksCommandOutput extends DescribeStoreImage * response shows the estimated progress as a percentage.
*Tasks are listed in reverse chronological order. Currently, only tasks from the past 31 * days can be viewed.
- *To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using S3 in the + *
To use this API, you must have the required permissions. For more information, see Permissions for storing and restoring AMIs using Amazon S3 in the * Amazon Elastic Compute Cloud User Guide.
*For more information, see Store and restore an AMI using - * S3 in the Amazon Elastic Compute Cloud User Guide.
+ * Amazon S3 in the Amazon Elastic Compute Cloud User Guide. * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/commands/ModifyImageAttributeCommand.ts b/clients/client-ec2/commands/ModifyImageAttributeCommand.ts index 6027d6081a68..7cd86ac3ba3d 100644 --- a/clients/client-ec2/commands/ModifyImageAttributeCommand.ts +++ b/clients/client-ec2/commands/ModifyImageAttributeCommand.ts @@ -23,8 +23,8 @@ export interface ModifyImageAttributeCommandOutput extends __MetadataBearer {} /** *Modifies the specified attribute of the specified AMI. You can specify only one attribute at a time.
* You can use the Attribute
parameter to specify the attribute or one of the following parameters:
- * Description
, LaunchPermission
, or ProductCode
.
AWS Marketplace product codes cannot be modified. Images with an AWS Marketplace product code cannot be made public.
+ *Description
or LaunchPermission
.
+ * Images with an Amazon Web Services Marketplace product code cannot be made public.
*To enable the SriovNetSupport enhanced networking attribute of an image, enable SriovNetSupport on an instance * and create an AMI from the instance.
* @example diff --git a/clients/client-ec2/commands/RegisterImageCommand.ts b/clients/client-ec2/commands/RegisterImageCommand.ts index 40da7cae17cc..6829498ad3e7 100644 --- a/clients/client-ec2/commands/RegisterImageCommand.ts +++ b/clients/client-ec2/commands/RegisterImageCommand.ts @@ -23,7 +23,7 @@ export interface RegisterImageCommandOutput extends RegisterImageResult, __Metad * Creating your * own AMIs in the Amazon Elastic Compute Cloud User Guide. *For Amazon EBS-backed instances, CreateImage creates and registers + *
For Amazon EBS-backed instances, CreateImage creates and registers * the AMI in a single request, so you don't have to register the AMI yourself.
** Register a snapshot of a root device volume *
- *You can use RegisterImage
to create an Amazon EBS-backed Linux AMI from
+ *
You can use RegisterImage
to create an Amazon EBS-backed Linux AMI from
* a snapshot of a root device volume. You specify the snapshot using a block device mapping.
* You can't set the encryption state of the volume using the block device mapping. If the
* snapshot is encrypted, or encryption by default is enabled, the root volume of an instance
* launched from the AMI is encrypted.
For more information, see Create a Linux AMI from a snapshot and Use encryption with EBS-backed AMIs + *
For more information, see Create a Linux AMI from a snapshot and Use encryption with Amazon EBS-backed AMIs * in the Amazon Elastic Compute Cloud User Guide.
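A sketch of registering an Amazon EBS-backed Linux AMI from a root-volume snapshot, assuming a hypothetical snapshot ID and device name:

```typescript
import { EC2Client, RegisterImageCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });

// Register an AMI whose root volume comes from an existing snapshot.
const { ImageId } = await client.send(
  new RegisterImageCommand({
    Name: "restored-linux-ami",
    Architecture: "x86_64",
    VirtualizationType: "hvm",
    RootDeviceName: "/dev/xvda",
    BlockDeviceMappings: [
      { DeviceName: "/dev/xvda", Ebs: { SnapshotId: "snap-0123456789abcdef0" } },
    ],
  })
);
```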
* - *- * AWS Marketplace product codes + *
+ * Amazon Web Services Marketplace product codes *
- *If any snapshots have AWS Marketplace product codes, they are copied to the new + *
If any snapshots have Amazon Web Services Marketplace product codes, they are copied to the new * AMI.
*Windows and some Linux distributions, such as Red Hat Enterprise Linux (RHEL) and SUSE - * Linux Enterprise Server (SLES), use the EC2 billing product code associated with an AMI to + * Linux Enterprise Server (SLES), use the Amazon EC2 billing product code associated with an AMI to * verify the subscription status for package updates. To create a new AMI for operating systems * that require a billing product code, instead of registering the AMI, do the following to * preserve the billing product code association:
@@ -66,8 +66,8 @@ export interface RegisterImageCommandOutput extends RegisterImageResult, __Metad * from an AMI with a billing product code, make sure that the Reserved Instance has the matching * billing product code. If you purchase a Reserved Instance without the matching billing product * code, the Reserved Instance will not be applied to the On-Demand Instance. For information - * about how to obtain the platform details and billing information of an AMI, see Obtaining billing - * information in the Amazon Elastic Compute Cloud User Guide. + * about how to obtain the platform details and billing information of an AMI, see Understanding AMI + * billing in the Amazon Elastic Compute Cloud User Guide. * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/commands/ResetImageAttributeCommand.ts b/clients/client-ec2/commands/ResetImageAttributeCommand.ts index 33d52ebd3546..ff544ad07da6 100644 --- a/clients/client-ec2/commands/ResetImageAttributeCommand.ts +++ b/clients/client-ec2/commands/ResetImageAttributeCommand.ts @@ -22,9 +22,6 @@ export interface ResetImageAttributeCommandOutput extends __MetadataBearer {} /** *Resets an attribute of an AMI to its default value.
- *The productCodes attribute can't be reset.
- *Performing this operation on an instance that uses an instance store as its root * device returns an error.
+ * + *If you attempt to start a T3 instance with host
tenancy and the <code>unlimited</code>
+ * CPU credit option, the request fails. The unlimited
CPU credit option is not
+ * supported on Dedicated Hosts. Before you start the instance, either change its CPU credit
+ * option to standard
, or change its tenancy to default
or dedicated
.
For more information, see Stopping instances in the * Amazon EC2 User Guide.
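A sketch of the workaround described above, assuming a hypothetical stopped T3 instance allocated to a Dedicated Host:

```typescript
import {
  EC2Client,
  ModifyInstanceCreditSpecificationCommand,
  StartInstancesCommand,
} from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });
const instanceId = "i-0123456789abcdef0";

// Switch the stopped instance to the standard credit option first...
await client.send(
  new ModifyInstanceCreditSpecificationCommand({
    InstanceCreditSpecifications: [{ InstanceId: instanceId, CpuCredits: "standard" }],
  })
);

// ...so that starting it with host tenancy no longer fails.
await client.send(new StartInstancesCommand({ InstanceIds: [instanceId] }));
```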
* @example diff --git a/clients/client-ec2/models/models_0.ts b/clients/client-ec2/models/models_0.ts index d1fd725840ec..a6a35f44652a 100644 --- a/clients/client-ec2/models/models_0.ts +++ b/clients/client-ec2/models/models_0.ts @@ -3,7 +3,7 @@ */ export interface TargetConfigurationRequest { /** - *The number of instances the Covertible Reserved Instance offering can be applied to. This parameter is reserved and cannot + *
The number of instances the Convertible Reserved Instance offering can be applied to. This parameter is reserved and cannot * be specified in a request
*/ InstanceCount?: number; @@ -1213,6 +1213,8 @@ export namespace AdvertiseByoipCidrResult { export type Affinity = "default" | "host"; export type ResourceType = + | "capacity-reservation" + | "carrier-gateway" | "client-vpn-endpoint" | "customer-gateway" | "dedicated-host" @@ -1231,15 +1233,24 @@ export type ResourceType = | "instance" | "instance-event-window" | "internet-gateway" + | "ipv4pool-ec2" + | "ipv6pool-ec2" | "key-pair" | "launch-template" + | "local-gateway" + | "local-gateway-route-table" + | "local-gateway-route-table-virtual-interface-group-association" | "local-gateway-route-table-vpc-association" + | "local-gateway-virtual-interface" + | "local-gateway-virtual-interface-group" | "natgateway" | "network-acl" | "network-insights-analysis" | "network-insights-path" | "network-interface" | "placement-group" + | "prefix-list" + | "replace-root-volume-task" | "reserved-instances" | "route-table" | "security-group" @@ -1258,6 +1269,8 @@ export type ResourceType = | "transit-gateway-route-table" | "volume" | "vpc" + | "vpc-endpoint" + | "vpc-endpoint-service" | "vpc-flow-log" | "vpc-peering-connection" | "vpn-connection" @@ -1268,26 +1281,28 @@ export type ResourceType = */ export interface TagSpecification { /** - *The type of resource to tag. Currently, the resource types that support tagging on
- * creation are: capacity-reservation
| carrier-gateway
|
- * client-vpn-endpoint
| customer-gateway
|
- * dedicated-host
| dhcp-options
| egress-only-internet-gateway
| elastic-ip
| elastic-gpu
|
- * export-image-task
- * | export-instance-task
| fleet
| fpga-image
|
- * host-reservation
| image
| import-image-task
|
+ *
The type of resource to tag on creation. The possible values are:
+ * capacity-reservation
| carrier-gateway
|
+ * client-vpn-endpoint
| customer-gateway
|
+ * dedicated-host
| dhcp-options
|
+ * egress-only-internet-gateway
| elastic-gpu
|
+ * elastic-ip
| export-image-task
|
+ * export-instance-task
| fleet
| fpga-image
|
+ * host-reservation
| image
| import-image-task
|
* import-snapshot-task
| instance
| instance-event-window
|
- * internet-gateway
| ipv4pool-ec2
| ipv6pool-ec2
|
- * key-pair
| launch-template
| local-gateway-route-table-vpc-association
| placement-group
|
- * prefix-list
| natgateway
| network-acl
| network-interface
|
- * reserved-instances
|route-table
| security-group
| snapshot
| spot-fleet-request
- * | spot-instances-request
| snapshot
| subnet
|
- * traffic-mirror-filter
| traffic-mirror-session
|
- * traffic-mirror-target
| transit-gateway
|
- * transit-gateway-attachment
| transit-gateway-multicast-domain
| transit-gateway-route-table
|
- * volume
|vpc
| vpc-peering-connection
|
- * vpc-endpoint
(for interface and gateway endpoints) |
- * vpc-endpoint-service
(for Amazon Web Services PrivateLink) | vpc-flow-log
|
- * vpn-connection
| vpn-gateway
.
internet-gateway
| ipv4pool-ec2
| ipv6pool-ec2
|
+ * key-pair
| launch-template
| local-gateway-route-table-vpc-association
|
+ * natgateway
| network-acl
| network-insights-analysis
|
+ * network-insights-path
| network-interface
|
+ * placement-group
| prefix-list
| reserved-instances
|
+ * route-table
| security-group
| security-group-rule
|
+ * snapshot
| spot-fleet-request
| spot-instances-request
| subnet
|
+ * traffic-mirror-filter
| traffic-mirror-session
| traffic-mirror-target
|
+ * transit-gateway
| transit-gateway-attachment
|
+ * transit-gateway-multicast-domain
| transit-gateway-route-table
|
+ * volume
| vpc
| vpc-endpoint
| vpc-endpoint-service
|
+ * vpc-flow-log
| vpc-peering-connection
|
+ * vpn-connection
| vpn-gateway
.
* To tag a resource after it has been created, see CreateTags.
*/ ResourceType?: ResourceType | string; @@ -3935,12 +3950,12 @@ export namespace AuthorizeSecurityGroupIngressResult { } /** - *Describes the storage parameters for S3 and S3 buckets for an instance store-backed AMI.
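A sketch of tagging on creation with one of the resource types listed above (the CIDR block and tag values are illustrative assumptions):

```typescript
import { EC2Client, CreateVpcCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });

// Tag the VPC at creation time instead of calling CreateTags afterwards.
await client.send(
  new CreateVpcCommand({
    CidrBlock: "10.0.0.0/16",
    TagSpecifications: [
      { ResourceType: "vpc", Tags: [{ Key: "project", Value: "demo" }] },
    ],
  })
);
```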
+ *<p>Describes the Amazon S3 storage parameters for an instance store-backed AMI.</p>
*/ export interface S3Storage { /** *The access key ID of the owner of the bucket. Before you specify a value for your access key ID, review and follow the guidance - * in Best Practices for Managing AWS Access Keys.
+ * in Best Practices for Managing Amazon Web Services Access Keys. */ AWSAccessKeyId?: string; @@ -4835,16 +4850,16 @@ export interface CopyImageRequest { /** *Specifies whether the destination snapshots of the copied image should be encrypted.
* You can encrypt a copy of an unencrypted snapshot, but you cannot create an unencrypted
- * copy of an encrypted snapshot. The default CMK for EBS is used unless you specify a non-default
- * AWS Key Management Service (AWS KMS) CMK using KmsKeyId
. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.
KmsKeyId
. For more information, see Amazon EBS Encryption in the Amazon Elastic Compute Cloud User Guide.
*/
Encrypted?: boolean;
/**
- * The identifier of the symmetric AWS Key Management Service (AWS KMS) customer master key (CMK) to use when creating
- * encrypted volumes. If this parameter is not specified, your AWS managed CMK for EBS is used.
- * If you specify a CMK, you must also set the encrypted state to true
.
You can specify a CMK using any of the following:
+ *The identifier of the symmetric Key Management Service (KMS) KMS key to use when creating
+ * encrypted volumes. If this parameter is not specified, your Amazon Web Services managed KMS key for Amazon EBS is used.
+ * If you specify a KMS key, you must also set the encrypted state to true
.
You can specify a KMS key using any of the following:
*Key ID. For example, 1234abcd-12ab-34cd-56ef-1234567890ab.
@@ -4859,10 +4874,10 @@ export interface CopyImageRequest { *Alias ARN. For example, arn:aws:kms:us-east-1:012345678910:alias/ExampleAlias.
*AWS authenticates the CMK asynchronously. Therefore, if you specify an identifier that is not valid, + *
Amazon Web Services authenticates the KMS key asynchronously. Therefore, if you specify an identifier that is not valid, * the action can appear to complete, but eventually fails.
- *The specified CMK must exist in the destination Region.
- *Amazon EBS does not support asymmetric CMKs.
+ *The specified KMS key must exist in the destination Region.
+ *Amazon EBS does not support asymmetric KMS keys.
*/ KmsKeyId?: string; @@ -4883,13 +4898,13 @@ export interface CopyImageRequest { /** *The Amazon Resource Name (ARN) of the Outpost to which to copy the AMI. Only - * specify this parameter when copying an AMI from an AWS Region to an Outpost. + * specify this parameter when copying an AMI from an Amazon Web Services Region to an Outpost. * The AMI must be in the Region of the destination Outpost. You cannot copy an * AMI from an Outpost to a Region, from one Outpost to another, or within the same * Outpost.
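A sketch of an encrypted cross-Region copy using a customer managed KMS key (all identifiers hypothetical):

```typescript
import { EC2Client, CopyImageCommand } from "@aws-sdk/client-ec2";

// The client Region is the destination; the KMS key must exist there.
const client = new EC2Client({ region: "us-east-1" });

const { ImageId } = await client.send(
  new CopyImageCommand({
    Name: "encrypted-copy",
    SourceImageId: "ami-0123456789abcdef0",
    SourceRegion: "us-west-2",
    Encrypted: true,
    KmsKeyId: "1234abcd-12ab-34cd-56ef-1234567890ab",
  })
);
```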
* *For more information, see - * Copying AMIs from an AWS Region to an Outpost in the + * Copying AMIs from an Amazon Web Services Region to an Outpost in the * Amazon Elastic Compute Cloud User Guide.
*/ DestinationOutpostArn?: string; @@ -7037,6 +7052,9 @@ export type _InstanceType = | "u-6tb1.metal" | "u-9tb1.112xlarge" | "u-9tb1.metal" + | "vt1.24xlarge" + | "vt1.3xlarge" + | "vt1.6xlarge" | "x1.16xlarge" | "x1.32xlarge" | "x1e.16xlarge" @@ -7105,6 +7123,8 @@ export interface Placement { * tenancy ofdedicated
runs on single-tenant hardware. The host
* tenancy is not supported for the ImportInstance command.
* This parameter is not supported by CreateFleet.
+ * + *T3 instances that use the unlimited
CPU credit option do not support host
tenancy.
By default, Amazon EC2 attempts to shut down and reboot the instance before creating the image. If the No Reboot
option is set, Amazon EC2 doesn't shut down the instance before creating the image. When this option is used, file system integrity on the created image can't be guaranteed.
By default, Amazon EC2 attempts to shut down and reboot the instance before creating the image.
+ * If the No Reboot
option is set, Amazon EC2 doesn't shut down the instance before creating
+ * the image. Without a reboot, the AMI will be crash consistent (all the volumes are snapshotted
+ * at the same time), but not application consistent (all the operating system buffers are not flushed
+ * to disk before the snapshots are created).
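A sketch of the crash-consistent (no-reboot) path described above, with a hypothetical instance ID:

```typescript
import { EC2Client, CreateImageCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });

// Snapshot all volumes at the same time without stopping the instance.
await client.send(
  new CreateImageCommand({
    InstanceId: "i-0123456789abcdef0",
    Name: "nightly-backup",
    NoReboot: true, // crash consistent, not application consistent
  })
);
```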
image
.
* To tag the snapshots that are created of the root volume and of other EBS volumes that + *
To tag the snapshots that are created of the root volume and of other Amazon EBS volumes that
* are attached to the instance, the value for ResourceType
must be
* snapshot
. The same tag is applied to all of the snapshots that are
* created.
The state of the prefix list.
+ *The current state of the prefix list.
*/ State?: PrefixListState | string; @@ -4382,7 +4382,7 @@ export namespace CreateReservedInstancesListingResult { export interface CreateRestoreImageTaskRequest { /** - *The name of the S3 bucket that contains the stored AMI object.
+ *The name of the Amazon S3 bucket that contains the stored AMI object.
*/ Bucket: string | undefined; @@ -5339,7 +5339,7 @@ export namespace CreateSpotDatafeedSubscriptionResult { } /** - *The tags to apply to the AMI object that will be stored in the S3 bucket. For more + *
The tags to apply to the AMI object that will be stored in the Amazon S3 bucket. For more * information, see Categorizing your storage using * tags in the Amazon Simple Storage Service User Guide.
*/ @@ -5375,14 +5375,14 @@ export interface CreateStoreImageTaskRequest { ImageId: string | undefined; /** - *The name of the S3 bucket in which the AMI object will be stored. The bucket must be in + *
The name of the Amazon S3 bucket in which the AMI object will be stored. The bucket must be in * the Region in which the request is being made. The AMI object appears in the bucket only after * the upload task has completed.
*/ Bucket: string | undefined; /** - *The tags to apply to the AMI object that will be stored in the S3 bucket.
+ *The tags to apply to the AMI object that will be stored in the Amazon S3 bucket.
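A sketch of storing an AMI with object tags, assuming a hypothetical bucket in the same Region as the request:

```typescript
import { EC2Client, CreateStoreImageTaskCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });

// Archive the AMI as an object in the named bucket.
await client.send(
  new CreateStoreImageTaskCommand({
    ImageId: "ami-0123456789abcdef0",
    Bucket: "my-ami-archive",
    S3ObjectTags: [{ Key: "retention", Value: "90-days" }],
  })
);
```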
*/ S3ObjectTags?: S3ObjectTag[]; @@ -7794,6 +7794,12 @@ export namespace Volume { } export interface CreateVpcRequest { + /** + *The IPv4 network range for the VPC, in CIDR notation. For example,
+ * 10.0.0.0/16
. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18
, we modify it to 100.68.0.0/18
.
Requests an Amazon-provided IPv6 CIDR block with a /56 prefix length for the VPC. * You cannot specify the range of IP addresses, or the size of the CIDR block.
@@ -7841,12 +7847,6 @@ export interface CreateVpcRequest { *The tags to assign to the VPC.
*/ TagSpecifications?: TagSpecification[]; - - /** - *The IPv4 network range for the VPC, in CIDR notation. For example,
- * 10.0.0.0/16
. We modify the specified CIDR block to its canonical form; for example, if you specify 100.68.0.18/18
, we modify it to 100.68.0.0/18
.
- * price_update
- The price for a launch configuration was adjusted
- * because it was too high. This change is permanent.
* submitted
- The EC2 Fleet or Spot Fleet request is being evaluated and Amazon EC2
* is preparing to launch the target number of Spot Instances.
The AWS account ID.
+ *The Amazon Web Services account ID.
*Constraints: Up to 10 000 account IDs can be specified in a single request.
*/ UserId?: string; @@ -8261,7 +8256,7 @@ export namespace ImageAttribute { export interface DescribeImagesRequest { /** *Scopes the images by users with explicit launch permissions.
- * Specify an AWS account ID, self
(the sender of the request),
+ * Specify an Amazon Web Services account ID, self
(the sender of the request),
* or all
(public AMIs).
* block-device-mapping.delete-on-termination
- A Boolean value that indicates
- * whether the Amazon EBS volume is deleted on instance termination.
@@ -8286,22 +8281,22 @@ export interface DescribeImagesRequest { *
- * block-device-mapping.snapshot-id
- The ID of the snapshot used for the EBS
+ * block-device-mapping.snapshot-id
- The ID of the snapshot used for the Amazon EBS
* volume.
- * block-device-mapping.volume-size
- The volume size of the EBS volume, in GiB.
block-device-mapping.volume-size
- The volume size of the Amazon EBS volume, in GiB.
*
- * block-device-mapping.volume-type
- The volume type of the EBS volume
- * (gp2
| io1
| io2
| st1
| sc1
|
- * standard
).
block-device-mapping.volume-type
- The volume type of the Amazon EBS volume
+ * (io1
| io2
| gp2
| gp3
| sc1
+ *
| st1
| standard
).
*
- * block-device-mapping.encrypted
- A Boolean that indicates whether the EBS volume is encrypted.
block-device-mapping.encrypted
- A Boolean that indicates whether the Amazon EBS volume is encrypted.
* @@ -8346,13 +8341,13 @@ export interface DescribeImagesRequest { *
* owner-alias
- The owner alias (amazon
| aws-marketplace
).
- * The valid aliases are defined in an Amazon-maintained list. This is not the AWS account alias that can be
+ * The valid aliases are defined in an Amazon-maintained list. This is not the Amazon Web Services account alias that can be
* set using the IAM console. We recommend that you use the Owner
* request parameter instead of this filter.
- * owner-id
- The AWS account ID of the owner. We recommend that you use the
+ *
+ * owner-id
- The Amazon Web Services account ID of the owner. We recommend that you use the
* Owner request parameter instead of this filter.
- * product-code.type
- The type of the product code (devpay
|
- * marketplace
).
product-code.type
- The type of the product code (marketplace
).
* @@ -8426,7 +8420,7 @@ export interface DescribeImagesRequest { /** *
Scopes the results to images with the specified owners. You can specify a combination of
- * AWS account IDs, self
, amazon
, and aws-marketplace
.
+ * Amazon Web Services account IDs, self
, amazon
, and aws-marketplace
.
* If you omit this parameter, the results include all images for which you have launch permissions,
* regardless of ownership.
The AWS account ID of the image owner.
+ *The ID of the Amazon Web Services account that owns the image.
*/ OwnerId?: string; @@ -8617,16 +8611,19 @@ export interface Image { /** *The platform details associated with the billing code of the AMI. For more information, - * see Obtaining - * Billing Information in the Amazon Elastic Compute Cloud User Guide.
+ * see Understanding + * AMI billing in the Amazon Elastic Compute Cloud User Guide. */ PlatformDetails?: string; /** *The operation of the Amazon EC2 instance and the billing code that is associated with the AMI.
- * usageOperation
corresponds to the lineitem/Operation column on your AWS Cost and Usage Report and in the AWS Price
- * List API. For the list of UsageOperation
codes, see Platform Details and Usage Operation Billing Codes in the
- * Amazon Elastic Compute Cloud User Guide.
usageOperation
corresponds to the lineitem/Operation column on your Amazon Web Services Cost and Usage Report and in the Amazon Web Services Price
+ * List API. You can view these fields on the Instances or
+ * AMIs pages in the Amazon EC2 console, or in the responses that are
+ * returned by the DescribeImages
+ * command in the Amazon EC2 API, or the describe-images
+ * command in the CLI.
*/
UsageOperation?: string;
@@ -8666,8 +8663,8 @@ export interface Image {
Hypervisor?: HypervisorType | string;
/**
- * The AWS account alias (for example, amazon
, self
) or
- * the AWS account ID of the AMI owner.
The Amazon Web Services account alias (for example, amazon
, self
) or
+ * the Amazon Web Services account ID of the AMI owner.
The type of root device used by the AMI. The AMI can use an EBS volume or an instance store volume.
+ *The type of root device used by the AMI. The AMI can use an Amazon EBS volume or an instance store volume.
*/ RootDeviceType?: DeviceType | string; diff --git a/clients/client-ec2/models/models_3.ts b/clients/client-ec2/models/models_3.ts index 885a073d2294..7508129e8d9b 100644 --- a/clients/client-ec2/models/models_3.ts +++ b/clients/client-ec2/models/models_3.ts @@ -2134,13 +2134,11 @@ export interface DescribeInstanceTypesRequest { *
- * auto-recovery-supported
- Indicates whether auto recovery is supported
- * (true
| false
).
auto-recovery-supported
- Indicates whether auto recovery is supported (true
| false
).
*
- * bare-metal
- Indicates whether it is a bare metal instance type
- * (true
| false
).
bare-metal
- Indicates whether it is a bare metal instance type (true
| false
).
* @@ -2196,8 +2194,7 @@ export interface DescribeInstanceTypesRequest { *
* ebs-info.nvme-support
- Indicates whether non-volatile memory express (NVMe)
- * is supported for EBS volumes (required
| supported
|
- * unsupported
).
required
| supported
| unsupported
).
* @@ -2206,8 +2203,7 @@ export interface DescribeInstanceTypesRequest { *
- * hibernation-supported
- Indicates whether On-Demand hibernation is supported
- * (true
| false
).
hibernation-supported
- Indicates whether On-Demand hibernation is supported (true
| false
).
* @@ -2271,7 +2267,7 @@ export interface DescribeInstanceTypesRequest { *
* network-info.encryption-in-transit-supported
- Indicates whether the instance type
- * automatically encrypts in-transit traffic between instances.
true
| false
).
* @@ -2285,8 +2281,7 @@ export interface DescribeInstanceTypesRequest { *
- * network-info.ipv6-supported
- Indicates whether the instance type supports
- * IPv6 (true
| false
).
network-info.ipv6-supported
- Indicates whether the instance type supports IPv6 (true
| false
).
* @@ -10920,7 +10915,7 @@ export interface StoreImageTaskResult { TaskStartTime?: Date; /** - *
The name of the S3 bucket that contains the stored AMI object.
+ *The name of the Amazon S3 bucket that contains the stored AMI object.
*/ Bucket?: string; diff --git a/clients/client-ec2/models/models_4.ts b/clients/client-ec2/models/models_4.ts index 120a10f8c9eb..c2f6fd872615 100644 --- a/clients/client-ec2/models/models_4.ts +++ b/clients/client-ec2/models/models_4.ts @@ -6483,8 +6483,8 @@ export interface ModifyCapacityReservationRequest { CapacityReservationId: string | undefined; /** - *The number of instances for which to reserve capacity.
- *Valid range: 1 - 1000
+ *The number of instances for which to reserve capacity. The number of instances can't be increased or
+ * decreased by more than 1000
in a single request.
The AWS account ID to add to the list of launch permissions for the AMI.
+ *The Amazon Web Services account ID to add to the list of launch permissions for the AMI.
*/ Add?: LaunchPermission[]; /** - *The AWS account ID to remove from the list of launch permissions for the AMI.
+ *The Amazon Web Services account ID to remove from the list of launch permissions for the AMI.
*/ Remove?: LaunchPermission[]; } @@ -7162,7 +7162,7 @@ export namespace LaunchPermissionModifications { export interface ModifyImageAttributeRequest { /** *The name of the attribute to modify.
- * The valid values are description
, launchPermission
, and productCodes
.
description
and launchPermission
.
*/
Attribute?: string;
@@ -7188,7 +7188,7 @@ export interface ModifyImageAttributeRequest {
OperationType?: OperationType | string;
/**
- * The DevPay product codes. After you add a product code to an AMI, it can't be removed.
+ *Not supported.
*/ ProductCodes?: string[]; @@ -7199,14 +7199,14 @@ export interface ModifyImageAttributeRequest { UserGroups?: string[]; /** - *The AWS account IDs. + *
The Amazon Web Services account IDs.
* This parameter can be used only when the Attribute
parameter is launchPermission
.
The value of the attribute being modified.
- * This parameter can be used only when the Attribute
parameter is description
or productCodes
.
Attribute
parameter is description
.
*/
Value?: string;
@@ -7525,6 +7525,8 @@ export interface InstanceCreditSpecificationRequest {
/**
* The credit option for CPU usage of the instance. Valid values are
* standard
and unlimited
.
T3 instances with host
tenancy do not support the unlimited
+ * CPU credit option.
The tenancy for the instance.
+ * + *For T3 instances, you can't change the tenancy from dedicated
to
+ * host
, or from host
to dedicated
. Attempting
+ * to make one of these unsupported tenancy changes results in the InvalidTenancy
+ * error code.
The maximum number of entries for the prefix list. You cannot modify the entries * of a prefix list and modify the size of a prefix list at the same time.
+ *If any of the resources that reference the prefix list cannot support the new + * maximum size, the modify operation fails. Check the state message for the IDs of + * the first ten resources that do not support the new maximum size.
*/ MaxEntries?: number; } diff --git a/clients/client-ec2/models/models_5.ts b/clients/client-ec2/models/models_5.ts index c8168fe61723..974373303ba3 100644 --- a/clients/client-ec2/models/models_5.ts +++ b/clients/client-ec2/models/models_5.ts @@ -1093,14 +1093,14 @@ export interface RegisterImageRequest { /** *The architecture of the AMI.
- *Default: For Amazon EBS-backed AMIs, i386
.
+ *
Default: For Amazon EBS-backed AMIs, i386
.
* For instance store-backed AMIs, the architecture specified in the manifest file.
The block device mapping entries.
- *If you specify an EBS volume using the ID of an EBS snapshot, you can't specify the encryption state of the volume.
+ *If you specify an Amazon EBS volume using the ID of an Amazon EBS snapshot, you can't specify the encryption state of the volume.
*If you create an AMI on an Outpost, then all backing snapshots must be on the same Outpost or in the Region
* of that Outpost. AMIs on an Outpost that include local snapshots can be used to launch instances on the same Outpost
* only. For more information,
@@ -1139,7 +1139,7 @@ export interface RegisterImageRequest {
/**
* The billing product codes. Your account must be authorized to specify billing product codes. Otherwise,
- * you can use the AWS Marketplace to bill for the use of an AMI.
Default: standard
(T2 instances) or unlimited
(T3/T3a
* instances)
For T3 instances with host
tenancy, only standard
is
+ * supported.
Specifies settings for cold storage.
+ *<p>Specifies the configuration for cold storage options, such as whether cold storage is enabled.</p>
*/ export interface ColdStorageOptions { /** - *True to enable cold storage for an Elasticsearch domain.
+ *<p>Whether to enable the cold storage option. Accepted values: true or false.</p>
*/ Enabled: boolean | undefined; } @@ -1328,7 +1328,7 @@ export interface ElasticsearchClusterConfig { WarmCount?: number; /** - *Specifies the ColdStorageOptions
configuration for an Elasticsearch domain.
Specifies the ColdStorageOptions
configuration for the Elasticsearch domain.</p>
Container for the parameters to the ListDomainNames
operation.
Optional parameter to filter the output by domain engine type. Acceptable values are 'Elasticsearch' and 'OpenSearch'.
+ */ + EngineType?: EngineType | string; +} + +export namespace ListDomainNamesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListDomainNamesRequest): any => ({ + ...obj, + }); +} + export interface DomainInfo { /** * Specifies the DomainName
.
Specifies the EngineType
of the domain.
The result of a ListDomainNames
operation. Contains the names of all Elasticsearch domains owned by this account.
The result of a ListDomainNames
operation. Contains the names of all domains owned by this account and their respective engine types.
List of Elasticsearch domain names.
+ *List of domain names and respective engine types.
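Once the client is regenerated, the new filter can be exercised roughly like this (Region and output handling are assumptions):

```typescript
import {
  ElasticsearchServiceClient,
  ListDomainNamesCommand,
} from "@aws-sdk/client-elasticsearch-service";

const client = new ElasticsearchServiceClient({ region: "us-east-1" });

// Only domains running the OpenSearch engine; omit EngineType to list all.
const { DomainNames } = await client.send(
  new ListDomainNamesCommand({ EngineType: "OpenSearch" })
);
for (const domain of DomainNames ?? []) {
  console.log(domain.DomainName, domain.EngineType);
}
```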
*/ DomainNames?: DomainInfo[]; } diff --git a/clients/client-elasticsearch-service/protocols/Aws_restJson1.ts b/clients/client-elasticsearch-service/protocols/Aws_restJson1.ts index 4b67a40f8ad9..802332825b07 100644 --- a/clients/client-elasticsearch-service/protocols/Aws_restJson1.ts +++ b/clients/client-elasticsearch-service/protocols/Aws_restJson1.ts @@ -1110,12 +1110,12 @@ export const serializeAws_restJson1ListDomainNamesCommand = async ( context: __SerdeContext ): Promise<__HttpRequest> => { const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); - const headers: any = { - "content-type": "application/json", - }; + const headers: any = {}; let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/2015-01-01/domain"; + const query: any = { + ...(input.EngineType !== undefined && { engineType: input.EngineType }), + }; let body: any; - body = ""; return new __HttpRequest({ protocol, hostname, @@ -1123,6 +1123,7 @@ export const serializeAws_restJson1ListDomainNamesCommand = async ( method: "GET", headers, path: resolvedPath, + query, body, }); }; @@ -5779,6 +5780,7 @@ const deserializeAws_restJson1DomainEndpointOptionsStatus = ( const deserializeAws_restJson1DomainInfo = (output: any, context: __SerdeContext): DomainInfo => { return { DomainName: __expectString(output.DomainName), + EngineType: __expectString(output.EngineType), } as any; }; diff --git a/clients/client-iot/commands/DeleteStreamCommand.ts b/clients/client-iot/commands/DeleteStreamCommand.ts index eec5ebdefd8d..187b7f238f96 100644 --- a/clients/client-iot/commands/DeleteStreamCommand.ts +++ b/clients/client-iot/commands/DeleteStreamCommand.ts @@ -1,6 +1,5 @@ import { IoTClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTClient"; -import { DeleteStreamRequest } from "../models/models_0"; -import { DeleteStreamResponse } from "../models/models_1"; +import { DeleteStreamRequest, DeleteStreamResponse } from "../models/models_1"; import { deserializeAws_restJson1DeleteStreamCommand, serializeAws_restJson1DeleteStreamCommand, diff --git a/clients/client-iot/commands/ListThingTypesCommand.ts b/clients/client-iot/commands/ListThingTypesCommand.ts index b353852434b0..b240e5100ef0 100644 --- a/clients/client-iot/commands/ListThingTypesCommand.ts +++ b/clients/client-iot/commands/ListThingTypesCommand.ts @@ -1,5 +1,6 @@ import { IoTClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTClient"; -import { ListThingTypesRequest, ListThingTypesResponse } from "../models/models_1"; +import { ListThingTypesRequest } from "../models/models_1"; +import { ListThingTypesResponse } from "../models/models_2"; import { deserializeAws_restJson1ListThingTypesCommand, serializeAws_restJson1ListThingTypesCommand, diff --git a/clients/client-iot/models/models_0.ts b/clients/client-iot/models/models_0.ts index 045eddf658d2..b76596cf2872 100644 --- a/clients/client-iot/models/models_0.ts +++ b/clients/client-iot/models/models_0.ts @@ -486,6 +486,9 @@ export namespace DynamoDBv2Action { /** *Describes an action that writes data to an Amazon Elasticsearch Service * domain.
+ *This action is deprecated. Use the OpenSearch action instead.
+ *Describes an action that writes data to an Amazon OpenSearch Service + * domain.
+ */ +export interface OpenSearchAction { + /** + *The IAM role ARN that has access to OpenSearch.
+ */ + roleArn: string | undefined; + + /** + *The endpoint of your OpenSearch domain.
+ */ + endpoint: string | undefined; + + /** + *The OpenSearch index where you want to store your data.
+ */ + index: string | undefined; + + /** + *The type of document you are storing.
+ */ + type: string | undefined; + + /** + *The unique identifier for the document you are storing.
+ */ + id: string | undefined; +} + +export namespace OpenSearchAction { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OpenSearchAction): any => ({ + ...obj, + }); +} + /** *Describes an action to republish to another topic.
*/ @@ -1464,6 +1507,9 @@ export interface Action { /** *Write data to an Amazon Elasticsearch Service domain.
+ *This action is deprecated. Use the OpenSearch action instead.
+ *Send messages to an Amazon Managed Streaming for Apache Kafka (Amazon MSK) or self-managed Apache Kafka cluster.
*/ kafka?: KafkaAction; + + /** + *Write data to an Amazon OpenSearch Service domain.
+ */ + openSearch?: OpenSearchAction; } export namespace Action { @@ -7807,19 +7858,3 @@ export namespace DeleteSecurityProfileResponse { ...obj, }); } - -export interface DeleteStreamRequest { - /** - *The stream ID.
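A sketch of wiring the new action into a topic rule (role ARN, endpoint, and rule name are hypothetical; the id uses the IoT SQL `newuuid()` substitution):

```typescript
import { IoTClient, CreateTopicRuleCommand } from "@aws-sdk/client-iot";

const client = new IoTClient({ region: "us-east-1" });

// Route matching messages into an OpenSearch Service domain.
await client.send(
  new CreateTopicRuleCommand({
    ruleName: "sensorsToOpenSearch",
    topicRulePayload: {
      sql: "SELECT * FROM 'sensors/+/telemetry'",
      actions: [
        {
          openSearch: {
            roleArn: "arn:aws:iam::123456789012:role/iot-opensearch-access",
            endpoint: "https://search-my-domain.us-east-1.es.amazonaws.com",
            index: "telemetry",
            type: "_doc",
            id: "${newuuid()}",
          },
        },
      ],
    },
  })
);
```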
- */ - streamId: string | undefined; -} - -export namespace DeleteStreamRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DeleteStreamRequest): any => ({ - ...obj, - }); -} diff --git a/clients/client-iot/models/models_1.ts b/clients/client-iot/models/models_1.ts index 14960dd4d38e..cb3172741965 100644 --- a/clients/client-iot/models/models_1.ts +++ b/clients/client-iot/models/models_1.ts @@ -54,6 +54,22 @@ import { } from "./models_0"; import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; +export interface DeleteStreamRequest { + /** + *The stream ID.
+ */ + streamId: string | undefined; +} + +export namespace DeleteStreamRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteStreamRequest): any => ({ + ...obj, + }); +} + export interface DeleteStreamResponse {} export namespace DeleteStreamResponse { @@ -8307,27 +8323,3 @@ export namespace ThingTypeDefinition { ...obj, }); } - -/** - *The output for the ListThingTypes operation.
- */ -export interface ListThingTypesResponse { - /** - *The thing types.
- */ - thingTypes?: ThingTypeDefinition[]; - - /** - *The token for the next set of results. Will not be returned if operation has returned all results.
- */ - nextToken?: string; -} - -export namespace ListThingTypesResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ListThingTypesResponse): any => ({ - ...obj, - }); -} diff --git a/clients/client-iot/models/models_2.ts b/clients/client-iot/models/models_2.ts index de4e2e1dd525..ce2accb0fdd3 100644 --- a/clients/client-iot/models/models_2.ts +++ b/clients/client-iot/models/models_2.ts @@ -45,10 +45,35 @@ import { RegistrationConfig, ThingGroupIndexingConfiguration, ThingIndexingConfiguration, + ThingTypeDefinition, ViolationEventOccurrenceRange, } from "./models_1"; import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; +/** + *The output for the ListThingTypes operation.
+ */ +export interface ListThingTypesResponse { + /** + *The thing types.
+ */ + thingTypes?: ThingTypeDefinition[]; + + /** + *The token for the next set of results. Will not be returned if operation has returned all results.
+ */ + nextToken?: string; +} + +export namespace ListThingTypesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListThingTypesResponse): any => ({ + ...obj, + }); +} + export interface ListTopicRuleDestinationsRequest { /** *The maximum number of results to return at one time.
@@ -1076,7 +1101,7 @@ export interface ThingConnectivity { timestamp?: number; /** - *The reason why the client is disconnected.
+ *The reason why the client is disconnected. If the thing has been disconnected for approximately an hour, the disconnectReason
value might be missing.
Creates a connector using the specified properties.
+ */ + public createConnector( + args: CreateConnectorCommandInput, + options?: __HttpHandlerOptions + ): PromiseCreates a custom plugin using the specified properties.
+ */ + public createCustomPlugin( + args: CreateCustomPluginCommandInput, + options?: __HttpHandlerOptions + ): PromiseCreates a worker configuration using the specified properties.
+ */ + public createWorkerConfiguration( + args: CreateWorkerConfigurationCommandInput, + options?: __HttpHandlerOptions + ): PromiseDeletes the specified connector.
+ */ + public deleteConnector( + args: DeleteConnectorCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns summary information about the connector.
+ */ + public describeConnector( + args: DescribeConnectorCommandInput, + options?: __HttpHandlerOptions + ): PromiseA summary description of the custom plugin.
+ */ + public describeCustomPlugin( + args: DescribeCustomPluginCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns information about a worker configuration.
+ */ + public describeWorkerConfiguration( + args: DescribeWorkerConfigurationCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns a list of all the connectors in this account and Region. The list is limited to connectors whose name starts with the specified prefix. The response also includes a description of each of the listed connectors.
+ */ + public listConnectors( + args: ListConnectorsCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns a list of all of the custom plugins in this account and Region.
+ */ + public listCustomPlugins( + args: ListCustomPluginsCommandInput, + options?: __HttpHandlerOptions + ): PromiseReturns a list of all of the worker configurations in this account and Region.
+ */ + public listWorkerConfigurations( + args: ListWorkerConfigurationsCommandInput, + options?: __HttpHandlerOptions + ): PromiseUpdates the specified connector.
+ */ + public updateConnector( + args: UpdateConnectorCommandInput, + options?: __HttpHandlerOptions + ): PromiseCreates a connector using the specified properties.
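Beyond the per-command pattern shown in the @example blocks below, the aggregated client can be used directly; a sketch with a hypothetical name prefix:

```typescript
import { KafkaConnectClient, ListConnectorsCommand } from "@aws-sdk/client-kafkaconnect";

const client = new KafkaConnectClient({ region: "us-east-1" });

// List connectors whose names start with the given prefix.
const { connectors } = await client.send(
  new ListConnectorsCommand({ connectorNamePrefix: "prod-" })
);
console.log(connectors?.map((connector) => connector.connectorName));
```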
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaConnectClient, CreateConnectorCommand } from "@aws-sdk/client-kafkaconnect"; // ES Modules import + * // const { KafkaConnectClient, CreateConnectorCommand } = require("@aws-sdk/client-kafkaconnect"); // CommonJS import + * const client = new KafkaConnectClient(config); + * const command = new CreateConnectorCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateConnectorCommandInput} for command's `input` shape. + * @see {@link CreateConnectorCommandOutput} for command's `response` shape. + * @see {@link KafkaConnectClientResolvedConfig | config} for command's `input` shape. + * + */ +export class CreateConnectorCommand extends $Command< + CreateConnectorCommandInput, + CreateConnectorCommandOutput, + KafkaConnectClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateConnectorCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackCreates a custom plugin using the specified properties.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaConnectClient, CreateCustomPluginCommand } from "@aws-sdk/client-kafkaconnect"; // ES Modules import + * // const { KafkaConnectClient, CreateCustomPluginCommand } = require("@aws-sdk/client-kafkaconnect"); // CommonJS import + * const client = new KafkaConnectClient(config); + * const command = new CreateCustomPluginCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateCustomPluginCommandInput} for command's `input` shape. + * @see {@link CreateCustomPluginCommandOutput} for command's `response` shape. + * @see {@link KafkaConnectClientResolvedConfig | config} for command's `input` shape. + * + */ +export class CreateCustomPluginCommand extends $Command< + CreateCustomPluginCommandInput, + CreateCustomPluginCommandOutput, + KafkaConnectClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateCustomPluginCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackCreates a worker configuration using the specified properties.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaConnectClient, CreateWorkerConfigurationCommand } from "@aws-sdk/client-kafkaconnect"; // ES Modules import + * // const { KafkaConnectClient, CreateWorkerConfigurationCommand } = require("@aws-sdk/client-kafkaconnect"); // CommonJS import + * const client = new KafkaConnectClient(config); + * const command = new CreateWorkerConfigurationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateWorkerConfigurationCommandInput} for command's `input` shape. + * @see {@link CreateWorkerConfigurationCommandOutput} for command's `response` shape. + * @see {@link KafkaConnectClientResolvedConfig | config} for command's `input` shape. + * + */ +export class CreateWorkerConfigurationCommand extends $Command< + CreateWorkerConfigurationCommandInput, + CreateWorkerConfigurationCommandOutput, + KafkaConnectClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateWorkerConfigurationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDeletes the specified connector.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaConnectClient, DeleteConnectorCommand } from "@aws-sdk/client-kafkaconnect"; // ES Modules import + * // const { KafkaConnectClient, DeleteConnectorCommand } = require("@aws-sdk/client-kafkaconnect"); // CommonJS import + * const client = new KafkaConnectClient(config); + * const command = new DeleteConnectorCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteConnectorCommandInput} for command's `input` shape. + * @see {@link DeleteConnectorCommandOutput} for command's `response` shape. + * @see {@link KafkaConnectClientResolvedConfig | config} for command's `input` shape. + * + */ +export class DeleteConnectorCommand extends $Command< + DeleteConnectorCommandInput, + DeleteConnectorCommandOutput, + KafkaConnectClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteConnectorCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackReturns summary information about the connector.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaConnectClient, DescribeConnectorCommand } from "@aws-sdk/client-kafkaconnect"; // ES Modules import + * // const { KafkaConnectClient, DescribeConnectorCommand } = require("@aws-sdk/client-kafkaconnect"); // CommonJS import + * const client = new KafkaConnectClient(config); + * const command = new DescribeConnectorCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeConnectorCommandInput} for command's `input` shape. + * @see {@link DescribeConnectorCommandOutput} for command's `response` shape. + * @see {@link KafkaConnectClientResolvedConfig | config} for command's `input` shape. + * + */ +export class DescribeConnectorCommand extends $Command< + DescribeConnectorCommandInput, + DescribeConnectorCommandOutput, + KafkaConnectClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeConnectorCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackA summary description of the custom plugin.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaConnectClient, DescribeCustomPluginCommand } from "@aws-sdk/client-kafkaconnect"; // ES Modules import + * // const { KafkaConnectClient, DescribeCustomPluginCommand } = require("@aws-sdk/client-kafkaconnect"); // CommonJS import + * const client = new KafkaConnectClient(config); + * const command = new DescribeCustomPluginCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeCustomPluginCommandInput} for command's `input` shape. + * @see {@link DescribeCustomPluginCommandOutput} for command's `response` shape. + * @see {@link KafkaConnectClientResolvedConfig | config} for command's `input` shape. + * + */ +export class DescribeCustomPluginCommand extends $Command< + DescribeCustomPluginCommandInput, + DescribeCustomPluginCommandOutput, + KafkaConnectClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeCustomPluginCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackReturns information about a worker configuration.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaConnectClient, DescribeWorkerConfigurationCommand } from "@aws-sdk/client-kafkaconnect"; // ES Modules import + * // const { KafkaConnectClient, DescribeWorkerConfigurationCommand } = require("@aws-sdk/client-kafkaconnect"); // CommonJS import + * const client = new KafkaConnectClient(config); + * const command = new DescribeWorkerConfigurationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeWorkerConfigurationCommandInput} for command's `input` shape. + * @see {@link DescribeWorkerConfigurationCommandOutput} for command's `response` shape. + * @see {@link KafkaConnectClientResolvedConfig | config} for command's `input` shape. + * + */ +export class DescribeWorkerConfigurationCommand extends $Command< + DescribeWorkerConfigurationCommandInput, + DescribeWorkerConfigurationCommandOutput, + KafkaConnectClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeWorkerConfigurationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackReturns a list of all the connectors in this account and Region. The list is limited to connectors whose name starts with the specified prefix. The response also includes a description of each of the listed connectors.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaConnectClient, ListConnectorsCommand } from "@aws-sdk/client-kafkaconnect"; // ES Modules import + * // const { KafkaConnectClient, ListConnectorsCommand } = require("@aws-sdk/client-kafkaconnect"); // CommonJS import + * const client = new KafkaConnectClient(config); + * const command = new ListConnectorsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListConnectorsCommandInput} for command's `input` shape. + * @see {@link ListConnectorsCommandOutput} for command's `response` shape. + * @see {@link KafkaConnectClientResolvedConfig | config} for command's `input` shape. + * + */ +export class ListConnectorsCommand extends $Command< + ListConnectorsCommandInput, + ListConnectorsCommandOutput, + KafkaConnectClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListConnectorsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackReturns a list of all of the custom plugins in this account and Region.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaConnectClient, ListCustomPluginsCommand } from "@aws-sdk/client-kafkaconnect"; // ES Modules import + * // const { KafkaConnectClient, ListCustomPluginsCommand } = require("@aws-sdk/client-kafkaconnect"); // CommonJS import + * const client = new KafkaConnectClient(config); + * const command = new ListCustomPluginsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListCustomPluginsCommandInput} for command's `input` shape. + * @see {@link ListCustomPluginsCommandOutput} for command's `response` shape. + * @see {@link KafkaConnectClientResolvedConfig | config} for command's `input` shape. + * + */ +export class ListCustomPluginsCommand extends $Command< + ListCustomPluginsCommandInput, + ListCustomPluginsCommandOutput, + KafkaConnectClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListCustomPluginsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackReturns a list of all of the worker configurations in this account and Region.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaConnectClient, ListWorkerConfigurationsCommand } from "@aws-sdk/client-kafkaconnect"; // ES Modules import + * // const { KafkaConnectClient, ListWorkerConfigurationsCommand } = require("@aws-sdk/client-kafkaconnect"); // CommonJS import + * const client = new KafkaConnectClient(config); + * const command = new ListWorkerConfigurationsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListWorkerConfigurationsCommandInput} for command's `input` shape. + * @see {@link ListWorkerConfigurationsCommandOutput} for command's `response` shape. + * @see {@link KafkaConnectClientResolvedConfig | config} for command's `input` shape. + * + */ +export class ListWorkerConfigurationsCommand extends $Command< + ListWorkerConfigurationsCommandInput, + ListWorkerConfigurationsCommandOutput, + KafkaConnectClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListWorkerConfigurationsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackUpdates the specified connector.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaConnectClient, UpdateConnectorCommand } from "@aws-sdk/client-kafkaconnect"; // ES Modules import + * // const { KafkaConnectClient, UpdateConnectorCommand } = require("@aws-sdk/client-kafkaconnect"); // CommonJS import + * const client = new KafkaConnectClient(config); + * const command = new UpdateConnectorCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateConnectorCommandInput} for command's `input` shape. + * @see {@link UpdateConnectorCommandOutput} for command's `response` shape. + * @see {@link KafkaConnectClientResolvedConfig | config} for command's `input` shape. + * + */ +export class UpdateConnectorCommand extends $Command< + UpdateConnectorCommandInput, + UpdateConnectorCommandOutput, + KafkaConnectClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateConnectorCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackThe description of the scale-in policy for the connector.
+ */ +export interface ScaleInPolicyDescription { + /** + *Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.
+ */ + cpuUtilizationPercentage?: number; +} + +export namespace ScaleInPolicyDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ScaleInPolicyDescription): any => ({ + ...obj, + }); +} + +/** + *The description of the scale-out policy for the connector.
+ */ +export interface ScaleOutPolicyDescription { + /** + *The CPU utilization percentage threshold at which you want connector scale out to be triggered.
+ */ + cpuUtilizationPercentage?: number; +} + +export namespace ScaleOutPolicyDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ScaleOutPolicyDescription): any => ({ + ...obj, + }); +} + +/** + *Information about the auto scaling parameters for the connector.
+ */ +export interface AutoScalingDescription { + /** + *The maximum number of workers allocated to the connector.
+ */ + maxWorkerCount?: number; + + /** + *The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
+ */ + mcuCount?: number; + + /** + *The minimum number of workers allocated to the connector.
+ */ + minWorkerCount?: number; + + /** + *<p>The scale-in policy for the connector.</p>
+ */ + scaleInPolicy?: ScaleInPolicyDescription; + + /** + *<p>The scale-out policy for the connector.</p>
+ */ + scaleOutPolicy?: ScaleOutPolicyDescription; +} + +export namespace AutoScalingDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AutoScalingDescription): any => ({ + ...obj, + }); +} + +/** + *The description of a connector's provisioned capacity.
+ */ +export interface ProvisionedCapacityDescription { + /** + *The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
+ */ + mcuCount?: number; + + /** + *The number of workers that are allocated to the connector.
+ */ + workerCount?: number; +} + +export namespace ProvisionedCapacityDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ProvisionedCapacityDescription): any => ({ + ...obj, + }); +} + +/** + *A description of the connector's capacity.
+ */ +export interface CapacityDescription { + /** + *Describes the connector's auto scaling capacity.
+ */ + autoScaling?: AutoScalingDescription; + + /** + *Describes a connector's provisioned capacity.
+ */ + provisionedCapacity?: ProvisionedCapacityDescription; +} + +export namespace CapacityDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CapacityDescription): any => ({ + ...obj, + }); +} + +export enum ConnectorState { + CREATING = "CREATING", + DELETING = "DELETING", + FAILED = "FAILED", + RUNNING = "RUNNING", + UPDATING = "UPDATING", +} + +/** + *The description of the VPC in which the connector resides.
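These description shapes surface in DescribeConnector responses; a sketch (the connector ARN is hypothetical):

```typescript
import { KafkaConnectClient, DescribeConnectorCommand } from "@aws-sdk/client-kafkaconnect";

const client = new KafkaConnectClient({ region: "us-east-1" });

const { connectorState, capacity } = await client.send(
  new DescribeConnectorCommand({
    connectorArn: "arn:aws:kafkaconnect:us-east-1:123456789012:connector/example/abc",
  })
);

// Auto scaled connectors report their scale-in/scale-out CPU thresholds here.
console.log(connectorState, capacity?.autoScaling?.scaleInPolicy?.cpuUtilizationPercentage);
```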
+ */ +export interface VpcDescription { + /** + *The security groups for the connector.
+ */ + securityGroups?: string[]; + + /** + *The subnets for the connector.
+ */ + subnets?: string[]; +} + +export namespace VpcDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: VpcDescription): any => ({ + ...obj, + }); +} + +/** + *The description of the Apache Kafka cluster to which the connector is connected.
+ */ +export interface ApacheKafkaClusterDescription { + /** + *The bootstrap servers of the cluster.
+ */ + bootstrapServers?: string; + + /** + *Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.
+ */ + vpc?: VpcDescription; +} + +export namespace ApacheKafkaClusterDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ApacheKafkaClusterDescription): any => ({ + ...obj, + }); +} + +/** + *Details of how to connect to the Apache Kafka cluster.
+ */ +export interface KafkaClusterDescription { + /** + *The Apache Kafka cluster to which the connector is connected.
+ */ + apacheKafkaCluster?: ApacheKafkaClusterDescription; +} + +export namespace KafkaClusterDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: KafkaClusterDescription): any => ({ + ...obj, + }); +} + +export enum KafkaClusterClientAuthenticationType { + IAM = "IAM", + NONE = "NONE", +} + +/** + *The client authentication information used in order to authenticate with the Apache Kafka cluster.
+ */ +export interface KafkaClusterClientAuthenticationDescription { + /** + *The type of client authentication used to connect to the Apache Kafka cluster. Value NONE means that no client authentication is used.
+ */ + authenticationType?: KafkaClusterClientAuthenticationType | string; +} + +export namespace KafkaClusterClientAuthenticationDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: KafkaClusterClientAuthenticationDescription): any => ({ + ...obj, + }); +} + +export enum KafkaClusterEncryptionInTransitType { + PLAINTEXT = "PLAINTEXT", + TLS = "TLS", +} + +/** + *The description of the encryption in transit to the Apache Kafka cluster.
+ */ +export interface KafkaClusterEncryptionInTransitDescription { + /** + *The type of encryption in transit to the Apache Kafka cluster.
+ */ + encryptionType?: KafkaClusterEncryptionInTransitType | string; +} + +export namespace KafkaClusterEncryptionInTransitDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: KafkaClusterEncryptionInTransitDescription): any => ({ + ...obj, + }); +} + +/** + *A description of the log delivery settings.
+ */ +export interface CloudWatchLogsLogDeliveryDescription { + /** + *Whether log delivery to Amazon CloudWatch Logs is enabled.
+ */ + enabled?: boolean; + + /** + *The name of the CloudWatch log group that is the destination for log delivery.
+ */ + logGroup?: string; +} + +export namespace CloudWatchLogsLogDeliveryDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CloudWatchLogsLogDeliveryDescription): any => ({ + ...obj, + }); +} + +/** + *A description of the settings for delivering logs to Amazon Kinesis Data Firehose.
+ */ +export interface FirehoseLogDeliveryDescription { + /** + *The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery.
+ */ + deliveryStream?: string; + + /** + *Specifies whether connector logs get delivered to Amazon Kinesis Data Firehose.
+ */ + enabled?: boolean; +} + +export namespace FirehoseLogDeliveryDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FirehoseLogDeliveryDescription): any => ({ + ...obj, + }); +} + +/** + *The description of the details about delivering logs to Amazon S3.
+ */ +export interface S3LogDeliveryDescription { + /** + *The name of the S3 bucket that is the destination for log delivery.
+ */ + bucket?: string; + + /** + *Specifies whether connector logs get sent to the specified Amazon S3 destination.
+ */ + enabled?: boolean; + + /** + *The S3 prefix that is the destination for log delivery.
+ */ + prefix?: string; +} + +export namespace S3LogDeliveryDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: S3LogDeliveryDescription): any => ({ + ...obj, + }); +} + +/** + *Workers can send worker logs to different destination types. This configuration specifies the details of these destinations.
+ */ +export interface WorkerLogDeliveryDescription { + /** + *Details about delivering logs to Amazon CloudWatch Logs.
+ */ + cloudWatchLogs?: CloudWatchLogsLogDeliveryDescription; + + /** + *Details about delivering logs to Amazon Kinesis Data Firehose.
+ */ + firehose?: FirehoseLogDeliveryDescription; + + /** + *Details about delivering logs to Amazon S3.
+ */ + s3?: S3LogDeliveryDescription; +} + +export namespace WorkerLogDeliveryDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: WorkerLogDeliveryDescription): any => ({ + ...obj, + }); +} + +/** + *The description of the log delivery settings.
+ */ +export interface LogDeliveryDescription { + /** + *The workers can send worker logs to different destination types. This configuration specifies the details of these destinations.
+ */ + workerLogDelivery?: WorkerLogDeliveryDescription; +} + +export namespace LogDeliveryDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LogDeliveryDescription): any => ({ + ...obj, + }); +} + +/** + *Details about a custom plugin.
+ */ +export interface CustomPluginDescription { + /** + *The Amazon Resource Name (ARN) of the custom plugin.
+ */ + customPluginArn?: string; + + /** + *The revision of the custom plugin.
+ */ + revision?: number; +} + +export namespace CustomPluginDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CustomPluginDescription): any => ({ + ...obj, + }); +} + +/** + *The description of the plugin.
+ */ +export interface PluginDescription { + /** + *Details about a custom plugin.
+ */ + customPlugin?: CustomPluginDescription; +} + +export namespace PluginDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PluginDescription): any => ({ + ...obj, + }); +} + +/** + *The description of the worker configuration.
+ */ +export interface WorkerConfigurationDescription { + /** + *The revision of the worker configuration.
+ */ + revision?: number; + + /** + *The Amazon Resource Name (ARN) of the worker configuration.
+ */ + workerConfigurationArn?: string; +} + +export namespace WorkerConfigurationDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: WorkerConfigurationDescription): any => ({ + ...obj, + }); +} + +/** + *Summary of a connector.
+ */ +export interface ConnectorSummary { + /** + *The connector's compute capacity settings.
+ */ + capacity?: CapacityDescription; + + /** + *The Amazon Resource Name (ARN) of the connector.
+ */ + connectorArn?: string; + + /** + *The description of the connector.
+ */ + connectorDescription?: string; + + /** + *The name of the connector.
+ */ + connectorName?: string; + + /** + *The state of the connector.
+ */ + connectorState?: ConnectorState | string; + + /** + *The time that the connector was created.
+ */ + creationTime?: Date; + + /** + *The current version of the connector.
+ */ + currentVersion?: string; + + /** + *The details of the Apache Kafka cluster to which the connector is connected.
+ */ + kafkaCluster?: KafkaClusterDescription; + + /** + *The type of client authentication used to connect to the Apache Kafka cluster. The value is NONE when no client authentication is used.
+ */ + kafkaClusterClientAuthentication?: KafkaClusterClientAuthenticationDescription; + + /** + *Details of encryption in transit to the Apache Kafka cluster.
+ */ + kafkaClusterEncryptionInTransit?: KafkaClusterEncryptionInTransitDescription; + + /** + *The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
+ */ + kafkaConnectVersion?: string; + + /** + *The settings for delivering connector logs to Amazon CloudWatch Logs.
+ */ + logDelivery?: LogDeliveryDescription; + + /** + *Specifies which plugins were used for this connector.
+ */ + plugins?: PluginDescription[]; + + /** + *The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon Web Services resources.
+ */ + serviceExecutionRoleArn?: string; + + /** + *The worker configurations that are in use with the connector.
+ */ + workerConfiguration?: WorkerConfigurationDescription; +} + +export namespace ConnectorSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ConnectorSummary): any => ({ + ...obj, + }); +} + +export enum CustomPluginState { + ACTIVE = "ACTIVE", + CREATE_FAILED = "CREATE_FAILED", + CREATING = "CREATING", + DELETING = "DELETING", + UPDATE_FAILED = "UPDATE_FAILED", + UPDATING = "UPDATING", +} + +export enum CustomPluginContentType { + JAR = "JAR", + ZIP = "ZIP", +} + +/** + *Details about a custom plugin file.
+ */ +export interface CustomPluginFileDescription { + /** + *The hex-encoded MD5 checksum of the custom plugin file. You can use it to validate the file.
+ */ + fileMd5?: string; + + /** + *The size in bytes of the custom plugin file. You can use it to validate the file.
+ */ + fileSize?: number; +} + +export namespace CustomPluginFileDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CustomPluginFileDescription): any => ({ + ...obj, + }); +} + +/** + *The description of the location of an object in Amazon S3.
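The `fileMd5` and `fileSize` fields exist so callers can validate a plugin file, for example by comparing a local archive against them. A hedged Node.js sketch; the file path is a placeholder:

```typescript
import { createHash } from "crypto";
import { readFileSync } from "fs";
import { CustomPluginFileDescription } from "@aws-sdk/client-kafkaconnect";

// Returns true when the local file matches the hex-encoded MD5 checksum
// and byte size reported by the service.
function matchesDescription(path: string, desc: CustomPluginFileDescription): boolean {
  const bytes = readFileSync(path);
  const md5 = createHash("md5").update(bytes).digest("hex");
  return (
    (desc.fileMd5 === undefined || md5 === desc.fileMd5) &&
    (desc.fileSize === undefined || bytes.length === desc.fileSize)
  );
}
```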
+ */ +export interface S3LocationDescription { + /** + *The Amazon Resource Name (ARN) of an S3 bucket.
+ */ + bucketArn?: string; + + /** + *The file key for an object in an S3 bucket.
+ */ + fileKey?: string; + + /** + *The version of an object in an S3 bucket.
+ */ + objectVersion?: string; +} + +export namespace S3LocationDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: S3LocationDescription): any => ({ + ...obj, + }); +} + +/** + *Information about the location of a custom plugin.
+ */ +export interface CustomPluginLocationDescription { + /** + *The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3.
+ */ + s3Location?: S3LocationDescription; +} + +export namespace CustomPluginLocationDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CustomPluginLocationDescription): any => ({ + ...obj, + }); +} + +/** + *Details about the revision of a custom plugin.
+ */ +export interface CustomPluginRevisionSummary { + /** + *The format of the plugin file.
+ */ + contentType?: CustomPluginContentType | string; + + /** + *The time that the custom plugin was created.
+ */ + creationTime?: Date; + + /** + *The description of the custom plugin.
+ */ + description?: string; + + /** + *Details about the custom plugin file.
+ */ + fileDescription?: CustomPluginFileDescription; + + /** + *Information about the location of the custom plugin.
+ */ + location?: CustomPluginLocationDescription; + + /** + *The revision of the custom plugin.
+ */ + revision?: number; +} + +export namespace CustomPluginRevisionSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CustomPluginRevisionSummary): any => ({ + ...obj, + }); +} + +/** + *A summary of the custom plugin.
+ */ +export interface CustomPluginSummary { + /** + *The time that the custom plugin was created.
+ */ + creationTime?: Date; + + /** + *The Amazon Resource Name (ARN) of the custom plugin.
+ */ + customPluginArn?: string; + + /** + *The state of the custom plugin.
+ */ + customPluginState?: CustomPluginState | string; + + /** + *A description of the custom plugin.
+ */ + description?: string; + + /** + *The latest revision of the custom plugin.
+ */ + latestRevision?: CustomPluginRevisionSummary; + + /** + *The name of the custom plugin.
+ */ + name?: string; +} + +export namespace CustomPluginSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CustomPluginSummary): any => ({ + ...obj, + }); +} + +/** + *A plugin is an AWS resource that contains the code that defines a connector's logic.
+ */ +export interface CustomPlugin { + /** + *The Amazon Resource Name (ARN) of the custom plugin.
+ */ + customPluginArn: string | undefined; + + /** + *The revision of the custom plugin.
+ */ + revision: number | undefined; +} + +export namespace CustomPlugin { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CustomPlugin): any => ({ + ...obj, + }); +} + +/** + *A plugin is an AWS resource that contains the code that defines your connector logic.
+ */ +export interface Plugin { + /** + *Details about a custom plugin.
+ */ + customPlugin: CustomPlugin | undefined; +} + +export namespace Plugin { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Plugin): any => ({ + ...obj, + }); +} + +/** + *The summary of a worker configuration revision.
+ */ +export interface WorkerConfigurationRevisionSummary { + /** + *The time that a worker configuration revision was created.
+ */ + creationTime?: Date; + + /** + *The description of a worker configuration revision.
+ */ + description?: string; + + /** + *The revision of a worker configuration.
+ */ + revision?: number; +} + +export namespace WorkerConfigurationRevisionSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: WorkerConfigurationRevisionSummary): any => ({ + ...obj, + }); +} + +/** + *The summary of a worker configuration.
+ */ +export interface WorkerConfigurationSummary { + /** + *The time that a worker configuration was created.
+ */ + creationTime?: Date; + + /** + *The description of a worker configuration.
+ */ + description?: string; + + /** + *The latest revision of a worker configuration.
+ */ + latestRevision?: WorkerConfigurationRevisionSummary; + + /** + *The name of the worker configuration.
+ */ + name?: string; + + /** + *The Amazon Resource Name (ARN) of the worker configuration.
+ */ + workerConfigurationArn?: string; +} + +export namespace WorkerConfigurationSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: WorkerConfigurationSummary): any => ({ + ...obj, + }); +} + +/** + *Information about the VPC in which the connector resides.
+ */ +export interface Vpc { + /** + *The security groups for the connector.
+ */ + securityGroups?: string[]; + + /** + *The subnets for the connector.
+ */ + subnets: string[] | undefined; +} + +export namespace Vpc { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Vpc): any => ({ + ...obj, + }); +} + +/** + *The details of the Apache Kafka cluster to which the connector is connected.
+ */ +export interface ApacheKafkaCluster { + /** + *The bootstrap servers of the cluster.
+ */ + bootstrapServers: string | undefined; + + /** + *Details of an Amazon VPC which has network connectivity to the Apache Kafka cluster.
+ */ + vpc: Vpc | undefined; +} + +export namespace ApacheKafkaCluster { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ApacheKafkaCluster): any => ({ + ...obj, + }); +} + +/** + *The scale-in policy for the connector.
+ */ +export interface ScaleInPolicy { + /** + *Specifies the CPU utilization percentage threshold at which you want connector scale in to be triggered.
+ */ + cpuUtilizationPercentage: number | undefined; +} + +export namespace ScaleInPolicy { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ScaleInPolicy): any => ({ + ...obj, + }); +} + +/** + *The scale-out policy for the connector.
+ */ +export interface ScaleOutPolicy { + /** + *The CPU utilization percentage threshold at which you want connector scale out to be triggered.
+ */ + cpuUtilizationPercentage: number | undefined; +} + +export namespace ScaleOutPolicy { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ScaleOutPolicy): any => ({ + ...obj, + }); +} + +/** + *Specifies how the connector scales.
+ */ +export interface AutoScaling { + /** + *The maximum number of workers allocated to the connector.
+ */ + maxWorkerCount: number | undefined; + + /** + *The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
+ */ + mcuCount: number | undefined; + + /** + *The minimum number of workers allocated to the connector.
+ */ + minWorkerCount: number | undefined; + + /** + *<p>The scale-in policy for the connector.</p>
+ */ + scaleInPolicy?: ScaleInPolicy; + + /** + *<p>The scale-out policy for the connector.</p>
+ */ + scaleOutPolicy?: ScaleOutPolicy; +} + +export namespace AutoScaling { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AutoScaling): any => ({ + ...obj, + }); +} + +/** + *An update to the connector's scale-in policy.
+ */ +export interface ScaleInPolicyUpdate { + /** + *The target CPU utilization percentage threshold at which you want connector scale in to be triggered.
+ */ + cpuUtilizationPercentage: number | undefined; +} + +export namespace ScaleInPolicyUpdate { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ScaleInPolicyUpdate): any => ({ + ...obj, + }); +} + +/** + *An update to the connector's scale-out policy.
+ */ +export interface ScaleOutPolicyUpdate { + /** + *The target CPU utilization percentage threshold at which you want connector scale out to be triggered.
+ */ + cpuUtilizationPercentage: number | undefined; +} + +export namespace ScaleOutPolicyUpdate { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ScaleOutPolicyUpdate): any => ({ + ...obj, + }); +} + +/** + *The updates to the auto scaling parameters for the connector.
+ */ +export interface AutoScalingUpdate { + /** + *The target maximum number of workers allocated to the connector.
+ */ + maxWorkerCount: number | undefined; + + /** + *The target number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
+ */ + mcuCount: number | undefined; + + /** + *The target minimum number of workers allocated to the connector.
+ */ + minWorkerCount: number | undefined; + + /** + *<p>The target scale-in policy for the connector.</p>
+ */ + scaleInPolicy: ScaleInPolicyUpdate | undefined; + + /** + *<p>The target scale-out policy for the connector.</p>
+ */ + scaleOutPolicy: ScaleOutPolicyUpdate | undefined; +} + +export namespace AutoScalingUpdate { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AutoScalingUpdate): any => ({ + ...obj, + }); +} + +/** + *HTTP Status Code 400: Bad request due to incorrect input. Correct your request and then retry it.
+ */ +export interface BadRequestException extends __SmithyException, $MetadataBearer { + name: "BadRequestException"; + $fault: "client"; + message?: string; +} + +export namespace BadRequestException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BadRequestException): any => ({ + ...obj, + }); +} + +/** + *Details about a connector's provisioned capacity.
+ */ +export interface ProvisionedCapacity { + /** + *The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
+ */ + mcuCount: number | undefined; + + /** + *The number of workers that are allocated to the connector.
+ */ + workerCount: number | undefined; +} + +export namespace ProvisionedCapacity { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ProvisionedCapacity): any => ({ + ...obj, + }); +} + +/** + *Information about the capacity of the connector, whether it is auto scaled or provisioned.
+ */ +export interface Capacity { + /** + *Information about the auto scaling parameters for the connector.
+ */ + autoScaling?: AutoScaling; + + /** + *Details about a fixed capacity allocated to a connector.
+ */ + provisionedCapacity?: ProvisionedCapacity; +} + +export namespace Capacity { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Capacity): any => ({ + ...obj, + }); +} + +/** + *An update to a connector's fixed capacity.
+ */ +export interface ProvisionedCapacityUpdate { + /** + *The number of microcontroller units (MCUs) allocated to each connector worker. The valid values are 1,2,4,8.
+ */ + mcuCount: number | undefined; + + /** + *The number of workers that are allocated to the connector.
+ */ + workerCount: number | undefined; +} + +export namespace ProvisionedCapacityUpdate { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ProvisionedCapacityUpdate): any => ({ + ...obj, + }); +} + +/** + *The target capacity for the connector. The capacity can be auto scaled or provisioned.
+ */ +export interface CapacityUpdate { + /** + *The target auto scaling setting.
+ */ + autoScaling?: AutoScalingUpdate; + + /** + *The target settings for provisioned capacity.
+ */ + provisionedCapacity?: ProvisionedCapacityUpdate; +} + +export namespace CapacityUpdate { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CapacityUpdate): any => ({ + ...obj, + }); +} + +/** + *The settings for delivering connector logs to Amazon CloudWatch Logs.
+ */ +export interface CloudWatchLogsLogDelivery { + /** + *Whether log delivery to Amazon CloudWatch Logs is enabled.
+ */ + enabled: boolean | undefined; + + /** + *The name of the CloudWatch log group that is the destination for log delivery.
+ */ + logGroup?: string; +} + +export namespace CloudWatchLogsLogDelivery { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CloudWatchLogsLogDelivery): any => ({ + ...obj, + }); +} + +/** + *HTTP Status Code 409: Conflict. A resource with this name already exists. Retry your request with another name.
+ */ +export interface ConflictException extends __SmithyException, $MetadataBearer { + name: "ConflictException"; + $fault: "client"; + message?: string; +} + +export namespace ConflictException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ConflictException): any => ({ + ...obj, + }); +} + +/** + *The details of the Apache Kafka cluster to which the connector is connected.
+ */ +export interface KafkaCluster { + /** + *The Apache Kafka cluster to which the connector is connected.
+ */ + apacheKafkaCluster: ApacheKafkaCluster | undefined; +} + +export namespace KafkaCluster { + /** + * @internal + */ + export const filterSensitiveLog = (obj: KafkaCluster): any => ({ + ...obj, + }); +} + +/** + *The client authentication information used in order to authenticate with the Apache Kafka cluster.
+ */ +export interface KafkaClusterClientAuthentication { + /** + *The type of client authentication used to connect to the Apache Kafka cluster. Value NONE means that no client authentication is used.
+ */ + authenticationType: KafkaClusterClientAuthenticationType | string | undefined; +} + +export namespace KafkaClusterClientAuthentication { + /** + * @internal + */ + export const filterSensitiveLog = (obj: KafkaClusterClientAuthentication): any => ({ + ...obj, + }); +} + +/** + *Details of encryption in transit to the Apache Kafka cluster.
+ */ +export interface KafkaClusterEncryptionInTransit { + /** + *The type of encryption in transit to the Apache Kafka cluster.
+ */ + encryptionType: KafkaClusterEncryptionInTransitType | string | undefined; +} + +export namespace KafkaClusterEncryptionInTransit { + /** + * @internal + */ + export const filterSensitiveLog = (obj: KafkaClusterEncryptionInTransit): any => ({ + ...obj, + }); +} + +/** + *The settings for delivering logs to Amazon Kinesis Data Firehose.
+ */ +export interface FirehoseLogDelivery { + /** + *The name of the Kinesis Data Firehose delivery stream that is the destination for log delivery.
+ */ + deliveryStream?: string; + + /** + *Specifies whether connector logs get delivered to Amazon Kinesis Data Firehose.
+ */ + enabled: boolean | undefined; +} + +export namespace FirehoseLogDelivery { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FirehoseLogDelivery): any => ({ + ...obj, + }); +} + +/** + *Details about delivering logs to Amazon S3.
+ */ +export interface S3LogDelivery { + /** + *The name of the S3 bucket that is the destination for log delivery.
+ */ + bucket?: string; + + /** + *Specifies whether connector logs get sent to the specified Amazon S3 destination.
+ */ + enabled: boolean | undefined; + + /** + *The S3 prefix that is the destination for log delivery.
+ */ + prefix?: string; +} + +export namespace S3LogDelivery { + /** + * @internal + */ + export const filterSensitiveLog = (obj: S3LogDelivery): any => ({ + ...obj, + }); +} + +/** + *Workers can send worker logs to different destination types. This configuration specifies the details of these destinations.
+ */ +export interface WorkerLogDelivery { + /** + *Details about delivering logs to Amazon CloudWatch Logs.
+ */ + cloudWatchLogs?: CloudWatchLogsLogDelivery; + + /** + *Details about delivering logs to Amazon Kinesis Data Firehose.
+ */ + firehose?: FirehoseLogDelivery; + + /** + *Details about delivering logs to Amazon S3.
+ */ + s3?: S3LogDelivery; +} + +export namespace WorkerLogDelivery { + /** + * @internal + */ + export const filterSensitiveLog = (obj: WorkerLogDelivery): any => ({ + ...obj, + }); +} + +/** + *Details about log delivery.
+ */ +export interface LogDelivery { + /** + *The workers can send worker logs to different destination types. This configuration specifies the details of these destinations.
+ */ + workerLogDelivery: WorkerLogDelivery | undefined; +} + +export namespace LogDelivery { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LogDelivery): any => ({ + ...obj, + }); +} + +/** + *The configuration of the workers, which are the processes that run the connector logic.
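The three destination types can be combined in a single `WorkerLogDelivery`. A sketch that fans worker logs out to both CloudWatch Logs and S3; the log group, bucket, and prefix are placeholders:

```typescript
import { LogDelivery } from "@aws-sdk/client-kafkaconnect";

// Deliver worker logs to two destinations at once; any destination that
// is omitted or disabled simply receives nothing.
const logDelivery: LogDelivery = {
  workerLogDelivery: {
    cloudWatchLogs: { enabled: true, logGroup: "/example/kafkaconnect" },
    s3: { enabled: true, bucket: "example-log-bucket", prefix: "connectors/" },
  },
};
```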
+ */ +export interface WorkerConfiguration { + /** + *The revision of the worker configuration.
+ */ + revision: number | undefined; + + /** + *The Amazon Resource Name (ARN) of the worker configuration.
+ */ + workerConfigurationArn: string | undefined; +} + +export namespace WorkerConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: WorkerConfiguration): any => ({ + ...obj, + }); +} + +export interface CreateConnectorRequest { + /** + *Information about the capacity allocated to the connector. Exactly one of the two properties must be specified.
+ */ + capacity: Capacity | undefined; + + /** + *A map of keys to values that represent the configuration for the connector.
+ */ + connectorConfiguration: { [key: string]: string } | undefined; + + /** + *A summary description of the connector.
+ */ + connectorDescription?: string; + + /** + *The name of the connector.
+ */ + connectorName: string | undefined; + + /** + *Specifies which Apache Kafka cluster to connect to.
+ */ + kafkaCluster: KafkaCluster | undefined; + + /** + *Details of the client authentication used by the Apache Kafka cluster.
+ */ + kafkaClusterClientAuthentication: KafkaClusterClientAuthentication | undefined; + + /** + *Details of encryption in transit to the Apache Kafka cluster.
+ */ + kafkaClusterEncryptionInTransit: KafkaClusterEncryptionInTransit | undefined; + + /** + *The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
+ */ + kafkaConnectVersion: string | undefined; + + /** + *Details about log delivery.
+ */ + logDelivery?: LogDelivery; + + /** + *Specifies which plugins to use for the connector.
+ */ + plugins: Plugin[] | undefined; + + /** + *The Amazon Resource Name (ARN) of the IAM role used by the connector to access the Amazon Web Services resources that it needs. The types of resources depends on the logic of the connector. For example, a connector that has Amazon S3 as a destination must have permissions that allow it to write to the S3 destination bucket.
+ */ + serviceExecutionRoleArn: string | undefined; + + /** + *Specifies which worker configuration to use with the connector.
+ */ + workerConfiguration?: WorkerConfiguration; +} + +export namespace CreateConnectorRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateConnectorRequest): any => ({ + ...obj, + }); +} + +export interface CreateConnectorResponse { + /** + *The Amazon Resource Name (ARN) that Amazon assigned to the connector.
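A hedged end-to-end sketch of `CreateConnectorRequest`: the `CreateConnectorCommand` re-export is assumed to follow the layout of the other generated clients, and every ARN, endpoint, name, and connector class below is a placeholder:

```typescript
import { KafkaConnectClient, CreateConnectorCommand } from "@aws-sdk/client-kafkaconnect";

async function createExampleConnector() {
  const client = new KafkaConnectClient({ region: "us-east-1" });
  const response = await client.send(
    new CreateConnectorCommand({
      connectorName: "example-s3-sink",
      connectorConfiguration: {
        "connector.class": "com.example.S3SinkConnector", // placeholder class
        "topics": "example-topic",
      },
      capacity: { provisionedCapacity: { mcuCount: 1, workerCount: 2 } },
      kafkaCluster: {
        apacheKafkaCluster: {
          bootstrapServers: "b-1.example.kafka.us-east-1.amazonaws.com:9098",
          vpc: { subnets: ["subnet-0example"], securityGroups: ["sg-0example"] },
        },
      },
      kafkaClusterClientAuthentication: { authenticationType: "IAM" },
      kafkaClusterEncryptionInTransit: { encryptionType: "TLS" },
      kafkaConnectVersion: "2.7.1",
      plugins: [{ customPlugin: { customPluginArn: "arn:aws:kafkaconnect:us-east-1:123456789012:custom-plugin/example", revision: 1 } }],
      serviceExecutionRoleArn: "arn:aws:iam::123456789012:role/example-connector-role",
    })
  );
  console.log(response.connectorArn, response.connectorState);
}
```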
+ */ + connectorArn?: string; + + /** + *The name of the connector.
+ */ + connectorName?: string; + + /** + *The state of the connector.
+ */ + connectorState?: ConnectorState | string; +} + +export namespace CreateConnectorResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateConnectorResponse): any => ({ + ...obj, + }); +} + +/** + *HTTP Status Code 403: Access forbidden. Correct your credentials and then retry your request.
+ */ +export interface ForbiddenException extends __SmithyException, $MetadataBearer { + name: "ForbiddenException"; + $fault: "client"; + message?: string; +} + +export namespace ForbiddenException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ForbiddenException): any => ({ + ...obj, + }); +} + +/** + *HTTP Status Code 500: Unexpected internal server error. Retrying your request might resolve the issue.
+ */ +export interface InternalServerErrorException extends __SmithyException, $MetadataBearer { + name: "InternalServerErrorException"; + $fault: "server"; + message?: string; +} + +export namespace InternalServerErrorException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InternalServerErrorException): any => ({ + ...obj, + }); +} + +/** + *HTTP Status Code 404: Resource not found due to incorrect input. Correct your request and then retry it.
+ */ +export interface NotFoundException extends __SmithyException, $MetadataBearer { + name: "NotFoundException"; + $fault: "client"; + message?: string; +} + +export namespace NotFoundException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: NotFoundException): any => ({ + ...obj, + }); +} + +/** + *<p>HTTP Status Code 503: Service Unavailable. Retrying your request after some time might resolve the issue.</p>
+ */ +export interface ServiceUnavailableException extends __SmithyException, $MetadataBearer { + name: "ServiceUnavailableException"; + $fault: "server"; + message?: string; +} + +export namespace ServiceUnavailableException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ServiceUnavailableException): any => ({ + ...obj, + }); +} + +/** + *HTTP Status Code 429: Limit exceeded. Resource limit reached.
+ */ +export interface TooManyRequestsException extends __SmithyException, $MetadataBearer { + name: "TooManyRequestsException"; + $fault: "client"; + message?: string; +} + +export namespace TooManyRequestsException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TooManyRequestsException): any => ({ + ...obj, + }); +} + +/** + *HTTP Status Code 401: Unauthorized request. The provided credentials couldn't be validated.
+ */ +export interface UnauthorizedException extends __SmithyException, $MetadataBearer { + name: "UnauthorizedException"; + $fault: "client"; + message?: string; +} + +export namespace UnauthorizedException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UnauthorizedException): any => ({ + ...obj, + }); +} + +/** + *The location of an object in Amazon S3.
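Since the generated exceptions are plain shapes discriminated by `name`, callers typically branch on that field. A sketch, assuming `DescribeConnectorCommand` is re-exported like the other generated commands:

```typescript
import { KafkaConnectClient, DescribeConnectorCommand } from "@aws-sdk/client-kafkaconnect";

async function describeOrExplain(client: KafkaConnectClient, connectorArn: string) {
  try {
    return await client.send(new DescribeConnectorCommand({ connectorArn }));
  } catch (err) {
    const name = (err as { name?: string }).name;
    if (name === "NotFoundException") {
      return undefined; // treat a missing connector as "not there"
    }
    if (name === "TooManyRequestsException") {
      // a 429: back off before retrying
    }
    throw err;
  }
}
```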
+ */ +export interface S3Location { + /** + *The Amazon Resource Name (ARN) of an S3 bucket.
+ */ + bucketArn: string | undefined; + + /** + *The file key for an object in an S3 bucket.
+ */ + fileKey: string | undefined; + + /** + *The version of an object in an S3 bucket.
+ */ + objectVersion?: string; +} + +export namespace S3Location { + /** + * @internal + */ + export const filterSensitiveLog = (obj: S3Location): any => ({ + ...obj, + }); +} + +/** + *Information about the location of a custom plugin.
+ */ +export interface CustomPluginLocation { + /** + *The S3 bucket Amazon Resource Name (ARN), file key, and object version of the plugin file stored in Amazon S3.
+ */ + s3Location: S3Location | undefined; +} + +export namespace CustomPluginLocation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CustomPluginLocation): any => ({ + ...obj, + }); +} + +export interface CreateCustomPluginRequest { + /** + *The type of the plugin file.
+ */ + contentType: CustomPluginContentType | string | undefined; + + /** + *A summary description of the custom plugin.
+ */ + description?: string; + + /** + *Information about the location of a custom plugin.
+ */ + location: CustomPluginLocation | undefined; + + /** + *The name of the custom plugin.
+ */ + name: string | undefined; +} + +export namespace CreateCustomPluginRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateCustomPluginRequest): any => ({ + ...obj, + }); +} + +export interface CreateCustomPluginResponse { + /** + *The Amazon Resource Name (ARN) that Amazon assigned to the custom plugin.
+ */ + customPluginArn?: string; + + /** + *The state of the custom plugin.
+ */ + customPluginState?: CustomPluginState | string; + + /** + *The name of the custom plugin.
+ */ + name?: string; + + /** + *The revision of the custom plugin.
+ */ + revision?: number; +} + +export namespace CreateCustomPluginResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateCustomPluginResponse): any => ({ + ...obj, + }); +} + +export interface CreateWorkerConfigurationRequest { + /** + *A summary description of the worker configuration.
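A hedged sketch of registering a plugin archive that already sits in S3; `CreateCustomPluginCommand` is assumed to be re-exported like the other generated commands, and the bucket ARN and file key are placeholders:

```typescript
import { KafkaConnectClient, CreateCustomPluginCommand } from "@aws-sdk/client-kafkaconnect";

async function registerPlugin(client: KafkaConnectClient) {
  // The response carries the new plugin's ARN, state, and first revision.
  const { customPluginArn, customPluginState, revision } = await client.send(
    new CreateCustomPluginCommand({
      name: "example-plugin",
      contentType: "ZIP", // or "JAR"
      location: {
        s3Location: {
          bucketArn: "arn:aws:s3:::example-plugin-bucket",
          fileKey: "plugins/example-plugin.zip",
        },
      },
    })
  );
  return { customPluginArn, customPluginState, revision };
}
```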
+ */ + description?: string; + + /** + *The name of the worker configuration.
+ */ + name: string | undefined; + + /** + *<p>Base64 encoded contents of the connect-distributed.properties file.</p>
+ */ + propertiesFileContent: string | undefined; +} + +export namespace CreateWorkerConfigurationRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateWorkerConfigurationRequest): any => ({ + ...obj, + }); +} + +export interface CreateWorkerConfigurationResponse { + /** + *The time that the worker configuration was created.
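`propertiesFileContent` carries the worker properties as Base64, so the plain text has to be encoded first. A Node.js sketch with illustrative property values:

```typescript
// Encode a connect-distributed.properties payload for propertiesFileContent.
const properties = [
  "key.converter=org.apache.kafka.connect.storage.StringConverter",
  "value.converter=org.apache.kafka.connect.storage.StringConverter",
].join("\n");
const propertiesFileContent = Buffer.from(properties, "utf8").toString("base64");
```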
+ */ + creationTime?: Date; + + /** + *The latest revision of the worker configuration.
+ */ + latestRevision?: WorkerConfigurationRevisionSummary; + + /** + *The name of the worker configuration.
+ */ + name?: string; + + /** + *The Amazon Resource Name (ARN) that Amazon assigned to the worker configuration.
+ */ + workerConfigurationArn?: string; +} + +export namespace CreateWorkerConfigurationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateWorkerConfigurationResponse): any => ({ + ...obj, + }); +} + +export interface DeleteConnectorRequest { + /** + *The Amazon Resource Name (ARN) of the connector that you want to delete.
+ */ + connectorArn: string | undefined; + + /** + *The current version of the connector that you want to delete.
+ */ + currentVersion?: string; +} + +export namespace DeleteConnectorRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteConnectorRequest): any => ({ + ...obj, + }); +} + +export interface DeleteConnectorResponse { + /** + *The Amazon Resource Name (ARN) of the connector that you requested to delete.
+ */ + connectorArn?: string; + + /** + *The state of the connector that you requested to delete.
+ */ + connectorState?: ConnectorState | string; +} + +export namespace DeleteConnectorResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteConnectorResponse): any => ({ + ...obj, + }); +} + +export interface DescribeConnectorRequest { + /** + *The Amazon Resource Name (ARN) of the connector that you want to describe.
+ */ + connectorArn: string | undefined; +} + +export namespace DescribeConnectorRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeConnectorRequest): any => ({ + ...obj, + }); +} + +export interface DescribeConnectorResponse { + /** + *Information about the capacity of the connector, whether it is auto scaled or provisioned.
+ */ + capacity?: CapacityDescription; + + /** + *The Amazon Resource Name (ARN) of the connector.
+ */ + connectorArn?: string; + + /** + *A map of keys to values that represent the configuration for the connector.
+ */ + connectorConfiguration?: { [key: string]: string }; + + /** + *A summary description of the connector.
+ */ + connectorDescription?: string; + + /** + *The name of the connector.
+ */ + connectorName?: string; + + /** + *The state of the connector.
+ */ + connectorState?: ConnectorState | string; + + /** + *The time the connector was created.
+ */ + creationTime?: Date; + + /** + *The current version of the connector.
+ */ + currentVersion?: string; + + /** + *The Apache Kafka cluster that the connector is connected to.
+ */ + kafkaCluster?: KafkaClusterDescription; + + /** + *The type of client authentication used to connect to the Apache Kafka cluster. The value is NONE when no client authentication is used.
+ */ + kafkaClusterClientAuthentication?: KafkaClusterClientAuthenticationDescription; + + /** + *Details of encryption in transit to the Apache Kafka cluster.
+ */ + kafkaClusterEncryptionInTransit?: KafkaClusterEncryptionInTransitDescription; + + /** + *The version of Kafka Connect. It has to be compatible with both the Apache Kafka cluster's version and the plugins.
+ */ + kafkaConnectVersion?: string; + + /** + *Details about delivering logs to Amazon CloudWatch Logs.
+ */ + logDelivery?: LogDeliveryDescription; + + /** + *Specifies which plugins were used for this connector.
+ */ + plugins?: PluginDescription[]; + + /** + *The Amazon Resource Name (ARN) of the IAM role used by the connector to access Amazon Web Services resources.
+ */ + serviceExecutionRoleArn?: string; + + /** + *Specifies which worker configuration was used for the connector.
+ */ + workerConfiguration?: WorkerConfigurationDescription; +} + +export namespace DescribeConnectorResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeConnectorResponse): any => ({ + ...obj, + }); +} + +export interface DescribeCustomPluginRequest { + /** + *<p>The Amazon Resource Name (ARN) of the custom plugin that you want to describe.</p>
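Because `connectorState` moves through the `ConnectorState` values asynchronously, callers often poll `DescribeConnector` after a create or update. A sketch; the 30-second interval is an arbitrary choice, and the command re-export is assumed:

```typescript
import { KafkaConnectClient, DescribeConnectorCommand } from "@aws-sdk/client-kafkaconnect";

async function waitWhileCreating(client: KafkaConnectClient, connectorArn: string) {
  for (;;) {
    const { connectorState } = await client.send(new DescribeConnectorCommand({ connectorArn }));
    if (connectorState !== "CREATING") {
      return connectorState; // RUNNING on success, FAILED otherwise
    }
    await new Promise((resolve) => setTimeout(resolve, 30_000));
  }
}
```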
+ */ + customPluginArn: string | undefined; +} + +export namespace DescribeCustomPluginRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeCustomPluginRequest): any => ({ + ...obj, + }); +} + +export interface DescribeCustomPluginResponse { + /** + *The time that the custom plugin was created.
+ */ + creationTime?: Date; + + /** + *The Amazon Resource Name (ARN) of the custom plugin.
+ */ + customPluginArn?: string; + + /** + *The state of the custom plugin.
+ */ + customPluginState?: CustomPluginState | string; + + /** + *The description of the custom plugin.
+ */ + description?: string; + + /** + *The latest successfully created revision of the custom plugin. If there are no successfully created revisions, this field will be absent.
+ */ + latestRevision?: CustomPluginRevisionSummary; + + /** + *The name of the custom plugin.
+ */ + name?: string; +} + +export namespace DescribeCustomPluginResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeCustomPluginResponse): any => ({ + ...obj, + }); +} + +export interface DescribeWorkerConfigurationRequest { + /** + *The Amazon Resource Name (ARN) of the worker configuration that you want to get information about.
+ */ + workerConfigurationArn: string | undefined; +} + +export namespace DescribeWorkerConfigurationRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeWorkerConfigurationRequest): any => ({ + ...obj, + }); +} + +/** + *The description of the worker configuration revision.
+ */ +export interface WorkerConfigurationRevisionDescription { + /** + *The time that the worker configuration was created.
+ */ + creationTime?: Date; + + /** + *The description of the worker configuration revision.
+ */ + description?: string; + + /** + *Base64 encoded contents of the connect-distributed.properties file.
+ */ + propertiesFileContent?: string; + + /** + *<p>The revision of the worker configuration.</p>
+ */ + revision?: number; +} + +export namespace WorkerConfigurationRevisionDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: WorkerConfigurationRevisionDescription): any => ({ + ...obj, + }); +} + +export interface DescribeWorkerConfigurationResponse { + /** + *The time that the worker configuration was created.
+ */ + creationTime?: Date; + + /** + *The description of the worker configuration.
+ */ + description?: string; + + /** + *<p>The latest revision of the worker configuration.</p>
+ */ + latestRevision?: WorkerConfigurationRevisionDescription; + + /** + *The name of the worker configuration.
+ */ + name?: string; + + /** + *<p>The Amazon Resource Name (ARN) of the worker configuration.</p>
+ */ + workerConfigurationArn?: string; +} + +export namespace DescribeWorkerConfigurationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeWorkerConfigurationResponse): any => ({ + ...obj, + }); +} + +export interface ListConnectorsRequest { + /** + *The name prefix that you want to use to search for and list connectors.
+ */ + connectorNamePrefix?: string; + + /** + *The maximum number of connectors to list in one response.
+ */ + maxResults?: number; + + /** + *If the response of a ListConnectors operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
+ */ + nextToken?: string; +} + +export namespace ListConnectorsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListConnectorsRequest): any => ({ + ...obj, + }); +} + +export interface ListConnectorsResponse { + /** + *An array of connector descriptions.
+ */ + connectors?: ConnectorSummary[]; + + /** + *If the response of a ListConnectors operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where it left off.
+ */ + nextToken?: string; +} + +export namespace ListConnectorsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListConnectorsResponse): any => ({ + ...obj, + }); +} + +export interface ListCustomPluginsRequest { + /** + *The maximum number of custom plugins to list in one response.
+ */ + maxResults?: number; + + /** + *If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
+ */ + nextToken?: string; +} + +export namespace ListCustomPluginsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListCustomPluginsRequest): any => ({ + ...obj, + }); +} + +export interface ListCustomPluginsResponse { + /** + *An array of custom plugin descriptions.
+ */ + customPlugins?: CustomPluginSummary[]; + + /** + *If the response of a ListCustomPlugins operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
+ */ + nextToken?: string; +} + +export namespace ListCustomPluginsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListCustomPluginsResponse): any => ({ + ...obj, + }); +} + +export interface ListWorkerConfigurationsRequest { + /** + *The maximum number of worker configurations to list in one response.
+ */ + maxResults?: number; + + /** + *If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
+ */ + nextToken?: string; +} + +export namespace ListWorkerConfigurationsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListWorkerConfigurationsRequest): any => ({ + ...obj, + }); +} + +export interface ListWorkerConfigurationsResponse { + /** + *If the response of a ListWorkerConfigurations operation is truncated, it will include a NextToken. Send this NextToken in a subsequent request to continue listing from where the previous operation left off.
+ */ + nextToken?: string; + + /** + *An array of worker configuration descriptions.
+ */ + workerConfigurations?: WorkerConfigurationSummary[]; +} + +export namespace ListWorkerConfigurationsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListWorkerConfigurationsResponse): any => ({ + ...obj, + }); +} + +export interface UpdateConnectorRequest { + /** + *The target capacity.
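All three list operations share the `nextToken` contract described above, so a caller can drain any of them with the same loop shape. A sketch against `ListConnectors`, assuming the usual top-level re-exports:

```typescript
import { KafkaConnectClient, ListConnectorsCommand, ConnectorSummary } from "@aws-sdk/client-kafkaconnect";

// Follow nextToken until the service omits it, which signals the last page.
async function listAllConnectors(client: KafkaConnectClient): Promise<ConnectorSummary[]> {
  const all: ConnectorSummary[] = [];
  let nextToken: string | undefined;
  do {
    const page = await client.send(new ListConnectorsCommand({ maxResults: 20, nextToken }));
    all.push(...(page.connectors ?? []));
    nextToken = page.nextToken;
  } while (nextToken !== undefined);
  return all;
}
```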
+ */ + capacity: CapacityUpdate | undefined; + + /** + *The Amazon Resource Name (ARN) of the connector that you want to update.
+ */ + connectorArn: string | undefined; + + /** + *The current version of the connector that you want to update.
+ */ + currentVersion: string | undefined; +} + +export namespace UpdateConnectorRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateConnectorRequest): any => ({ + ...obj, + }); +} + +export interface UpdateConnectorResponse { + /** + *The Amazon Resource Name (ARN) of the connector.
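`currentVersion` is required on update, which suggests it serves as an optimistic-concurrency token: pass the version returned by `DescribeConnector` so a stale update is rejected rather than applied. A sketch; the capacity numbers are placeholders and the command re-export is assumed:

```typescript
import { KafkaConnectClient, UpdateConnectorCommand } from "@aws-sdk/client-kafkaconnect";

async function resizeConnector(client: KafkaConnectClient, connectorArn: string, currentVersion: string) {
  // Switch the connector to a fixed two-MCU, four-worker footprint.
  return client.send(
    new UpdateConnectorCommand({
      connectorArn,
      currentVersion,
      capacity: { provisionedCapacity: { mcuCount: 2, workerCount: 4 } },
    })
  );
}
```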
+ */ + connectorArn?: string; + + /** + *The state of the connector.
+ */ + connectorState?: ConnectorState | string; +} + +export namespace UpdateConnectorResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateConnectorResponse): any => ({ + ...obj, + }); +} diff --git a/clients/client-kafkaconnect/package.json b/clients/client-kafkaconnect/package.json new file mode 100644 index 000000000000..bc3164b0e010 --- /dev/null +++ b/clients/client-kafkaconnect/package.json @@ -0,0 +1,91 @@ +{ + "name": "@aws-sdk/client-kafkaconnect", + "description": "AWS SDK for JavaScript Kafkaconnect Client for Node.js, Browser and React Native", + "version": "3.0.0", + "scripts": { + "clean": "yarn remove-definitions && yarn remove-dist && yarn remove-documentation", + "build-documentation": "yarn remove-documentation && typedoc ./", + "downlevel-dts": "downlevel-dts dist/types dist/types/ts3.4", + "remove-definitions": "rimraf ./types", + "remove-dist": "rimraf ./dist", + "remove-documentation": "rimraf ./docs", + "test": "jest --coverage --passWithNoTests", + "build:cjs": "tsc -p tsconfig.json", + "build:es": "tsc -p tsconfig.es.json", + "build": "yarn build:cjs && yarn build:es" + }, + "main": "./dist/cjs/index.js", + "types": "./dist/types/index.d.ts", + "module": "./dist/es/index.js", + "browser": { + "./runtimeConfig": "./runtimeConfig.browser" + }, + "react-native": { + "./runtimeConfig": "./runtimeConfig.native" + }, + "sideEffects": false, + "dependencies": { + "@aws-crypto/sha256-browser": "^1.1.0", + "@aws-crypto/sha256-js": "^1.1.0", + "@aws-sdk/client-sts": "3.31.0", + "@aws-sdk/config-resolver": "3.30.0", + "@aws-sdk/credential-provider-node": "3.31.0", + "@aws-sdk/fetch-http-handler": "3.29.0", + "@aws-sdk/hash-node": "3.29.0", + "@aws-sdk/invalid-dependency": "3.29.0", + "@aws-sdk/middleware-content-length": "3.29.0", + "@aws-sdk/middleware-host-header": "3.29.0", + "@aws-sdk/middleware-logger": "3.29.0", + "@aws-sdk/middleware-retry": "3.29.0", + "@aws-sdk/middleware-serde": "3.29.0", + "@aws-sdk/middleware-signing": "3.30.0", + "@aws-sdk/middleware-stack": "3.29.0", + "@aws-sdk/middleware-user-agent": "3.29.0", + "@aws-sdk/node-config-provider": "3.29.0", + "@aws-sdk/node-http-handler": "3.29.0", + "@aws-sdk/protocol-http": "3.29.0", + "@aws-sdk/smithy-client": "3.31.0", + "@aws-sdk/types": "3.29.0", + "@aws-sdk/url-parser": "3.29.0", + "@aws-sdk/util-base64-browser": "3.29.0", + "@aws-sdk/util-base64-node": "3.29.0", + "@aws-sdk/util-body-length-browser": "3.29.0", + "@aws-sdk/util-body-length-node": "3.29.0", + "@aws-sdk/util-user-agent-browser": "3.29.0", + "@aws-sdk/util-user-agent-node": "3.29.0", + "@aws-sdk/util-utf8-browser": "3.29.0", + "@aws-sdk/util-utf8-node": "3.29.0", + "tslib": "^2.3.0" + }, + "devDependencies": { + "@aws-sdk/client-documentation-generator": "3.29.0", + "@types/node": "^12.7.5", + "downlevel-dts": "0.7.0", + "jest": "^26.1.0", + "rimraf": "^3.0.0", + "ts-jest": "^26.4.1", + "typedoc": "^0.19.2", + "typescript": "~4.3.5" + }, + "engines": { + "node": ">=10.0.0" + }, + "typesVersions": { + "<4.0": { + "dist/types/*": [ + "dist/types/ts3.4/*" + ] + } + }, + "author": { + "name": "AWS SDK for JavaScript Team", + "url": "https://aws.amazon.com/javascript/" + }, + "license": "Apache-2.0", + "homepage": "https://github.com/aws/aws-sdk-js-v3/tree/main/clients/client-kafkaconnect", + "repository": { + "type": "git", + "url": "https://github.com/aws/aws-sdk-js-v3.git", + "directory": "clients/client-kafkaconnect" + } +} diff --git a/clients/client-kafkaconnect/pagination/Interfaces.ts 
b/clients/client-kafkaconnect/pagination/Interfaces.ts new file mode 100644 index 000000000000..fa01fc37d44e --- /dev/null +++ b/clients/client-kafkaconnect/pagination/Interfaces.ts @@ -0,0 +1,7 @@ +import { KafkaConnect } from "../KafkaConnect"; +import { KafkaConnectClient } from "../KafkaConnectClient"; +import { PaginationConfiguration } from "@aws-sdk/types"; + +export interface KafkaConnectPaginationConfiguration extends PaginationConfiguration { + client: KafkaConnect | KafkaConnectClient; +} diff --git a/clients/client-kafkaconnect/pagination/ListConnectorsPaginator.ts b/clients/client-kafkaconnect/pagination/ListConnectorsPaginator.ts new file mode 100644 index 000000000000..7c23f45a6875 --- /dev/null +++ b/clients/client-kafkaconnect/pagination/ListConnectorsPaginator.ts @@ -0,0 +1,58 @@ +import { KafkaConnect } from "../KafkaConnect"; +import { KafkaConnectClient } from "../KafkaConnectClient"; +import { + ListConnectorsCommand, + ListConnectorsCommandInput, + ListConnectorsCommandOutput, +} from "../commands/ListConnectorsCommand"; +import { KafkaConnectPaginationConfiguration } from "./Interfaces"; +import { Paginator } from "@aws-sdk/types"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: KafkaConnectClient, + input: ListConnectorsCommandInput, + ...args: any +): Promise<ListConnectorsCommandOutput> => { + // @ts-ignore + return await client.send(new ListConnectorsCommand(input), ...args); +}; - *<p>Retrieves information about all the Amazon Macie membership invitations that were received by an account.</p>
+ *Retrieves information about the Amazon Macie membership invitations that were received by an account.
*/ public listInvitations( args: ListInvitationsCommandInput, @@ -1579,6 +1584,38 @@ export class Macie2 extends Macie2Client { } } + /** + *Retrieves information about all the managed data identifiers that Amazon Macie currently provides.
+ */ + public listManagedDataIdentifiers( + args: ListManagedDataIdentifiersCommandInput, + options?: __HttpHandlerOptions + ): Promise<ListManagedDataIdentifiersCommandOutput>; /** *<p>Retrieves information about the accounts that are associated with an Amazon Macie administrator account.</p>
*/ diff --git a/clients/client-macie2/Macie2Client.ts b/clients/client-macie2/Macie2Client.ts index 58c0cade51cb..607aea1fcff9 100644 --- a/clients/client-macie2/Macie2Client.ts +++ b/clients/client-macie2/Macie2Client.ts @@ -109,6 +109,10 @@ import { ListFindingsFiltersCommandOutput, } from "./commands/ListFindingsFiltersCommand"; import { ListInvitationsCommandInput, ListInvitationsCommandOutput } from "./commands/ListInvitationsCommand"; +import { + ListManagedDataIdentifiersCommandInput, + ListManagedDataIdentifiersCommandOutput, +} from "./commands/ListManagedDataIdentifiersCommand"; import { ListMembersCommandInput, ListMembersCommandOutput } from "./commands/ListMembersCommand"; import { ListOrganizationAdminAccountsCommandInput, @@ -245,6 +249,7 @@ export type ServiceInputTypes = | ListFindingsCommandInput | ListFindingsFiltersCommandInput | ListInvitationsCommandInput + | ListManagedDataIdentifiersCommandInput | ListMembersCommandInput | ListOrganizationAdminAccountsCommandInput | ListTagsForResourceCommandInput @@ -303,6 +308,7 @@ export type ServiceOutputTypes = | ListFindingsCommandOutput | ListFindingsFiltersCommandOutput | ListInvitationsCommandOutput + | ListManagedDataIdentifiersCommandOutput | ListMembersCommandOutput | ListOrganizationAdminAccountsCommandOutput | ListTagsForResourceCommandOutput diff --git a/clients/client-macie2/commands/ListInvitationsCommand.ts b/clients/client-macie2/commands/ListInvitationsCommand.ts index 4bf59cd476d4..04acec44ef2b 100644 --- a/clients/client-macie2/commands/ListInvitationsCommand.ts +++ b/clients/client-macie2/commands/ListInvitationsCommand.ts @@ -21,7 +21,7 @@ export interface ListInvitationsCommandInput extends ListInvitationsRequest {} export interface ListInvitationsCommandOutput extends ListInvitationsResponse, __MetadataBearer {} /** - *Retrieves information about all the Amazon Macie membership invitations that were received by an account.
+ *Retrieves information about the Amazon Macie membership invitations that were received by an account.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-macie2/commands/ListManagedDataIdentifiersCommand.ts b/clients/client-macie2/commands/ListManagedDataIdentifiersCommand.ts new file mode 100644 index 000000000000..c4534aa5fb79 --- /dev/null +++ b/clients/client-macie2/commands/ListManagedDataIdentifiersCommand.ts @@ -0,0 +1,97 @@ +import { Macie2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Macie2Client"; +import { ListManagedDataIdentifiersRequest, ListManagedDataIdentifiersResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListManagedDataIdentifiersCommand, + serializeAws_restJson1ListManagedDataIdentifiersCommand, +} from "../protocols/Aws_restJson1"; +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + MiddlewareStack, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +export interface ListManagedDataIdentifiersCommandInput extends ListManagedDataIdentifiersRequest {} +export interface ListManagedDataIdentifiersCommandOutput extends ListManagedDataIdentifiersResponse, __MetadataBearer {} + +/** + *Retrieves information about all the managed data identifiers that Amazon Macie currently provides.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Macie2Client, ListManagedDataIdentifiersCommand } from "@aws-sdk/client-macie2"; // ES Modules import + * // const { Macie2Client, ListManagedDataIdentifiersCommand } = require("@aws-sdk/client-macie2"); // CommonJS import + * const client = new Macie2Client(config); + * const command = new ListManagedDataIdentifiersCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListManagedDataIdentifiersCommandInput} for command's `input` shape. + * @see {@link ListManagedDataIdentifiersCommandOutput} for command's `response` shape. + * @see {@link Macie2ClientResolvedConfig | config} for command's `input` shape. + * + */ +export class ListManagedDataIdentifiersCommand extends $Command< + ListManagedDataIdentifiersCommandInput, + ListManagedDataIdentifiersCommandOutput, + Macie2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListManagedDataIdentifiersCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackSpecifies the location of 1-15 occurrences of sensitive data that was detected by managed data identifiers or a custom data identifier and produced a sensitive data finding.
+ *Specifies the location of 1-15 occurrences of sensitive data that was detected by a managed data identifier or a custom data identifier and produced a sensitive data finding.
*/ export interface Occurrences { /** @@ -975,7 +975,7 @@ export enum SensitiveDataItemCategory { } /** - *Provides information about a type of sensitive data that was detected by managed data identifiers and produced a sensitive data finding.
+ *Provides information about a type of sensitive data that was detected by a managed data identifier and produced a sensitive data finding.
*/ export interface DefaultDetection { /** @@ -1008,7 +1008,7 @@ export namespace DefaultDetection { */ export interface SensitiveDataItem { /** - *The category of sensitive data that was detected. For example: CREDENTIALS, for credentials data such as private keys or Amazon Web Services secret keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as driver's license identification numbers.
+ *The category of sensitive data that was detected. For example: CREDENTIALS, for credentials data such as private keys or Amazon Web Services secret keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as passport numbers.
*/ category?: SensitiveDataItemCategory | string; @@ -1769,16 +1769,16 @@ export namespace ServerSideEncryption { } /** - *Provides information about the user who owns an S3 bucket.
+ *Provides information about the Amazon Web Services account that owns an S3 bucket.
*/ export interface S3BucketOwner { /** - *The display name of the user who owns the bucket.
+ *The display name of the account that owns the bucket.
*/ displayName?: string; /** - *The Amazon Web Services account ID for the user who owns the bucket.
+ *The canonical user ID for the account that owns the bucket.
*/ id?: string; } @@ -1822,7 +1822,7 @@ export interface S3Bucket { name?: string; /** - *The display name and Amazon Web Services account ID for the user who owns the bucket.
+ *The display name and canonical user ID for the Amazon Web Services account that owns the bucket.
*/ owner?: S3BucketOwner; @@ -2492,7 +2492,7 @@ export interface JobSummary { jobId?: string; /** - *The current status of the job. Possible values are:
CANCELLED - You cancelled the job or, if it's a one-time job, you paused the job and didn't resume it within 30 days.
COMPLETE - For a one-time job, Amazon Macie finished processing the data specified for the job. This value doesn't apply to recurring jobs.
IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to one-time jobs.
PAUSED - Amazon Macie started running the job but additional processing would exceed the monthly sensitive data discovery quota for your account or one or more member accounts that the job analyzes data for.
RUNNING - For a one-time job, the job is in progress. For a recurring job, a scheduled run is in progress.
USER_PAUSED - You paused the job. If you paused the job while it had a status of RUNNING and you don't resume it within 30 days of pausing it, the job or job run will expire and be cancelled, depending on the job's type. To check the expiration date, refer to the UserPausedDetails.jobExpiresAt property.
The current status of the job. Possible values are:
CANCELLED - You cancelled the job or, if it's a one-time job, you paused the job and didn't resume it within 30 days.
COMPLETE - For a one-time job, Amazon Macie finished processing the data specified for the job. This value doesn't apply to recurring jobs.
IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to one-time jobs.
PAUSED - Macie started running the job but additional processing would exceed the monthly sensitive data discovery quota for your account or one or more member accounts that the job analyzes data for.
RUNNING - For a one-time job, the job is in progress. For a recurring job, a scheduled run is in progress.
USER_PAUSED - You paused the job. If you paused the job while it had a status of RUNNING and you don't resume it within 30 days of pausing it, the job or job run will expire and be cancelled, depending on the job's type. To check the expiration date, refer to the UserPausedDetails.jobExpiresAt property.
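The USER_PAUSED expiry described above is exposed through UserPausedDetails; a minimal sketch, assuming a hypothetical job ID, of checking it with DescribeClassificationJob:
```javascript
import { Macie2Client, DescribeClassificationJobCommand } from "@aws-sdk/client-macie2";

const client = new Macie2Client({});
const job = await client.send(new DescribeClassificationJobCommand({ jobId: "example-job-id" }));
if (job.jobStatus === "USER_PAUSED" && job.userPausedDetails?.jobExpiresAt) {
  // After this date the paused job or job run expires and is cancelled.
  console.log("expires at", job.userPausedDetails.jobExpiresAt);
}
```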
Provides information about a managed data identifier. For additional information, see Using managed data identifiers in the Amazon Macie User Guide.
+ */ +export interface ManagedDataIdentifierSummary { + /** + *The category of sensitive data that the managed data identifier detects: CREDENTIALS, for credentials data such as private keys or Amazon Web Services secret keys; FINANCIAL_INFORMATION, for financial data such as credit card numbers; or, PERSONAL_INFORMATION, for personal health information, such as health insurance identification numbers, or personally identifiable information, such as passport numbers.
+ */ + category?: SensitiveDataItemCategory | string; + + /** + *The unique identifier for the managed data identifier. This is a string that describes the type of sensitive data that the managed data identifier detects. For example: OPENSSH_PRIVATE_KEY for OpenSSH private keys, CREDIT_CARD_NUMBER for credit card numbers, or USA_PASSPORT_NUMBER for US passport numbers.
+ */ + id?: string; +} + +export namespace ManagedDataIdentifierSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ManagedDataIdentifierSummary): any => ({ + ...obj, + }); +} + /** *Provides statistical data and other information about an S3 bucket that Amazon Macie monitors and analyzes.
*/ @@ -3251,7 +3275,7 @@ export namespace AccountDetail { export interface BatchGetCustomDataIdentifiersRequest { /** - *An array of strings that lists the unique identifiers for the custom data identifiers to retrieve information about.
+ *An array of custom data identifier IDs, one for each custom data identifier to retrieve information about.
*/ ids?: string[]; } @@ -3272,7 +3296,7 @@ export interface BatchGetCustomDataIdentifiersResponse { customDataIdentifiers?: BatchGetCustomDataIdentifierSummary[]; /** - *An array of identifiers, one for each identifier that was specified in the request, but doesn't correlate to an existing custom data identifier.
+ *An array of custom data identifier IDs, one for each custom data identifier that was specified in the request but doesn't correlate to an existing custom data identifier.
*/ notFoundIdentifierIds?: string[]; } @@ -3543,6 +3567,13 @@ export namespace ClassificationExportConfiguration { }); } +export enum ManagedDataIdentifierSelector { + ALL = "ALL", + EXCLUDE = "EXCLUDE", + INCLUDE = "INCLUDE", + NONE = "NONE", +} + /** *Specifies one or more property- and tag-based conditions that define criteria for including or excluding S3 objects from a classification job.
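A minimal sketch, with hypothetical identifier IDs, of splitting a batch lookup into found and missing custom data identifiers:
```javascript
import { Macie2Client, BatchGetCustomDataIdentifiersCommand } from "@aws-sdk/client-macie2";

const client = new Macie2Client({});
const ids = ["custom-id-1", "custom-id-2"]; // hypothetical custom data identifier IDs
const { customDataIdentifiers, notFoundIdentifierIds } = await client.send(
  new BatchGetCustomDataIdentifiersCommand({ ids })
);
// notFoundIdentifierIds lists the requested IDs that no longer exist.
console.log("found:", customDataIdentifiers?.length ?? 0, "missing:", notFoundIdentifierIds);
```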
*/ @@ -3713,7 +3744,7 @@ export interface CreateClassificationJobRequest { clientToken?: string; /** - *The custom data identifiers to use for data analysis and classification.
+ *An array of unique identifiers, one for each custom data identifier for the job to use when it analyzes data. To use only managed data identifiers, don't specify a value for this property and specify a value other than NONE for the managedDataIdentifierSelector property.
*/ customDataIdentifierIds?: string[]; @@ -3723,7 +3754,7 @@ export interface CreateClassificationJobRequest { description?: string; /** - *Specifies whether to analyze all existing, eligible objects immediately after the job is created.
+ *For a recurring job, specifies whether to analyze all existing, eligible objects immediately after the job is created (true). To analyze only those objects that are created or changed after you create the job and before the job's first scheduled run, set this value to false.
If you configure the job to run only once, don't specify a value for this property.
*/ initialRun?: boolean; @@ -3732,6 +3763,16 @@ export interface CreateClassificationJobRequest { */ jobType: JobType | string | undefined; + /** + *An array of unique identifiers, one for each managed data identifier for the job to include (use) or exclude (not use) when it analyzes data. Inclusion or exclusion depends on the managed data identifier selection type that you specify for the job (managedDataIdentifierSelector).
To retrieve a list of valid values for this property, use the ListManagedDataIdentifiers operation.
+ */ + managedDataIdentifierIds?: string[]; + + /** + *The selection type to apply when determining which managed data identifiers the job uses to analyze data. Valid values are:
ALL - Use all the managed data identifiers that Amazon Macie provides. If you specify this value, don't specify any values for the managedDataIdentifierIds property.
EXCLUDE - Use all the managed data identifiers that Macie provides except the managed data identifiers specified by the managedDataIdentifierIds property.
INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.
NONE - Don't use any managed data identifiers. If you specify this value, specify at least one custom data identifier for the job (customDataIdentifierIds) and don't specify any values for the managedDataIdentifierIds property.
If you don't specify a value for this property, the job uses all managed data identifiers. If you don't specify a value for this property or you specify ALL or EXCLUDE for a recurring job, the job also uses new managed data identifiers as they are released.
+ */ + managedDataIdentifierSelector?: ManagedDataIdentifierSelector | string; + /** *A custom name for the job. The name can contain as many as 500 characters.
*/ @@ -3743,7 +3784,7 @@ export interface CreateClassificationJobRequest { s3JobDefinition: S3JobDefinition | undefined; /** - *The sampling depth, as a percentage, to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. If this value is less than 100, Amazon Macie selects the objects to analyze at random, up to the specified percentage, and analyzes all the data in those objects.
+ *The sampling depth, as a percentage, for the job to apply when processing objects. This value determines the percentage of eligible objects that the job analyzes. If this value is less than 100, Amazon Macie selects the objects to analyze at random, up to the specified percentage, and analyzes all the data in those objects.
*/ samplingPercentage?: number; @@ -3800,17 +3841,17 @@ export interface CreateCustomDataIdentifierRequest { description?: string; /** - *An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 characters. Ignore words are case sensitive.
+ *An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 UTF-8 characters. Ignore words are case sensitive.
*/ ignoreWords?: string[]; /** - *An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 characters. Keywords aren't case sensitive.
+ *An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 UTF-8 characters. Keywords aren't case sensitive.
*/ keywords?: string[]; /** - *The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1-300 characters. The default value is 50.
+ *The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1-300 characters. The default value is 50.
*/ maximumMatchDistance?: number; @@ -4345,7 +4386,7 @@ export interface DescribeClassificationJobResponse { createdAt?: Date; /** - *The custom data identifiers that the job uses to analyze data.
+ *An array of unique identifiers, one for each custom data identifier that the job uses to analyze data. This value is null if the job uses only managed data identifiers to analyze data.
*/ customDataIdentifierIds?: string[]; @@ -4355,7 +4396,7 @@ export interface DescribeClassificationJobResponse { description?: string; /** - *Specifies whether the job is configured to analyze all existing, eligible objects immediately after it's created.
+ *For a recurring job, specifies whether you configured the job to analyze all existing, eligible objects immediately after the job was created (true). If you configured the job to analyze only those objects that were created or changed after the job was created and before the job's first scheduled run, this value is false. This value is also false for a one-time job.
*/ initialRun?: boolean; @@ -4370,7 +4411,7 @@ export interface DescribeClassificationJobResponse { jobId?: string; /** - *The current status of the job. Possible values are:
CANCELLED - You cancelled the job or, if it's a one-time job, you paused the job and didn't resume it within 30 days.
COMPLETE - For a one-time job, Amazon Macie finished processing the data specified for the job. This value doesn't apply to recurring jobs.
IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to one-time jobs.
PAUSED - Amazon Macie started running the job but additional processing would exceed the monthly sensitive data discovery quota for your account or one or more member accounts that the job analyzes data for.
RUNNING - For a one-time job, the job is in progress. For a recurring job, a scheduled run is in progress.
USER_PAUSED - You paused the job. If you paused the job while it had a status of RUNNING and you don't resume it within 30 days of pausing it, the job or job run will expire and be cancelled, depending on the job's type. To check the expiration date, refer to the UserPausedDetails.jobExpiresAt property.
The current status of the job. Possible values are:
CANCELLED - You cancelled the job or, if it's a one-time job, you paused the job and didn't resume it within 30 days.
COMPLETE - For a one-time job, Amazon Macie finished processing the data specified for the job. This value doesn't apply to recurring jobs.
IDLE - For a recurring job, the previous scheduled run is complete and the next scheduled run is pending. This value doesn't apply to one-time jobs.
PAUSED - Macie started running the job but additional processing would exceed the monthly sensitive data discovery quota for your account or one or more member accounts that the job analyzes data for.
RUNNING - For a one-time job, the job is in progress. For a recurring job, a scheduled run is in progress.
USER_PAUSED - You paused the job. If you paused the job while it had a status of RUNNING and you don't resume it within 30 days of pausing it, the job or job run will expire and be cancelled, depending on the job's type. To check the expiration date, refer to the UserPausedDetails.jobExpiresAt property.
An array of unique identifiers, one for each managed data identifier that the job is explicitly configured to include (use) or exclude (not use) when it analyzes data. Inclusion or exclusion depends on the managed data identifier selection type specified for the job (managedDataIdentifierSelector). This value is null if the job's managed data identifier selection type is ALL or the job uses only custom data identifiers (customDataIdentifierIds) to analyze data.
+ */ + managedDataIdentifierIds?: string[]; + + /** + *The selection type that determines which managed data identifiers the job uses to analyze data. Possible values are:
ALL - Use all the managed data identifiers that Amazon Macie provides.
EXCLUDE - Use all the managed data identifiers that Macie provides except the managed data identifiers specified by the managedDataIdentifierIds property.
INCLUDE - Use only the managed data identifiers specified by the managedDataIdentifierIds property.
NONE - Don't use any managed data identifiers.
If this value is null, the job uses all managed data identifiers. If this value is null, ALL, or EXCLUDE for a recurring job, the job also uses new managed data identifiers as they are released.
+ */ + managedDataIdentifierSelector?: ManagedDataIdentifierSelector | string; + /** *The custom name of the job.
*/ @@ -4405,7 +4456,7 @@ export interface DescribeClassificationJobResponse { samplingPercentage?: number; /** - *The recurrence pattern for running the job. If the job is configured to run only once, this value is null.
+ *The recurrence pattern for running the job. This value is null if the job is configured to run only once.
*/ scheduleFrequency?: JobScheduleFrequency; @@ -4894,7 +4945,7 @@ export interface GetCustomDataIdentifierResponse { keywords?: string[]; /** - *The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern.
+ *The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern.
*/ maximumMatchDistance?: number; @@ -5749,6 +5800,43 @@ export namespace ListInvitationsResponse { }); } +export interface ListManagedDataIdentifiersRequest { + /** + *The nextToken string that specifies which page of results to return in a paginated response.
+ */ + nextToken?: string; +} + +export namespace ListManagedDataIdentifiersRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListManagedDataIdentifiersRequest): any => ({ + ...obj, + }); +} + +export interface ListManagedDataIdentifiersResponse { + /** + *An array of objects, one for each managed data identifier.
+ */ + items?: ManagedDataIdentifierSummary[]; + + /** + *The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.
+ */ + nextToken?: string; +} + +export namespace ListManagedDataIdentifiersResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListManagedDataIdentifiersResponse): any => ({ + ...obj, + }); +} + export interface ListMembersRequest { /** *The maximum number of items to include in each page of a paginated response.
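A hedged sketch of draining the paginated list with the request and response shapes above:
```javascript
import { Macie2Client, ListManagedDataIdentifiersCommand } from "@aws-sdk/client-macie2";

const client = new Macie2Client({});
let nextToken;
do {
  // Pass the previous page's token; undefined requests the first page.
  const page = await client.send(new ListManagedDataIdentifiersCommand({ nextToken }));
  for (const item of page.items ?? []) {
    console.log(item.category, item.id);
  }
  nextToken = page.nextToken ?? undefined; // null means there are no more pages
} while (nextToken);
```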
@@ -6094,17 +6182,17 @@ export namespace TagResourceResponse { export interface TestCustomDataIdentifierRequest { /** - *An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 characters. Ignore words are case sensitive.
+ *An array that lists specific character sequences (ignore words) to exclude from the results. If the text matched by the regular expression is the same as any string in this array, Amazon Macie ignores it. The array can contain as many as 10 ignore words. Each ignore word can contain 4-90 UTF-8 characters. Ignore words are case sensitive.
*/ ignoreWords?: string[]; /** - *An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 characters. Keywords aren't case sensitive.
+ *An array that lists specific character sequences (keywords), one of which must be within proximity (maximumMatchDistance) of the regular expression to match. The array can contain as many as 50 keywords. Each keyword can contain 3-90 UTF-8 characters. Keywords aren't case sensitive.
*/ keywords?: string[]; /** - *The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1-300 characters. The default value is 50.
+ *The maximum number of characters that can exist between text that matches the regex pattern and the character sequences specified by the keywords array. Amazon Macie includes or excludes a result based on the proximity of a keyword to text that matches the regex pattern. The distance can be 1-300 characters. The default value is 50.
*/ maximumMatchDistance?: number; diff --git a/clients/client-macie2/protocols/Aws_restJson1.ts b/clients/client-macie2/protocols/Aws_restJson1.ts index a07566a3a91f..dee5838fbf41 100644 --- a/clients/client-macie2/protocols/Aws_restJson1.ts +++ b/clients/client-macie2/protocols/Aws_restJson1.ts @@ -109,6 +109,10 @@ import { ListFindingsFiltersCommandOutput, } from "../commands/ListFindingsFiltersCommand"; import { ListInvitationsCommandInput, ListInvitationsCommandOutput } from "../commands/ListInvitationsCommand"; +import { + ListManagedDataIdentifiersCommandInput, + ListManagedDataIdentifiersCommandOutput, +} from "../commands/ListManagedDataIdentifiersCommand"; import { ListMembersCommandInput, ListMembersCommandOutput } from "../commands/ListMembersCommand"; import { ListOrganizationAdminAccountsCommandInput, @@ -216,6 +220,7 @@ import { ListJobsFilterCriteria, ListJobsFilterTerm, ListJobsSortCriteria, + ManagedDataIdentifierSummary, MatchingBucket, MatchingResource, Member, @@ -368,6 +373,14 @@ export const serializeAws_restJson1CreateClassificationJobCommand = async ( ...(input.description !== undefined && input.description !== null && { description: input.description }), ...(input.initialRun !== undefined && input.initialRun !== null && { initialRun: input.initialRun }), ...(input.jobType !== undefined && input.jobType !== null && { jobType: input.jobType }), + ...(input.managedDataIdentifierIds !== undefined && + input.managedDataIdentifierIds !== null && { + managedDataIdentifierIds: serializeAws_restJson1__listOf__string(input.managedDataIdentifierIds, context), + }), + ...(input.managedDataIdentifierSelector !== undefined && + input.managedDataIdentifierSelector !== null && { + managedDataIdentifierSelector: input.managedDataIdentifierSelector, + }), ...(input.name !== undefined && input.name !== null && { name: input.name }), ...(input.s3JobDefinition !== undefined && input.s3JobDefinition !== null && { @@ -1427,6 +1440,31 @@ export const serializeAws_restJson1ListInvitationsCommand = async ( }); }; +export const serializeAws_restJson1ListManagedDataIdentifiersCommand = async ( + input: ListManagedDataIdentifiersCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/managed-data-identifiers/list";
+  let body: any;
+  body = JSON.stringify({
+    ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }),
+  });
+  return new __HttpRequest({
+    protocol,
+    hostname,
+    port,
+    method: "POST",
+    headers,
+    path: resolvedPath,
+    body,
+  });
+};
+
 export const serializeAws_restJson1ListMembersCommand = async (
   input: ListMembersCommandInput,
   context: __SerdeContext
@@ -3324,6 +3362,8 @@ export const deserializeAws_restJson1DescribeClassificationJobCommand = async (
     jobType: undefined,
     lastRunErrorStatus: undefined,
     lastRunTime: undefined,
+    managedDataIdentifierIds: undefined,
+    managedDataIdentifierSelector: undefined,
     name: undefined,
     s3JobDefinition: undefined,
     samplingPercentage: undefined,
@@ -3366,6 +3406,15 @@ export const deserializeAws_restJson1DescribeClassificationJobCommand = async (
   if (data.lastRunTime !== undefined && data.lastRunTime !== null) {
     contents.lastRunTime = __expectNonNull(__parseRfc3339DateTime(data.lastRunTime));
   }
+  if (data.managedDataIdentifierIds !== undefined && data.managedDataIdentifierIds !== null) {
+    contents.managedDataIdentifierIds = deserializeAws_restJson1__listOf__string(
+      data.managedDataIdentifierIds,
+      context
+    );
+  }
+  if (data.managedDataIdentifierSelector !== undefined && data.managedDataIdentifierSelector !== null) {
+    contents.managedDataIdentifierSelector = __expectString(data.managedDataIdentifierSelector);
+  }
   if (data.name !== undefined && data.name !== null) {
     contents.name = __expectString(data.name);
   }
@@ -6456,6 +6505,57 @@ const deserializeAws_restJson1ListInvitationsCommandError = async (
   return Promise.reject(Object.assign(new Error(message), response));
 };
 
+export const deserializeAws_restJson1ListManagedDataIdentifiersCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<ListManagedDataIdentifiersCommandOutput> => {
+ *Specifies the configuration for cold storage options such as enabled
+ */ export interface ColdStorageOptions { + /** + *Enable cold storage option. Accepted values true or false
+ */ Enabled: boolean | undefined; } @@ -1400,6 +1406,9 @@ export interface ClusterConfig { */ WarmCount?: number; + /** + *Specifies the ColdStorageOptions
config for a Domain
Container for the parameters to the ListDomainNames
operation.
Optional parameter to filter the output by domain engine type. Acceptable values are 'Elasticsearch' and 'OpenSearch'.
+ */ + EngineType?: EngineType | string; +} + +export namespace ListDomainNamesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListDomainNamesRequest): any => ({ + ...obj, + }); +} + export interface DomainInfo { /** *The DomainName
.
 */
  DomainName?: string;

+  /**
+   *Specifies the EngineType of the domain.
+   */
+  EngineType?: EngineType | string;
 }
The result of a ListDomainNames
operation. Contains the names of all domains owned by
- * this account.
- *
The result of a ListDomainNames
operation. Contains the names of all domains owned by this account and their respective engine types.
List of domain names.
+ *List of domain names and respective engine types.
*/ DomainNames?: DomainInfo[]; } diff --git a/clients/client-opensearch/protocols/Aws_restJson1.ts b/clients/client-opensearch/protocols/Aws_restJson1.ts index ef0110755943..84bbe78135b5 100644 --- a/clients/client-opensearch/protocols/Aws_restJson1.ts +++ b/clients/client-opensearch/protocols/Aws_restJson1.ts @@ -1062,12 +1062,12 @@ export const serializeAws_restJson1ListDomainNamesCommand = async ( context: __SerdeContext ): Promise<__HttpRequest> => { const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); - const headers: any = { - "content-type": "application/json", - }; + const headers: any = {}; let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/2021-01-01/domain"; + const query: any = { + ...(input.EngineType !== undefined && { engineType: input.EngineType }), + }; let body: any; - body = ""; return new __HttpRequest({ protocol, hostname, @@ -1075,6 +1075,7 @@ export const serializeAws_restJson1ListDomainNamesCommand = async ( method: "GET", headers, path: resolvedPath, + query, body, }); }; @@ -5750,6 +5751,7 @@ const deserializeAws_restJson1DomainEndpointOptionsStatus = ( const deserializeAws_restJson1DomainInfo = (output: any, context: __SerdeContext): DomainInfo => { return { DomainName: __expectString(output.DomainName), + EngineType: __expectString(output.EngineType), } as any; }; diff --git a/clients/client-pinpoint/Pinpoint.ts b/clients/client-pinpoint/Pinpoint.ts index b432cff35cdb..342186964a3b 100644 --- a/clients/client-pinpoint/Pinpoint.ts +++ b/clients/client-pinpoint/Pinpoint.ts @@ -20,6 +20,11 @@ import { CreateImportJobCommandInput, CreateImportJobCommandOutput, } from "./commands/CreateImportJobCommand"; +import { + CreateInAppTemplateCommand, + CreateInAppTemplateCommandInput, + CreateInAppTemplateCommandOutput, +} from "./commands/CreateInAppTemplateCommand"; import { CreateJourneyCommand, CreateJourneyCommandInput, @@ -111,6 +116,11 @@ import { DeleteGcmChannelCommandInput, DeleteGcmChannelCommandOutput, } from "./commands/DeleteGcmChannelCommand"; +import { + DeleteInAppTemplateCommand, + DeleteInAppTemplateCommandInput, + DeleteInAppTemplateCommandOutput, +} from "./commands/DeleteInAppTemplateCommand"; import { DeleteJourneyCommand, DeleteJourneyCommandInput, @@ -266,6 +276,16 @@ import { GetImportJobsCommandInput, GetImportJobsCommandOutput, } from "./commands/GetImportJobsCommand"; +import { + GetInAppMessagesCommand, + GetInAppMessagesCommandInput, + GetInAppMessagesCommandOutput, +} from "./commands/GetInAppMessagesCommand"; +import { + GetInAppTemplateCommand, + GetInAppTemplateCommandInput, + GetInAppTemplateCommandOutput, +} from "./commands/GetInAppTemplateCommand"; import { GetJourneyCommand, GetJourneyCommandInput, GetJourneyCommandOutput } from "./commands/GetJourneyCommand"; import { GetJourneyDateRangeKpiCommand, @@ -461,6 +481,11 @@ import { UpdateGcmChannelCommandInput, UpdateGcmChannelCommandOutput, } from "./commands/UpdateGcmChannelCommand"; +import { + UpdateInAppTemplateCommand, + UpdateInAppTemplateCommandInput, + UpdateInAppTemplateCommandOutput, +} from "./commands/UpdateInAppTemplateCommand"; import { UpdateJourneyCommand, UpdateJourneyCommandInput, @@ -671,6 +696,38 @@ export class Pinpoint extends PinpointClient { } } + /** + *Creates a new message template for messages using the in-app message channel.
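The serializer change above maps the command's EngineType input to the engineType query parameter; a minimal sketch of filtering for OpenSearch domains:
```javascript
import { OpenSearchClient, ListDomainNamesCommand } from "@aws-sdk/client-opensearch";

const client = new OpenSearchClient({});
const { DomainNames } = await client.send(new ListDomainNamesCommand({ EngineType: "OpenSearch" }));
for (const d of DomainNames ?? []) {
  console.log(d.DomainName, d.EngineType); // EngineType now comes back on each DomainInfo
}
```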
+ */ + public createInAppTemplate( + args: CreateInAppTemplateCommandInput, + options?: __HttpHandlerOptions + ): PromiseCreates a journey for an application.
*/ @@ -1273,6 +1330,38 @@ export class Pinpoint extends PinpointClient { } } + /** + *Deletes a message template for messages sent using the in-app message channel.
+ */ + public deleteInAppTemplate( + args: DeleteInAppTemplateCommandInput, + options?: __HttpHandlerOptions + ): PromiseDeletes a journey from an application.
*/ @@ -2354,6 +2443,70 @@ export class Pinpoint extends PinpointClient { } } + /** + *Retrieves the in-app messages targeted for the provided endpoint ID.
+ */ + public getInAppMessages( + args: GetInAppMessagesCommandInput, + options?: __HttpHandlerOptions + ): PromiseRetrieves the content and settings of a message template for messages sent through the in-app channel.
+ */ + public getInAppTemplate( + args: GetInAppTemplateCommandInput, + options?: __HttpHandlerOptions + ): PromiseRetrieves information about the status, configuration, and other settings for a journey.
*/ @@ -3694,6 +3847,38 @@ export class Pinpoint extends PinpointClient { } } + /** + *Updates an existing message template for messages sent through the in-app message channel.
+ */ + public updateInAppTemplate( + args: UpdateInAppTemplateCommandInput, + options?: __HttpHandlerOptions + ): PromiseUpdates the configuration and other settings for a journey.
*/ diff --git a/clients/client-pinpoint/PinpointClient.ts b/clients/client-pinpoint/PinpointClient.ts index b92cb8461b06..cef8b5b499e1 100644 --- a/clients/client-pinpoint/PinpointClient.ts +++ b/clients/client-pinpoint/PinpointClient.ts @@ -6,6 +6,10 @@ import { } from "./commands/CreateEmailTemplateCommand"; import { CreateExportJobCommandInput, CreateExportJobCommandOutput } from "./commands/CreateExportJobCommand"; import { CreateImportJobCommandInput, CreateImportJobCommandOutput } from "./commands/CreateImportJobCommand"; +import { + CreateInAppTemplateCommandInput, + CreateInAppTemplateCommandOutput, +} from "./commands/CreateInAppTemplateCommand"; import { CreateJourneyCommandInput, CreateJourneyCommandOutput } from "./commands/CreateJourneyCommand"; import { CreatePushTemplateCommandInput, CreatePushTemplateCommandOutput } from "./commands/CreatePushTemplateCommand"; import { @@ -43,6 +47,10 @@ import { import { DeleteEndpointCommandInput, DeleteEndpointCommandOutput } from "./commands/DeleteEndpointCommand"; import { DeleteEventStreamCommandInput, DeleteEventStreamCommandOutput } from "./commands/DeleteEventStreamCommand"; import { DeleteGcmChannelCommandInput, DeleteGcmChannelCommandOutput } from "./commands/DeleteGcmChannelCommand"; +import { + DeleteInAppTemplateCommandInput, + DeleteInAppTemplateCommandOutput, +} from "./commands/DeleteInAppTemplateCommand"; import { DeleteJourneyCommandInput, DeleteJourneyCommandOutput } from "./commands/DeleteJourneyCommand"; import { DeletePushTemplateCommandInput, DeletePushTemplateCommandOutput } from "./commands/DeletePushTemplateCommand"; import { @@ -108,6 +116,8 @@ import { GetExportJobsCommandInput, GetExportJobsCommandOutput } from "./command import { GetGcmChannelCommandInput, GetGcmChannelCommandOutput } from "./commands/GetGcmChannelCommand"; import { GetImportJobCommandInput, GetImportJobCommandOutput } from "./commands/GetImportJobCommand"; import { GetImportJobsCommandInput, GetImportJobsCommandOutput } from "./commands/GetImportJobsCommand"; +import { GetInAppMessagesCommandInput, GetInAppMessagesCommandOutput } from "./commands/GetInAppMessagesCommand"; +import { GetInAppTemplateCommandInput, GetInAppTemplateCommandOutput } from "./commands/GetInAppTemplateCommand"; import { GetJourneyCommandInput, GetJourneyCommandOutput } from "./commands/GetJourneyCommand"; import { GetJourneyDateRangeKpiCommandInput, @@ -199,6 +209,10 @@ import { UpdateEndpointsBatchCommandOutput, } from "./commands/UpdateEndpointsBatchCommand"; import { UpdateGcmChannelCommandInput, UpdateGcmChannelCommandOutput } from "./commands/UpdateGcmChannelCommand"; +import { + UpdateInAppTemplateCommandInput, + UpdateInAppTemplateCommandOutput, +} from "./commands/UpdateInAppTemplateCommand"; import { UpdateJourneyCommandInput, UpdateJourneyCommandOutput } from "./commands/UpdateJourneyCommand"; import { UpdateJourneyStateCommandInput, UpdateJourneyStateCommandOutput } from "./commands/UpdateJourneyStateCommand"; import { UpdatePushTemplateCommandInput, UpdatePushTemplateCommandOutput } from "./commands/UpdatePushTemplateCommand"; @@ -276,6 +290,7 @@ export type ServiceInputTypes = | CreateEmailTemplateCommandInput | CreateExportJobCommandInput | CreateImportJobCommandInput + | CreateInAppTemplateCommandInput | CreateJourneyCommandInput | CreatePushTemplateCommandInput | CreateRecommenderConfigurationCommandInput @@ -295,6 +310,7 @@ export type ServiceInputTypes = | DeleteEndpointCommandInput | DeleteEventStreamCommandInput | DeleteGcmChannelCommandInput + | 
DeleteInAppTemplateCommandInput | DeleteJourneyCommandInput | DeletePushTemplateCommandInput | DeleteRecommenderConfigurationCommandInput @@ -330,6 +346,8 @@ export type ServiceInputTypes = | GetGcmChannelCommandInput | GetImportJobCommandInput | GetImportJobsCommandInput + | GetInAppMessagesCommandInput + | GetInAppTemplateCommandInput | GetJourneyCommandInput | GetJourneyDateRangeKpiCommandInput | GetJourneyExecutionActivityMetricsCommandInput @@ -373,6 +391,7 @@ export type ServiceInputTypes = | UpdateEndpointCommandInput | UpdateEndpointsBatchCommandInput | UpdateGcmChannelCommandInput + | UpdateInAppTemplateCommandInput | UpdateJourneyCommandInput | UpdateJourneyStateCommandInput | UpdatePushTemplateCommandInput @@ -390,6 +409,7 @@ export type ServiceOutputTypes = | CreateEmailTemplateCommandOutput | CreateExportJobCommandOutput | CreateImportJobCommandOutput + | CreateInAppTemplateCommandOutput | CreateJourneyCommandOutput | CreatePushTemplateCommandOutput | CreateRecommenderConfigurationCommandOutput @@ -409,6 +429,7 @@ export type ServiceOutputTypes = | DeleteEndpointCommandOutput | DeleteEventStreamCommandOutput | DeleteGcmChannelCommandOutput + | DeleteInAppTemplateCommandOutput | DeleteJourneyCommandOutput | DeletePushTemplateCommandOutput | DeleteRecommenderConfigurationCommandOutput @@ -444,6 +465,8 @@ export type ServiceOutputTypes = | GetGcmChannelCommandOutput | GetImportJobCommandOutput | GetImportJobsCommandOutput + | GetInAppMessagesCommandOutput + | GetInAppTemplateCommandOutput | GetJourneyCommandOutput | GetJourneyDateRangeKpiCommandOutput | GetJourneyExecutionActivityMetricsCommandOutput @@ -487,6 +510,7 @@ export type ServiceOutputTypes = | UpdateEndpointCommandOutput | UpdateEndpointsBatchCommandOutput | UpdateGcmChannelCommandOutput + | UpdateInAppTemplateCommandOutput | UpdateJourneyCommandOutput | UpdateJourneyStateCommandOutput | UpdatePushTemplateCommandOutput diff --git a/clients/client-pinpoint/commands/CreateInAppTemplateCommand.ts b/clients/client-pinpoint/commands/CreateInAppTemplateCommand.ts new file mode 100644 index 000000000000..1eda0951ceb9 --- /dev/null +++ b/clients/client-pinpoint/commands/CreateInAppTemplateCommand.ts @@ -0,0 +1,94 @@ +import { PinpointClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../PinpointClient"; +import { CreateInAppTemplateRequest, CreateInAppTemplateResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateInAppTemplateCommand, + serializeAws_restJson1CreateInAppTemplateCommand, +} from "../protocols/Aws_restJson1"; +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + MiddlewareStack, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +export interface CreateInAppTemplateCommandInput extends CreateInAppTemplateRequest {} +export interface CreateInAppTemplateCommandOutput extends CreateInAppTemplateResponse, __MetadataBearer {} + +/** + *Creates a new message template for messages using the in-app message channel.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { PinpointClient, CreateInAppTemplateCommand } from "@aws-sdk/client-pinpoint"; // ES Modules import + * // const { PinpointClient, CreateInAppTemplateCommand } = require("@aws-sdk/client-pinpoint"); // CommonJS import + * const client = new PinpointClient(config); + * const command = new CreateInAppTemplateCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateInAppTemplateCommandInput} for command's `input` shape. + * @see {@link CreateInAppTemplateCommandOutput} for command's `response` shape. + * @see {@link PinpointClientResolvedConfig | config} for command's `input` shape. + * + */ +export class CreateInAppTemplateCommand extends $Command< + CreateInAppTemplateCommandInput, + CreateInAppTemplateCommandOutput, + PinpointClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateInAppTemplateCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackDeletes a message template for messages sent using the in-app message channel.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { PinpointClient, DeleteInAppTemplateCommand } from "@aws-sdk/client-pinpoint"; // ES Modules import + * // const { PinpointClient, DeleteInAppTemplateCommand } = require("@aws-sdk/client-pinpoint"); // CommonJS import + * const client = new PinpointClient(config); + * const command = new DeleteInAppTemplateCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteInAppTemplateCommandInput} for command's `input` shape. + * @see {@link DeleteInAppTemplateCommandOutput} for command's `response` shape. + * @see {@link PinpointClientResolvedConfig | config} for command's `input` shape. + * + */ +export class DeleteInAppTemplateCommand extends $Command< + DeleteInAppTemplateCommandInput, + DeleteInAppTemplateCommandOutput, + PinpointClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteInAppTemplateCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackRetrieves the in-app messages targeted for the provided endpoint ID.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { PinpointClient, GetInAppMessagesCommand } from "@aws-sdk/client-pinpoint"; // ES Modules import + * // const { PinpointClient, GetInAppMessagesCommand } = require("@aws-sdk/client-pinpoint"); // CommonJS import + * const client = new PinpointClient(config); + * const command = new GetInAppMessagesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetInAppMessagesCommandInput} for command's `input` shape. + * @see {@link GetInAppMessagesCommandOutput} for command's `response` shape. + * @see {@link PinpointClientResolvedConfig | config} for command's `input` shape. + * + */ +export class GetInAppMessagesCommand extends $Command< + GetInAppMessagesCommandInput, + GetInAppMessagesCommandOutput, + PinpointClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetInAppMessagesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackRetrieves the content and settings of a message template for messages sent through the in-app channel.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { PinpointClient, GetInAppTemplateCommand } from "@aws-sdk/client-pinpoint"; // ES Modules import + * // const { PinpointClient, GetInAppTemplateCommand } = require("@aws-sdk/client-pinpoint"); // CommonJS import + * const client = new PinpointClient(config); + * const command = new GetInAppTemplateCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetInAppTemplateCommandInput} for command's `input` shape. + * @see {@link GetInAppTemplateCommandOutput} for command's `response` shape. + * @see {@link PinpointClientResolvedConfig | config} for command's `input` shape. + * + */ +export class GetInAppTemplateCommand extends $Command< + GetInAppTemplateCommandInput, + GetInAppTemplateCommandOutput, + PinpointClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetInAppTemplateCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackUpdates an existing message template for messages sent through the in-app message channel.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { PinpointClient, UpdateInAppTemplateCommand } from "@aws-sdk/client-pinpoint"; // ES Modules import + * // const { PinpointClient, UpdateInAppTemplateCommand } = require("@aws-sdk/client-pinpoint"); // CommonJS import + * const client = new PinpointClient(config); + * const command = new UpdateInAppTemplateCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateInAppTemplateCommandInput} for command's `input` shape. + * @see {@link UpdateInAppTemplateCommandOutput} for command's `response` shape. + * @see {@link PinpointClientResolvedConfig | config} for command's `input` shape. + * + */ +export class UpdateInAppTemplateCommand extends $Command< + UpdateInAppTemplateCommandInput, + UpdateInAppTemplateCommandOutput, + PinpointClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateInAppTemplateCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackSpecifies channel-specific content and settings for a message template that can be used in push notifications that are sent through the ADM (Amazon Device Messaging), Baidu (Baidu Cloud Push), or GCM (Firebase Cloud Messaging, formerly Google Cloud Messaging) channel.
*/ @@ -2246,6 +2254,11 @@ export interface CampaignLimits { *The maximum number of messages that a campaign can send to a single endpoint during the course of the campaign. If a campaign recurs, this setting applies to all runs of the campaign. The maximum value is 100.
*/ Total?: number; + + /** + *The maximum total number of messages that the campaign can send per user session.
+ */ + Session?: number; } export namespace CampaignLimits { @@ -2581,6 +2594,12 @@ export namespace BaiduMessage { }); } +export enum ButtonAction { + CLOSE = "CLOSE", + DEEP_LINK = "DEEP_LINK", + LINK = "LINK", +} + /** *Specifies the contents of a message that's sent through a custom channel to recipients of a campaign.
*/ @@ -2712,6 +2731,253 @@ export namespace CampaignEventFilter { }); } +/** + *Text config for Message Body.
+ */ +export interface InAppMessageBodyConfig { + /** + *The alignment of the text. Valid values: LEFT, CENTER, RIGHT.
+ */ + Alignment: Alignment | string | undefined; + + /** + *Message Body.
+ */ + Body: string | undefined; + + /** + *The text color.
+ */ + TextColor: string | undefined; +} + +export namespace InAppMessageBodyConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InAppMessageBodyConfig): any => ({ + ...obj, + }); +} + +/** + *Text config for Message Header.
+ */ +export interface InAppMessageHeaderConfig { + /** + *The alignment of the text. Valid values: LEFT, CENTER, RIGHT.
+ */ + Alignment: Alignment | string | undefined; + + /** + *Message Header.
+ */ + Header: string | undefined; + + /** + *The text color.
+ */ + TextColor: string | undefined; +} + +export namespace InAppMessageHeaderConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InAppMessageHeaderConfig): any => ({ + ...obj, + }); +} + +/** + *Override button configuration.
+ */ +export interface OverrideButtonConfiguration { + /** + *Action triggered by the button.
+ */ + ButtonAction: ButtonAction | string | undefined; + + /** + *Button destination.
+ */ + Link?: string; +} + +export namespace OverrideButtonConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OverrideButtonConfiguration): any => ({ + ...obj, + }); +} + +/** + *Default button configuration.
+ */ +export interface DefaultButtonConfiguration { + /** + *The background color of the button.
+ */ + BackgroundColor?: string; + + /** + *The border radius of the button.
+ */ + BorderRadius?: number; + + /** + *Action triggered by the button.
+ */ + ButtonAction: ButtonAction | string | undefined; + + /** + *Button destination.
+ */ + Link?: string; + + /** + *Button text.
+ */ + Text: string | undefined; + + /** + *The text color of the button.
+ */ + TextColor?: string; +} + +export namespace DefaultButtonConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DefaultButtonConfiguration): any => ({ + ...obj, + }); +} + +/** + *Button Config for an in-app message.
+ */ +export interface InAppMessageButton { + /** + *Default button content.
+ */ + Android?: OverrideButtonConfiguration; + + /** + *Default button content.
+ */ + DefaultConfig?: DefaultButtonConfiguration; + + /** + *Default button content.
+ */ + IOS?: OverrideButtonConfiguration; + + /** + *Default button content.
+ */ + Web?: OverrideButtonConfiguration; +} + +export namespace InAppMessageButton { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InAppMessageButton): any => ({ + ...obj, + }); +} + +/** + *The configuration for the message content.
+ */ +export interface InAppMessageContent { + /** + *The background color for the message.
+ */ + BackgroundColor?: string; + + /** + *The configuration for the message body.
+ */ + BodyConfig?: InAppMessageBodyConfig; + + /** + *The configuration for the message header.
+ */ + HeaderConfig?: InAppMessageHeaderConfig; + + /** + *The image url for the background of message.
+ */ + ImageUrl?: string; + + /** + *The first button inside the message.
+ */ + PrimaryBtn?: InAppMessageButton; + + /** + *The second button inside message.
+ */ + SecondaryBtn?: InAppMessageButton; +} + +export namespace InAppMessageContent { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InAppMessageContent): any => ({ + ...obj, + }); +} + +export enum Layout { + BOTTOM_BANNER = "BOTTOM_BANNER", + CAROUSEL = "CAROUSEL", + MIDDLE_BANNER = "MIDDLE_BANNER", + MOBILE_FEED = "MOBILE_FEED", + OVERLAYS = "OVERLAYS", + TOP_BANNER = "TOP_BANNER", +} + +/** + *In-app message configuration.
+ */ +export interface CampaignInAppMessage { + /** + *The message body of the notification, the email body or the text message.
+ */ + Body?: string; + + /** + *In-app message content.
+ */ + Content?: InAppMessageContent[]; + + /** + *Custom config to be sent to client.
+ */ + CustomConfig?: { [key: string]: string }; + + /** + *In-app message layout.
+ */ + Layout?: Layout | string; +} + +export namespace CampaignInAppMessage { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CampaignInAppMessage): any => ({ + ...obj, + }); +} + /** *Specifies the delivery configuration settings for sending a campaign or campaign treatment through a custom channel. This object is required if you use the CampaignCustomMessage object to define the message to send for the campaign or campaign treatment.
*/ @@ -2897,6 +3163,11 @@ export interface MessageConfiguration { *The message that the campaign sends through the SMS channel. If specified, this message overrides the default message.
*/ SMSMessage?: CampaignSmsMessage; + + /** + *The in-app message configuration.
+ */ + InAppMessage?: CampaignInAppMessage; } export namespace MessageConfiguration { @@ -2912,6 +3183,7 @@ export enum Frequency { DAILY = "DAILY", EVENT = "EVENT", HOURLY = "HOURLY", + IN_APP_EVENT = "IN_APP_EVENT", MONTHLY = "MONTHLY", ONCE = "ONCE", WEEKLY = "WEEKLY", @@ -3238,6 +3510,11 @@ export interface CampaignResponse { *The version number of the campaign.
*/ Version?: number; + + /** + *Defines the priority of the campaign, used to decide the order of messages displayed to user if there are multiple messages scheduled to be displayed at the same moment.
+ */ + Priority?: number; } export namespace CampaignResponse { @@ -3721,6 +3998,11 @@ export interface WriteCampaignRequest { *A custom name of the default treatment for the campaign, if the campaign has multiple treatments. A treatment is a variation of a campaign that's used for A/B testing.
*/ TreatmentName?: string; + + /** + *Defines the priority of the campaign, used to decide the order of messages displayed to user if there are multiple messages scheduled to be displayed at the same moment.
+ */ + Priority?: number; } export namespace WriteCampaignRequest { @@ -4310,131 +4592,236 @@ export namespace CreateImportJobResponse { } /** - *Specifies limits on the messages that a journey can send and the number of times participants can enter a journey.
+ *InApp Template Request.
*/ -export interface JourneyLimits { +export interface InAppTemplateRequest { /** - *The maximum number of messages that the journey can send to a single participant during a 24-hour period. The maximum value is 100.
+ *The content of the message, can include up to 5 modals. Each modal must contain a message, a header, and background color. ImageUrl and buttons are optional.
*/ - DailyCap?: number; + Content?: InAppMessageContent[]; /** - *The maximum number of times that a participant can enter the journey. The maximum value is 100. To allow participants to enter the journey an unlimited number of times, set this value to 0.
+ *Custom config to be sent to client.
*/ - EndpointReentryCap?: number; + CustomConfig?: { [key: string]: string }; /** - *The maximum number of messages that the journey can send each second.
+ *The layout of the message.
*/ - MessagesPerSecond?: number; + Layout?: Layout | string; /** - *Minimum time that must pass before an endpoint can re-enter a given journey. The duration should use an ISO 8601 format, such as PT1H.
+ *A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.
*/ - EndpointReentryInterval?: string; + tags?: { [key: string]: string }; + + /** + *The description of the template.
+ */ + TemplateDescription?: string; } -export namespace JourneyLimits { +export namespace InAppTemplateRequest { /** * @internal */ - export const filterSensitiveLog = (obj: JourneyLimits): any => ({ + export const filterSensitiveLog = (obj: InAppTemplateRequest): any => ({ ...obj, }); } -/** - *Specifies the schedule settings for a journey.
- */ -export interface JourneySchedule { - /** - *The scheduled time, in ISO 8601 format, when the journey ended or will end.
- */ - EndTime?: Date; - +export interface CreateInAppTemplateRequest { /** - *The scheduled time, in ISO 8601 format, when the journey began or will begin.
+ *InApp Template Request.
*/ - StartTime?: Date; + InAppTemplateRequest: InAppTemplateRequest | undefined; /** - *The starting UTC offset for the journey schedule, if the value of the journey's LocalTime property is true. Valid values are: UTC, - * UTC+01, UTC+02, UTC+03, UTC+03:30, UTC+04, UTC+04:30, UTC+05, UTC+05:30, - * UTC+05:45, UTC+06, UTC+06:30, UTC+07, UTC+08, UTC+08:45, UTC+09, UTC+09:30, - * UTC+10, UTC+10:30, UTC+11, UTC+12, UTC+12:45, UTC+13, UTC+13:45, UTC-02, - * UTC-02:30, UTC-03, UTC-03:30, UTC-04, UTC-05, UTC-06, UTC-07, UTC-08, UTC-09, - * UTC-09:30, UTC-10, and UTC-11.
+ *The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.
*/ - Timezone?: string; + TemplateName: string | undefined; } -export namespace JourneySchedule { +export namespace CreateInAppTemplateRequest { /** * @internal */ - export const filterSensitiveLog = (obj: JourneySchedule): any => ({ + export const filterSensitiveLog = (obj: CreateInAppTemplateRequest): any => ({ ...obj, }); } /** - *Specifies the settings for an event that causes a campaign to be sent or a journey activity to be performed.
+ *Provides information about a request to create a message template.
*/ -export interface EventFilter { +export interface TemplateCreateMessageBody { /** - *The dimensions for the event filter to use for the campaign or the journey activity.
+ *The Amazon Resource Name (ARN) of the message template that was created.
*/ - Dimensions: EventDimensions | undefined; + Arn?: string; /** - *The type of event that causes the campaign to be sent or the journey activity to be performed. Valid values are: SYSTEM, sends the campaign or performs the activity when a system event occurs; and, ENDPOINT, sends the campaign or performs the activity when an endpoint event (Events resource) occurs.
+ *The message that's returned from the API for the request to create the message template.
*/ - FilterType: FilterType | string | undefined; + Message?: string; + + /** + *The unique identifier for the request to create the message template.
+ */ + RequestID?: string; } -export namespace EventFilter { +export namespace TemplateCreateMessageBody { /** * @internal */ - export const filterSensitiveLog = (obj: EventFilter): any => ({ + export const filterSensitiveLog = (obj: TemplateCreateMessageBody): any => ({ ...obj, }); } -/** - *Specifies the settings for an event that causes a journey activity to start.
- */ -export interface EventStartCondition { +export interface CreateInAppTemplateResponse { /** - *Specifies the settings for an event that causes a campaign to be sent or a journey activity to be performed.
+ *Provides information about a request to create a message template.
*/ - EventFilter?: EventFilter; - - SegmentId?: string; + TemplateCreateMessageBody: TemplateCreateMessageBody | undefined; } -export namespace EventStartCondition { +export namespace CreateInAppTemplateResponse { /** * @internal */ - export const filterSensitiveLog = (obj: EventStartCondition): any => ({ + export const filterSensitiveLog = (obj: CreateInAppTemplateResponse): any => ({ ...obj, }); } /** - *Specifies the conditions for the first activity in a journey. This activity and its conditions determine which users are participants in a journey.
+ *Specifies limits on the messages that a journey can send and the number of times participants can enter a journey.
*/ -export interface StartCondition { +export interface JourneyLimits { /** - *The custom description of the condition.
+ *The maximum number of messages that the journey can send to a single participant during a 24-hour period. The maximum value is 100.
*/ - Description?: string; + DailyCap?: number; /** - *Specifies the settings for an event that causes a journey activity to start.
+ *The maximum number of times that a participant can enter the journey. The maximum value is 100. To allow participants to enter the journey an unlimited number of times, set this value to 0.
*/ - EventStartCondition?: EventStartCondition; + EndpointReentryCap?: number; + + /** + *The maximum number of messages that the journey can send each second.
+ */ + MessagesPerSecond?: number; + + /** + *Minimum time that must pass before an endpoint can re-enter a given journey. The duration should use an ISO 8601 format, such as PT1H.
+ */ + EndpointReentryInterval?: string; +} + +export namespace JourneyLimits { + /** + * @internal + */ + export const filterSensitiveLog = (obj: JourneyLimits): any => ({ + ...obj, + }); +} + +/** + *Specifies the schedule settings for a journey.
+ */ +export interface JourneySchedule { + /** + *The scheduled time, in ISO 8601 format, when the journey ended or will end.
+ */ + EndTime?: Date; + + /** + *The scheduled time, in ISO 8601 format, when the journey began or will begin.
+ */ + StartTime?: Date; + + /** + *The starting UTC offset for the journey schedule, if the value of the journey's LocalTime property is true. Valid values are: UTC, + * UTC+01, UTC+02, UTC+03, UTC+03:30, UTC+04, UTC+04:30, UTC+05, UTC+05:30, + * UTC+05:45, UTC+06, UTC+06:30, UTC+07, UTC+08, UTC+08:45, UTC+09, UTC+09:30, + * UTC+10, UTC+10:30, UTC+11, UTC+12, UTC+12:45, UTC+13, UTC+13:45, UTC-02, + * UTC-02:30, UTC-03, UTC-03:30, UTC-04, UTC-05, UTC-06, UTC-07, UTC-08, UTC-09, + * UTC-09:30, UTC-10, and UTC-11.
+ */ + Timezone?: string; +} + +export namespace JourneySchedule { + /** + * @internal + */ + export const filterSensitiveLog = (obj: JourneySchedule): any => ({ + ...obj, + }); +} + +/** + *Specifies the settings for an event that causes a campaign to be sent or a journey activity to be performed.
+ */ +export interface EventFilter { + /** + *The dimensions for the event filter to use for the campaign or the journey activity.
+ */ + Dimensions: EventDimensions | undefined; + + /** + *The type of event that causes the campaign to be sent or the journey activity to be performed. Valid values are: SYSTEM, sends the campaign or performs the activity when a system event occurs; and, ENDPOINT, sends the campaign or performs the activity when an endpoint event (Events resource) occurs.
+ */ + FilterType: FilterType | string | undefined; +} + +export namespace EventFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EventFilter): any => ({ + ...obj, + }); +} + +/** + *Specifies the settings for an event that causes a journey activity to start.
+ */ +export interface EventStartCondition { + /** + *Specifies the settings for an event that causes a campaign to be sent or a journey activity to be performed.
+ */ + EventFilter?: EventFilter; + + SegmentId?: string; +} + +export namespace EventStartCondition { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EventStartCondition): any => ({ + ...obj, + }); +} + +/** + *Specifies the conditions for the first activity in a journey. This activity and its conditions determine which users are participants in a journey.
+ */ +export interface StartCondition { + /** + *The custom description of the condition.
+ */ + Description?: string; + + /** + *Specifies the settings for an event that causes a journey activity to start.
+ */ + EventStartCondition?: EventStartCondition; /** *The segment that's associated with the first activity in the journey. This segment determines which users are participants in the journey.
@@ -6374,6 +6761,43 @@ export namespace DeleteGcmChannelResponse { }); } +export interface DeleteInAppTemplateRequest { + /** + *The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.
+ */ + TemplateName: string | undefined; + + /** + *The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.
If specified, this value must match the identifier for an existing template version. If specified for an update operation, this value must match the identifier for the latest existing version of the template. This restriction helps ensure that race conditions don't occur.
If you don't specify a value for this parameter, Amazon Pinpoint does the following:
For a get operation, retrieves information about the active version of the template.
For an update operation, saves the updates to (overwrites) the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.
For a delete operation, deletes the template, including all versions of the template.
Provides information about an API request or response.
+ */ + MessageBody: MessageBody | undefined; +} + +export namespace DeleteInAppTemplateResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteInAppTemplateResponse): any => ({ + ...obj, + }); +} + export interface DeleteJourneyRequest { /** *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
@@ -7287,6 +7711,7 @@ export namespace EmailChannelRequest { export enum TemplateType { EMAIL = "EMAIL", + INAPP = "INAPP", PUSH = "PUSH", SMS = "SMS", VOICE = "VOICE", @@ -8713,302 +9138,3 @@ export namespace GetEndpointRequest { ...obj, }); } - -export interface GetEndpointResponse { - /** - *Provides information about the channel type and other settings for an endpoint.
- */ - EndpointResponse: EndpointResponse | undefined; -} - -export namespace GetEndpointResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetEndpointResponse): any => ({ - ...obj, - }); -} - -export interface GetEventStreamRequest { - /** - *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
- */ - ApplicationId: string | undefined; -} - -export namespace GetEventStreamRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetEventStreamRequest): any => ({ - ...obj, - }); -} - -export interface GetEventStreamResponse { - /** - *Specifies settings for publishing event data to an Amazon Kinesis data stream or an Amazon Kinesis Data Firehose delivery stream.
- */ - EventStream: EventStream | undefined; -} - -export namespace GetEventStreamResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetEventStreamResponse): any => ({ - ...obj, - }); -} - -export interface GetExportJobRequest { - /** - *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
- */ - ApplicationId: string | undefined; - - /** - *The unique identifier for the job.
- */ - JobId: string | undefined; -} - -export namespace GetExportJobRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetExportJobRequest): any => ({ - ...obj, - }); -} - -export interface GetExportJobResponse { - /** - *Provides information about the status and settings of a job that exports endpoint definitions to a file. The file can be added directly to an Amazon Simple Storage Service (Amazon S3) bucket by using the Amazon Pinpoint API or downloaded directly to a computer by using the Amazon Pinpoint console.
- */ - ExportJobResponse: ExportJobResponse | undefined; -} - -export namespace GetExportJobResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetExportJobResponse): any => ({ - ...obj, - }); -} - -export interface GetExportJobsRequest { - /** - *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
- */ - ApplicationId: string | undefined; - - /** - *The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.
- */ - PageSize?: string; - - /** - *The NextToken string that specifies which page of results to return in a paginated response.
- */ - Token?: string; -} - -export namespace GetExportJobsRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetExportJobsRequest): any => ({ - ...obj, - }); -} - -export interface GetExportJobsResponse { - /** - *Provides information about all the export jobs that are associated with an application or segment. An export job is a job that exports endpoint definitions to a file.
- */ - ExportJobsResponse: ExportJobsResponse | undefined; -} - -export namespace GetExportJobsResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetExportJobsResponse): any => ({ - ...obj, - }); -} - -export interface GetGcmChannelRequest { - /** - *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
- */ - ApplicationId: string | undefined; -} - -export namespace GetGcmChannelRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetGcmChannelRequest): any => ({ - ...obj, - }); -} - -export interface GetGcmChannelResponse { - /** - *Provides information about the status and settings of the GCM channel for an application. The GCM channel enables Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service.
- */ - GCMChannelResponse: GCMChannelResponse | undefined; -} - -export namespace GetGcmChannelResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetGcmChannelResponse): any => ({ - ...obj, - }); -} - -export interface GetImportJobRequest { - /** - *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
- */ - ApplicationId: string | undefined; - - /** - *The unique identifier for the job.
- */ - JobId: string | undefined; -} - -export namespace GetImportJobRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetImportJobRequest): any => ({ - ...obj, - }); -} - -export interface GetImportJobResponse { - /** - *Provides information about the status and settings of a job that imports endpoint definitions from one or more files. The files can be stored in an Amazon Simple Storage Service (Amazon S3) bucket or uploaded directly from a computer by using the Amazon Pinpoint console.
- */ - ImportJobResponse: ImportJobResponse | undefined; -} - -export namespace GetImportJobResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetImportJobResponse): any => ({ - ...obj, - }); -} - -export interface GetImportJobsRequest { - /** - *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
- */ - ApplicationId: string | undefined; - - /** - *The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.
- */ - PageSize?: string; - - /** - *The NextToken string that specifies which page of results to return in a paginated response.
- */ - Token?: string; -} - -export namespace GetImportJobsRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetImportJobsRequest): any => ({ - ...obj, - }); -} - -/** - *Provides information about the status and settings of all the import jobs that are associated with an application or segment. An import job is a job that imports endpoint definitions from one or more files.
- */ -export interface ImportJobsResponse { - /** - *An array of responses, one for each import job that's associated with the application (Import Jobs resource) or segment (Segment Import Jobs resource).
- */ - Item: ImportJobResponse[] | undefined; - - /** - *The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.
- */ - NextToken?: string; -} - -export namespace ImportJobsResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ImportJobsResponse): any => ({ - ...obj, - }); -} - -export interface GetImportJobsResponse { - /** - *Provides information about the status and settings of all the import jobs that are associated with an application or segment. An import job is a job that imports endpoint definitions from one or more files.
- */ - ImportJobsResponse: ImportJobsResponse | undefined; -} - -export namespace GetImportJobsResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetImportJobsResponse): any => ({ - ...obj, - }); -} - -export interface GetJourneyRequest { - /** - *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
- */ - ApplicationId: string | undefined; - - /** - *The unique identifier for the journey.
- */ - JourneyId: string | undefined; -} - -export namespace GetJourneyRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetJourneyRequest): any => ({ - ...obj, - }); -} - -export interface GetJourneyResponse { - /** - *Provides information about the status, configuration, and other settings for a journey.
- */ - JourneyResponse: JourneyResponse | undefined; -} - -export namespace GetJourneyResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetJourneyResponse): any => ({ - ...obj, - }); -} diff --git a/clients/client-pinpoint/models/models_1.ts b/clients/client-pinpoint/models/models_1.ts index e5f90420a968..7132f872c8fb 100644 --- a/clients/client-pinpoint/models/models_1.ts +++ b/clients/client-pinpoint/models/models_1.ts @@ -17,6 +17,7 @@ import { BaiduChannelRequest, BaiduChannelResponse, BaseKpiResult, + CampaignEventFilter, CampaignHook, CampaignLimits, CampaignResponse, @@ -29,16 +30,21 @@ import { EndpointBatchRequest, EndpointMessageResult, EndpointRequest, + EndpointResponse, EndpointSendConfiguration, EndpointsResponse, EventStream, EventsRequest, EventsResponse, + ExportJobResponse, ExportJobsResponse, GCMChannelRequest, GCMChannelResponse, - ImportJobsResponse, + ImportJobResponse, + InAppMessageContent, + InAppTemplateRequest, JourneyResponse, + Layout, MessageBody, PushNotificationTemplateRequest, QuietTime, @@ -56,6 +62,579 @@ import { WriteSegmentRequest, } from "./models_0"; +export interface GetEndpointResponse { + /** + *Provides information about the channel type and other settings for an endpoint.
+ */ + EndpointResponse: EndpointResponse | undefined; +} + +export namespace GetEndpointResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetEndpointResponse): any => ({ + ...obj, + }); +} + +export interface GetEventStreamRequest { + /** + *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
+ */ + ApplicationId: string | undefined; +} + +export namespace GetEventStreamRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetEventStreamRequest): any => ({ + ...obj, + }); +} + +export interface GetEventStreamResponse { + /** + *Specifies settings for publishing event data to an Amazon Kinesis data stream or an Amazon Kinesis Data Firehose delivery stream.
+ */ + EventStream: EventStream | undefined; +} + +export namespace GetEventStreamResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetEventStreamResponse): any => ({ + ...obj, + }); +} + +export interface GetExportJobRequest { + /** + *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
+ */ + ApplicationId: string | undefined; + + /** + *The unique identifier for the job.
+ */ + JobId: string | undefined; +} + +export namespace GetExportJobRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetExportJobRequest): any => ({ + ...obj, + }); +} + +export interface GetExportJobResponse { + /** + *Provides information about the status and settings of a job that exports endpoint definitions to a file. The file can be added directly to an Amazon Simple Storage Service (Amazon S3) bucket by using the Amazon Pinpoint API or downloaded directly to a computer by using the Amazon Pinpoint console.
+ */ + ExportJobResponse: ExportJobResponse | undefined; +} + +export namespace GetExportJobResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetExportJobResponse): any => ({ + ...obj, + }); +} + +export interface GetExportJobsRequest { + /** + *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
+ */ + ApplicationId: string | undefined; + + /** + *The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.
+ */ + PageSize?: string; + + /** + *The NextToken string that specifies which page of results to return in a paginated response.
+ */ + Token?: string; +} + +export namespace GetExportJobsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetExportJobsRequest): any => ({ + ...obj, + }); +} + +export interface GetExportJobsResponse { + /** + *Provides information about all the export jobs that are associated with an application or segment. An export job is a job that exports endpoint definitions to a file.
+ */ + ExportJobsResponse: ExportJobsResponse | undefined; +} + +export namespace GetExportJobsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetExportJobsResponse): any => ({ + ...obj, + }); +} + +export interface GetGcmChannelRequest { + /** + *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
+ */ + ApplicationId: string | undefined; +} + +export namespace GetGcmChannelRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetGcmChannelRequest): any => ({ + ...obj, + }); +} + +export interface GetGcmChannelResponse { + /** + *Provides information about the status and settings of the GCM channel for an application. The GCM channel enables Amazon Pinpoint to send push notifications through the Firebase Cloud Messaging (FCM), formerly Google Cloud Messaging (GCM), service.
+ */ + GCMChannelResponse: GCMChannelResponse | undefined; +} + +export namespace GetGcmChannelResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetGcmChannelResponse): any => ({ + ...obj, + }); +} + +export interface GetImportJobRequest { + /** + *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
+ */ + ApplicationId: string | undefined; + + /** + *The unique identifier for the job.
+ */ + JobId: string | undefined; +} + +export namespace GetImportJobRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetImportJobRequest): any => ({ + ...obj, + }); +} + +export interface GetImportJobResponse { + /** + *Provides information about the status and settings of a job that imports endpoint definitions from one or more files. The files can be stored in an Amazon Simple Storage Service (Amazon S3) bucket or uploaded directly from a computer by using the Amazon Pinpoint console.
+ */ + ImportJobResponse: ImportJobResponse | undefined; +} + +export namespace GetImportJobResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetImportJobResponse): any => ({ + ...obj, + }); +} + +export interface GetImportJobsRequest { + /** + *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
+ */ + ApplicationId: string | undefined; + + /** + *The maximum number of items to include in each page of a paginated response. This parameter is not supported for application, campaign, and journey metrics.
+ */ + PageSize?: string; + + /** + *The NextToken string that specifies which page of results to return in a paginated response.
+ */ + Token?: string; +} + +export namespace GetImportJobsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetImportJobsRequest): any => ({ + ...obj, + }); +} + +/** + *Provides information about the status and settings of all the import jobs that are associated with an application or segment. An import job is a job that imports endpoint definitions from one or more files.
+ */ +export interface ImportJobsResponse { + /** + *An array of responses, one for each import job that's associated with the application (Import Jobs resource) or segment (Segment Import Jobs resource).
+ */ + Item: ImportJobResponse[] | undefined; + + /** + *The string to use in a subsequent request to get the next page of results in a paginated response. This value is null if there are no additional pages.
+ */ + NextToken?: string; +} + +export namespace ImportJobsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ImportJobsResponse): any => ({ + ...obj, + }); +} + +export interface GetImportJobsResponse { + /** + *Provides information about the status and settings of all the import jobs that are associated with an application or segment. An import job is a job that imports endpoint definitions from one or more files.
+ */ + ImportJobsResponse: ImportJobsResponse | undefined; +} + +export namespace GetImportJobsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetImportJobsResponse): any => ({ + ...obj, + }); +} + +export interface GetInAppMessagesRequest { + /** + *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
+ */ + ApplicationId: string | undefined; + + /** + *The unique identifier for the endpoint.
+ */ + EndpointId: string | undefined; +} + +export namespace GetInAppMessagesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetInAppMessagesRequest): any => ({ + ...obj, + }); +} + +/** + *Provides all fields required for building an in-app message.
+ */ +export interface InAppMessage { + /** + *In-app message content.
    + */ + Content?: InAppMessageContent[]; + + /** + *Custom config to be sent to the SDK.
    
+ */ + CustomConfig?: { [key: string]: string }; + + /** + *The layout of the message.
+ */ + Layout?: Layout | string; +} + +export namespace InAppMessage { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InAppMessage): any => ({ + ...obj, + }); +} + +/** + *Schedule of the campaign.
+ */ +export interface InAppCampaignSchedule { + /** + *The scheduled time after which the in-app message should not be shown. Timestamp is in ISO 8601 format.
+ */ + EndDate?: string; + + /** + *The event filter the SDK has to use to show the in-app message in the application.
+ */ + EventFilter?: CampaignEventFilter; + + /** + *Time during which the in-app message should not be shown to the user.
+ */ + QuietTime?: QuietTime; +} + +export namespace InAppCampaignSchedule { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InAppCampaignSchedule): any => ({ + ...obj, + }); +} + +/** + *Targeted in-app message campaign.
+ */ +export interface InAppMessageCampaign { + /** + *Campaign id of the corresponding campaign.
+ */ + CampaignId?: string; + + /** + *Daily cap which controls the number of times any in-app messages can be shown to the endpoint during a day.
+ */ + DailyCap?: number; + + /** + *In-app message content with all fields required for rendering an in-app message.
+ */ + InAppMessage?: InAppMessage; + + /** + *Priority of the in-app message.
+ */ + Priority?: number; + + /** + *Schedule of the campaign.
+ */ + Schedule?: InAppCampaignSchedule; + + /** + *Session cap which controls the number of times an in-app message can be shown to the endpoint during an application session.
+ */ + SessionCap?: number; + + /** + *Total cap which controls the number of times an in-app message can be shown to the endpoint.
+ */ + TotalCap?: number; + + /** + *Treatment id of the campaign.
+ */ + TreatmentId?: string; +} + +export namespace InAppMessageCampaign { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InAppMessageCampaign): any => ({ + ...obj, + }); +} + +/** + *Get in-app messages response object.
+ */ +export interface InAppMessagesResponse { + /** + *List of targeted in-app message campaigns.
+ */ + InAppMessageCampaigns?: InAppMessageCampaign[]; +} + +export namespace InAppMessagesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InAppMessagesResponse): any => ({ + ...obj, + }); +} + +export interface GetInAppMessagesResponse { + /** + *Get in-app messages response object.
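    + *
    + *A minimal retrieval sketch (run inside an async function; the region and identifiers are illustrative assumptions):
    + *
    + *  import { PinpointClient, GetInAppMessagesCommand } from "@aws-sdk/client-pinpoint";
    + *
    + *  const client = new PinpointClient({ region: "us-east-1" });
    + *  const out = await client.send(
    + *    new GetInAppMessagesCommand({ ApplicationId: "my-app-id", EndpointId: "my-endpoint-id" })
    + *  );
    + *  const campaigns = out.InAppMessagesResponse?.InAppMessageCampaigns ?? [];
    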
+ */ + InAppMessagesResponse: InAppMessagesResponse | undefined; +} + +export namespace GetInAppMessagesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetInAppMessagesResponse): any => ({ + ...obj, + }); +} + +export interface GetInAppTemplateRequest { + /** + *The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.
+ */ + TemplateName: string | undefined; + + /** + *The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.
If specified, this value must match the identifier for an existing template version. If specified for an update operation, this value must match the identifier for the latest existing version of the template. This restriction helps ensure that race conditions don't occur.
If you don't specify a value for this parameter, Amazon Pinpoint does the following:
For a get operation, retrieves information about the active version of the template.
For an update operation, saves the updates to (overwrites) the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.
For a delete operation, deletes the template, including all versions of the template.
In-App Template Response.
    + */ +export interface InAppTemplateResponse { + /** + *The resource ARN of the template.
    
    + */ + Arn?: string; + + /** + *The content of the message. It can include up to 5 modals. Each modal must contain a message, a header, and a background color. ImageUrl and buttons are optional.
    
+ */ + Content?: InAppMessageContent[]; + + /** + *The creation date of the template.
    + */ + CreationDate: string | undefined; + + /** + *Custom config to be sent to the client.
    
+ */ + CustomConfig?: { [key: string]: string }; + + /** + *The last modified date of the template.
+ */ + LastModifiedDate: string | undefined; + + /** + *The layout of the message.
+ */ + Layout?: Layout | string; + + /** + *A string-to-string map of key-value pairs that defines the tags to associate with the message template. Each tag consists of a required tag key and an associated tag value.
+ */ + tags?: { [key: string]: string }; + + /** + *The description of the template.
+ */ + TemplateDescription?: string; + + /** + *The name of the template.
+ */ + TemplateName: string | undefined; + + /** + *The type of the template.
+ */ + TemplateType: TemplateType | string | undefined; + + /** + *The version id of the template.
+ */ + Version?: string; +} + +export namespace InAppTemplateResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InAppTemplateResponse): any => ({ + ...obj, + }); +} + +export interface GetInAppTemplateResponse { + /** + *In-App Template Response.
+ */ + InAppTemplateResponse: InAppTemplateResponse | undefined; +} + +export namespace GetInAppTemplateResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetInAppTemplateResponse): any => ({ + ...obj, + }); +} + +export interface GetJourneyRequest { + /** + *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
+ */ + ApplicationId: string | undefined; + + /** + *The unique identifier for the journey.
+ */ + JourneyId: string | undefined; +} + +export namespace GetJourneyRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetJourneyRequest): any => ({ + ...obj, + }); +} + +export interface GetJourneyResponse { + /** + *Provides information about the status, configuration, and other settings for a journey.
+ */ + JourneyResponse: JourneyResponse | undefined; +} + +export namespace GetJourneyResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetJourneyResponse): any => ({ + ...obj, + }); +} + export interface GetJourneyDateRangeKpiRequest { /** *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
@@ -2702,6 +3281,53 @@ export namespace UpdateGcmChannelResponse { }); } +export interface UpdateInAppTemplateRequest { + /** + *Specifies whether to save the updates as a new version of the message template. Valid values are: true, save the updates as a new version; and, false, save the updates to (overwrite) the latest existing version of the template.
If you don't specify a value for this parameter, Amazon Pinpoint saves the updates to (overwrites) the latest existing version of the template. If you specify a value of true for this parameter, don't specify a value for the version parameter. Otherwise, an error will occur.
+ */ + CreateNewVersion?: boolean; + + /** + *InApp Template Request.
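    + *
    + *For example, a minimal update sketch that saves the edits as a new version instead of overwriting the active one (run inside an async function; names and values are illustrative assumptions):
    + *
    + *  import { PinpointClient, UpdateInAppTemplateCommand } from "@aws-sdk/client-pinpoint";
    + *
    + *  const client = new PinpointClient({ region: "us-east-1" });
    + *  await client.send(
    + *    new UpdateInAppTemplateCommand({
    + *      TemplateName: "my-inapp-template",
    + *      CreateNewVersion: true, // therefore Version is deliberately not specified
    + *      InAppTemplateRequest: { Layout: "BOTTOM_BANNER" },
    + *    })
    + *  );
    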
+ */ + InAppTemplateRequest: InAppTemplateRequest | undefined; + + /** + *The name of the message template. A template name must start with an alphanumeric character and can contain a maximum of 128 characters. The characters can be alphanumeric characters, underscores (_), or hyphens (-). Template names are case sensitive.
+ */ + TemplateName: string | undefined; + + /** + *The unique identifier for the version of the message template to update, retrieve information about, or delete. To retrieve identifiers and other information for all the versions of a template, use the Template Versions resource.
If specified, this value must match the identifier for an existing template version. If specified for an update operation, this value must match the identifier for the latest existing version of the template. This restriction helps ensure that race conditions don't occur.
If you don't specify a value for this parameter, Amazon Pinpoint does the following:
For a get operation, retrieves information about the active version of the template.
For an update operation, saves the updates to (overwrites) the latest existing version of the template, if the create-new-version parameter isn't used or is set to false.
For a delete operation, deletes the template, including all versions of the template.
Provides information about an API request or response.
+ */ + MessageBody: MessageBody | undefined; +} + +export namespace UpdateInAppTemplateResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateInAppTemplateResponse): any => ({ + ...obj, + }); +} + export interface UpdateJourneyRequest { /** *The unique identifier for the application. This identifier is displayed as the Project ID on the Amazon Pinpoint console.
diff --git a/clients/client-pinpoint/protocols/Aws_restJson1.ts b/clients/client-pinpoint/protocols/Aws_restJson1.ts index 105f1520bd67..4c9a7e641f28 100644 --- a/clients/client-pinpoint/protocols/Aws_restJson1.ts +++ b/clients/client-pinpoint/protocols/Aws_restJson1.ts @@ -6,6 +6,10 @@ import { } from "../commands/CreateEmailTemplateCommand"; import { CreateExportJobCommandInput, CreateExportJobCommandOutput } from "../commands/CreateExportJobCommand"; import { CreateImportJobCommandInput, CreateImportJobCommandOutput } from "../commands/CreateImportJobCommand"; +import { + CreateInAppTemplateCommandInput, + CreateInAppTemplateCommandOutput, +} from "../commands/CreateInAppTemplateCommand"; import { CreateJourneyCommandInput, CreateJourneyCommandOutput } from "../commands/CreateJourneyCommand"; import { CreatePushTemplateCommandInput, CreatePushTemplateCommandOutput } from "../commands/CreatePushTemplateCommand"; import { @@ -43,6 +47,10 @@ import { import { DeleteEndpointCommandInput, DeleteEndpointCommandOutput } from "../commands/DeleteEndpointCommand"; import { DeleteEventStreamCommandInput, DeleteEventStreamCommandOutput } from "../commands/DeleteEventStreamCommand"; import { DeleteGcmChannelCommandInput, DeleteGcmChannelCommandOutput } from "../commands/DeleteGcmChannelCommand"; +import { + DeleteInAppTemplateCommandInput, + DeleteInAppTemplateCommandOutput, +} from "../commands/DeleteInAppTemplateCommand"; import { DeleteJourneyCommandInput, DeleteJourneyCommandOutput } from "../commands/DeleteJourneyCommand"; import { DeletePushTemplateCommandInput, DeletePushTemplateCommandOutput } from "../commands/DeletePushTemplateCommand"; import { @@ -108,6 +116,8 @@ import { GetExportJobsCommandInput, GetExportJobsCommandOutput } from "../comman import { GetGcmChannelCommandInput, GetGcmChannelCommandOutput } from "../commands/GetGcmChannelCommand"; import { GetImportJobCommandInput, GetImportJobCommandOutput } from "../commands/GetImportJobCommand"; import { GetImportJobsCommandInput, GetImportJobsCommandOutput } from "../commands/GetImportJobsCommand"; +import { GetInAppMessagesCommandInput, GetInAppMessagesCommandOutput } from "../commands/GetInAppMessagesCommand"; +import { GetInAppTemplateCommandInput, GetInAppTemplateCommandOutput } from "../commands/GetInAppTemplateCommand"; import { GetJourneyCommandInput, GetJourneyCommandOutput } from "../commands/GetJourneyCommand"; import { GetJourneyDateRangeKpiCommandInput, @@ -199,6 +209,10 @@ import { UpdateEndpointsBatchCommandOutput, } from "../commands/UpdateEndpointsBatchCommand"; import { UpdateGcmChannelCommandInput, UpdateGcmChannelCommandOutput } from "../commands/UpdateGcmChannelCommand"; +import { + UpdateInAppTemplateCommandInput, + UpdateInAppTemplateCommandOutput, +} from "../commands/UpdateInAppTemplateCommand"; import { UpdateJourneyCommandInput, UpdateJourneyCommandOutput } from "../commands/UpdateJourneyCommand"; import { UpdateJourneyStateCommandInput, UpdateJourneyStateCommandOutput } from "../commands/UpdateJourneyStateCommand"; import { UpdatePushTemplateCommandInput, UpdatePushTemplateCommandOutput } from "../commands/UpdatePushTemplateCommand"; @@ -253,6 +267,7 @@ import { CampaignEmailMessage, CampaignEventFilter, CampaignHook, + CampaignInAppMessage, CampaignLimits, CampaignResponse, CampaignSmsMessage, @@ -268,6 +283,7 @@ import { CreateTemplateMessageBody, CustomDeliveryConfiguration, CustomMessageActivity, + DefaultButtonConfiguration, DefaultMessage, DefaultPushNotificationMessage, 
DefaultPushNotificationTemplate, @@ -313,7 +329,11 @@ import { ImportJobRequest, ImportJobResource, ImportJobResponse, - ImportJobsResponse, + InAppMessageBodyConfig, + InAppMessageButton, + InAppMessageContent, + InAppMessageHeaderConfig, + InAppTemplateRequest, InternalServerErrorException, ItemResponse, JourneyCustomMessage, @@ -331,6 +351,7 @@ import { MultiConditionalBranch, MultiConditionalSplitActivity, NotFoundException, + OverrideButtonConfiguration, PayloadTooLargeException, PublicEndpoint, PushMessageActivity, @@ -366,6 +387,7 @@ import { StartCondition, Template, TemplateConfiguration, + TemplateCreateMessageBody, TooManyRequestsException, TreatmentResource, VoiceChannelResponse, @@ -380,6 +402,12 @@ import { __EndpointTypesElement, } from "../models/models_0"; import { + ImportJobsResponse, + InAppCampaignSchedule, + InAppMessage, + InAppMessageCampaign, + InAppMessagesResponse, + InAppTemplateResponse, JourneyDateRangeKpiResponse, JourneyExecutionActivityMetricsResponse, JourneyExecutionMetricsResponse, @@ -610,6 +638,44 @@ export const serializeAws_restJson1CreateImportJobCommand = async ( }); }; +export const serializeAws_restJson1CreateInAppTemplateCommand = async ( + input: CreateInAppTemplateCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/v1/templates/{TemplateName}/inapp"; + if (input.TemplateName !== undefined) { + const labelValue: string = input.TemplateName; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: TemplateName."); + } + resolvedPath = resolvedPath.replace("{TemplateName}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: TemplateName."); + } + let body: any; + if (input.InAppTemplateRequest !== undefined) { + body = serializeAws_restJson1InAppTemplateRequest(input.InAppTemplateRequest, context); + } + if (body === undefined) { + body = {}; + } + body = JSON.stringify(body); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1CreateJourneyCommand = async ( input: CreateJourneyCommandInput, context: __SerdeContext @@ -1231,6 +1297,39 @@ export const serializeAws_restJson1DeleteGcmChannelCommand = async ( }); }; +export const serializeAws_restJson1DeleteInAppTemplateCommand = async ( + input: DeleteInAppTemplateCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/v1/templates/{TemplateName}/inapp"; + if (input.TemplateName !== undefined) { + const labelValue: string = input.TemplateName; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: TemplateName."); + } + resolvedPath = resolvedPath.replace("{TemplateName}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: TemplateName."); + } + const query: any = { + ...(input.Version !== undefined && { version: input.Version }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + query, + body, + }); +}; + export const serializeAws_restJson1DeleteJourneyCommand = async ( input: DeleteJourneyCommandInput, context: __SerdeContext @@ -2439,6 +2538,78 @@ export const serializeAws_restJson1GetImportJobsCommand = async ( }); }; +export const serializeAws_restJson1GetInAppMessagesCommand = async ( + input: GetInAppMessagesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + + "/v1/apps/{ApplicationId}/endpoints/{EndpointId}/inappmessages"; + if (input.ApplicationId !== undefined) { + const labelValue: string = input.ApplicationId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: ApplicationId."); + } + resolvedPath = resolvedPath.replace("{ApplicationId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: ApplicationId."); + } + if (input.EndpointId !== undefined) { + const labelValue: string = input.EndpointId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: EndpointId."); + } + resolvedPath = resolvedPath.replace("{EndpointId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: EndpointId."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetInAppTemplateCommand = async ( + input: GetInAppTemplateCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/v1/templates/{TemplateName}/inapp"; + if (input.TemplateName !== undefined) { + const labelValue: string = input.TemplateName; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: TemplateName."); + } + resolvedPath = resolvedPath.replace("{TemplateName}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: TemplateName."); + } + const query: any = { + ...(input.Version !== undefined && { version: input.Version }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + export const serializeAws_restJson1GetJourneyCommand = async ( input: GetJourneyCommandInput, context: __SerdeContext @@ -4085,6 +4256,49 @@ export const serializeAws_restJson1UpdateGcmChannelCommand = async ( }); }; +export const serializeAws_restJson1UpdateInAppTemplateCommand = async ( + input: UpdateInAppTemplateCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/v1/templates/{TemplateName}/inapp"; + if (input.TemplateName !== undefined) { + const labelValue: string = input.TemplateName; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: TemplateName."); + } + resolvedPath = resolvedPath.replace("{TemplateName}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: TemplateName."); + } + const query: any = { + ...(input.CreateNewVersion !== undefined && { "create-new-version": input.CreateNewVersion.toString() }), + ...(input.Version !== undefined && { version: input.Version }), + }; + let body: any; + if (input.InAppTemplateRequest !== undefined) { + body = serializeAws_restJson1InAppTemplateRequest(input.InAppTemplateRequest, context); + } + if (body === undefined) { + body = {}; + } + body = JSON.stringify(body); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PUT", + headers, + path: resolvedPath, + query, + body, + }); +}; + export const serializeAws_restJson1UpdateJourneyCommand = async ( input: UpdateJourneyCommandInput, context: __SerdeContext @@ -5009,6 +5223,91 @@ const deserializeAws_restJson1CreateImportJobCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1CreateInAppTemplateCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): PromiseThe object that contains the Docker image URI for either your robot or simulation applications.
+ */ +export interface Environment { + /** + *The Docker image URI for either your robot or simulation applications.
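    + *
    + *For example, an Environment that points at a private Amazon ECR image (the account, region, repository, and tag are hypothetical placeholders):
    + *
    + *  import { Environment } from "@aws-sdk/client-robomaker";
    + *
    + *  const environment: Environment = {
    + *    uri: "123456789012.dkr.ecr.us-west-2.amazonaws.com/my-robot-app:latest", // placeholder URI
    + *  };
    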
+ */ + uri?: string; +} + +export namespace Environment { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Environment): any => ({ + ...obj, + }); +} + export enum RobotSoftwareSuiteType { ROS = "ROS", ROS2 = "ROS2", @@ -1645,7 +1664,7 @@ export interface CreateRobotApplicationRequest { /** *The sources of the robot application.
    */ - sources: SourceConfig[] | undefined; + sources?: SourceConfig[]; /** *The robot software suite (ROS distribution) used by the robot application.
    
    @@ -1657,6 +1676,11 @@ export interface CreateRobotApplicationRequest { * application. */ tags?: { [key: string]: string }; + + /** + *The object that contains the URI of the Docker image that you use for your robot application.
    
+ */ + environment?: Environment; } export namespace CreateRobotApplicationRequest { @@ -1743,6 +1767,11 @@ export interface CreateRobotApplicationResponse { *The list of all tags added to the robot application.
    */ tags?: { [key: string]: string }; + + /** + *An object that contains the Docker image URI used to create your robot application.
    
    + */ + environment?: Environment; } export namespace CreateRobotApplicationResponse { @@ -1765,6 +1794,16 @@ export interface CreateRobotApplicationVersionRequest { * the latest revision ID, a new version will be created. */ currentRevisionId?: string; + + /** + *The Amazon S3 eTag identifier for the zip file bundle that you use for your robot application.
    
+ */ + s3Etags?: string[]; + + /** + *A SHA256 identifier for the Docker image that you use for your robot application.
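    + *
    + *A minimal version-creation sketch that passes the expected image digest along with the request (run inside an async function; the ARN and digest are hypothetical placeholders):
    + *
    + *  import { RoboMakerClient, CreateRobotApplicationVersionCommand } from "@aws-sdk/client-robomaker";
    + *
    + *  const client = new RoboMakerClient({ region: "us-west-2" });
    + *  await client.send(
    + *    new CreateRobotApplicationVersionCommand({
    + *      application: "arn:aws:robomaker:us-west-2:123456789012:robot-application/my-robot-app/1616707708797",
    + *      imageDigest: "sha256:<digest-of-your-image>", // placeholder digest
    + *    })
    + *  );
    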
+ */ + imageDigest?: string; } export namespace CreateRobotApplicationVersionRequest { @@ -1812,6 +1851,11 @@ export interface CreateRobotApplicationVersionResponse { *The revision id of the robot application.
*/ revisionId?: string; + + /** + *The object that contains the Docker image URI used to create your robot application.
+ */ + environment?: Environment; } export namespace CreateRobotApplicationVersionResponse { @@ -1889,7 +1933,7 @@ export interface CreateSimulationApplicationRequest { /** *The sources of the simulation application.
*/ - sources: SourceConfig[] | undefined; + sources?: SourceConfig[]; /** *The simulation software suite used by the simulation application.
@@ -1911,6 +1955,11 @@ export interface CreateSimulationApplicationRequest { * application. */ tags?: { [key: string]: string }; + + /** + *The object that contains the Docker image URI used to create your simulation application.
+ */ + environment?: Environment; } export namespace CreateSimulationApplicationRequest { @@ -1973,6 +2022,11 @@ export interface CreateSimulationApplicationResponse { *The list of all tags added to the simulation application.
*/ tags?: { [key: string]: string }; + + /** + *The object that contains the Docker image URI that you used to create your simulation application.
+ */ + environment?: Environment; } export namespace CreateSimulationApplicationResponse { @@ -1995,6 +2049,16 @@ export interface CreateSimulationApplicationVersionRequest { * matches the latest revision ID, a new version will be created. */ currentRevisionId?: string; + + /** + *The Amazon S3 eTag identifier for the zip file bundle that you use to create the simulation application.
    + */ + s3Etags?: string[]; + + /** + *The SHA256 digest used to identify the Docker image URI used to create the simulation application.
    
+ */ + imageDigest?: string; } export namespace CreateSimulationApplicationVersionRequest { @@ -2052,6 +2116,11 @@ export interface CreateSimulationApplicationVersionResponse { *The revision ID of the simulation application.
*/ revisionId?: string; + + /** + *The object that contains the Docker image URI used to create the simulation application.
+ */ + environment?: Environment; } export namespace CreateSimulationApplicationVersionResponse { @@ -3683,6 +3752,16 @@ export interface DescribeRobotApplicationResponse { *The list of all tags added to the specified robot application.
*/ tags?: { [key: string]: string }; + + /** + *The object that contains the Docker image URI used to create the robot application.
+ */ + environment?: Environment; + + /** + *A SHA256 identifier for the Docker image that you use for your robot application.
+ */ + imageDigest?: string; } export namespace DescribeRobotApplicationResponse { @@ -3766,6 +3845,16 @@ export interface DescribeSimulationApplicationResponse { *The list of all tags added to the specified simulation application.
*/ tags?: { [key: string]: string }; + + /** + *The object that contains the Docker image URI used to create the simulation application.
+ */ + environment?: Environment; + + /** + *A SHA256 identifier for the Docker image that you use for your simulation application.
+ */ + imageDigest?: string; } export namespace DescribeSimulationApplicationResponse { @@ -6384,7 +6473,7 @@ export interface UpdateRobotApplicationRequest { /** *The sources of the robot application.
*/ - sources: SourceConfig[] | undefined; + sources?: SourceConfig[]; /** *The robot software suite (ROS distribution) used by the robot application.
@@ -6395,6 +6484,11 @@ export interface UpdateRobotApplicationRequest { *The revision id for the robot application.
*/ currentRevisionId?: string; + + /** + *The object that contains the Docker image URI for your robot application.
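    + *
    + *For example, a minimal sketch that points an existing robot application at a container image (run inside an async function; the ARN, software suite, and image URI are illustrative assumptions):
    + *
    + *  import { RoboMakerClient, UpdateRobotApplicationCommand } from "@aws-sdk/client-robomaker";
    + *
    + *  const client = new RoboMakerClient({ region: "us-west-2" });
    + *  await client.send(
    + *    new UpdateRobotApplicationCommand({
    + *      application: "arn:aws:robomaker:us-west-2:123456789012:robot-application/my-robot-app/1616707708797",
    + *      robotSoftwareSuite: { name: "ROS", version: "Melodic" },
    + *      environment: { uri: "123456789012.dkr.ecr.us-west-2.amazonaws.com/my-robot-app:latest" },
    + *    })
    + *  );
    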
+ */ + environment?: Environment; } export namespace UpdateRobotApplicationRequest { @@ -6442,6 +6536,11 @@ export interface UpdateRobotApplicationResponse { *The revision id of the robot application.
*/ revisionId?: string; + + /** + *The object that contains the Docker image URI for your robot application.
+ */ + environment?: Environment; } export namespace UpdateRobotApplicationResponse { @@ -6462,7 +6561,7 @@ export interface UpdateSimulationApplicationRequest { /** *The sources of the simulation application.
*/ - sources: SourceConfig[] | undefined; + sources?: SourceConfig[]; /** *The simulation software suite used by the simulation application.
    @@ -6483,6 +6582,11 @@ export interface UpdateSimulationApplicationRequest { *The revision id for the simulation application.
    
*/ currentRevisionId?: string; + + /** + *The object that contains the Docker image URI for your simulation application.
+ */ + environment?: Environment; } export namespace UpdateSimulationApplicationRequest { @@ -6540,6 +6644,11 @@ export interface UpdateSimulationApplicationResponse { *The revision id of the simulation application.
*/ revisionId?: string; + + /** + *The object that contains the Docker image URI used for your simulation application.
+ */ + environment?: Environment; } export namespace UpdateSimulationApplicationResponse { diff --git a/clients/client-robomaker/protocols/Aws_restJson1.ts b/clients/client-robomaker/protocols/Aws_restJson1.ts index f109b540a976..f62e1618486d 100644 --- a/clients/client-robomaker/protocols/Aws_restJson1.ts +++ b/clients/client-robomaker/protocols/Aws_restJson1.ts @@ -180,6 +180,7 @@ import { DeploymentConfig, DeploymentJob, DeploymentLaunchConfig, + Environment, FailedCreateSimulationJobRequest, FailureSummary, Filter, @@ -525,6 +526,8 @@ export const serializeAws_restJson1CreateRobotApplicationCommand = async ( let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/createRobotApplication"; let body: any; body = JSON.stringify({ + ...(input.environment !== undefined && + input.environment !== null && { environment: serializeAws_restJson1Environment(input.environment, context) }), ...(input.name !== undefined && input.name !== null && { name: input.name }), ...(input.robotSoftwareSuite !== undefined && input.robotSoftwareSuite !== null && { @@ -560,6 +563,9 @@ export const serializeAws_restJson1CreateRobotApplicationVersionCommand = async ...(input.application !== undefined && input.application !== null && { application: input.application }), ...(input.currentRevisionId !== undefined && input.currentRevisionId !== null && { currentRevisionId: input.currentRevisionId }), + ...(input.imageDigest !== undefined && input.imageDigest !== null && { imageDigest: input.imageDigest }), + ...(input.s3Etags !== undefined && + input.s3Etags !== null && { s3Etags: serializeAws_restJson1S3Etags(input.s3Etags, context) }), }); return new __HttpRequest({ protocol, @@ -584,6 +590,8 @@ export const serializeAws_restJson1CreateSimulationApplicationCommand = async ( `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/createSimulationApplication"; let body: any; body = JSON.stringify({ + ...(input.environment !== undefined && + input.environment !== null && { environment: serializeAws_restJson1Environment(input.environment, context) }), ...(input.name !== undefined && input.name !== null && { name: input.name }), ...(input.renderingEngine !== undefined && input.renderingEngine !== null && { @@ -627,6 +635,9 @@ export const serializeAws_restJson1CreateSimulationApplicationVersionCommand = a ...(input.application !== undefined && input.application !== null && { application: input.application }), ...(input.currentRevisionId !== undefined && input.currentRevisionId !== null && { currentRevisionId: input.currentRevisionId }), + ...(input.imageDigest !== undefined && input.imageDigest !== null && { imageDigest: input.imageDigest }), + ...(input.s3Etags !== undefined && + input.s3Etags !== null && { s3Etags: serializeAws_restJson1S3Etags(input.s3Etags, context) }), }); return new __HttpRequest({ protocol, @@ -1754,6 +1765,8 @@ export const serializeAws_restJson1UpdateRobotApplicationCommand = async ( ...(input.application !== undefined && input.application !== null && { application: input.application }), ...(input.currentRevisionId !== undefined && input.currentRevisionId !== null && { currentRevisionId: input.currentRevisionId }), + ...(input.environment !== undefined && + input.environment !== null && { environment: serializeAws_restJson1Environment(input.environment, context) }), ...(input.robotSoftwareSuite !== undefined && input.robotSoftwareSuite !== null && { robotSoftwareSuite: serializeAws_restJson1RobotSoftwareSuite(input.robotSoftwareSuite, context), @@ -1787,6 +1800,8 @@ export const serializeAws_restJson1UpdateSimulationApplicationCommand = async ( ...(input.application !== undefined && input.application !== null && { application: input.application }), ...(input.currentRevisionId !== undefined && input.currentRevisionId !== null && { currentRevisionId: input.currentRevisionId }), + ...(input.environment !== undefined && + input.environment !== null && { environment: serializeAws_restJson1Environment(input.environment, context) }), ...(input.renderingEngine !== undefined && input.renderingEngine !== null && { renderingEngine: serializeAws_restJson1RenderingEngine(input.renderingEngine, context), @@ -2718,6 +2733,7 @@ export const deserializeAws_restJson1CreateRobotApplicationCommand = async ( const contents: CreateRobotApplicationCommandOutput = { $metadata: deserializeMetadata(output), arn: undefined, + environment: undefined, lastUpdatedAt: undefined, name: undefined, revisionId: undefined, @@ -2730,6 +2746,9 @@ export const deserializeAws_restJson1CreateRobotApplicationCommand = async ( if (data.arn !== undefined && data.arn !== null) { contents.arn = __expectString(data.arn); } + if (data.environment !== undefined && data.environment !== null) { + contents.environment = deserializeAws_restJson1Environment(data.environment, context); + } if (data.lastUpdatedAt !== undefined && data.lastUpdatedAt !== null) { contents.lastUpdatedAt = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.lastUpdatedAt))); } @@ -2841,6 +2860,7 @@ export const deserializeAws_restJson1CreateRobotApplicationVersionCommand = asyn const contents: CreateRobotApplicationVersionCommandOutput = { $metadata: deserializeMetadata(output), arn: undefined, + environment: undefined, lastUpdatedAt: undefined, name: undefined, revisionId: undefined, @@ -2852,6 +2872,9 @@ export const 
deserializeAws_restJson1CreateRobotApplicationVersionCommand = asyn if (data.arn !== undefined && data.arn !== null) { contents.arn = __expectString(data.arn); } + if (data.environment !== undefined && data.environment !== null) { + contents.environment = deserializeAws_restJson1Environment(data.environment, context); + } if (data.lastUpdatedAt !== undefined && data.lastUpdatedAt !== null) { contents.lastUpdatedAt = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.lastUpdatedAt))); } @@ -2952,6 +2975,7 @@ export const deserializeAws_restJson1CreateSimulationApplicationCommand = async const contents: CreateSimulationApplicationCommandOutput = { $metadata: deserializeMetadata(output), arn: undefined, + environment: undefined, lastUpdatedAt: undefined, name: undefined, renderingEngine: undefined, @@ -2966,6 +2990,9 @@ export const deserializeAws_restJson1CreateSimulationApplicationCommand = async if (data.arn !== undefined && data.arn !== null) { contents.arn = __expectString(data.arn); } + if (data.environment !== undefined && data.environment !== null) { + contents.environment = deserializeAws_restJson1Environment(data.environment, context); + } if (data.lastUpdatedAt !== undefined && data.lastUpdatedAt !== null) { contents.lastUpdatedAt = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.lastUpdatedAt))); } @@ -3086,6 +3113,7 @@ export const deserializeAws_restJson1CreateSimulationApplicationVersionCommand = const contents: CreateSimulationApplicationVersionCommandOutput = { $metadata: deserializeMetadata(output), arn: undefined, + environment: undefined, lastUpdatedAt: undefined, name: undefined, renderingEngine: undefined, @@ -3099,6 +3127,9 @@ export const deserializeAws_restJson1CreateSimulationApplicationVersionCommand = if (data.arn !== undefined && data.arn !== null) { contents.arn = __expectString(data.arn); } + if (data.environment !== undefined && data.environment !== null) { + contents.environment = deserializeAws_restJson1Environment(data.environment, context); + } if (data.lastUpdatedAt !== undefined && data.lastUpdatedAt !== null) { contents.lastUpdatedAt = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.lastUpdatedAt))); } @@ -4520,6 +4551,8 @@ export const deserializeAws_restJson1DescribeRobotApplicationCommand = async ( const contents: DescribeRobotApplicationCommandOutput = { $metadata: deserializeMetadata(output), arn: undefined, + environment: undefined, + imageDigest: undefined, lastUpdatedAt: undefined, name: undefined, revisionId: undefined, @@ -4532,6 +4565,12 @@ export const deserializeAws_restJson1DescribeRobotApplicationCommand = async ( if (data.arn !== undefined && data.arn !== null) { contents.arn = __expectString(data.arn); } + if (data.environment !== undefined && data.environment !== null) { + contents.environment = deserializeAws_restJson1Environment(data.environment, context); + } + if (data.imageDigest !== undefined && data.imageDigest !== null) { + contents.imageDigest = __expectString(data.imageDigest); + } if (data.lastUpdatedAt !== undefined && data.lastUpdatedAt !== null) { contents.lastUpdatedAt = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.lastUpdatedAt))); } @@ -4627,6 +4666,8 @@ export const deserializeAws_restJson1DescribeSimulationApplicationCommand = asyn const contents: DescribeSimulationApplicationCommandOutput = { $metadata: deserializeMetadata(output), arn: undefined, + environment: undefined, + imageDigest: undefined, lastUpdatedAt: undefined, name: undefined, renderingEngine: undefined, @@ 
-4641,6 +4682,12 @@ export const deserializeAws_restJson1DescribeSimulationApplicationCommand = asyn if (data.arn !== undefined && data.arn !== null) { contents.arn = __expectString(data.arn); } + if (data.environment !== undefined && data.environment !== null) { + contents.environment = deserializeAws_restJson1Environment(data.environment, context); + } + if (data.imageDigest !== undefined && data.imageDigest !== null) { + contents.imageDigest = __expectString(data.imageDigest); + } if (data.lastUpdatedAt !== undefined && data.lastUpdatedAt !== null) { contents.lastUpdatedAt = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.lastUpdatedAt))); } @@ -7060,6 +7107,7 @@ export const deserializeAws_restJson1UpdateRobotApplicationCommand = async ( const contents: UpdateRobotApplicationCommandOutput = { $metadata: deserializeMetadata(output), arn: undefined, + environment: undefined, lastUpdatedAt: undefined, name: undefined, revisionId: undefined, @@ -7071,6 +7119,9 @@ export const deserializeAws_restJson1UpdateRobotApplicationCommand = async ( if (data.arn !== undefined && data.arn !== null) { contents.arn = __expectString(data.arn); } + if (data.environment !== undefined && data.environment !== null) { + contents.environment = deserializeAws_restJson1Environment(data.environment, context); + } if (data.lastUpdatedAt !== undefined && data.lastUpdatedAt !== null) { contents.lastUpdatedAt = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.lastUpdatedAt))); } @@ -7171,6 +7222,7 @@ export const deserializeAws_restJson1UpdateSimulationApplicationCommand = async const contents: UpdateSimulationApplicationCommandOutput = { $metadata: deserializeMetadata(output), arn: undefined, + environment: undefined, lastUpdatedAt: undefined, name: undefined, renderingEngine: undefined, @@ -7184,6 +7236,9 @@ export const deserializeAws_restJson1UpdateSimulationApplicationCommand = async if (data.arn !== undefined && data.arn !== null) { contents.arn = __expectString(data.arn); } + if (data.environment !== undefined && data.environment !== null) { + contents.environment = deserializeAws_restJson1Environment(data.environment, context); + } if (data.lastUpdatedAt !== undefined && data.lastUpdatedAt !== null) { contents.lastUpdatedAt = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.lastUpdatedAt))); } @@ -7650,6 +7705,12 @@ const serializeAws_restJson1DeploymentLaunchConfig = (input: DeploymentLaunchCon }; }; +const serializeAws_restJson1Environment = (input: Environment, context: __SerdeContext): any => { + return { + ...(input.uri !== undefined && input.uri !== null && { uri: input.uri }), + }; +}; + const serializeAws_restJson1EnvironmentVariableMap = ( input: { [key: string]: string }, context: __SerdeContext @@ -7805,6 +7866,17 @@ const serializeAws_restJson1RobotSoftwareSuite = (input: RobotSoftwareSuite, con }; }; +const serializeAws_restJson1S3Etags = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_restJson1S3Keys = (input: string[], context: __SerdeContext): any => { return input .filter((e: any) => e != null) @@ -8244,6 +8316,12 @@ const deserializeAws_restJson1DeploymentLaunchConfig = ( } as any; }; +const deserializeAws_restJson1Environment = (output: any, context: __SerdeContext): Environment => { + return { + uri: __expectString(output.uri), + } as any; +}; + const 
deserializeAws_restJson1EnvironmentVariableMap = ( output: any, context: __SerdeContext diff --git a/clients/client-s3/S3.ts b/clients/client-s3/S3.ts index 3c00e17ab908..17322ac49b79 100644 --- a/clients/client-s3/S3.ts +++ b/clients/client-s3/S3.ts @@ -1016,11 +1016,10 @@ export class S3 extends S3Client { * *You can optionally request server-side encryption. For server-side encryption, Amazon S3
* encrypts your data as it writes it to disks in its data centers and decrypts it when you
- * access it. You can provide your own encryption key, or use Amazon Web Services Key Management Service (Amazon Web Services
- * KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide
+ * access it. You can provide your own encryption key, or use Amazon Web Services KMS keys or Amazon S3-managed encryption keys. If you choose to provide
* your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to
* initiate the upload by using CreateMultipartUpload
.
              - * To perform a multipart upload with encryption using an Amazon Web Services KMS CMK, the requester must
              + * To perform a multipart upload with encryption using an Amazon Web Services KMS key, the requester must
              
* have permission to the kms:Decrypt
and kms:GenerateDataKey*
* actions on the key. These permissions are required because Amazon S3 must decrypt and read data
* from the encrypted file parts before it completes the multipart upload. For more
@@ -1028,7 +1027,7 @@ export class S3 extends S3Client {
* and permissions in the Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account - * as the Amazon Web Services KMS CMK, then you must have these permissions on the key policy. If your IAM + * as the KMS key, then you must have these permissions on the key policy. If your IAM * user or role belongs to a different account than the key, then you must have the * permissions on both the key policy and your IAM user or role.
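              For reference, a minimal TypeScript sketch of initiating an SSE-KMS multipart upload with the v3 client, matching the permissions discussion above; the bucket, key, and KMS key ARN are hypothetical placeholders:

              ```ts
              import { S3Client, CreateMultipartUploadCommand } from "@aws-sdk/client-s3";

              const client = new S3Client({ region: "us-west-2" });

              async function startEncryptedUpload(): Promise<string | undefined> {
                // Request SSE-KMS at initiation. The caller needs kms:Decrypt and
                // kms:GenerateDataKey* on the key, or the upload cannot complete.
                const { UploadId } = await client.send(
                  new CreateMultipartUploadCommand({
                    Bucket: "example-bucket", // hypothetical bucket
                    Key: "backups/archive.bin", // hypothetical key
                    ServerSideEncryption: "aws:kms",
                    SSEKMSKeyId: "arn:aws:kms:us-west-2:111122223333:key/EXAMPLE", // hypothetical key ARN
                  })
                );
                return UploadId;
              }
              ```
              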
* @@ -1069,7 +1068,7 @@ export class S3 extends S3Client { * encryption keys or provide your own encryption key. *Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored + *
              Use encryption keys managed by Amazon S3 or customer managed keys stored * in Amazon Web Services Key Management Service (Amazon Web Services KMS) – If you want Amazon Web Services to manage the keys * used to encrypt data, specify the following headers in the request.
              
*If you specify x-amz-server-side-encryption:aws:kms
, but
* don't provide x-amz-server-side-encryption-aws-kms-key-id
,
- * Amazon S3 uses the Amazon Web Services managed CMK in Amazon Web Services KMS to protect the data.
All GET and PUT requests for an object protected by Amazon Web Services KMS fail if * you don't make them with SSL or by using SigV4.
*For more information about server-side encryption with CMKs stored in Amazon Web Services - * KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon Web Services - * KMS.
              + *For more information about server-side encryption with KMS keys (SSE-KMS), + * see Protecting Data Using Server-Side Encryption with KMS keys.
              
*Use customer-provided encryption keys – If you want to manage your own @@ -1110,9 +1108,8 @@ export class S3 extends S3Client { *
x-amz-server-side-encryption-customer-key-MD5
*For more information about server-side encryption with CMKs stored in Amazon Web Services - * KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon Web Services - * KMS.
+ *For more information about server-side encryption with KMS keys (SSE-KMS), + * see Protecting Data Using Server-Side Encryption with KMS keys.
*Deletes the S3 Intelligent-Tiering configuration from the specified bucket.
- *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.
- *The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
- *If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
+ *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
+ *The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
+ *For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
*Operations related to
* DeleteBucketIntelligentTieringConfiguration
include:
Gets the S3 Intelligent-Tiering configuration from the specified bucket.
- *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.
- *The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
- *If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
+ *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
+ *The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
+ *For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
*Operations related to
* GetBucketIntelligentTieringConfiguration
include:
Encryption request headers, like x-amz-server-side-encryption
, should not
- * be sent for GET requests if your object uses server-side encryption with CMKs stored in Amazon Web Services
- * KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your
+ * be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS)
+ * or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your
* object does use these types of keys, you’ll get an HTTP 400 BadRequest error.
If you encrypt an object by using server-side encryption with customer-provided * encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, @@ -4018,8 +4015,8 @@ export class S3 extends S3Client { *
Encryption request headers, like x-amz-server-side-encryption
, should
- * not be sent for GET requests if your object uses server-side encryption with CMKs stored
- * in Amazon Web Services KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys
+ * not be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS)
+ * or server-side encryption with Amazon S3–managed encryption keys
* (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest
* error.
Lists the S3 Intelligent-Tiering configuration from the specified bucket.
- *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.
- *The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
- *If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
+ *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
+ *The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
+ *For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
*Operations related to
* ListBucketIntelligentTieringConfigurations
include:
This action uses the encryption
subresource to configure default
* encryption and Amazon S3 Bucket Key for an existing bucket.
Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys - * (SSE-S3) or Amazon Web Services KMS customer master keys (SSE-KMS). If you specify default encryption + * (SSE-S3) or customer managed keys (SSE-KMS). If you specify default encryption * using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default * encryption, see Amazon S3 default bucket encryption * in the Amazon S3 User Guide. For more information about S3 Bucket Keys, @@ -5374,9 +5371,9 @@ export class S3 extends S3Client { /** *
              Puts an S3 Intelligent-Tiering configuration to the specified bucket. * You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.
              
- *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.
- *The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
- *If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
+ *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
+ *The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
+ *For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
*Operations related to
* PutBucketIntelligentTieringConfiguration
include:
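              A hedged sketch of writing one such configuration, enabling the optional archive tiers for a single prefix; the bucket and configuration identifiers are hypothetical:

              ```ts
              import { S3Client, PutBucketIntelligentTieringConfigurationCommand } from "@aws-sdk/client-s3";

              const client = new S3Client({ region: "us-west-2" });

              async function enableArchiveTiering() {
                // One of up to 1,000 configurations per bucket; objects under the
                // prefix become eligible for the opt-in archive access tiers.
                await client.send(
                  new PutBucketIntelligentTieringConfigurationCommand({
                    Bucket: "example-bucket", // hypothetical bucket
                    Id: "archive-logs", // hypothetical configuration name
                    IntelligentTieringConfiguration: {
                      Id: "archive-logs",
                      Status: "Enabled",
                      Filter: { Prefix: "logs/" },
                      Tierings: [
                        { Days: 90, AccessTier: "ARCHIVE_ACCESS" },
                        { Days: 180, AccessTier: "DEEP_ARCHIVE_ACCESS" },
                      ],
                    },
                  })
                );
              }
              ```
              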
- * PutBucketMetricsConfiguration + * GetBucketMetricsConfiguration *
*By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side
- * encryption with CMKs stored in Amazon Web Services KMS. To replicate Amazon Web Services KMS-encrypted objects, add the
+ * encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted objects, add the
* following: SourceSelectionCriteria
, SseKmsEncryptedObjects
,
* Status
, EncryptionConfiguration
, and
* ReplicaKmsKeyID
. For information about replication configuration, see
* Replicating Objects
- * Created with SSE Using CMKs stored in Amazon Web Services KMS.
For information on PutBucketReplication
errors, see List of
* replication-related error codes
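              A sketch of a replication rule wiring together the fields named above (SourceSelectionCriteria, SseKmsEncryptedObjects, EncryptionConfiguration, ReplicaKmsKeyID); the role, buckets, and key ARN are hypothetical:

              ```ts
              import { S3Client, PutBucketReplicationCommand } from "@aws-sdk/client-s3";

              const client = new S3Client({ region: "us-west-2" });

              async function replicateKmsEncryptedObjects() {
                // Replicate only SSE-KMS objects and re-encrypt replicas with a
                // destination-side KMS key.
                await client.send(
                  new PutBucketReplicationCommand({
                    Bucket: "source-bucket", // hypothetical bucket
                    ReplicationConfiguration: {
                      Role: "arn:aws:iam::111122223333:role/replication-role", // hypothetical role
                      Rules: [
                        {
                          Status: "Enabled",
                          Priority: 1,
                          Filter: { Prefix: "" },
                          DeleteMarkerReplication: { Status: "Disabled" },
                          SourceSelectionCriteria: {
                            SseKmsEncryptedObjects: { Status: "Enabled" },
                          },
                          Destination: {
                            Bucket: "arn:aws:s3:::destination-bucket", // hypothetical bucket ARN
                            EncryptionConfiguration: {
                              ReplicaKmsKeyID: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE", // hypothetical key
                            },
                          },
                        },
                      ],
                    },
                  })
                );
              }
              ```
              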
@@ -7681,7 +7678,7 @@ export class S3 extends S3Client {
* (Using Customer-Provided Encryption Keys) in the
* Amazon S3 User Guide.
For objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and
- * customer master keys (CMKs) stored in Amazon Web Services Key Management Service (SSE-KMS),
+ * Amazon Web Services KMS keys (SSE-KMS),
* server-side encryption is handled transparently, so you don't need to specify
* anything. For more information about server-side encryption, including SSE-S3 and
* SSE-KMS, see Protecting Data Using
@@ -8150,9 +8147,9 @@ export class S3 extends S3Client {
/**
* Passes transformed
- * objects to a GetObject
operation when using Object Lambda Access Points. For information about
- * Object Lambda Access Points, see Transforming objects with
- * Object Lambda Access Points in the Amazon S3 User Guide.GetObject
operation when using Object Lambda access points. For information about
+ * Object Lambda access points, see Transforming objects with
+ * Object Lambda access points in the Amazon S3 User Guide.
This operation supports metadata that can be returned by GetObject, in addition to
* RequestRoute
, RequestToken
, StatusCode
,
* ErrorCode
, and ErrorMessage
. The GetObject
@@ -8167,7 +8164,7 @@ export class S3 extends S3Client {
*
Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to detect and redact * personally identifiable information (PII) and decompress S3 objects. These Lambda functions * are available in the Amazon Web Services Serverless Application Repository, and can be selected through the Amazon Web Services Management Console when you create your - * Object Lambda Access Point.
+ * Object Lambda access point. *Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically detects personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
*Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically redacts personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
*Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is equipped to decompress objects stored in S3 in one of six compressed file formats including bzip2, gzip, snappy, zlib, zstandard and ZIP.
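              For orientation, a minimal Object Lambda handler shape in TypeScript; it assumes the standard getObjectContext event fields and Node 18+ global fetch, with a trivial stand-in transform:

              ```ts
              import { S3Client, WriteGetObjectResponseCommand } from "@aws-sdk/client-s3";

              const client = new S3Client({ region: "us-west-2" });

              export async function handler(event: any) {
                // Route and token come from the GetObject event delivered to the
                // Lambda function behind the Object Lambda access point.
                const { outputRoute, outputToken, inputS3Url } = event.getObjectContext;
                const original = await fetch(inputS3Url); // presigned URL for the untransformed object
                const body = (await original.text()).toUpperCase(); // trivial stand-in transform
                await client.send(
                  new WriteGetObjectResponseCommand({
                    RequestRoute: outputRoute,
                    RequestToken: outputToken,
                    Body: body,
                    StatusCode: 200,
                  })
                );
                return { statusCode: 200 };
              }
              ```
              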
diff --git a/clients/client-s3/commands/CreateMultipartUploadCommand.ts b/clients/client-s3/commands/CreateMultipartUploadCommand.ts index b0bc68ba7394..d7b367edaf28 100644 --- a/clients/client-s3/commands/CreateMultipartUploadCommand.ts +++ b/clients/client-s3/commands/CreateMultipartUploadCommand.ts @@ -56,11 +56,10 @@ export interface CreateMultipartUploadCommandOutput extends CreateMultipartUploa * *You can optionally request server-side encryption. For server-side encryption, Amazon S3
* encrypts your data as it writes it to disks in its data centers and decrypts it when you
- * access it. You can provide your own encryption key, or use Amazon Web Services Key Management Service (Amazon Web Services
- * KMS) customer master keys (CMKs) or Amazon S3-managed encryption keys. If you choose to provide
+ * access it. You can provide your own encryption key, or use Amazon Web Services KMS keys or Amazon S3-managed encryption keys. If you choose to provide
* your own encryption key, the request headers you provide in UploadPart and UploadPartCopy requests must match the headers you used in the request to
* initiate the upload by using CreateMultipartUpload
.
To perform a multipart upload with encryption using an Amazon Web Services KMS CMK, the requester must + *
To perform a multipart upload with encryption using an Amazon Web Services KMS key, the requester must
* have permission to the kms:Decrypt
and kms:GenerateDataKey*
* actions on the key. These permissions are required because Amazon S3 must decrypt and read data
* from the encrypted file parts before it completes the multipart upload. For more
@@ -68,7 +67,7 @@ export interface CreateMultipartUploadCommandOutput extends CreateMultipartUploa
* and permissions in the Amazon S3 User Guide.
If your Identity and Access Management (IAM) user or role is in the same Amazon Web Services account - * as the Amazon Web Services KMS CMK, then you must have these permissions on the key policy. If your IAM + * as the KMS key, then you must have these permissions on the key policy. If your IAM * user or role belongs to a different account than the key, then you must have the * permissions on both the key policy and your IAM user or role.
* @@ -109,7 +108,7 @@ export interface CreateMultipartUploadCommandOutput extends CreateMultipartUploa * encryption keys or provide your own encryption key. *Use encryption keys managed by Amazon S3 or customer master keys (CMKs) stored + *
              Use encryption keys managed by Amazon S3 or customer managed keys stored * in Amazon Web Services Key Management Service (Amazon Web Services KMS) – If you want Amazon Web Services to manage the keys * used to encrypt data, specify the following headers in the request.
              
*If you specify x-amz-server-side-encryption:aws:kms
, but
* don't provide x-amz-server-side-encryption-aws-kms-key-id
,
- * Amazon S3 uses the Amazon Web Services managed CMK in Amazon Web Services KMS to protect the data.
All GET and PUT requests for an object protected by Amazon Web Services KMS fail if * you don't make them with SSL or by using SigV4.
*For more information about server-side encryption with CMKs stored in Amazon Web Services - * KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon Web Services - * KMS.
              + *For more information about server-side encryption with KMS keys (SSE-KMS), + * see Protecting Data Using Server-Side Encryption with KMS keys.
              
*Use customer-provided encryption keys – If you want to manage your own @@ -150,9 +148,8 @@ export interface CreateMultipartUploadCommandOutput extends CreateMultipartUploa *
x-amz-server-side-encryption-customer-key-MD5
*For more information about server-side encryption with CMKs stored in Amazon Web Services - * KMS (SSE-KMS), see Protecting Data Using Server-Side Encryption with CMKs stored in Amazon Web Services - * KMS.
+ *For more information about server-side encryption with KMS keys (SSE-KMS), + * see Protecting Data Using Server-Side Encryption with KMS keys.
*Deletes the S3 Intelligent-Tiering configuration from the specified bucket.
- *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.
- *The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
- *If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
+ *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
+ *The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
+ *For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
*Operations related to
* DeleteBucketIntelligentTieringConfiguration
include:
Gets the S3 Intelligent-Tiering configuration from the specified bucket.
- *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.
- *The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
- *If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
+ *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
+ *The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
+ *For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
*Operations related to
* GetBucketIntelligentTieringConfiguration
include:
Encryption request headers, like x-amz-server-side-encryption
, should not
- * be sent for GET requests if your object uses server-side encryption with CMKs stored in Amazon Web Services
- * KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your
+ * be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS)
+ * or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your
* object does use these types of keys, you’ll get an HTTP 400 BadRequest error.
If you encrypt an object by using server-side encryption with customer-provided * encryption keys (SSE-C) when you store the object in Amazon S3, then when you GET the object, diff --git a/clients/client-s3/commands/HeadObjectCommand.ts b/clients/client-s3/commands/HeadObjectCommand.ts index e2d52579c7ef..dd51c64c3b4d 100644 --- a/clients/client-s3/commands/HeadObjectCommand.ts +++ b/clients/client-s3/commands/HeadObjectCommand.ts @@ -53,8 +53,8 @@ export interface HeadObjectCommandOutput extends HeadObjectOutput, __MetadataBea *
Encryption request headers, like x-amz-server-side-encryption
, should
- * not be sent for GET requests if your object uses server-side encryption with CMKs stored
- * in Amazon Web Services KMS (SSE-KMS) or server-side encryption with Amazon S3–managed encryption keys
+ * not be sent for GET requests if your object uses server-side encryption with KMS keys (SSE-KMS)
+ * or server-side encryption with Amazon S3–managed encryption keys
* (SSE-S3). If your object does use these types of keys, you’ll get an HTTP 400 BadRequest
* error.
Lists the S3 Intelligent-Tiering configuration from the specified bucket.
- *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.
- *The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
- *If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
+ *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
+ *The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
+ *For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
*Operations related to
* ListBucketIntelligentTieringConfigurations
include:
This action uses the encryption
subresource to configure default
* encryption and Amazon S3 Bucket Key for an existing bucket.
Default encryption for a bucket can use server-side encryption with Amazon S3-managed keys - * (SSE-S3) or Amazon Web Services KMS customer master keys (SSE-KMS). If you specify default encryption + * (SSE-S3) or customer managed keys (SSE-KMS). If you specify default encryption * using SSE-KMS, you can also configure Amazon S3 Bucket Key. For information about default * encryption, see Amazon S3 default bucket encryption * in the Amazon S3 User Guide. For more information about S3 Bucket Keys, diff --git a/clients/client-s3/commands/PutBucketIntelligentTieringConfigurationCommand.ts b/clients/client-s3/commands/PutBucketIntelligentTieringConfigurationCommand.ts index 6140197540b0..d61dc80ccaa0 100644 --- a/clients/client-s3/commands/PutBucketIntelligentTieringConfigurationCommand.ts +++ b/clients/client-s3/commands/PutBucketIntelligentTieringConfigurationCommand.ts @@ -25,9 +25,9 @@ export interface PutBucketIntelligentTieringConfigurationCommandOutput extends _ /** *
              Puts an S3 Intelligent-Tiering configuration to the specified bucket. * You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.
              
- *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without additional operational overhead. S3 Intelligent-Tiering delivers automatic cost savings by moving data between access tiers, when access patterns change.
- *The S3 Intelligent-Tiering storage class is suitable for objects larger than 128 KB that you plan to store for at least 30 days. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the frequent access tier rates in the S3 Intelligent-Tiering storage class.
- *If you delete an object before the end of the 30-day minimum storage duration period, you are charged for 30 days. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
+ *The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
+ *The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
+ *For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
*Operations related to
* PutBucketIntelligentTieringConfiguration
include:
- * PutBucketMetricsConfiguration + * GetBucketMetricsConfiguration *
*By default, Amazon S3 doesn't replicate objects that are stored at rest using server-side
- * encryption with CMKs stored in Amazon Web Services KMS. To replicate Amazon Web Services KMS-encrypted objects, add the
+ * encryption with KMS keys. To replicate Amazon Web Services KMS-encrypted objects, add the
* following: SourceSelectionCriteria
, SseKmsEncryptedObjects
,
* Status
, EncryptionConfiguration
, and
* ReplicaKmsKeyID
. For information about replication configuration, see
* Replicating Objects
- * Created with SSE Using CMKs stored in Amazon Web Services KMS.
For information on PutBucketReplication
errors, see List of
* replication-related error codes
diff --git a/clients/client-s3/commands/SelectObjectContentCommand.ts b/clients/client-s3/commands/SelectObjectContentCommand.ts
index c59bea2e7c67..f21f2cd7ff5b 100644
--- a/clients/client-s3/commands/SelectObjectContentCommand.ts
+++ b/clients/client-s3/commands/SelectObjectContentCommand.ts
@@ -77,7 +77,7 @@ export interface SelectObjectContentCommandOutput extends SelectObjectContentOut
* (Using Customer-Provided Encryption Keys) in the
* Amazon S3 User Guide.
For objects that are encrypted with Amazon S3 managed encryption keys (SSE-S3) and
- * customer master keys (CMKs) stored in Amazon Web Services Key Management Service (SSE-KMS),
+ * Amazon Web Services KMS keys (SSE-KMS),
* server-side encryption is handled transparently, so you don't need to specify
* anything. For more information about server-side encryption, including SSE-S3 and
* SSE-KMS, see Protecting Data Using
diff --git a/clients/client-s3/commands/WriteGetObjectResponseCommand.ts b/clients/client-s3/commands/WriteGetObjectResponseCommand.ts
index 0b40c5a7f9e7..f6a35d7ea643 100644
--- a/clients/client-s3/commands/WriteGetObjectResponseCommand.ts
+++ b/clients/client-s3/commands/WriteGetObjectResponseCommand.ts
@@ -32,9 +32,9 @@ export interface WriteGetObjectResponseCommandOutput extends __MetadataBearer {}
/**
* Passes transformed
- * objects to a GetObject
operation when using Object Lambda Access Points. For information about
- * Object Lambda Access Points, see Transforming objects with
- * Object Lambda Access Points in the Amazon S3 User Guide.GetObject
operation when using Object Lambda access points. For information about
+ * Object Lambda access points, see Transforming objects with
+ * Object Lambda access points in the Amazon S3 User Guide.
This operation supports metadata that can be returned by GetObject, in addition to
* RequestRoute
, RequestToken
, StatusCode
,
* ErrorCode
, and ErrorMessage
. The GetObject
@@ -49,7 +49,7 @@ export interface WriteGetObjectResponseCommandOutput extends __MetadataBearer {}
*
Amazon Web Services provides some prebuilt Lambda functions that you can use with S3 Object Lambda to detect and redact * personally identifiable information (PII) and decompress S3 objects. These Lambda functions * are available in the Amazon Web Services Serverless Application Repository, and can be selected through the Amazon Web Services Management Console when you create your - * Object Lambda Access Point.
+ * Object Lambda access point. *Example 1: PII Access Control - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically detects personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
*Example 2: PII Redaction - This Lambda function uses Amazon Comprehend, a natural language processing (NLP) service using machine learning to find insights and relationships in text. It automatically redacts personally identifiable information (PII) such as names, addresses, dates, credit card numbers, and social security numbers from documents in your Amazon S3 bucket.
*Example 3: Decompression - The Lambda function S3ObjectLambdaDecompression, is equipped to decompress objects stored in S3 in one of six compressed file formats including bzip2, gzip, snappy, zlib, zstandard and ZIP.
diff --git a/clients/client-s3/models/models_0.ts b/clients/client-s3/models/models_0.ts index f3cb0fc6cd01..428ee59dee33 100644 --- a/clients/client-s3/models/models_0.ts +++ b/clients/client-s3/models/models_0.ts @@ -331,7 +331,7 @@ export interface CompleteMultipartUploadOutput { /** *If you specified server-side encryption either with an Amazon S3-managed encryption key or an - * Amazon Web Services KMS customer master key (CMK) in your initiate multipart upload request, the response + * Amazon Web Services KMS key in your initiate multipart upload request, the response * includes this header. It confirms the encryption algorithm that Amazon S3 used to encrypt the * object.
*/ @@ -345,7 +345,7 @@ export interface CompleteMultipartUploadOutput { /** *If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric - * customer managed customer master key (CMK) that was used for the object.
+ * customer managed key that was used for the object. */ SSEKMSKeyId?: string; @@ -527,7 +527,7 @@ export interface CopyObjectOutput { /** *If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric - * customer managed customer master key (CMK) that was used for the object.
+ * customer managed key that was used for the object. */ SSEKMSKeyId?: string; @@ -1004,6 +1004,11 @@ export interface CreateBucketRequest { */ Bucket: string | undefined; + /** + *The configuration information for the bucket.
+ */ + CreateBucketConfiguration?: CreateBucketConfiguration; + /** *Allows grantee the read, write, read ACP, and write ACP permissions on the * bucket.
@@ -1035,11 +1040,6 @@ export interface CreateBucketRequest { *Specifies whether you want S3 Object Lock to be enabled for the new bucket.
*/ ObjectLockEnabledForBucket?: boolean; - - /** - *The configuration information for the bucket.
- */ - CreateBucketConfiguration?: CreateBucketConfiguration; } export namespace CreateBucketRequest { @@ -1109,7 +1109,7 @@ export interface CreateMultipartUploadOutput { /** *If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric - * customer managed customer master key (CMK) that was used for the object.
+ * customer managed key that was used for the object. */ SSEKMSKeyId?: string; @@ -1270,7 +1270,7 @@ export interface CreateMultipartUploadRequest { SSECustomerKeyMD5?: string; /** - *Specifies the ID of the symmetric customer managed Amazon Web Services KMS CMK to use for object + *
Specifies the ID of the symmetric customer managed key to use for object * encryption. All GET and PUT requests for an object protected by Amazon Web Services KMS will fail if not * made via SSL or using SigV4. For information about configuring using any of the officially * supported Amazon Web Services SDKs and Amazon Web Services CLI, see Specifying the Signature Version in Request Authentication @@ -3740,6 +3740,11 @@ export interface DeleteObjectsRequest { */ Bucket: string | undefined; + /** + *
Container for the request.
+ */ + Delete: Delete | undefined; + /** *The concatenation of the authentication device's serial number, a space, and the value * that is displayed on your authentication device. Required to permanently delete a versioned @@ -3766,11 +3771,6 @@ export interface DeleteObjectsRequest { *
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container for the request.
- */ - Delete: Delete | undefined; } export namespace DeleteObjectsRequest { @@ -4660,8 +4660,8 @@ export namespace GetBucketIntelligentTieringConfigurationRequest { */ export interface SSEKMS { /** - *Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed - * customer master key (CMK) to use for encrypting inventory reports.
+ *Specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed key + * to use for encrypting inventory reports.
*/ KeyId: string | undefined; } @@ -5464,6 +5464,11 @@ export interface MetricsAndOperator { *The list of tags used when evaluating an AND predicate.
*/ Tags?: Tag[]; + + /** + *The access point ARN used when evaluating an AND predicate.
+ */ + AccessPointArn?: string; } export namespace MetricsAndOperator { @@ -5477,10 +5482,11 @@ export namespace MetricsAndOperator { /** *Specifies a metrics configuration filter. The metrics configuration only includes - * objects that meet the filter's criteria. A filter must be a prefix, a tag, or a conjunction - * (MetricsAndOperator).
+ * objects that meet the filter's criteria. A filter must be a prefix, an object tag, an access point ARN, or a conjunction + * (MetricsAndOperator). For more information, see PutBucketMetricsConfiguration. */ export type MetricsFilter = + | MetricsFilter.AccessPointArnMember | MetricsFilter.AndMember | MetricsFilter.PrefixMember | MetricsFilter.TagMember @@ -5493,6 +5499,7 @@ export namespace MetricsFilter { export interface PrefixMember { Prefix: string; Tag?: never; + AccessPointArn?: never; And?: never; $unknown?: never; } @@ -5503,6 +5510,18 @@ export namespace MetricsFilter { export interface TagMember { Prefix?: never; Tag: Tag; + AccessPointArn?: never; + And?: never; + $unknown?: never; + } + + /** + *The access point ARN used when evaluating a metrics filter.
+ */ + export interface AccessPointArnMember { + Prefix?: never; + Tag?: never; + AccessPointArn: string; And?: never; $unknown?: never; } @@ -5515,6 +5534,7 @@ export namespace MetricsFilter { export interface AndMember { Prefix?: never; Tag?: never; + AccessPointArn?: never; And: MetricsAndOperator; $unknown?: never; } @@ -5522,6 +5542,7 @@ export namespace MetricsFilter { export interface $UnknownMember { Prefix?: never; Tag?: never; + AccessPointArn?: never; And?: never; $unknown: [string, any]; } @@ -5529,6 +5550,7 @@ export namespace MetricsFilter { export interface VisitorSpecifies a metrics configuration filter. The metrics configuration will only include - * objects that meet the filter's criteria. A filter must be a prefix, a tag, or a conjunction + * objects that meet the filter's criteria. A filter must be a prefix, an object tag, an access point ARN, or a conjunction * (MetricsAndOperator).
*/ Filter?: MetricsFilter; @@ -6481,7 +6504,7 @@ export namespace SseKmsEncryptedObjects { *A container that describes additional filters for identifying the source objects that * you want to replicate. You can choose to enable or disable the replication of these * objects. Currently, Amazon S3 supports only the filter that you can specify for objects created - * with server-side encryption using a customer master key (CMK) stored in Amazon Web Services Key Management + * with server-side encryption using a customer managed key stored in Amazon Web Services Key Management * Service (SSE-KMS).
*/ export interface SourceSelectionCriteria { @@ -6567,7 +6590,7 @@ export interface ReplicationRule { *A container that describes additional filters for identifying the source objects that * you want to replicate. You can choose to enable or disable the replication of these * objects. Currently, Amazon S3 supports only the filter that you can specify for objects created - * with server-side encryption using a customer master key (CMK) stored in Amazon Web Services Key Management + * with server-side encryption using a customer managed key stored in Amazon Web Services Key Management * Service (SSE-KMS).
*/ SourceSelectionCriteria?: SourceSelectionCriteria; @@ -7201,7 +7224,7 @@ export interface GetObjectOutput { /** *If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric - * customer managed customer master key (CMK) that was used for the object.
+ * customer managed key that was used for the object. */ SSEKMSKeyId?: string; @@ -7269,6 +7292,7 @@ export interface GetObjectRequest { /** *The bucket name containing the object.
*When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
+ *When using an Object Lambda access point the hostname takes the form AccessPointName-AccountId.s3-object-lambda.Region.amazonaws.com.
*When using this action with Amazon S3 on Outposts, you must direct requests to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When using this action using S3 on Outposts through the Amazon Web Services SDKs, you provide the Outposts bucket ARN in place of the bucket name. For more information about S3 on Outposts ARNs, see Using S3 on Outposts in the Amazon S3 User Guide.
*/ Bucket: string | undefined; @@ -8171,8 +8195,8 @@ export interface HeadObjectOutput { WebsiteRedirectLocation?: string; /** - *If the object is stored using server-side encryption either with an Amazon Web Services KMS customer - * master key (CMK) or an Amazon S3-managed encryption key, the response includes this header with + *
If the object is stored using server-side encryption either with an Amazon Web Services KMS key or + * an Amazon S3-managed encryption key, the response includes this header with * the value of the server-side encryption algorithm used when storing this object in Amazon * S3 (for example, AES256, aws:kms).
*/ @@ -8198,7 +8222,7 @@ export interface HeadObjectOutput { /** *If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric - * customer managed customer master key (CMK) that was used for the object.
+ * customer managed key that was used for the object. */ SSEKMSKeyId?: string; @@ -9854,14 +9878,14 @@ export interface PutBucketAccelerateConfigurationRequest { Bucket: string | undefined; /** - *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container for setting the transfer acceleration state.
*/ - ExpectedBucketOwner?: string; + AccelerateConfiguration: AccelerateConfiguration | undefined; /** - *Container for setting the transfer acceleration state.
+ *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Contains the elements that set the ACL permissions for an object per grantee.
+ */ + AccessControlPolicy?: AccessControlPolicy; + /** *The bucket to which to apply the ACL.
*/ @@ -9925,11 +9954,6 @@ export interface PutBucketAclRequest { *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Contains the elements that set the ACL permissions for an object per grantee.
- */ - AccessControlPolicy?: AccessControlPolicy; } export namespace PutBucketAclRequest { @@ -9953,14 +9977,14 @@ export interface PutBucketAnalyticsConfigurationRequest { Id: string | undefined; /** - *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The configuration and any analyses for the analytics filter.
*/ - ExpectedBucketOwner?: string; + AnalyticsConfiguration: AnalyticsConfiguration | undefined; /** - *The configuration and any analyses for the analytics filter.
+ *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more + * information, see Enabling Cross-Origin Resource + * Sharing in the Amazon S3 User Guide.
+ */ + CORSConfiguration: CORSConfiguration | undefined; + /** *The base64-encoded 128-bit MD5 digest of the data. This header must be used as a message * integrity check to verify that the request body was not corrupted in transit. For more @@ -10017,13 +10048,6 @@ export interface PutBucketCorsRequest { *
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Describes the cross-origin access configuration for objects in an Amazon S3 bucket. For more - * information, see Enabling Cross-Origin Resource - * Sharing in the Amazon S3 User Guide.
- */ - CORSConfiguration: CORSConfiguration | undefined; } export namespace PutBucketCorsRequest { @@ -10038,7 +10062,7 @@ export namespace PutBucketCorsRequest { export interface PutBucketEncryptionRequest { /** *Specifies default encryption for a bucket using server-side encryption with Amazon S3-managed - * keys (SSE-S3) or customer master keys stored in Amazon Web Services KMS (SSE-KMS). For information about + * keys (SSE-S3) or customer managed keys (SSE-KMS). For information about * the Amazon S3 default encryption feature, see Amazon S3 Default Bucket Encryption * in the Amazon S3 User Guide.
*/ @@ -10051,14 +10075,14 @@ export interface PutBucketEncryptionRequest { ContentMD5?: string; /** - *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Specifies the default server-side-encryption configuration.
*/ - ExpectedBucketOwner?: string; + ServerSideEncryptionConfiguration: ServerSideEncryptionConfiguration | undefined; /** - *Specifies the default server-side-encryption configuration.
+ *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Specifies the inventory configuration.
*/ - ExpectedBucketOwner?: string; + InventoryConfiguration: InventoryConfiguration | undefined; /** - *Specifies the inventory configuration.
+ *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container for lifecycle rules. You can add as many as 1,000 rules.
*/ - ExpectedBucketOwner?: string; + LifecycleConfiguration?: BucketLifecycleConfiguration; /** - *Container for lifecycle rules. You can add as many as 1,000 rules.
+ *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container for logging status information.
+ */ + BucketLoggingStatus: BucketLoggingStatus | undefined; + /** *The MD5 hash of the PutBucketLogging
request body.
For requests made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated automatically.
@@ -10223,11 +10252,6 @@ export interface PutBucketLoggingRequest { *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container for logging status information.
- */ - BucketLoggingStatus: BucketLoggingStatus | undefined; } export namespace PutBucketLoggingRequest { @@ -10251,14 +10275,14 @@ export interface PutBucketMetricsConfigurationRequest { Id: string | undefined; /** - *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Specifies the metrics configuration.
*/ - ExpectedBucketOwner?: string; + MetricsConfiguration: MetricsConfiguration | undefined; /** - *Specifies the metrics configuration.
+ *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
A container for specifying the notification configuration of the bucket. If this element * is empty, notifications are turned off for the bucket.
*/ NotificationConfiguration: NotificationConfiguration | undefined; + + /** + *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
A container for replication rules. You can add up to 1,000 rules. The maximum size of a + * replication configuration is 2 MB.
+ */ + ReplicationConfiguration: ReplicationConfiguration | undefined; + /** *A token to allow Object Lock to be enabled for an existing bucket.
*/ @@ -10394,12 +10424,6 @@ export interface PutBucketReplicationRequest { *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
A container for replication rules. You can add up to 1,000 rules. The maximum size of a - * replication configuration is 2 MB.
- */ - ReplicationConfiguration: ReplicationConfiguration | undefined; } export namespace PutBucketReplicationRequest { @@ -10449,14 +10473,14 @@ export interface PutBucketRequestPaymentRequest { ContentMD5?: string; /** - *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container for Payer.
*/ - ExpectedBucketOwner?: string; + RequestPaymentConfiguration: RequestPaymentConfiguration | undefined; /** - *Container for Payer.
+ *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container for the TagSet
and Tag
elements.
Container for the TagSet
and Tag
elements.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container for setting the versioning state.
*/ - ExpectedBucketOwner?: string; + VersioningConfiguration: VersioningConfiguration | undefined; /** - *Container for setting the versioning state.
+ *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container for the request.
*/ - ExpectedBucketOwner?: string; + WebsiteConfiguration: WebsiteConfiguration | undefined; /** - *Container for the request.
+ *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
If you specified server-side encryption either with an Amazon Web Services KMS customer master key (CMK) + *
If you specified server-side encryption either with an Amazon Web Services KMS key * or Amazon S3-managed encryption key in your PUT request, the response includes this header. It * confirms the encryption algorithm that Amazon S3 used to encrypt the object.
*/ @@ -10703,7 +10727,7 @@ export interface PutObjectOutput { /** *If x-amz-server-side-encryption
is present and has the value of
* aws:kms
, this header specifies the ID of the Amazon Web Services Key Management Service
- * (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) that was used for the
+ * (Amazon Web Services KMS) symmetric customer managed key that was used for the
* object.
If x-amz-server-side-encryption
is present and has the value of
* aws:kms
, this header specifies the ID of the Amazon Web Services Key Management Service
- * (Amazon Web Services KMS) symmetrical customer managed customer master key (CMK) that was used for the
+ * (Amazon Web Services KMS) symmetrical customer managed key that was used for the
* object. If you specify x-amz-server-side-encryption:aws:kms
, but do not
* provide x-amz-server-side-encryption-aws-kms-key-id
, Amazon S3 uses the Amazon Web Services
- * managed CMK in Amazon Web Services to protect the data. If the KMS key does not exist in the same account
+ * managed key to protect the data. If the KMS key does not exist in the same account
* issuing the command, you must use the full ARN and not just the ID.
*
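As the paragraph above notes, sending x-amz-server-side-encryption: aws:kms without a key ID makes Amazon S3 fall back to the Amazon Web Services managed key. A hedged sketch that pins a customer managed key instead; bucket, object key, and key ARN are placeholders:

```ts
import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-east-1" });

// Omitting SSEKMSKeyId here would fall back to the Amazon Web Services managed key.
await s3.send(
  new PutObjectCommand({
    Bucket: "my-example-bucket",
    Key: "reports/2021-09.csv",
    Body: "col1,col2\n1,2\n",
    ServerSideEncryption: "aws:kms",
    SSEKMSKeyId: "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID",
  })
);
```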
Contains the elements that set the ACL permissions for an object per grantee.
+ */ + AccessControlPolicy?: AccessControlPolicy; + /** *The bucket name that contains the object to which you want to attach the ACL.
*When using this action with an access point, you must direct requests to the access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this action with an access point through the Amazon Web Services SDKs, you provide the access point ARN in place of the bucket name. For more information about access point ARNs, see Using access points in the Amazon S3 User Guide.
@@ -11077,11 +11106,6 @@ export interface PutObjectAclRequest { *The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Contains the elements that set the ACL permissions for an object per grantee.
- */ - AccessControlPolicy?: AccessControlPolicy; } export namespace PutObjectAclRequest { @@ -11122,6 +11146,12 @@ export interface PutObjectLegalHoldRequest { */ Key: string | undefined; + /** + *Container element for the Legal Hold configuration you want to apply to the specified + * object.
+ */ + LegalHold?: ObjectLockLegalHold; + /** *Confirms that the requester knows that they will be charged for the request. Bucket * owners need not specify this parameter in their requests. For information about downloading @@ -11145,12 +11175,6 @@ export interface PutObjectLegalHoldRequest { *
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container element for the Legal Hold configuration you want to apply to the specified - * object.
- */ - LegalHold?: ObjectLockLegalHold; } export namespace PutObjectLegalHoldRequest { @@ -11185,6 +11209,11 @@ export interface PutObjectLockConfigurationRequest { */ Bucket: string | undefined; + /** + *The Object Lock configuration that you want to apply to the specified bucket.
+ */ + ObjectLockConfiguration?: ObjectLockConfiguration; + /** *Confirms that the requester knows that they will be charged for the request. Bucket * owners need not specify this parameter in their requests. For information about downloading @@ -11208,11 +11237,6 @@ export interface PutObjectLockConfigurationRequest { *
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The Object Lock configuration that you want to apply to the specified bucket.
- */ - ObjectLockConfiguration?: ObjectLockConfiguration; } export namespace PutObjectLockConfigurationRequest { @@ -11255,6 +11279,11 @@ export interface PutObjectRetentionRequest { */ Key: string | undefined; + /** + *The container element for the Object Retention configuration.
+ */ + Retention?: ObjectLockRetention; + /** *Confirms that the requester knows that they will be charged for the request. Bucket * owners need not specify this parameter in their requests. For information about downloading @@ -11284,11 +11313,6 @@ export interface PutObjectRetentionRequest { *
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The container element for the Object Retention configuration.
- */ - Retention?: ObjectLockRetention; } export namespace PutObjectRetentionRequest { @@ -11340,6 +11364,11 @@ export interface PutObjectTaggingRequest { */ ContentMD5?: string; + /** + *Container for the TagSet
and Tag
elements
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container for the TagSet
and Tag
elements
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
The PublicAccessBlock
configuration that you want to apply to this Amazon S3
* bucket. You can enable the configuration options in any combination. For more information
* about when Amazon S3 considers a bucket or object public, see The Meaning of "Public" in the Amazon S3 User Guide.
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
If the encryption type is aws:kms
, this optional value specifies the ID of
- * the symmetric customer managed Amazon Web Services KMS CMK to use for encryption of job results. Amazon S3 only
- * supports symmetric CMKs. For more information, see Using symmetric and
+ * the symmetric customer managed key to use for encryption of job results. Amazon S3 only
+ * supports symmetric keys. For more information, see Using symmetric and
* asymmetric keys in the Amazon Web Services Key Management Service Developer Guide.
Container for restore job parameters.
+ */ + RestoreRequest?: RestoreRequest; + /** *Confirms that the requester knows that they will be charged for the request. Bucket * owners need not specify this parameter in their requests. For information about downloading @@ -539,11 +544,6 @@ export interface RestoreObjectRequest { *
The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied)
error.
Container for restore job parameters.
- */ - RestoreRequest?: RestoreRequest; } export namespace RestoreObjectRequest { @@ -1019,7 +1019,7 @@ export interface UploadPartOutput { /** *If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric - * customer managed customer master key (CMK) was used for the object.
+ * customer managed key was used for the object. */ SSEKMSKeyId?: string; @@ -1190,7 +1190,7 @@ export interface UploadPartCopyOutput { /** *If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric - * customer managed customer master key (CMK) that was used for the object.
+ * customer managed key that was used for the object. */ SSEKMSKeyId?: string; @@ -1614,7 +1614,7 @@ export interface WriteGetObjectResponseRequest { SSECustomerAlgorithm?: string; /** - *If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed customer master key (CMK) that was used for stored in Amazon S3 object.
+ *If present, specifies the ID of the Amazon Web Services Key Management Service (Amazon Web Services KMS) symmetric customer managed key that was used for stored in Amazon S3 object.
*/ SSEKMSKeyId?: string; diff --git a/clients/client-s3/protocols/Aws_restXml.ts b/clients/client-s3/protocols/Aws_restXml.ts index 24d397769ada..b141c3095fc9 100644 --- a/clients/client-s3/protocols/Aws_restXml.ts +++ b/clients/client-s3/protocols/Aws_restXml.ts @@ -11454,6 +11454,12 @@ const serializeAws_restXmlMetricsAndOperator = (input: MetricsAndOperator, conte bodyNode.addChildNode(node); }); } + if (input.AccessPointArn !== undefined && input.AccessPointArn !== null) { + const node = new __XmlNode("AccessPointArn") + .addChildNode(new __XmlText(input.AccessPointArn)) + .withName("AccessPointArn"); + bodyNode.addChildNode(node); + } return bodyNode; }; @@ -11481,6 +11487,10 @@ const serializeAws_restXmlMetricsFilter = (input: MetricsFilter, context: __Serd const node = serializeAws_restXmlTag(value, context).withName("Tag"); bodyNode.addChildNode(node); }, + AccessPointArn: (value) => { + const node = new __XmlNode("AccessPointArn").addChildNode(new __XmlText(value)).withName("AccessPointArn"); + bodyNode.addChildNode(node); + }, And: (value) => { const node = serializeAws_restXmlMetricsAndOperator(value, context).withName("And"); bodyNode.addChildNode(node); @@ -13637,6 +13647,7 @@ const deserializeAws_restXmlMetricsAndOperator = (output: any, context: __SerdeC let contents: any = { Prefix: undefined, Tags: undefined, + AccessPointArn: undefined, }; if (output["Prefix"] !== undefined) { contents.Prefix = __expectString(output["Prefix"]); @@ -13647,6 +13658,9 @@ const deserializeAws_restXmlMetricsAndOperator = (output: any, context: __SerdeC if (output["Tag"] !== undefined) { contents.Tags = deserializeAws_restXmlTagSet(__getArrayIfSingleItem(output["Tag"]), context); } + if (output["AccessPointArn"] !== undefined) { + contents.AccessPointArn = __expectString(output["AccessPointArn"]); + } return contents; }; @@ -13689,6 +13703,11 @@ const deserializeAws_restXmlMetricsFilter = (output: any, context: __SerdeContex Tag: deserializeAws_restXmlTag(output["Tag"], context), }; } + if (output["AccessPointArn"] !== undefined) { + return { + AccessPointArn: __expectString(output["AccessPointArn"]) as any, + }; + } if (output["And"] !== undefined) { return { And: deserializeAws_restXmlMetricsAndOperator(output["And"], context), diff --git a/clients/client-sagemaker/SageMaker.ts b/clients/client-sagemaker/SageMaker.ts index 05e470c92653..1dd4cb534309 100644 --- a/clients/client-sagemaker/SageMaker.ts +++ b/clients/client-sagemaker/SageMaker.ts @@ -913,6 +913,11 @@ import { RenderUiTemplateCommandInput, RenderUiTemplateCommandOutput, } from "./commands/RenderUiTemplateCommand"; +import { + RetryPipelineExecutionCommand, + RetryPipelineExecutionCommandInput, + RetryPipelineExecutionCommandOutput, +} from "./commands/RetryPipelineExecutionCommand"; import { SearchCommand, SearchCommandInput, SearchCommandOutput } from "./commands/SearchCommand"; import { SendPipelineExecutionStepFailureCommand, @@ -1662,8 +1667,8 @@ export class SageMaker extends SageMakerClient { * domain. Each user receives a private home directory within the EFS volume for notebooks, * Git repositories, and data files. *SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with - * an Amazon Web Services managed customer master key (CMK) by default. For more control, you can specify a - * customer managed CMK. For more information, see + * an Amazon Web Services managed key by default. 
For more control, you can specify a + * customer managed key. For more information, see * Protect Data at * Rest Using Encryption.
* @@ -8122,6 +8127,38 @@ export class SageMaker extends SageMakerClient { } } + /** + *Retry the execution of the pipeline.
+ */ + public retryPipelineExecution( + args: RetryPipelineExecutionCommandInput, + options?: __HttpHandlerOptions + ): PromiseFinds Amazon SageMaker resources that match a search query. Matching resources are returned
* as a list of SearchRecord
objects in the response. You can sort the search
diff --git a/clients/client-sagemaker/SageMakerClient.ts b/clients/client-sagemaker/SageMakerClient.ts
index bc9f8ecc6282..848b4b9252ea 100644
--- a/clients/client-sagemaker/SageMakerClient.ts
+++ b/clients/client-sagemaker/SageMakerClient.ts
@@ -493,6 +493,10 @@ import {
} from "./commands/PutModelPackageGroupPolicyCommand";
import { RegisterDevicesCommandInput, RegisterDevicesCommandOutput } from "./commands/RegisterDevicesCommand";
import { RenderUiTemplateCommandInput, RenderUiTemplateCommandOutput } from "./commands/RenderUiTemplateCommand";
+import {
+ RetryPipelineExecutionCommandInput,
+ RetryPipelineExecutionCommandOutput,
+} from "./commands/RetryPipelineExecutionCommand";
import { SearchCommandInput, SearchCommandOutput } from "./commands/SearchCommand";
import {
SendPipelineExecutionStepFailureCommandInput,
@@ -839,6 +843,7 @@ export type ServiceInputTypes =
| PutModelPackageGroupPolicyCommandInput
| RegisterDevicesCommandInput
| RenderUiTemplateCommandInput
+ | RetryPipelineExecutionCommandInput
| SearchCommandInput
| SendPipelineExecutionStepFailureCommandInput
| SendPipelineExecutionStepSuccessCommandInput
@@ -1080,6 +1085,7 @@ export type ServiceOutputTypes =
| PutModelPackageGroupPolicyCommandOutput
| RegisterDevicesCommandOutput
| RenderUiTemplateCommandOutput
+ | RetryPipelineExecutionCommandOutput
| SearchCommandOutput
| SendPipelineExecutionStepFailureCommandOutput
| SendPipelineExecutionStepSuccessCommandOutput
diff --git a/clients/client-sagemaker/commands/CreateDomainCommand.ts b/clients/client-sagemaker/commands/CreateDomainCommand.ts
index 8ba9a54da3f0..a9df773fe2e2 100644
--- a/clients/client-sagemaker/commands/CreateDomainCommand.ts
+++ b/clients/client-sagemaker/commands/CreateDomainCommand.ts
@@ -33,8 +33,8 @@ export interface CreateDomainCommandOutput extends CreateDomainResponse, __Metad
* domain. Each user receives a private home directory within the EFS volume for notebooks,
* Git repositories, and data files.
SageMaker uses the Amazon Web Services Key Management Service (Amazon Web Services KMS) to encrypt the EFS volume attached to the domain with - * an Amazon Web Services managed customer master key (CMK) by default. For more control, you can specify a - * customer managed CMK. For more information, see + * an Amazon Web Services managed key by default. For more control, you can specify a + * customer managed key. For more information, see * Protect Data at * Rest Using Encryption.
* diff --git a/clients/client-sagemaker/commands/RetryPipelineExecutionCommand.ts b/clients/client-sagemaker/commands/RetryPipelineExecutionCommand.ts new file mode 100644 index 000000000000..d491e0509219 --- /dev/null +++ b/clients/client-sagemaker/commands/RetryPipelineExecutionCommand.ts @@ -0,0 +1,94 @@ +import { SageMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../SageMakerClient"; +import { RetryPipelineExecutionRequest, RetryPipelineExecutionResponse } from "../models/models_2"; +import { + deserializeAws_json1_1RetryPipelineExecutionCommand, + serializeAws_json1_1RetryPipelineExecutionCommand, +} from "../protocols/Aws_json1_1"; +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + MiddlewareStack, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +export interface RetryPipelineExecutionCommandInput extends RetryPipelineExecutionRequest {} +export interface RetryPipelineExecutionCommandOutput extends RetryPipelineExecutionResponse, __MetadataBearer {} + +/** + *Retry the execution of the pipeline.
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { SageMakerClient, RetryPipelineExecutionCommand } from "@aws-sdk/client-sagemaker"; // ES Modules import + * // const { SageMakerClient, RetryPipelineExecutionCommand } = require("@aws-sdk/client-sagemaker"); // CommonJS import + * const client = new SageMakerClient(config); + * const command = new RetryPipelineExecutionCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link RetryPipelineExecutionCommandInput} for command's `input` shape. + * @see {@link RetryPipelineExecutionCommandOutput} for command's `response` shape. + * @see {@link SageMakerClientResolvedConfig | config} for command's `input` shape. + * + */ +export class RetryPipelineExecutionCommand extends $Command< + RetryPipelineExecutionCommandInput, + RetryPipelineExecutionCommandOutput, + SageMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: RetryPipelineExecutionCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStackIf you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must + *
If you use a KMS key ID or an alias of your KMS key, the Amazon SageMaker execution role must
* include permissions to call kms:Encrypt
. If you don't provide a KMS key ID,
* Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side
* encryption with KMS-managed keys for OutputDataConfig
. If you use a bucket
@@ -3608,8 +3608,8 @@ export namespace AssociateTrialComponentResponse {
}
/**
- *
Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ export interface UserContext { /** @@ -3684,8 +3684,8 @@ export interface AssociationSummary { CreationTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ CreatedBy?: UserContext; } @@ -6382,7 +6382,7 @@ export interface ResourceSpec { InstanceType?: AppInstanceType | string; /** - *The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the Resource.
+ *The Amazon Resource Name (ARN) of the Lifecycle Configuration attached to the Resource.
*/ LifecycleConfigArn?: string; } @@ -8424,7 +8424,7 @@ export interface KernelGatewayAppSettings { CustomImages?: CustomImage[]; /** - *The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the KernelGatewayApp.
+ *The Amazon Resource Name (ARN) of the Lifecycle Configurations attached to the the user profile or domain.
*/ LifecycleConfigArns?: string[]; } @@ -8620,7 +8620,7 @@ export interface CreateDomainRequest { /** *SageMaker uses Amazon Web Services KMS to encrypt the EFS volume attached to the domain with an Amazon Web Services managed - * customer master key (CMK) by default. For more control, specify a customer managed CMK.
+ * key by default. For more control, specify a customer managed key. */ KmsKeyId?: string; } @@ -8687,7 +8687,7 @@ export interface CreateEdgePackagingJobRequest { OutputConfig: EdgeOutputConfig | undefined; /** - *The CMK to use when encrypting the EBS volume the edge packaging job runs on.
+ *The Amazon Web Services KMS key to use when encrypting the EBS volume the edge packaging job runs on.
*/ ResourceKey?: string; @@ -8847,7 +8847,7 @@ export interface ProductionVariantCoreDumpConfig { *If you use a KMS key ID or an alias of your master key, the Amazon SageMaker execution role must + *
If you use a KMS key ID or an alias of your KMS key, the Amazon SageMaker execution role must
* include permissions to call kms:Encrypt
. If you don't provide a KMS key ID,
* Amazon SageMaker uses the default KMS key for Amazon S3 for your role's account. Amazon SageMaker uses server-side
* encryption with KMS-managed keys for OutputDataConfig
. If you use a bucket
@@ -13071,7 +13071,7 @@ export interface LabelingJobResourceConfig {
* automated data labeling.
You can only specify a VolumeKmsKeyId
when you create a labeling job with
* automated data labeling enabled using the API operation CreateLabelingJob
.
- * You cannot specify an Amazon Web Services KMS customer managed CMK to encrypt the storage volume used for
+ * You cannot specify an Amazon Web Services KMS key to encrypt the storage volume used for
* automated data labeling model training and inference when you create a labeling job
* using the console. To learn more, see Output Data and Storage Volume
* Encryption.
Details that you specify to provision a service catalog product. For information about - * service catalog, see .What is Amazon Web Services Service + * service catalog, see What is Amazon Web Services Service * Catalog.
*/ export interface ServiceCatalogProvisioningDetails { @@ -4971,8 +4971,8 @@ export interface DescribeActionResponse { CreationTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ CreatedBy?: UserContext; @@ -4982,8 +4982,8 @@ export interface DescribeActionResponse { LastModifiedTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ LastModifiedBy?: UserContext; @@ -5282,8 +5282,8 @@ export interface DescribeArtifactResponse { CreationTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ CreatedBy?: UserContext; @@ -5293,8 +5293,8 @@ export interface DescribeArtifactResponse { LastModifiedTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ LastModifiedBy?: UserContext; @@ -5781,8 +5781,8 @@ export interface DescribeContextResponse { CreationTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ CreatedBy?: UserContext; @@ -5792,8 +5792,8 @@ export interface DescribeContextResponse { LastModifiedTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ LastModifiedBy?: UserContext; } @@ -6203,7 +6203,7 @@ export interface DescribeDomainResponse { VpcId?: string; /** - *The Amazon Web Services KMS customer managed CMK used to encrypt + *
The Amazon Web Services KMS customer managed key used to encrypt * the EFS volume attached to the domain.
*/ KmsKeyId?: string; @@ -6320,7 +6320,7 @@ export interface DescribeEdgePackagingJobResponse { OutputConfig?: EdgeOutputConfig; /** - *The CMK to use when encrypting the EBS volume the job run on.
+ *The Amazon Web Services KMS key to use when encrypting the EBS volume the job run on.
*/ ResourceKey?: string; @@ -8266,8 +8266,8 @@ export interface DescribeModelPackageOutput { ModelApprovalStatus?: ModelApprovalStatus | string; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ CreatedBy?: UserContext; @@ -8287,8 +8287,8 @@ export interface DescribeModelPackageOutput { LastModifiedTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ LastModifiedBy?: UserContext; @@ -8354,8 +8354,8 @@ export interface DescribeModelPackageGroupOutput { CreationTime: Date | undefined; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ CreatedBy: UserContext | undefined; @@ -8944,14 +8944,14 @@ export interface DescribePipelineResponse { LastRunTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ CreatedBy?: UserContext; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ LastModifiedBy?: UserContext; } @@ -9097,14 +9097,14 @@ export interface DescribePipelineExecutionResponse { LastModifiedTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ CreatedBy?: UserContext; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ LastModifiedBy?: UserContext; } @@ -9377,8 +9377,8 @@ export interface DescribeProjectOutput { ProjectStatus: ProjectStatus | string | undefined; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ CreatedBy?: UserContext; diff --git a/clients/client-sagemaker/models/models_2.ts b/clients/client-sagemaker/models/models_2.ts index b1076dff2834..a9ac00a4b13c 100644 --- a/clients/client-sagemaker/models/models_2.ts +++ b/clients/client-sagemaker/models/models_2.ts @@ -112,6 +112,8 @@ import { ProfilingStatus, ProjectStatus, ScheduleStatus, + ServiceCatalogProvisionedProductDetails, + ServiceCatalogProvisioningDetails, SourceAlgorithmSpecification, SourceIpConfig, StudioLifecycleConfigAppType, @@ -1207,7 +1209,7 @@ export interface DescribeTrialComponentResponse { CreationTime?: Date; /** - *Who created the component.
+ *Who created the trial component.
*/ CreatedBy?: UserContext; @@ -2370,8 +2372,7 @@ export interface Experiment { CreationTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Who created the experiment.
*/ CreatedBy?: UserContext; @@ -2381,8 +2382,8 @@ export interface Experiment { LastModifiedTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ LastModifiedBy?: UserContext; @@ -3021,6 +3022,7 @@ export enum ResourceType { MODEL_PACKAGE_GROUP = "ModelPackageGroup", PIPELINE = "Pipeline", PIPELINE_EXECUTION = "PipelineExecution", + PROJECT = "Project", TRAINING_JOB = "TrainingJob", } @@ -6315,8 +6317,8 @@ export interface ListModelsInput { MaxResults?: number; /** - *A string in the training job name. This filter returns only models in the training - * job whose name contains the specified string.
+ *A string in the model name. This filter returns only models whose + * name contains the specified string.
*/ NameContains?: string; @@ -8624,7 +8626,7 @@ export interface TrialComponentSummary { CreationTime?: Date; /** - *Who created the component.
+ *Who created the trial component.
*/ CreatedBy?: UserContext; @@ -9140,8 +9142,8 @@ export interface ModelPackage { ModelApprovalStatus?: ModelApprovalStatus | string; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ CreatedBy?: UserContext; @@ -9161,8 +9163,8 @@ export interface ModelPackage { LastModifiedTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ LastModifiedBy?: UserContext; @@ -9212,8 +9214,8 @@ export interface ModelPackageGroup { CreationTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ CreatedBy?: UserContext; @@ -9386,14 +9388,14 @@ export interface Pipeline { LastRunTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ CreatedBy?: UserContext; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ LastModifiedBy?: UserContext; @@ -9462,14 +9464,14 @@ export interface PipelineExecution { LastModifiedTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ CreatedBy?: UserContext; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ LastModifiedBy?: UserContext; @@ -9681,6 +9683,77 @@ export namespace ProfilerConfigForUpdate { }); } +/** + *The properties of a project as returned by the Search API.
+ */ +export interface Project { + /** + *The Amazon Resource Name (ARN) of the project.
+ */ + ProjectArn?: string; + + /** + *The name of the project.
+ */ + ProjectName?: string; + + /** + *The ID of the project.
+ */ + ProjectId?: string; + + /** + *The description of the project.
+ */ + ProjectDescription?: string; + + /** + *Details that you specify to provision a service catalog product. For information about + * service catalog, see What is Amazon Web Services Service + * Catalog.
+ */ + ServiceCatalogProvisioningDetails?: ServiceCatalogProvisioningDetails; + + /** + *Details of a provisioned service catalog product. For information about service catalog, + * see What is Amazon Web Services Service + * Catalog.
+ */ + ServiceCatalogProvisionedProductDetails?: ServiceCatalogProvisionedProductDetails; + + /** + *The status of the project.
+ */ + ProjectStatus?: ProjectStatus | string; + + /** + *Who created the project.
+ */ + CreatedBy?: UserContext; + + /** + *A timestamp specifying when the project was created.
+ */ + CreationTime?: Date; + + /** + *An array of key-value pairs. You can use tags to categorize your Amazon Web Services resources in + * different ways, for example, by purpose, owner, or environment. For more information, + * see Tagging Amazon Web Services + * Resources.
+ */ + Tags?: Tag[]; +} + +export namespace Project { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Project): any => ({ + ...obj, + }); +} + export interface PutModelPackageGroupPolicyInput { /** *The name of the model group to add a resource policy to.
@@ -9848,6 +9921,44 @@ export namespace RenderUiTemplateResponse { }); } +export interface RetryPipelineExecutionRequest { + /** + *The Amazon Resource Name (ARN) of the pipeline execution.
+ */ + PipelineExecutionArn: string | undefined; + + /** + *A unique, case-sensitive identifier that you provide to ensure the idempotency of the + * operation. An idempotent operation completes no more than once.
+ */ + ClientRequestToken?: string; +} + +export namespace RetryPipelineExecutionRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RetryPipelineExecutionRequest): any => ({ + ...obj, + }); +} + +export interface RetryPipelineExecutionResponse { + /** + *The Amazon Resource Name (ARN) of the pipeline execution.
+ */ + PipelineExecutionArn?: string; +} + +export namespace RetryPipelineExecutionResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RetryPipelineExecutionResponse): any => ({ + ...obj, + }); +} + export enum SearchSortOrder { ASCENDING = "Ascending", DESCENDING = "Descending", @@ -10255,8 +10366,8 @@ export interface TrialComponentSimpleSummary { CreationTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ CreatedBy?: UserContext; } @@ -10306,8 +10417,7 @@ export interface Trial { CreationTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Who created the trial.
*/ CreatedBy?: UserContext; @@ -10317,8 +10427,8 @@ export interface Trial { LastModifiedTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ LastModifiedBy?: UserContext; @@ -10620,8 +10730,7 @@ export interface TrialComponent { CreationTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Who created the trial component.
*/ CreatedBy?: UserContext; @@ -10631,8 +10740,8 @@ export interface TrialComponent { LastModifiedTime?: Date; /** - *Information about the user who created or modified an experiment, trial, or trial - * component.
+ *Information about the user who created or modified an experiment, trial, trial + * component, or project.
*/ LastModifiedBy?: UserContext; @@ -10752,6 +10861,11 @@ export interface SearchRecord { * In principle, a Feature Group is composed of features and values per features. */ FeatureGroup?: FeatureGroup; + + /** + *The properties of a project.
+ */ + Project?: Project; } export namespace SearchRecord { @@ -10929,7 +11043,7 @@ export interface StartPipelineExecutionRequest { /** *A unique, case-sensitive identifier that you provide to ensure the idempotency of the - * operation. An idempotent operation completes no more than one time.
+ * operation. An idempotent operation completes no more than once. */ ClientRequestToken?: string; } @@ -11079,7 +11193,7 @@ export interface StopPipelineExecutionRequest { /** *A unique, case-sensitive identifier that you provide to ensure the idempotency of the - * operation. An idempotent operation completes no more than one time.
+ * operation. An idempotent operation completes no more than once. */ ClientRequestToken?: string; } @@ -11245,78 +11359,3 @@ export namespace UpdateAppImageConfigResponse { ...obj, }); } - -export interface UpdateArtifactRequest { - /** - *The Amazon Resource Name (ARN) of the artifact to update.
- */ - ArtifactArn: string | undefined; - - /** - *The new name for the artifact.
- */ - ArtifactName?: string; - - /** - *The new list of properties. Overwrites the current property list.
- */ - Properties?: { [key: string]: string }; - - /** - *A list of properties to remove.
- */ - PropertiesToRemove?: string[]; -} - -export namespace UpdateArtifactRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: UpdateArtifactRequest): any => ({ - ...obj, - }); -} - -export interface UpdateArtifactResponse { - /** - *The Amazon Resource Name (ARN) of the artifact.
- */ - ArtifactArn?: string; -} - -export namespace UpdateArtifactResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: UpdateArtifactResponse): any => ({ - ...obj, - }); -} - -export interface UpdateCodeRepositoryInput { - /** - *The name of the Git repository to update.
- */ - CodeRepositoryName: string | undefined; - - /** - *The configuration of the git repository, including the URL and the Amazon Resource
- * Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the credentials used to
- * access the repository. The secret must have a staging label of AWSCURRENT
- * and must be in the following format:
- * {"username": UserName, "password":
- * Password}
- *
The Amazon Resource Name (ARN) of the artifact to update.
+ */ + ArtifactArn: string | undefined; + + /** + *The new name for the artifact.
+ */ + ArtifactName?: string; + + /** + *The new list of properties. Overwrites the current property list.
+ */ + Properties?: { [key: string]: string }; + + /** + *A list of properties to remove.
+ */ + PropertiesToRemove?: string[]; +} + +export namespace UpdateArtifactRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateArtifactRequest): any => ({ + ...obj, + }); +} + +export interface UpdateArtifactResponse { + /** + *The Amazon Resource Name (ARN) of the artifact.
+ */ + ArtifactArn?: string; +} + +export namespace UpdateArtifactResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateArtifactResponse): any => ({ + ...obj, + }); +} + +export interface UpdateCodeRepositoryInput { + /** + *The name of the Git repository to update.
+ */ + CodeRepositoryName: string | undefined; + + /** + *The configuration of the git repository, including the URL and the Amazon Resource
+ * Name (ARN) of the Amazon Web Services Secrets Manager secret that contains the credentials used to
+ * access the repository. The secret must have a staging label of AWSCURRENT
+ * and must be in the following format:
+ * {"username": UserName, "password":
+ * Password}
+ *
The ARN of the Git repository.
diff --git a/clients/client-sagemaker/protocols/Aws_json1_1.ts b/clients/client-sagemaker/protocols/Aws_json1_1.ts index 1b201df66395..bfbb77692346 100644 --- a/clients/client-sagemaker/protocols/Aws_json1_1.ts +++ b/clients/client-sagemaker/protocols/Aws_json1_1.ts @@ -493,6 +493,10 @@ import { } from "../commands/PutModelPackageGroupPolicyCommand"; import { RegisterDevicesCommandInput, RegisterDevicesCommandOutput } from "../commands/RegisterDevicesCommand"; import { RenderUiTemplateCommandInput, RenderUiTemplateCommandOutput } from "../commands/RenderUiTemplateCommand"; +import { + RetryPipelineExecutionCommandInput, + RetryPipelineExecutionCommandOutput, +} from "../commands/RetryPipelineExecutionCommand"; import { SearchCommandInput, SearchCommandOutput } from "../commands/SearchCommand"; import { SendPipelineExecutionStepFailureCommandInput, @@ -1270,6 +1274,7 @@ import { ProcessingJobStepMetadata, ProcessingJobSummary, ProfilerConfigForUpdate, + Project, ProjectSummary, PropertyNameQuery, PropertyNameSuggestion, @@ -1281,6 +1286,8 @@ import { RenderUiTemplateResponse, RenderableTask, RenderingError, + RetryPipelineExecutionRequest, + RetryPipelineExecutionResponse, SearchRecord, SearchResponse, SecondaryStatusTransition, @@ -1326,9 +1333,6 @@ import { UpdateActionResponse, UpdateAppImageConfigRequest, UpdateAppImageConfigResponse, - UpdateArtifactRequest, - UpdateArtifactResponse, - UpdateCodeRepositoryInput, UserProfileDetails, Workforce, Workteam, @@ -1336,6 +1340,9 @@ import { import { SearchExpression, SearchRequest, + UpdateArtifactRequest, + UpdateArtifactResponse, + UpdateCodeRepositoryInput, UpdateCodeRepositoryOutput, UpdateContextRequest, UpdateContextResponse, @@ -3976,6 +3983,19 @@ export const serializeAws_json1_1RenderUiTemplateCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1RetryPipelineExecutionCommand = async ( + input: RetryPipelineExecutionCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "SageMaker.RetryPipelineExecution", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1RetryPipelineExecutionRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1SearchCommand = async ( input: SearchCommandInput, context: __SerdeContext @@ -14816,6 +14836,76 @@ const deserializeAws_json1_1RenderUiTemplateCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1RetryPipelineExecutionCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): PromiseStarts a bidirectional HTTP2 stream where audio is streamed to Amazon Transcribe and the transcription + *
Starts a bidirectional HTTP/2 stream where audio is streamed to Amazon Transcribe and the transcription * results are streamed to your application.
- *The following are encoded as HTTP2 headers:
+ *The following are encoded as HTTP/2 headers:
*x-amzn-transcribe-language-code
@@ -66,6 +66,7 @@ export class TranscribeStreaming extends TranscribeStreamingClient { *x-amzn-transcribe-session-id
*See the SDK for Go API Reference for more detail.
*/ public startStreamTranscription( args: StartStreamTranscriptionCommandInput, diff --git a/clients/client-transcribe-streaming/commands/StartStreamTranscriptionCommand.ts b/clients/client-transcribe-streaming/commands/StartStreamTranscriptionCommand.ts index d9af7ef9ca60..6cc8da227e99 100644 --- a/clients/client-transcribe-streaming/commands/StartStreamTranscriptionCommand.ts +++ b/clients/client-transcribe-streaming/commands/StartStreamTranscriptionCommand.ts @@ -27,9 +27,9 @@ export interface StartStreamTranscriptionCommandInput extends StartStreamTranscr export interface StartStreamTranscriptionCommandOutput extends StartStreamTranscriptionResponse, __MetadataBearer {} /** - *Starts a bidirectional HTTP2 stream where audio is streamed to Amazon Transcribe and the transcription + *
Starts a bidirectional HTTP/2 stream where audio is streamed to Amazon Transcribe and the transcription * results are streamed to your application.
- *The following are encoded as HTTP2 headers:
+ *The following are encoded as HTTP/2 headers:
*x-amzn-transcribe-language-code
@@ -44,6 +44,7 @@ export interface StartStreamTranscriptionCommandOutput extends StartStreamTransc *x-amzn-transcribe-session-id
*See the SDK for Go API Reference for more detail.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-transcribe-streaming/endpoints.ts b/clients/client-transcribe-streaming/endpoints.ts index 59e2a04cbdfc..66dd9a7919e2 100644 --- a/clients/client-transcribe-streaming/endpoints.ts +++ b/clients/client-transcribe-streaming/endpoints.ts @@ -1,7 +1,24 @@ import { PartitionHash, RegionHash, getRegionInfo } from "@aws-sdk/config-resolver"; import { RegionInfoProvider } from "@aws-sdk/types"; -const regionHash: RegionHash = {}; +const regionHash: RegionHash = { + "transcribestreaming-fips-ca-central-1": { + hostname: "transcribestreaming-fips.ca-central-1.amazonaws.com", + signingRegion: "ca-central-1", + }, + "transcribestreaming-fips-us-east-1": { + hostname: "transcribestreaming-fips.us-east-1.amazonaws.com", + signingRegion: "us-east-1", + }, + "transcribestreaming-fips-us-east-2": { + hostname: "transcribestreaming-fips.us-east-2.amazonaws.com", + signingRegion: "us-east-2", + }, + "transcribestreaming-fips-us-west-2": { + hostname: "transcribestreaming-fips.us-west-2.amazonaws.com", + signingRegion: "us-west-2", + }, +}; const partitionHash: PartitionHash = { aws: { @@ -23,6 +40,10 @@ const partitionHash: PartitionHash = { "eu-west-3", "me-south-1", "sa-east-1", + "transcribestreaming-fips-ca-central-1", + "transcribestreaming-fips-us-east-1", + "transcribestreaming-fips-us-east-2", + "transcribestreaming-fips-us-west-2", "us-east-1", "us-east-2", "us-west-1", diff --git a/clients/client-transcribe-streaming/models/models_0.ts b/clients/client-transcribe-streaming/models/models_0.ts index 49fc224aef8d..f8980b6214c4 100644 --- a/clients/client-transcribe-streaming/models/models_0.ts +++ b/clients/client-transcribe-streaming/models/models_0.ts @@ -1,5 +1,49 @@ import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; +/** + *The entity identified as personally identifiable information (PII).
+ */ +export interface Entity { + /** + *The start time of speech that was identified as PII.
+ */ + StartTime?: number; + + /** + *The end time of speech that was identified as PII.
+ */ + EndTime?: number; + + /** + *The category of of information identified in this entity; for example, PII.
+ */ + Category?: string; + + /** + *The type of PII identified in this entity; for example, name or credit card number.
+ */ + Type?: string; + + /** + *The words in the transcription output that have been identified as a PII entity.
+ */ + Content?: string; + + /** + *A value between zero and one that Amazon Transcribe assigns to PII identified in the source audio. Larger values indicate a higher confidence in PII identification.
+ */ + Confidence?: number; +} + +export namespace Entity { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Entity): any => ({ + ...obj, + }); +} + export enum ItemType { PRONUNCIATION = "pronunciation", PUNCTUATION = "punctuation", @@ -81,6 +125,11 @@ export interface Alternative { *One or more alternative interpretations of the input audio.
*/ Items?: Item[]; + + /** + *Contains the entities identified as personally identifiable information (PII) in the transcription output.
+ */ + Entities?: Entity[]; } export namespace Alternative { @@ -94,8 +143,10 @@ export namespace Alternative { /** *Provides a wrapper for the audio chunks that you are sending.
- *For information on audio encoding in Amazon Transcribe, see input. For information - * on audio encoding formats in Amazon Transcribe Medical, see input-med.
+ *For information on audio encoding in Amazon Transcribe, see + * Speech input. For information + * on audio encoding formats in Amazon Transcribe Medical, see + * Speech input.
*/ export interface AudioEvent { /** @@ -123,10 +174,10 @@ export namespace AudioStream { /** *A blob of audio from your application. You audio stream consists of one or more audio * events.
- *For information on audio encoding formats in Amazon Transcribe, see input. For - * information on audio encoding formats in Amazon Transcribe Medical, see input-med.
- *For more information on stream encoding in Amazon Transcribe, see event-stream. For - * information on stream encoding in Amazon Transcribe Medical, see event-stream-med.
+ *For information on audio encoding formats in Amazon Transcribe, see Speech input. For + * information on audio encoding formats in Amazon Transcribe Medical, see Speech input.
+ *For more information on stream encoding in Amazon Transcribe, see Event stream encoding. For + * information on stream encoding in Amazon Transcribe Medical, see Event stream encoding.
*/ export interface AudioEventMember { AudioEvent: AudioEvent; @@ -197,6 +248,14 @@ export namespace ConflictException { }); } +export enum ContentIdentificationType { + PII = "PII", +} + +export enum ContentRedactionType { + PII = "PII", +} + /** *A problem occurred while processing the audio. Amazon Transcribe or Amazon Transcribe Medical terminated processing. Try * your request again.
@@ -742,8 +801,7 @@ export interface StartMedicalStreamTranscriptionRequest { LanguageCode: LanguageCode | string | undefined; /** - *The sample rate of the input audio in Hertz. Sample rates of 16000 Hz or higher are - * accepted.
+ *The sample rate of the input audio in Hertz.
*/ MediaSampleRateHertz: number | undefined; @@ -833,7 +891,7 @@ export interface StartMedicalStreamTranscriptionResponse { LanguageCode?: LanguageCode | string; /** - *The sample rate of the input audio in Hertz. Valid value: 16000 Hz.
+ *The sample rate of the input audio in Hertz.
*/ MediaSampleRateHertz?: number; @@ -913,8 +971,8 @@ export interface StartStreamTranscriptionRequest { LanguageCode: LanguageCode | string | undefined; /** - *The sample rate, in Hertz, of the input audio. We suggest that you use 8000 Hz for low - * quality audio and 16000 Hz for high quality audio.
+ *The sample rate, in Hertz, of the input audio. We suggest that you use 8,000 Hz for low + * quality audio and 16,000 Hz for high quality audio.
*/ MediaSampleRateHertz: number | undefined; @@ -936,13 +994,13 @@ export interface StartStreamTranscriptionRequest { SessionId?: string; /** - *PCM-encoded stream of audio blobs. The audio stream is encoded as an HTTP2 data + *
PCM-encoded stream of audio blobs. The audio stream is encoded as an HTTP/2 data * frame.
*/ AudioStream: AsyncIterableThe name of the vocabulary filter you've created that is unique to your AWS account. + *
The name of the vocabulary filter you've created that is unique to your account. * Provide the name in this field to successfully use it in a stream.
*/ VocabularyFilterName?: string; @@ -950,7 +1008,7 @@ export interface StartStreamTranscriptionRequest { /** *The manner in which you use your vocabulary filter to filter words in your transcript.
* Remove
removes filtered words from your transcription results.
- * Mask
masks those words with a ***
in your transcription results.
+ * Mask
masks filtered words with a ***
in your transcription results.
* Tag
keeps the filtered words in your transcription results and tags them. The
* tag appears as VocabularyFilterMatch
equal to True
*
Set this field to PII to identify personally identifiable information (PII) in the transcription output. Content identification is performed only upon complete transcription of the audio segments.
+ *You can’t set both ContentIdentificationType
and ContentRedactionType
in the same request. If you set both, your request returns a BadRequestException
.
Set this field to PII to redact personally identifiable information (PII) in the transcription output. Content redaction is performed only upon complete transcription of the audio segments.
+ *You can’t set both ContentRedactionType
and ContentIdentificationType
in the same request. If you set both, your request returns a BadRequestException
.
List the PII entity types you want to identify or redact. In order to specify entity types, you must have
+ * either ContentIdentificationType
or ContentRedactionType
enabled.
+ * PIIEntityTypes
must be comma-separated; the available values are:
+ * BANK_ACCOUNT_NUMBER
, BANK_ROUTING
,
+ * CREDIT_DEBIT_NUMBER
, CREDIT_DEBIT_CVV
,
+ * CREDIT_DEBIT_EXPIRY
, PIN
, EMAIL
,
+ * ADDRESS
, NAME
, PHONE
,
+ * SSN
, and ALL
.
+ * PiiEntityTypes
is an optional parameter with a default value of ALL
.
A portion of the transcription of the audio stream. Events are sent periodically from * Amazon Transcribe to your application. The event can be a partial transcription of a section of the audio - * stream, or it can be the entire transcription of that portion of the audio stream. - *
+ * stream, or it can be the entire transcription of that portion of the audio stream. */ export interface TranscriptEventMember { TranscriptEvent: TranscriptEvent; @@ -1209,7 +1293,7 @@ export interface StartStreamTranscriptionResponse { LanguageCode?: LanguageCode | string; /** - *The sample rate for the input audio stream. Use 8000 Hz for low quality audio and 16000 Hz + *
The sample rate for the input audio stream. Use 8,000 Hz for low quality audio and 16,000 Hz * for high quality audio.
*/ MediaSampleRateHertz?: number; @@ -1269,6 +1353,21 @@ export interface StartStreamTranscriptionResponse { * level. */ PartialResultsStability?: PartialResultsStability | string; + + /** + *Shows whether content identification was enabled in this stream.
+ */ + ContentIdentificationType?: ContentIdentificationType | string; + + /** + *Shows whether content redaction was enabled in this stream.
+ */ + ContentRedactionType?: ContentRedactionType | string; + + /** + *Lists the PII entity types you specified in your request.
+ */ + PiiEntityTypes?: string; } export namespace StartStreamTranscriptionResponse { diff --git a/clients/client-transcribe-streaming/protocols/Aws_restJson1.ts b/clients/client-transcribe-streaming/protocols/Aws_restJson1.ts index 955ab725b3c4..9cb77b23577f 100644 --- a/clients/client-transcribe-streaming/protocols/Aws_restJson1.ts +++ b/clients/client-transcribe-streaming/protocols/Aws_restJson1.ts @@ -12,6 +12,7 @@ import { AudioStream, BadRequestException, ConflictException, + Entity, InternalFailureException, Item, LimitExceededException, @@ -131,6 +132,15 @@ export const serializeAws_restJson1StartStreamTranscriptionCommand = async ( ...(isSerializableHeaderValue(input.PartialResultsStability) && { "x-amzn-transcribe-partial-results-stability": input.PartialResultsStability!, }), + ...(isSerializableHeaderValue(input.ContentIdentificationType) && { + "x-amzn-transcribe-content-identification-type": input.ContentIdentificationType!, + }), + ...(isSerializableHeaderValue(input.ContentRedactionType) && { + "x-amzn-transcribe-content-redaction-type": input.ContentRedactionType!, + }), + ...(isSerializableHeaderValue(input.PiiEntityTypes) && { + "x-amzn-transcribe-pii-entity-types": input.PiiEntityTypes!, + }), }; let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/stream-transcription"; let body: any; @@ -308,6 +318,8 @@ export const deserializeAws_restJson1StartStreamTranscriptionCommand = async ( } const contents: StartStreamTranscriptionCommandOutput = { $metadata: deserializeMetadata(output), + ContentIdentificationType: undefined, + ContentRedactionType: undefined, EnableChannelIdentification: undefined, EnablePartialResultsStabilization: undefined, LanguageCode: undefined, @@ -315,6 +327,7 @@ export const deserializeAws_restJson1StartStreamTranscriptionCommand = async ( MediaSampleRateHertz: undefined, NumberOfChannels: undefined, PartialResultsStability: undefined, + PiiEntityTypes: undefined, RequestId: undefined, SessionId: undefined, ShowSpeakerLabel: undefined, @@ -366,6 +379,15 @@ export const deserializeAws_restJson1StartStreamTranscriptionCommand = async ( if (output.headers["x-amzn-transcribe-partial-results-stability"] !== undefined) { contents.PartialResultsStability = output.headers["x-amzn-transcribe-partial-results-stability"]; } + if (output.headers["x-amzn-transcribe-content-identification-type"] !== undefined) { + contents.ContentIdentificationType = output.headers["x-amzn-transcribe-content-identification-type"]; + } + if (output.headers["x-amzn-transcribe-content-redaction-type"] !== undefined) { + contents.ContentRedactionType = output.headers["x-amzn-transcribe-content-redaction-type"]; + } + if (output.headers["x-amzn-transcribe-pii-entity-types"] !== undefined) { + contents.PiiEntityTypes = output.headers["x-amzn-transcribe-pii-entity-types"]; + } const data: any = context.eventStreamMarshaller.deserialize(output.body, async (event) => { const eventName = Object.keys(event)[0]; const eventHeaders = Object.entries(event[eventName].headers).reduce((accummulator, curr) => { @@ -742,6 +764,10 @@ const serializeAws_restJson1AudioStream = (input: AudioStream, context: __SerdeC const deserializeAws_restJson1Alternative = (output: any, context: __SerdeContext): Alternative => { return { + Entities: + output.Entities !== undefined && output.Entities !== null + ? deserializeAws_restJson1EntityList(output.Entities, context) + : undefined, Items: output.Items !== undefined && output.Items !== null ? 
deserializeAws_restJson1ItemList(output.Items, context) @@ -773,6 +799,28 @@ const deserializeAws_restJson1ConflictException = (output: any, context: __Serde } as any; }; +const deserializeAws_restJson1Entity = (output: any, context: __SerdeContext): Entity => { + return { + Category: __expectString(output.Category), + Confidence: __limitedParseDouble(output.Confidence), + Content: __expectString(output.Content), + EndTime: __limitedParseDouble(output.EndTime), + StartTime: __limitedParseDouble(output.StartTime), + Type: __expectString(output.Type), + } as any; +}; + +const deserializeAws_restJson1EntityList = (output: any, context: __SerdeContext): Entity[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Entity(entry, context); + }); +}; + const deserializeAws_restJson1InternalFailureException = ( output: any, context: __SerdeContext diff --git a/clients/client-transcribe/models/models_0.ts b/clients/client-transcribe/models/models_0.ts index 076f58ce83a1..8bc51e0204e7 100644 --- a/clients/client-transcribe/models/models_0.ts +++ b/clients/client-transcribe/models/models_0.ts @@ -2222,6 +2222,35 @@ export namespace Settings { }); } +export enum SubtitleFormat { + SRT = "srt", + VTT = "vtt", +} + +/** + *Specify the output format for your subtitle file.
+ */ +export interface SubtitlesOutput { + /** + *<p>Specify the output format for your subtitle file; if you select both SRT and VTT formats, two output files are generated.</p>
+ */ + Formats?: (SubtitleFormat | string)[]; + + /** + *Choose the output location for your subtitle file. This location must be an S3 bucket.
+ */ + SubtitleFileUris?: string[]; +} + +export namespace SubtitlesOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SubtitlesOutput): any => ({ + ...obj, + }); +} + /** *Describes an asynchronous transcription job that was created with the StartTranscriptionJob
* operation.
A key:value pair assigned to a given transcription job.
*/ Tags?: Tag[]; + + /** + *Generate subtitles for your batch transcription job.
+ */ + Subtitles?: SubtitlesOutput; } export namespace TranscriptionJob { @@ -3514,6 +3548,25 @@ export namespace StartMedicalTranscriptionJobResponse { }); } +/** + *Generate subtitles for your batch transcription job.
+ */ +export interface Subtitles { + /** + *Specify the output format for your subtitle file.
+ */ + Formats?: (SubtitleFormat | string)[]; +} + +export namespace Subtitles { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Subtitles): any => ({ + ...obj, + }); +} + export interface StartTranscriptionJobRequest { /** *The name of the job. You can't use the strings ".
" or "..
" by themselves as the
@@ -3652,6 +3705,11 @@ export interface StartTranscriptionJobRequest {
*/
LanguageOptions?: (LanguageCode | string)[];
+ /**
+ *<p>Add subtitles to your batch transcription job.</p>
+ */ + Subtitles?: Subtitles; + /** *Add tags to an Amazon Transcribe transcription job.
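A hedged usage sketch of the new Subtitles request shape and the SubtitlesOutput response shape added above; the job name, bucket, and media URI are hypothetical placeholders, not values from this patch.

import { TranscribeClient, StartTranscriptionJobCommand } from "@aws-sdk/client-transcribe";

async function startCaptionedJob() {
  const client = new TranscribeClient({ region: "us-east-1" }); // assumed region
  const { TranscriptionJob } = await client.send(
    new StartTranscriptionJobCommand({
      TranscriptionJobName: "demo-captions",                    // hypothetical job name
      LanguageCode: "en-US",
      Media: { MediaFileUri: "s3://example-bucket/input.mp4" }, // hypothetical input
      Subtitles: { Formats: ["srt", "vtt"] },                   // new request shape
    })
  );
  // The response-side SubtitlesOutput echoes the requested formats; once the
  // job completes, GetTranscriptionJob returns SubtitleFileUris alongside them.
  console.log(TranscriptionJob?.Subtitles?.Formats);
}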
*/ diff --git a/clients/client-transcribe/protocols/Aws_json1_1.ts b/clients/client-transcribe/protocols/Aws_json1_1.ts index 4be24c72291d..cdd8d0490aeb 100644 --- a/clients/client-transcribe/protocols/Aws_json1_1.ts +++ b/clients/client-transcribe/protocols/Aws_json1_1.ts @@ -220,6 +220,9 @@ import { StartMedicalTranscriptionJobResponse, StartTranscriptionJobRequest, StartTranscriptionJobResponse, + SubtitleFormat, + Subtitles, + SubtitlesOutput, Tag, TagResourceRequest, TagResourceResponse, @@ -4495,6 +4498,8 @@ const serializeAws_json1_1StartTranscriptionJobRequest = ( ...(input.OutputKey !== undefined && input.OutputKey !== null && { OutputKey: input.OutputKey }), ...(input.Settings !== undefined && input.Settings !== null && { Settings: serializeAws_json1_1Settings(input.Settings, context) }), + ...(input.Subtitles !== undefined && + input.Subtitles !== null && { Subtitles: serializeAws_json1_1Subtitles(input.Subtitles, context) }), ...(input.Tags !== undefined && input.Tags !== null && { Tags: serializeAws_json1_1TagList(input.Tags, context) }), ...(input.TranscriptionJobName !== undefined && input.TranscriptionJobName !== null && { TranscriptionJobName: input.TranscriptionJobName }), @@ -4512,6 +4517,24 @@ const serializeAws_json1_1StringTargetList = (input: string[], context: __SerdeC }); }; +const serializeAws_json1_1SubtitleFormats = (input: (SubtitleFormat | string)[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_json1_1Subtitles = (input: Subtitles, context: __SerdeContext): any => { + return { + ...(input.Formats !== undefined && + input.Formats !== null && { Formats: serializeAws_json1_1SubtitleFormats(input.Formats, context) }), + }; +}; + const serializeAws_json1_1Tag = (input: Tag, context: __SerdeContext): any => { return { ...(input.Key !== undefined && input.Key !== null && { Key: input.Key }), @@ -5496,6 +5519,41 @@ const deserializeAws_json1_1StringTargetList = (output: any, context: __SerdeCon }); }; +const deserializeAws_json1_1SubtitleFileUris = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_json1_1SubtitleFormats = (output: any, context: __SerdeContext): (SubtitleFormat | string)[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_json1_1SubtitlesOutput = (output: any, context: __SerdeContext): SubtitlesOutput => { + return { + Formats: + output.Formats !== undefined && output.Formats !== null + ? deserializeAws_json1_1SubtitleFormats(output.Formats, context) + : undefined, + SubtitleFileUris: + output.SubtitleFileUris !== undefined && output.SubtitleFileUris !== null + ? deserializeAws_json1_1SubtitleFileUris(output.SubtitleFileUris, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1Tag = (output: any, context: __SerdeContext): Tag => { return { Key: __expectString(output.Key), @@ -5589,6 +5647,10 @@ const deserializeAws_json1_1TranscriptionJob = (output: any, context: __SerdeCon output.StartTime !== undefined && output.StartTime !== null ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.StartTime))) : undefined, + Subtitles: + output.Subtitles !== undefined && output.Subtitles !== null + ? deserializeAws_json1_1SubtitlesOutput(output.Subtitles, context) + : undefined, Tags: output.Tags !== undefined && output.Tags !== null ? deserializeAws_json1_1TagList(output.Tags, context) diff --git a/clients/client-wafv2/WAFV2.ts b/clients/client-wafv2/WAFV2.ts index af79a7eaaa0c..ba57ac6f94be 100644 --- a/clients/client-wafv2/WAFV2.ts +++ b/clients/client-wafv2/WAFV2.ts @@ -809,7 +809,7 @@ export class WAFV2 extends WAFV2Client { /** *Retrieves the specified managed rule set.
*This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
+ *This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
*Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Retrieves the keys that are currently blocked by a rate-based rule. The maximum number - * of managed keys that can be blocked for a single rate-based rule is 10,000. If more than - * 10,000 addresses exceed the rate limit, those with the highest rates are blocked.
+ *Retrieves the keys that are currently blocked by a rate-based rule instance. The maximum number of managed keys that can be blocked for a single rate-based rule instance is 10,000. If more than 10,000 addresses exceed the rate limit, those with the highest rates are blocked.
+ *For a rate-based rule that you've defined inside a rule group, provide the name of the rule group reference statement in your request, in addition to the rate-based rule name and the web ACL name.
+ *WAF monitors web requests and manages keys independently for each unique combination of web ACL, optional rule group, and rate-based rule. For example, if you define a rate-based rule inside a rule group, and then use the rule group in a web ACL, WAF monitors web requests and manages keys for that web ACL, rule group reference statement, and rate-based rule instance. If you use the same rule group in a second web ACL, WAF monitors web requests and manages keys for this second usage completely independent of your first.
*/ public getRateBasedStatementManagedKeys( args: GetRateBasedStatementManagedKeysCommandInput, @@ -1071,7 +1071,7 @@ export class WAFV2 extends WAFV2Client { /** *Retrieves an array of managed rule groups that are available for you to use. This list - * includes all Amazon Web Services Managed Rules rule groups and all of the Marketplace managed rule groups that you're + * includes all Amazon Web Services Managed Rules rule groups and all of the Amazon Web Services Marketplace managed rule groups that you're * subscribed to.
*/ public listAvailableManagedRuleGroups( @@ -1199,7 +1199,7 @@ export class WAFV2 extends WAFV2Client { /** *Retrieves the managed rule sets that you own.
*This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
+ *This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
*Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Defines the versions of your managed rule set that you are offering to the customers. * Customers see your offerings as managed rule groups with versioning.
*This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
+ *This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
*Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Customers retrieve their managed rule group list by calling ListAvailableManagedRuleGroups. The name that you provide here for your @@ -1653,7 +1653,7 @@ export class WAFV2 extends WAFV2Client { * expiration of a managed rule group version. After you initiate expiration for a version, * WAF excludes it from the reponse to ListAvailableManagedRuleGroupVersions for the managed rule group.
*This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
+ *This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
*Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Retrieves the specified managed rule set.
*This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
+ *This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
*Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Retrieves the keys that are currently blocked by a rate-based rule. The maximum number - * of managed keys that can be blocked for a single rate-based rule is 10,000. If more than - * 10,000 addresses exceed the rate limit, those with the highest rates are blocked.
+ *Retrieves the keys that are currently blocked by a rate-based rule instance. The maximum number of managed keys that can be blocked for a single rate-based rule instance is 10,000. If more than 10,000 addresses exceed the rate limit, those with the highest rates are blocked.
+ *For a rate-based rule that you've defined inside a rule group, provide the name of the rule group reference statement in your request, in addition to the rate-based rule name and the web ACL name.
+ *WAF monitors web requests and manages keys independently for each unique combination of web ACL, optional rule group, and rate-based rule. For example, if you define a rate-based rule inside a rule group, and then use the rule group in a web ACL, WAF monitors web requests and manages keys for that web ACL, rule group reference statement, and rate-based rule instance. If you use the same rule group in a second web ACL, WAF monitors web requests and manages keys for this second usage completely independent of your first.
* @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-wafv2/commands/ListAvailableManagedRuleGroupsCommand.ts b/clients/client-wafv2/commands/ListAvailableManagedRuleGroupsCommand.ts index 1256620d33eb..a8f921ccaaec 100644 --- a/clients/client-wafv2/commands/ListAvailableManagedRuleGroupsCommand.ts +++ b/clients/client-wafv2/commands/ListAvailableManagedRuleGroupsCommand.ts @@ -24,7 +24,7 @@ export interface ListAvailableManagedRuleGroupsCommandOutput /** *Retrieves an array of managed rule groups that are available for you to use. This list - * includes all Amazon Web Services Managed Rules rule groups and all of the Marketplace managed rule groups that you're + * includes all Amazon Web Services Managed Rules rule groups and all of the Amazon Web Services Marketplace managed rule groups that you're * subscribed to.
* @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-wafv2/commands/ListManagedRuleSetsCommand.ts b/clients/client-wafv2/commands/ListManagedRuleSetsCommand.ts index 9eb728afbf27..193f1f42b78d 100644 --- a/clients/client-wafv2/commands/ListManagedRuleSetsCommand.ts +++ b/clients/client-wafv2/commands/ListManagedRuleSetsCommand.ts @@ -23,7 +23,7 @@ export interface ListManagedRuleSetsCommandOutput extends ListManagedRuleSetsRes /** *Retrieves the managed rule sets that you own.
*This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
+ *This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
*Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Defines the versions of your managed rule set that you are offering to the customers. * Customers see your offerings as managed rule groups with versioning.
*This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
+ *This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
*Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
Customers retrieve their managed rule group list by calling ListAvailableManagedRuleGroups. The name that you provide here for your diff --git a/clients/client-wafv2/commands/UpdateManagedRuleSetVersionExpiryDateCommand.ts b/clients/client-wafv2/commands/UpdateManagedRuleSetVersionExpiryDateCommand.ts index c7ae58a5b8c0..2e17300c2fe6 100644 --- a/clients/client-wafv2/commands/UpdateManagedRuleSetVersionExpiryDateCommand.ts +++ b/clients/client-wafv2/commands/UpdateManagedRuleSetVersionExpiryDateCommand.ts @@ -31,7 +31,7 @@ export interface UpdateManagedRuleSetVersionExpiryDateCommandOutput * expiration of a managed rule group version. After you initiate expiration for a version, * WAF excludes it from the reponse to ListAvailableManagedRuleGroupVersions for the managed rule group.
*This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
+ *This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
*Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
+ *JSON specification: "All": {}
+ *
All query arguments of a web request.
- *This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
+ *This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
+ *JSON specification: "AllQueryArguments": {}
+ *
The body of a web request. This immediately follows the request headers.
*This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
+ *JSON specification: "Body": {}
+ *
Example JSON: "JsonBody": { "MatchPattern": { "All": {} }, "MatchScope": "ALL" }
+ *
The HTTP method of a web request. The method indicates the type of operation that the - * request is asking the origin to perform.
- *This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
+ *The HTTP method of a web request. The method indicates the type of operation that the request is asking the origin to perform.
+ *This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
+ *JSON specification: "Method": {}
+ *
The query string of a web request. This is the part of a URL that appears after a
- * ?
character, if any.
This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
+ *The query string of a web request. This is the part of a URL that appears after a ?
character, if any.
This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
+ *JSON specification: "QueryString": {}
+ *
User-Agent
or Referer
. This setting isn't case
* sensitive.
* This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
+ *Example JSON: "SingleHeader": { "Name": "haystack" }
+ *
One query argument in a web request, identified by name, for example * UserName or SalesRegion. The name can be up to * 30 characters long and isn't case sensitive.
+ *Example JSON: "SingleQueryArgument": { "Name": "myArgument" }
+ *
The path component of the URI of a web request. This is the part of a web request that identifies a resource. For example, /images/daily-ad.jpg
.
This is used only to indicate the web request component for WAF to inspect, in the FieldToMatch specification.
+ *JSON specification: "UriPath": {}
+ *
The part of a web request that you want WAF to inspect. Include the single
- * FieldToMatch
type that you want to inspect, with additional specifications
- * as needed, according to the type. You specify a single request component in
- * FieldToMatch
for each rule statement that requires it. To inspect more than
- * one component of a web request, create a separate rule statement for each component.
The part of a web request that you want WAF to inspect. Include the single FieldToMatch
type that you want to inspect, with additional specifications as needed, according to the type. You specify a single request component in FieldToMatch
for each rule statement that requires it. To inspect more than one component of a web request, create a separate rule statement for each component.
JSON specification for a QueryString
field to match:
+ * "FieldToMatch": { "QueryString": {} }
+ *
Example JSON for a Method
field to match specification:
+ * "FieldToMatch": { "Method": { "Name": "DELETE" } }
+ *
A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
- *You cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. It can only be referenced as a top-level statement within a rule.
You cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. You
+ * can only use a rule group reference statement at the top level inside a web ACL.
Specifies that WAF should do nothing. This is generally used to try out a rule
* without performing any actions. You set the OverrideAction
on the Rule.
This is used in the context of other settings, for example to specify values for RuleAction and web ACL DefaultAction.
+ *JSON specification: "None": {}
+ *
The parts of the request that you want to keep out of the logs. For example, if you
- * redact the HEADER
field, the HEADER
field in the firehose will be
- * xxx
.
The parts of the request that you want to keep out of the logs. For
+ * example, if you redact the SingleHeader
field, the HEADER
field in the firehose will be xxx
.
You must use one of the following values: URI
,
- * QUERY_STRING
, HEADER
, or METHOD
.
You can specify only the following fields for redaction: UriPath
, QueryString
, SingleHeader
, Method
, and JsonBody
.
Information for a single version of a managed rule set.
*This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
+ *This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
*Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
A set of rules that is managed by Amazon Web Services and Marketplace sellers to provide versioned managed + *
A set of rules that is managed by Amazon Web Services and Amazon Web Services Marketplace sellers to provide versioned managed * rule groups for customers of WAF.
*This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
+ *This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
*Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
The name of the rate-based rule to get the keys for.
+ *The name of the rule group reference statement in your web ACL. This is required only when you have the rate-based rule nested + * inside a rule group.
+ */ + RuleGroupRuleName?: string; + + /** + *The name of the rate-based rule to get the keys for. If you have the rule defined inside a rule group that you're using in your web ACL, also provide the name of the rule group reference statement in the request parameter RuleGroupRuleName
.
The set of IP addresses that are currently blocked for a rate-based statement.
+ *The set of IP addresses that are currently blocked for a RateBasedStatement.
*/ export interface RateBasedStatementManagedKeysIPSet { /** @@ -4230,7 +4260,7 @@ export namespace ListAvailableManagedRuleGroupsRequest { } /** - *High-level information about a managed rule group, returned by ListAvailableManagedRuleGroups. This provides information like the name and vendor name, that you provide when you add a ManagedRuleGroupStatement to a web ACL. Managed rule groups include Amazon Web Services Managed Rules rule groups, which are free of charge to WAF customers, and Marketplace managed rule groups, which you can subscribe to through Marketplace.
+ *High-level information about a managed rule group, returned by ListAvailableManagedRuleGroups. This provides information like the name and vendor name, that you provide when you add a ManagedRuleGroupStatement to a web ACL. Managed rule groups include Amazon Web Services Managed Rules rule groups, which are free of charge to WAF customers, and Amazon Web Services Marketplace managed rule groups, which you can subscribe to through Amazon Web Services Marketplace.
*/ export interface ManagedRuleGroupSummary { /** @@ -4244,7 +4274,7 @@ export interface ManagedRuleGroupSummary { Name?: string; /** - *The description of the managed rule group, provided by Amazon Web Services Managed Rules or the Marketplace seller who manages it.
+ *The description of the managed rule group, provided by Amazon Web Services Managed Rules or the Amazon Web Services Marketplace seller who manages it.
*/ Description?: string; } @@ -4545,7 +4575,7 @@ export namespace ListManagedRuleSetsRequest { /** *High-level information for a managed rule set.
*This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
+ *This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
*Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
A version of the named managed rule group, that the rule group's vendor publishes for * use by customers.
*This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Marketplace sellers.
+ *This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web Services Marketplace sellers.
*Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group offerings for your customers. The APIs are ListManagedRuleSets
, GetManagedRuleSet
, PutManagedRuleSetVersions
, and UpdateManagedRuleSetVersionExpiryDate
.
The processing guidance for a Rule, used by WAF to determine - * whether a web request matches the rule.
+ *The processing guidance for a Rule, used by WAF to determine whether a web request matches the rule.
*/ export interface Statement { /** @@ -5559,7 +5588,8 @@ export interface Statement { /** *A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
- *You cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. It can only be referenced as a top-level statement within a rule.
You cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. You
+ * can only use a rule group reference statement at the top level inside a web ACL.
A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.
+ *A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.
+ *WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by WAF. If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by WAF.
*When the rule action triggers, WAF blocks additional requests from the IP address until the request rate falls below the limit.
*You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:
*In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.
- *You cannot nest a RateBasedStatement
, for example for use inside a NotStatement
or OrStatement
. It can only be referenced as a top-level statement within a rule.
You cannot nest a RateBasedStatement
inside another statement, for example inside a NotStatement
or OrStatement
. You can define a RateBasedStatement
inside a web ACL and inside a rule group.
A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.
+ *A rate-based rule tracks the rate of requests for each originating IP address, and triggers the rule action when the rate exceeds a limit that you specify on the number of requests in any 5-minute time span. You can use this to put a temporary block on requests from an IP address that is sending excessive requests.
+ *WAF tracks and manages web requests separately for each instance of a rate-based rule that you use. For example, if you provide the same rate-based rule settings in two web ACLs, each of the two rule statements represents a separate instance of the rate-based rule and gets its own tracking and management by WAF. If you define a rate-based rule inside a rule group, and then use that rule group in multiple places, each use creates a separate instance of the rate-based rule that gets its own tracking and management by WAF.
*When the rule action triggers, WAF blocks additional requests from the IP address until the request rate falls below the limit.
*You can optionally nest another statement inside the rate-based statement, to narrow the scope of the rule so that it only counts requests that match the nested statement. For example, based on recent requests that you have seen from an attacker, you might create a rate-based rule with a nested AND rule statement that contains the following nested statements:
*In this rate-based rule, you also define a rate limit. For this example, the rate limit is 1,000. Requests that meet both of the conditions in the statements are counted. If the count exceeds 1,000 requests per five minutes, the rule action triggers. Requests that do not meet both conditions are not counted towards the rate limit and are not affected by this rule.
- *You cannot nest a RateBasedStatement
, for example for use inside a NotStatement
or OrStatement
. It can only be referenced as a top-level statement within a rule.
You cannot nest a RateBasedStatement
inside another statement, for example inside a NotStatement
or OrStatement
. You can define a RateBasedStatement
inside a web ACL and inside a rule group.
A rule statement used to run the rules that are defined in a RuleGroup. To use this, create a rule group with your rules, then provide the ARN of the rule group in this statement.
- *You cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. It can only be referenced as a top-level statement within a rule.
You cannot nest a RuleGroupReferenceStatement
, for example for use inside a NotStatement
or OrStatement
. You
+ * can only use a rule group reference statement at the top level inside a web ACL.