1. Packages
  2. AWS
  3. API Docs
  4. dms
  5. Endpoint
AWS v6.77.0 published on Wednesday, Apr 9, 2025 by Pulumi

aws.dms.Endpoint

Explore with Pulumi AI

Provides a DMS (Data Migration Service) endpoint resource. DMS endpoints can be created, updated, deleted, and imported.

Note: All arguments, including the password, will be stored in the raw state as plain text.

Note: The s3_settings argument is deprecated, may not be maintained, and will be removed in a future version. Use the aws.dms.S3Endpoint resource instead.

Example Usage

import * as pulumi from "@pulumi/pulumi";
import * as aws from "@pulumi/aws";

// Create a new endpoint
const test = new aws.dms.Endpoint("test", {
    certificateArn: "arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012",
    databaseName: "test",
    endpointId: "test-dms-endpoint-tf",
    endpointType: "source",
    engineName: "aurora",
    extraConnectionAttributes: "",
    kmsKeyArn: "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012",
    password: "test",
    port: 3306,
    serverName: "test",
    sslMode: "none",
    tags: {
        Name: "test",
    },
    username: "test",
});
Copy
import pulumi
import pulumi_aws as aws

# Create a new endpoint
test = aws.dms.Endpoint("test",
    certificate_arn="arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012",
    database_name="test",
    endpoint_id="test-dms-endpoint-tf",
    endpoint_type="source",
    engine_name="aurora",
    extra_connection_attributes="",
    kms_key_arn="arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012",
    password="test",
    port=3306,
    server_name="test",
    ssl_mode="none",
    tags={
        "Name": "test",
    },
    username="test")
Copy
package main

import (
	"github.com/pulumi/pulumi-aws/sdk/v6/go/aws/dms"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Create a new endpoint
		_, err := dms.NewEndpoint(ctx, "test", &dms.EndpointArgs{
			CertificateArn:            pulumi.String("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"),
			DatabaseName:              pulumi.String("test"),
			EndpointId:                pulumi.String("test-dms-endpoint-tf"),
			EndpointType:              pulumi.String("source"),
			EngineName:                pulumi.String("aurora"),
			ExtraConnectionAttributes: pulumi.String(""),
			KmsKeyArn:                 pulumi.String("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"),
			Password:                  pulumi.String("test"),
			Port:                      pulumi.Int(3306),
			ServerName:                pulumi.String("test"),
			SslMode:                   pulumi.String("none"),
			Tags: pulumi.StringMap{
				"Name": pulumi.String("test"),
			},
			Username: pulumi.String("test"),
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Copy
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Aws = Pulumi.Aws;

return await Deployment.RunAsync(() => 
{
    // Create a new endpoint
    var test = new Aws.Dms.Endpoint("test", new()
    {
        CertificateArn = "arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012",
        DatabaseName = "test",
        EndpointId = "test-dms-endpoint-tf",
        EndpointType = "source",
        EngineName = "aurora",
        ExtraConnectionAttributes = "",
        KmsKeyArn = "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012",
        Password = "test",
        Port = 3306,
        ServerName = "test",
        SslMode = "none",
        Tags = 
        {
            { "Name", "test" },
        },
        Username = "test",
    });

});
Copy
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.aws.dms.Endpoint;
import com.pulumi.aws.dms.EndpointArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Create a new endpoint
        var test = new Endpoint("test", EndpointArgs.builder()
            .certificateArn("arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012")
            .databaseName("test")
            .endpointId("test-dms-endpoint-tf")
            .endpointType("source")
            .engineName("aurora")
            .extraConnectionAttributes("")
            .kmsKeyArn("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012")
            .password("test")
            .port(3306)
            .serverName("test")
            .sslMode("none")
            .tags(Map.of("Name", "test"))
            .username("test")
            .build());

    }
}
Copy
resources:
  # Create a new endpoint
  test:
    type: aws:dms:Endpoint
    properties:
      certificateArn: arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012
      databaseName: test
      endpointId: test-dms-endpoint-tf
      endpointType: source
      engineName: aurora
      extraConnectionAttributes: ""
      kmsKeyArn: arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
      password: test
      port: 3306
      serverName: test
      sslMode: none
      tags:
        Name: test
      username: test
Copy

Create Endpoint Resource

Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

Constructor syntax

new Endpoint(name: string, args: EndpointArgs, opts?: CustomResourceOptions);
@overload
def Endpoint(resource_name: str,
             args: EndpointArgs,
             opts: Optional[ResourceOptions] = None)

@overload
def Endpoint(resource_name: str,
             opts: Optional[ResourceOptions] = None,
             endpoint_id: Optional[str] = None,
             engine_name: Optional[str] = None,
             endpoint_type: Optional[str] = None,
             pause_replication_tasks: Optional[bool] = None,
             postgres_settings: Optional[EndpointPostgresSettingsArgs] = None,
             database_name: Optional[str] = None,
             extra_connection_attributes: Optional[str] = None,
             kafka_settings: Optional[EndpointKafkaSettingsArgs] = None,
             kinesis_settings: Optional[EndpointKinesisSettingsArgs] = None,
             kms_key_arn: Optional[str] = None,
             mongodb_settings: Optional[EndpointMongodbSettingsArgs] = None,
             password: Optional[str] = None,
             certificate_arn: Optional[str] = None,
             port: Optional[int] = None,
             elasticsearch_settings: Optional[EndpointElasticsearchSettingsArgs] = None,
             redis_settings: Optional[EndpointRedisSettingsArgs] = None,
             redshift_settings: Optional[EndpointRedshiftSettingsArgs] = None,
             s3_settings: Optional[EndpointS3SettingsArgs] = None,
             secrets_manager_access_role_arn: Optional[str] = None,
             secrets_manager_arn: Optional[str] = None,
             server_name: Optional[str] = None,
             service_access_role: Optional[str] = None,
             ssl_mode: Optional[str] = None,
             tags: Optional[Mapping[str, str]] = None,
             username: Optional[str] = None)
func NewEndpoint(ctx *Context, name string, args EndpointArgs, opts ...ResourceOption) (*Endpoint, error)
public Endpoint(string name, EndpointArgs args, CustomResourceOptions? opts = null)
public Endpoint(String name, EndpointArgs args)
public Endpoint(String name, EndpointArgs args, CustomResourceOptions options)
type: aws:dms:Endpoint
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

Parameters

name This property is required. string
The unique name of the resource.
args This property is required. EndpointArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name This property is required. str
The unique name of the resource.
args This property is required. EndpointArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name This property is required. string
The unique name of the resource.
args This property is required. EndpointArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name This property is required. string
The unique name of the resource.
args This property is required. EndpointArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name This property is required. String
The unique name of the resource.
args This property is required. EndpointArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.

Constructor example

The following reference example uses placeholder values for all input properties.

var endpointResource = new Aws.Dms.Endpoint("endpointResource", new()
{
    EndpointId = "string",
    EngineName = "string",
    EndpointType = "string",
    PauseReplicationTasks = false,
    PostgresSettings = new Aws.Dms.Inputs.EndpointPostgresSettingsArgs
    {
        AfterConnectScript = "string",
        BabelfishDatabaseName = "string",
        CaptureDdls = false,
        DatabaseMode = "string",
        DdlArtifactsSchema = "string",
        ExecuteTimeout = 0,
        FailTasksOnLobTruncation = false,
        HeartbeatEnable = false,
        HeartbeatFrequency = 0,
        HeartbeatSchema = "string",
        MapBooleanAsBoolean = false,
        MapJsonbAsClob = false,
        MapLongVarcharAs = "string",
        MaxFileSize = 0,
        PluginName = "string",
        SlotName = "string",
    },
    DatabaseName = "string",
    ExtraConnectionAttributes = "string",
    KafkaSettings = new Aws.Dms.Inputs.EndpointKafkaSettingsArgs
    {
        Broker = "string",
        IncludeControlDetails = false,
        IncludeNullAndEmpty = false,
        IncludePartitionValue = false,
        IncludeTableAlterOperations = false,
        IncludeTransactionDetails = false,
        MessageFormat = "string",
        MessageMaxBytes = 0,
        NoHexPrefix = false,
        PartitionIncludeSchemaTable = false,
        SaslMechanism = "string",
        SaslPassword = "string",
        SaslUsername = "string",
        SecurityProtocol = "string",
        SslCaCertificateArn = "string",
        SslClientCertificateArn = "string",
        SslClientKeyArn = "string",
        SslClientKeyPassword = "string",
        Topic = "string",
    },
    KinesisSettings = new Aws.Dms.Inputs.EndpointKinesisSettingsArgs
    {
        IncludeControlDetails = false,
        IncludeNullAndEmpty = false,
        IncludePartitionValue = false,
        IncludeTableAlterOperations = false,
        IncludeTransactionDetails = false,
        MessageFormat = "string",
        PartitionIncludeSchemaTable = false,
        ServiceAccessRoleArn = "string",
        StreamArn = "string",
    },
    KmsKeyArn = "string",
    MongodbSettings = new Aws.Dms.Inputs.EndpointMongodbSettingsArgs
    {
        AuthMechanism = "string",
        AuthSource = "string",
        AuthType = "string",
        DocsToInvestigate = "string",
        ExtractDocId = "string",
        NestingLevel = "string",
    },
    Password = "string",
    CertificateArn = "string",
    Port = 0,
    ElasticsearchSettings = new Aws.Dms.Inputs.EndpointElasticsearchSettingsArgs
    {
        EndpointUri = "string",
        ServiceAccessRoleArn = "string",
        ErrorRetryDuration = 0,
        FullLoadErrorPercentage = 0,
        UseNewMappingType = false,
    },
    RedisSettings = new Aws.Dms.Inputs.EndpointRedisSettingsArgs
    {
        AuthType = "string",
        Port = 0,
        ServerName = "string",
        AuthPassword = "string",
        AuthUserName = "string",
        SslCaCertificateArn = "string",
        SslSecurityProtocol = "string",
    },
    RedshiftSettings = new Aws.Dms.Inputs.EndpointRedshiftSettingsArgs
    {
        BucketFolder = "string",
        BucketName = "string",
        EncryptionMode = "string",
        ServerSideEncryptionKmsKeyId = "string",
        ServiceAccessRoleArn = "string",
    },
    S3Settings = new Aws.Dms.Inputs.EndpointS3SettingsArgs
    {
        AddColumnName = false,
        BucketFolder = "string",
        BucketName = "string",
        CannedAclForObjects = "string",
        CdcInsertsAndUpdates = false,
        CdcInsertsOnly = false,
        CdcMaxBatchInterval = 0,
        CdcMinFileSize = 0,
        CdcPath = "string",
        CompressionType = "string",
        CsvDelimiter = "string",
        CsvNoSupValue = "string",
        CsvNullValue = "string",
        CsvRowDelimiter = "string",
        DataFormat = "string",
        DataPageSize = 0,
        DatePartitionDelimiter = "string",
        DatePartitionEnabled = false,
        DatePartitionSequence = "string",
        DictPageSizeLimit = 0,
        EnableStatistics = false,
        EncodingType = "string",
        EncryptionMode = "string",
        ExternalTableDefinition = "string",
        GlueCatalogGeneration = false,
        IgnoreHeaderRows = 0,
        IncludeOpForFullLoad = false,
        MaxFileSize = 0,
        ParquetTimestampInMillisecond = false,
        ParquetVersion = "string",
        PreserveTransactions = false,
        Rfc4180 = false,
        RowGroupLength = 0,
        ServerSideEncryptionKmsKeyId = "string",
        ServiceAccessRoleArn = "string",
        TimestampColumnName = "string",
        UseCsvNoSupValue = false,
        UseTaskStartTimeForFullLoadTimestamp = false,
    },
    SecretsManagerAccessRoleArn = "string",
    SecretsManagerArn = "string",
    ServerName = "string",
    ServiceAccessRole = "string",
    SslMode = "string",
    Tags = 
    {
        { "string", "string" },
    },
    Username = "string",
});
Copy
example, err := dms.NewEndpoint(ctx, "endpointResource", &dms.EndpointArgs{
	EndpointId:            pulumi.String("string"),
	EngineName:            pulumi.String("string"),
	EndpointType:          pulumi.String("string"),
	PauseReplicationTasks: pulumi.Bool(false),
	PostgresSettings: &dms.EndpointPostgresSettingsArgs{
		AfterConnectScript:       pulumi.String("string"),
		BabelfishDatabaseName:    pulumi.String("string"),
		CaptureDdls:              pulumi.Bool(false),
		DatabaseMode:             pulumi.String("string"),
		DdlArtifactsSchema:       pulumi.String("string"),
		ExecuteTimeout:           pulumi.Int(0),
		FailTasksOnLobTruncation: pulumi.Bool(false),
		HeartbeatEnable:          pulumi.Bool(false),
		HeartbeatFrequency:       pulumi.Int(0),
		HeartbeatSchema:          pulumi.String("string"),
		MapBooleanAsBoolean:      pulumi.Bool(false),
		MapJsonbAsClob:           pulumi.Bool(false),
		MapLongVarcharAs:         pulumi.String("string"),
		MaxFileSize:              pulumi.Int(0),
		PluginName:               pulumi.String("string"),
		SlotName:                 pulumi.String("string"),
	},
	DatabaseName:              pulumi.String("string"),
	ExtraConnectionAttributes: pulumi.String("string"),
	KafkaSettings: &dms.EndpointKafkaSettingsArgs{
		Broker:                      pulumi.String("string"),
		IncludeControlDetails:       pulumi.Bool(false),
		IncludeNullAndEmpty:         pulumi.Bool(false),
		IncludePartitionValue:       pulumi.Bool(false),
		IncludeTableAlterOperations: pulumi.Bool(false),
		IncludeTransactionDetails:   pulumi.Bool(false),
		MessageFormat:               pulumi.String("string"),
		MessageMaxBytes:             pulumi.Int(0),
		NoHexPrefix:                 pulumi.Bool(false),
		PartitionIncludeSchemaTable: pulumi.Bool(false),
		SaslMechanism:               pulumi.String("string"),
		SaslPassword:                pulumi.String("string"),
		SaslUsername:                pulumi.String("string"),
		SecurityProtocol:            pulumi.String("string"),
		SslCaCertificateArn:         pulumi.String("string"),
		SslClientCertificateArn:     pulumi.String("string"),
		SslClientKeyArn:             pulumi.String("string"),
		SslClientKeyPassword:        pulumi.String("string"),
		Topic:                       pulumi.String("string"),
	},
	KinesisSettings: &dms.EndpointKinesisSettingsArgs{
		IncludeControlDetails:       pulumi.Bool(false),
		IncludeNullAndEmpty:         pulumi.Bool(false),
		IncludePartitionValue:       pulumi.Bool(false),
		IncludeTableAlterOperations: pulumi.Bool(false),
		IncludeTransactionDetails:   pulumi.Bool(false),
		MessageFormat:               pulumi.String("string"),
		PartitionIncludeSchemaTable: pulumi.Bool(false),
		ServiceAccessRoleArn:        pulumi.String("string"),
		StreamArn:                   pulumi.String("string"),
	},
	KmsKeyArn: pulumi.String("string"),
	MongodbSettings: &dms.EndpointMongodbSettingsArgs{
		AuthMechanism:     pulumi.String("string"),
		AuthSource:        pulumi.String("string"),
		AuthType:          pulumi.String("string"),
		DocsToInvestigate: pulumi.String("string"),
		ExtractDocId:      pulumi.String("string"),
		NestingLevel:      pulumi.String("string"),
	},
	Password:       pulumi.String("string"),
	CertificateArn: pulumi.String("string"),
	Port:           pulumi.Int(0),
	ElasticsearchSettings: &dms.EndpointElasticsearchSettingsArgs{
		EndpointUri:             pulumi.String("string"),
		ServiceAccessRoleArn:    pulumi.String("string"),
		ErrorRetryDuration:      pulumi.Int(0),
		FullLoadErrorPercentage: pulumi.Int(0),
		UseNewMappingType:       pulumi.Bool(false),
	},
	RedisSettings: &dms.EndpointRedisSettingsArgs{
		AuthType:            pulumi.String("string"),
		Port:                pulumi.Int(0),
		ServerName:          pulumi.String("string"),
		AuthPassword:        pulumi.String("string"),
		AuthUserName:        pulumi.String("string"),
		SslCaCertificateArn: pulumi.String("string"),
		SslSecurityProtocol: pulumi.String("string"),
	},
	RedshiftSettings: &dms.EndpointRedshiftSettingsArgs{
		BucketFolder:                 pulumi.String("string"),
		BucketName:                   pulumi.String("string"),
		EncryptionMode:               pulumi.String("string"),
		ServerSideEncryptionKmsKeyId: pulumi.String("string"),
		ServiceAccessRoleArn:         pulumi.String("string"),
	},
	S3Settings: &dms.EndpointS3SettingsArgs{
		AddColumnName:                        pulumi.Bool(false),
		BucketFolder:                         pulumi.String("string"),
		BucketName:                           pulumi.String("string"),
		CannedAclForObjects:                  pulumi.String("string"),
		CdcInsertsAndUpdates:                 pulumi.Bool(false),
		CdcInsertsOnly:                       pulumi.Bool(false),
		CdcMaxBatchInterval:                  pulumi.Int(0),
		CdcMinFileSize:                       pulumi.Int(0),
		CdcPath:                              pulumi.String("string"),
		CompressionType:                      pulumi.String("string"),
		CsvDelimiter:                         pulumi.String("string"),
		CsvNoSupValue:                        pulumi.String("string"),
		CsvNullValue:                         pulumi.String("string"),
		CsvRowDelimiter:                      pulumi.String("string"),
		DataFormat:                           pulumi.String("string"),
		DataPageSize:                         pulumi.Int(0),
		DatePartitionDelimiter:               pulumi.String("string"),
		DatePartitionEnabled:                 pulumi.Bool(false),
		DatePartitionSequence:                pulumi.String("string"),
		DictPageSizeLimit:                    pulumi.Int(0),
		EnableStatistics:                     pulumi.Bool(false),
		EncodingType:                         pulumi.String("string"),
		EncryptionMode:                       pulumi.String("string"),
		ExternalTableDefinition:              pulumi.String("string"),
		GlueCatalogGeneration:                pulumi.Bool(false),
		IgnoreHeaderRows:                     pulumi.Int(0),
		IncludeOpForFullLoad:                 pulumi.Bool(false),
		MaxFileSize:                          pulumi.Int(0),
		ParquetTimestampInMillisecond:        pulumi.Bool(false),
		ParquetVersion:                       pulumi.String("string"),
		PreserveTransactions:                 pulumi.Bool(false),
		Rfc4180:                              pulumi.Bool(false),
		RowGroupLength:                       pulumi.Int(0),
		ServerSideEncryptionKmsKeyId:         pulumi.String("string"),
		ServiceAccessRoleArn:                 pulumi.String("string"),
		TimestampColumnName:                  pulumi.String("string"),
		UseCsvNoSupValue:                     pulumi.Bool(false),
		UseTaskStartTimeForFullLoadTimestamp: pulumi.Bool(false),
	},
	SecretsManagerAccessRoleArn: pulumi.String("string"),
	SecretsManagerArn:           pulumi.String("string"),
	ServerName:                  pulumi.String("string"),
	ServiceAccessRole:           pulumi.String("string"),
	SslMode:                     pulumi.String("string"),
	Tags: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	Username: pulumi.String("string"),
})
Copy
var endpointResource = new Endpoint("endpointResource", EndpointArgs.builder()
    .endpointId("string")
    .engineName("string")
    .endpointType("string")
    .pauseReplicationTasks(false)
    .postgresSettings(EndpointPostgresSettingsArgs.builder()
        .afterConnectScript("string")
        .babelfishDatabaseName("string")
        .captureDdls(false)
        .databaseMode("string")
        .ddlArtifactsSchema("string")
        .executeTimeout(0)
        .failTasksOnLobTruncation(false)
        .heartbeatEnable(false)
        .heartbeatFrequency(0)
        .heartbeatSchema("string")
        .mapBooleanAsBoolean(false)
        .mapJsonbAsClob(false)
        .mapLongVarcharAs("string")
        .maxFileSize(0)
        .pluginName("string")
        .slotName("string")
        .build())
    .databaseName("string")
    .extraConnectionAttributes("string")
    .kafkaSettings(EndpointKafkaSettingsArgs.builder()
        .broker("string")
        .includeControlDetails(false)
        .includeNullAndEmpty(false)
        .includePartitionValue(false)
        .includeTableAlterOperations(false)
        .includeTransactionDetails(false)
        .messageFormat("string")
        .messageMaxBytes(0)
        .noHexPrefix(false)
        .partitionIncludeSchemaTable(false)
        .saslMechanism("string")
        .saslPassword("string")
        .saslUsername("string")
        .securityProtocol("string")
        .sslCaCertificateArn("string")
        .sslClientCertificateArn("string")
        .sslClientKeyArn("string")
        .sslClientKeyPassword("string")
        .topic("string")
        .build())
    .kinesisSettings(EndpointKinesisSettingsArgs.builder()
        .includeControlDetails(false)
        .includeNullAndEmpty(false)
        .includePartitionValue(false)
        .includeTableAlterOperations(false)
        .includeTransactionDetails(false)
        .messageFormat("string")
        .partitionIncludeSchemaTable(false)
        .serviceAccessRoleArn("string")
        .streamArn("string")
        .build())
    .kmsKeyArn("string")
    .mongodbSettings(EndpointMongodbSettingsArgs.builder()
        .authMechanism("string")
        .authSource("string")
        .authType("string")
        .docsToInvestigate("string")
        .extractDocId("string")
        .nestingLevel("string")
        .build())
    .password("string")
    .certificateArn("string")
    .port(0)
    .elasticsearchSettings(EndpointElasticsearchSettingsArgs.builder()
        .endpointUri("string")
        .serviceAccessRoleArn("string")
        .errorRetryDuration(0)
        .fullLoadErrorPercentage(0)
        .useNewMappingType(false)
        .build())
    .redisSettings(EndpointRedisSettingsArgs.builder()
        .authType("string")
        .port(0)
        .serverName("string")
        .authPassword("string")
        .authUserName("string")
        .sslCaCertificateArn("string")
        .sslSecurityProtocol("string")
        .build())
    .redshiftSettings(EndpointRedshiftSettingsArgs.builder()
        .bucketFolder("string")
        .bucketName("string")
        .encryptionMode("string")
        .serverSideEncryptionKmsKeyId("string")
        .serviceAccessRoleArn("string")
        .build())
    .s3Settings(EndpointS3SettingsArgs.builder()
        .addColumnName(false)
        .bucketFolder("string")
        .bucketName("string")
        .cannedAclForObjects("string")
        .cdcInsertsAndUpdates(false)
        .cdcInsertsOnly(false)
        .cdcMaxBatchInterval(0)
        .cdcMinFileSize(0)
        .cdcPath("string")
        .compressionType("string")
        .csvDelimiter("string")
        .csvNoSupValue("string")
        .csvNullValue("string")
        .csvRowDelimiter("string")
        .dataFormat("string")
        .dataPageSize(0)
        .datePartitionDelimiter("string")
        .datePartitionEnabled(false)
        .datePartitionSequence("string")
        .dictPageSizeLimit(0)
        .enableStatistics(false)
        .encodingType("string")
        .encryptionMode("string")
        .externalTableDefinition("string")
        .glueCatalogGeneration(false)
        .ignoreHeaderRows(0)
        .includeOpForFullLoad(false)
        .maxFileSize(0)
        .parquetTimestampInMillisecond(false)
        .parquetVersion("string")
        .preserveTransactions(false)
        .rfc4180(false)
        .rowGroupLength(0)
        .serverSideEncryptionKmsKeyId("string")
        .serviceAccessRoleArn("string")
        .timestampColumnName("string")
        .useCsvNoSupValue(false)
        .useTaskStartTimeForFullLoadTimestamp(false)
        .build())
    .secretsManagerAccessRoleArn("string")
    .secretsManagerArn("string")
    .serverName("string")
    .serviceAccessRole("string")
    .sslMode("string")
    .tags(Map.of("string", "string"))
    .username("string")
    .build());
Copy
endpoint_resource = aws.dms.Endpoint("endpointResource",
    endpoint_id="string",
    engine_name="string",
    endpoint_type="string",
    pause_replication_tasks=False,
    postgres_settings={
        "after_connect_script": "string",
        "babelfish_database_name": "string",
        "capture_ddls": False,
        "database_mode": "string",
        "ddl_artifacts_schema": "string",
        "execute_timeout": 0,
        "fail_tasks_on_lob_truncation": False,
        "heartbeat_enable": False,
        "heartbeat_frequency": 0,
        "heartbeat_schema": "string",
        "map_boolean_as_boolean": False,
        "map_jsonb_as_clob": False,
        "map_long_varchar_as": "string",
        "max_file_size": 0,
        "plugin_name": "string",
        "slot_name": "string",
    },
    database_name="string",
    extra_connection_attributes="string",
    kafka_settings={
        "broker": "string",
        "include_control_details": False,
        "include_null_and_empty": False,
        "include_partition_value": False,
        "include_table_alter_operations": False,
        "include_transaction_details": False,
        "message_format": "string",
        "message_max_bytes": 0,
        "no_hex_prefix": False,
        "partition_include_schema_table": False,
        "sasl_mechanism": "string",
        "sasl_password": "string",
        "sasl_username": "string",
        "security_protocol": "string",
        "ssl_ca_certificate_arn": "string",
        "ssl_client_certificate_arn": "string",
        "ssl_client_key_arn": "string",
        "ssl_client_key_password": "string",
        "topic": "string",
    },
    kinesis_settings={
        "include_control_details": False,
        "include_null_and_empty": False,
        "include_partition_value": False,
        "include_table_alter_operations": False,
        "include_transaction_details": False,
        "message_format": "string",
        "partition_include_schema_table": False,
        "service_access_role_arn": "string",
        "stream_arn": "string",
    },
    kms_key_arn="string",
    mongodb_settings={
        "auth_mechanism": "string",
        "auth_source": "string",
        "auth_type": "string",
        "docs_to_investigate": "string",
        "extract_doc_id": "string",
        "nesting_level": "string",
    },
    password="string",
    certificate_arn="string",
    port=0,
    elasticsearch_settings={
        "endpoint_uri": "string",
        "service_access_role_arn": "string",
        "error_retry_duration": 0,
        "full_load_error_percentage": 0,
        "use_new_mapping_type": False,
    },
    redis_settings={
        "auth_type": "string",
        "port": 0,
        "server_name": "string",
        "auth_password": "string",
        "auth_user_name": "string",
        "ssl_ca_certificate_arn": "string",
        "ssl_security_protocol": "string",
    },
    redshift_settings={
        "bucket_folder": "string",
        "bucket_name": "string",
        "encryption_mode": "string",
        "server_side_encryption_kms_key_id": "string",
        "service_access_role_arn": "string",
    },
    s3_settings={
        "add_column_name": False,
        "bucket_folder": "string",
        "bucket_name": "string",
        "canned_acl_for_objects": "string",
        "cdc_inserts_and_updates": False,
        "cdc_inserts_only": False,
        "cdc_max_batch_interval": 0,
        "cdc_min_file_size": 0,
        "cdc_path": "string",
        "compression_type": "string",
        "csv_delimiter": "string",
        "csv_no_sup_value": "string",
        "csv_null_value": "string",
        "csv_row_delimiter": "string",
        "data_format": "string",
        "data_page_size": 0,
        "date_partition_delimiter": "string",
        "date_partition_enabled": False,
        "date_partition_sequence": "string",
        "dict_page_size_limit": 0,
        "enable_statistics": False,
        "encoding_type": "string",
        "encryption_mode": "string",
        "external_table_definition": "string",
        "glue_catalog_generation": False,
        "ignore_header_rows": 0,
        "include_op_for_full_load": False,
        "max_file_size": 0,
        "parquet_timestamp_in_millisecond": False,
        "parquet_version": "string",
        "preserve_transactions": False,
        "rfc4180": False,
        "row_group_length": 0,
        "server_side_encryption_kms_key_id": "string",
        "service_access_role_arn": "string",
        "timestamp_column_name": "string",
        "use_csv_no_sup_value": False,
        "use_task_start_time_for_full_load_timestamp": False,
    },
    secrets_manager_access_role_arn="string",
    secrets_manager_arn="string",
    server_name="string",
    service_access_role="string",
    ssl_mode="string",
    tags={
        "string": "string",
    },
    username="string")
Copy
const endpointResource = new aws.dms.Endpoint("endpointResource", {
    endpointId: "string",
    engineName: "string",
    endpointType: "string",
    pauseReplicationTasks: false,
    postgresSettings: {
        afterConnectScript: "string",
        babelfishDatabaseName: "string",
        captureDdls: false,
        databaseMode: "string",
        ddlArtifactsSchema: "string",
        executeTimeout: 0,
        failTasksOnLobTruncation: false,
        heartbeatEnable: false,
        heartbeatFrequency: 0,
        heartbeatSchema: "string",
        mapBooleanAsBoolean: false,
        mapJsonbAsClob: false,
        mapLongVarcharAs: "string",
        maxFileSize: 0,
        pluginName: "string",
        slotName: "string",
    },
    databaseName: "string",
    extraConnectionAttributes: "string",
    kafkaSettings: {
        broker: "string",
        includeControlDetails: false,
        includeNullAndEmpty: false,
        includePartitionValue: false,
        includeTableAlterOperations: false,
        includeTransactionDetails: false,
        messageFormat: "string",
        messageMaxBytes: 0,
        noHexPrefix: false,
        partitionIncludeSchemaTable: false,
        saslMechanism: "string",
        saslPassword: "string",
        saslUsername: "string",
        securityProtocol: "string",
        sslCaCertificateArn: "string",
        sslClientCertificateArn: "string",
        sslClientKeyArn: "string",
        sslClientKeyPassword: "string",
        topic: "string",
    },
    kinesisSettings: {
        includeControlDetails: false,
        includeNullAndEmpty: false,
        includePartitionValue: false,
        includeTableAlterOperations: false,
        includeTransactionDetails: false,
        messageFormat: "string",
        partitionIncludeSchemaTable: false,
        serviceAccessRoleArn: "string",
        streamArn: "string",
    },
    kmsKeyArn: "string",
    mongodbSettings: {
        authMechanism: "string",
        authSource: "string",
        authType: "string",
        docsToInvestigate: "string",
        extractDocId: "string",
        nestingLevel: "string",
    },
    password: "string",
    certificateArn: "string",
    port: 0,
    elasticsearchSettings: {
        endpointUri: "string",
        serviceAccessRoleArn: "string",
        errorRetryDuration: 0,
        fullLoadErrorPercentage: 0,
        useNewMappingType: false,
    },
    redisSettings: {
        authType: "string",
        port: 0,
        serverName: "string",
        authPassword: "string",
        authUserName: "string",
        sslCaCertificateArn: "string",
        sslSecurityProtocol: "string",
    },
    redshiftSettings: {
        bucketFolder: "string",
        bucketName: "string",
        encryptionMode: "string",
        serverSideEncryptionKmsKeyId: "string",
        serviceAccessRoleArn: "string",
    },
    s3Settings: {
        addColumnName: false,
        bucketFolder: "string",
        bucketName: "string",
        cannedAclForObjects: "string",
        cdcInsertsAndUpdates: false,
        cdcInsertsOnly: false,
        cdcMaxBatchInterval: 0,
        cdcMinFileSize: 0,
        cdcPath: "string",
        compressionType: "string",
        csvDelimiter: "string",
        csvNoSupValue: "string",
        csvNullValue: "string",
        csvRowDelimiter: "string",
        dataFormat: "string",
        dataPageSize: 0,
        datePartitionDelimiter: "string",
        datePartitionEnabled: false,
        datePartitionSequence: "string",
        dictPageSizeLimit: 0,
        enableStatistics: false,
        encodingType: "string",
        encryptionMode: "string",
        externalTableDefinition: "string",
        glueCatalogGeneration: false,
        ignoreHeaderRows: 0,
        includeOpForFullLoad: false,
        maxFileSize: 0,
        parquetTimestampInMillisecond: false,
        parquetVersion: "string",
        preserveTransactions: false,
        rfc4180: false,
        rowGroupLength: 0,
        serverSideEncryptionKmsKeyId: "string",
        serviceAccessRoleArn: "string",
        timestampColumnName: "string",
        useCsvNoSupValue: false,
        useTaskStartTimeForFullLoadTimestamp: false,
    },
    secretsManagerAccessRoleArn: "string",
    secretsManagerArn: "string",
    serverName: "string",
    serviceAccessRole: "string",
    sslMode: "string",
    tags: {
        string: "string",
    },
    username: "string",
});
Copy
type: aws:dms:Endpoint
properties:
    certificateArn: string
    databaseName: string
    elasticsearchSettings:
        endpointUri: string
        errorRetryDuration: 0
        fullLoadErrorPercentage: 0
        serviceAccessRoleArn: string
        useNewMappingType: false
    endpointId: string
    endpointType: string
    engineName: string
    extraConnectionAttributes: string
    kafkaSettings:
        broker: string
        includeControlDetails: false
        includeNullAndEmpty: false
        includePartitionValue: false
        includeTableAlterOperations: false
        includeTransactionDetails: false
        messageFormat: string
        messageMaxBytes: 0
        noHexPrefix: false
        partitionIncludeSchemaTable: false
        saslMechanism: string
        saslPassword: string
        saslUsername: string
        securityProtocol: string
        sslCaCertificateArn: string
        sslClientCertificateArn: string
        sslClientKeyArn: string
        sslClientKeyPassword: string
        topic: string
    kinesisSettings:
        includeControlDetails: false
        includeNullAndEmpty: false
        includePartitionValue: false
        includeTableAlterOperations: false
        includeTransactionDetails: false
        messageFormat: string
        partitionIncludeSchemaTable: false
        serviceAccessRoleArn: string
        streamArn: string
    kmsKeyArn: string
    mongodbSettings:
        authMechanism: string
        authSource: string
        authType: string
        docsToInvestigate: string
        extractDocId: string
        nestingLevel: string
    password: string
    pauseReplicationTasks: false
    port: 0
    postgresSettings:
        afterConnectScript: string
        babelfishDatabaseName: string
        captureDdls: false
        databaseMode: string
        ddlArtifactsSchema: string
        executeTimeout: 0
        failTasksOnLobTruncation: false
        heartbeatEnable: false
        heartbeatFrequency: 0
        heartbeatSchema: string
        mapBooleanAsBoolean: false
        mapJsonbAsClob: false
        mapLongVarcharAs: string
        maxFileSize: 0
        pluginName: string
        slotName: string
    redisSettings:
        authPassword: string
        authType: string
        authUserName: string
        port: 0
        serverName: string
        sslCaCertificateArn: string
        sslSecurityProtocol: string
    redshiftSettings:
        bucketFolder: string
        bucketName: string
        encryptionMode: string
        serverSideEncryptionKmsKeyId: string
        serviceAccessRoleArn: string
    s3Settings:
        addColumnName: false
        bucketFolder: string
        bucketName: string
        cannedAclForObjects: string
        cdcInsertsAndUpdates: false
        cdcInsertsOnly: false
        cdcMaxBatchInterval: 0
        cdcMinFileSize: 0
        cdcPath: string
        compressionType: string
        csvDelimiter: string
        csvNoSupValue: string
        csvNullValue: string
        csvRowDelimiter: string
        dataFormat: string
        dataPageSize: 0
        datePartitionDelimiter: string
        datePartitionEnabled: false
        datePartitionSequence: string
        dictPageSizeLimit: 0
        enableStatistics: false
        encodingType: string
        encryptionMode: string
        externalTableDefinition: string
        glueCatalogGeneration: false
        ignoreHeaderRows: 0
        includeOpForFullLoad: false
        maxFileSize: 0
        parquetTimestampInMillisecond: false
        parquetVersion: string
        preserveTransactions: false
        rfc4180: false
        rowGroupLength: 0
        serverSideEncryptionKmsKeyId: string
        serviceAccessRoleArn: string
        timestampColumnName: string
        useCsvNoSupValue: false
        useTaskStartTimeForFullLoadTimestamp: false
    secretsManagerAccessRoleArn: string
    secretsManagerArn: string
    serverName: string
    serviceAccessRole: string
    sslMode: string
    tags:
        string: string
    username: string
Copy

Endpoint Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

The Endpoint resource accepts the following input properties:

EndpointId
This property is required.
Changes to this property will trigger replacement.
string
Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
EndpointType This property is required. string
Type of endpoint. Valid values are source, target.
EngineName This property is required. string
Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, aurora-serverless, aurora-postgresql-serverless, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, redshift-serverless, s3, sqlserver, neptune, sybase. Please note that some of the engine names are available only for target endpoint type (e.g. redshift).
CertificateArn string
ARN for the certificate.
DatabaseName string
Name of the endpoint database.
ElasticsearchSettings EndpointElasticsearchSettings
Configuration block for OpenSearch settings. See below.
ExtraConnectionAttributes string
Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
KafkaSettings EndpointKafkaSettings
Configuration block for Kafka settings. See below.
KinesisSettings EndpointKinesisSettings
Configuration block for Kinesis settings. See below.
KmsKeyArn Changes to this property will trigger replacement. string

ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

The following arguments are optional:

MongodbSettings EndpointMongodbSettings
Configuration block for MongoDB settings. See below.
Password string
Password to be used to login to the endpoint database.
PauseReplicationTasks bool
Port int
Port used by the endpoint database.
PostgresSettings EndpointPostgresSettings
Configuration block for Postgres settings. See below.
RedisSettings EndpointRedisSettings
RedshiftSettings EndpointRedshiftSettings
Configuration block for Redshift settings. See below.
S3Settings EndpointS3Settings
(Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.
SecretsManagerAccessRoleArn string

ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.

Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and secrets_manager_arn. Or you can specify clear-text values for username, password, server_name, and port. You can't specify both.

SecretsManagerArn string
Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.
ServerName string
Host name of the server.
ServiceAccessRole string
ARN used by the service access IAM role for dynamodb endpoints.
SslMode string
SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full
Tags Dictionary<string, string>
Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
Username string
User name to be used to login to the endpoint database.
EndpointId
This property is required.
Changes to this property will trigger replacement.
string
Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
EndpointType This property is required. string
Type of endpoint. Valid values are source, target.
EngineName This property is required. string
Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, aurora-serverless, aurora-postgresql-serverless, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, redshift-serverless, s3, sqlserver, neptune, sybase. Please note that some of the engine names are available only for target endpoint type (e.g. redshift).
CertificateArn string
ARN for the certificate.
DatabaseName string
Name of the endpoint database.
ElasticsearchSettings EndpointElasticsearchSettingsArgs
Configuration block for OpenSearch settings. See below.
ExtraConnectionAttributes string
Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
KafkaSettings EndpointKafkaSettingsArgs
Configuration block for Kafka settings. See below.
KinesisSettings EndpointKinesisSettingsArgs
Configuration block for Kinesis settings. See below.
KmsKeyArn Changes to this property will trigger replacement. string

ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

The following arguments are optional:

MongodbSettings EndpointMongodbSettingsArgs
Configuration block for MongoDB settings. See below.
Password string
Password to be used to login to the endpoint database.
PauseReplicationTasks bool
Port int
Port used by the endpoint database.
PostgresSettings EndpointPostgresSettingsArgs
Configuration block for Postgres settings. See below.
RedisSettings EndpointRedisSettingsArgs
RedshiftSettings EndpointRedshiftSettingsArgs
Configuration block for Redshift settings. See below.
S3Settings EndpointS3SettingsArgs
(Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.
SecretsManagerAccessRoleArn string

ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.

Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and secrets_manager_arn. Or you can specify clear-text values for username, password, server_name, and port. You can't specify both.

SecretsManagerArn string
Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.
ServerName string
Host name of the server.
ServiceAccessRole string
ARN used by the service access IAM role for dynamodb endpoints.
SslMode string
SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full
Tags map[string]string
Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
Username string
User name to be used to login to the endpoint database.
endpointId
This property is required.
Changes to this property will trigger replacement.
String
Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
endpointType This property is required. String
Type of endpoint. Valid values are source, target.
engineName This property is required. String
Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, aurora-serverless, aurora-postgresql-serverless, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, redshift-serverless, s3, sqlserver, neptune, sybase. Please note that some of the engine names are available only for target endpoint type (e.g. redshift).
certificateArn String
ARN for the certificate.
databaseName String
Name of the endpoint database.
elasticsearchSettings EndpointElasticsearchSettings
Configuration block for OpenSearch settings. See below.
extraConnectionAttributes String
Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
kafkaSettings EndpointKafkaSettings
Configuration block for Kafka settings. See below.
kinesisSettings EndpointKinesisSettings
Configuration block for Kinesis settings. See below.
kmsKeyArn Changes to this property will trigger replacement. String

ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

The following arguments are optional:

mongodbSettings EndpointMongodbSettings
Configuration block for MongoDB settings. See below.
password String
Password to be used to login to the endpoint database.
pauseReplicationTasks Boolean
port Integer
Port used by the endpoint database.
postgresSettings EndpointPostgresSettings
Configuration block for Postgres settings. See below.
redisSettings EndpointRedisSettings
redshiftSettings EndpointRedshiftSettings
Configuration block for Redshift settings. See below.
s3Settings EndpointS3Settings
(Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.
secretsManagerAccessRoleArn String

ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.

Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and secrets_manager_arn. Or you can specify clear-text values for username, password, server_name, and port. You can't specify both.

secretsManagerArn String
Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.
serverName String
Host name of the server.
serviceAccessRole String
ARN used by the service access IAM role for dynamodb endpoints.
sslMode String
SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full
tags Map<String,String>
Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
username String
User name to be used to login to the endpoint database.
endpointId
This property is required.
Changes to this property will trigger replacement.
string
Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
endpointType This property is required. string
Type of endpoint. Valid values are source, target.
engineName This property is required. string
Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, aurora-serverless, aurora-postgresql-serverless, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, redshift-serverless, s3, sqlserver, neptune, sybase. Please note that some of the engine names are available only for target endpoint type (e.g. redshift).
certificateArn string
ARN for the certificate.
databaseName string
Name of the endpoint database.
elasticsearchSettings EndpointElasticsearchSettings
Configuration block for OpenSearch settings. See below.
extraConnectionAttributes string
Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
kafkaSettings EndpointKafkaSettings
Configuration block for Kafka settings. See below.
kinesisSettings EndpointKinesisSettings
Configuration block for Kinesis settings. See below.
kmsKeyArn Changes to this property will trigger replacement. string

ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

The following arguments are optional:

mongodbSettings EndpointMongodbSettings
Configuration block for MongoDB settings. See below.
password string
Password to be used to login to the endpoint database.
pauseReplicationTasks boolean
port number
Port used by the endpoint database.
postgresSettings EndpointPostgresSettings
Configuration block for Postgres settings. See below.
redisSettings EndpointRedisSettings
redshiftSettings EndpointRedshiftSettings
Configuration block for Redshift settings. See below.
s3Settings EndpointS3Settings
(Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.
secretsManagerAccessRoleArn string

ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.

Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and secrets_manager_arn. Or you can specify clear-text values for username, password, server_name, and port. You can't specify both.

secretsManagerArn string
Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.
serverName string
Host name of the server.
serviceAccessRole string
ARN used by the service access IAM role for dynamodb endpoints.
sslMode string
SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full
tags {[key: string]: string}
Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
username string
User name to be used to login to the endpoint database.
endpoint_id
This property is required.
Changes to this property will trigger replacement.
str
Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
endpoint_type This property is required. str
Type of endpoint. Valid values are source, target.
engine_name This property is required. str
Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, aurora-serverless, aurora-postgresql-serverless, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, redshift-serverless, s3, sqlserver, neptune, sybase. Please note that some of the engine names are available only for target endpoint type (e.g. redshift).
certificate_arn str
ARN for the certificate.
database_name str
Name of the endpoint database.
elasticsearch_settings EndpointElasticsearchSettingsArgs
Configuration block for OpenSearch settings. See below.
extra_connection_attributes str
Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
kafka_settings EndpointKafkaSettingsArgs
Configuration block for Kafka settings. See below.
kinesis_settings EndpointKinesisSettingsArgs
Configuration block for Kinesis settings. See below.
kms_key_arn Changes to this property will trigger replacement. str

ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

The following arguments are optional:

mongodb_settings EndpointMongodbSettingsArgs
Configuration block for MongoDB settings. See below.
password str
Password to be used to login to the endpoint database.
pause_replication_tasks bool
port int
Port used by the endpoint database.
postgres_settings EndpointPostgresSettingsArgs
Configuration block for Postgres settings. See below.
redis_settings EndpointRedisSettingsArgs
redshift_settings EndpointRedshiftSettingsArgs
Configuration block for Redshift settings. See below.
s3_settings EndpointS3SettingsArgs
(Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.
secrets_manager_access_role_arn str

ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.

Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and secrets_manager_arn. Or you can specify clear-text values for username, password, server_name, and port. You can't specify both.

secrets_manager_arn str
Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.
server_name str
Host name of the server.
service_access_role str
ARN used by the service access IAM role for dynamodb endpoints.
ssl_mode str
SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full
tags Mapping[str, str]
Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
username str
User name to be used to login to the endpoint database.
endpointId
This property is required.
Changes to this property will trigger replacement.
String
Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
endpointType This property is required. String
Type of endpoint. Valid values are source, target.
engineName This property is required. String
Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, aurora-serverless, aurora-postgresql-serverless, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, redshift-serverless, s3, sqlserver, neptune, sybase. Please note that some of the engine names are available only for target endpoint type (e.g. redshift).
certificateArn String
ARN for the certificate.
databaseName String
Name of the endpoint database.
elasticsearchSettings Property Map
Configuration block for OpenSearch settings. See below.
extraConnectionAttributes String
Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
kafkaSettings Property Map
Configuration block for Kafka settings. See below.
kinesisSettings Property Map
Configuration block for Kinesis settings. See below.
kmsKeyArn Changes to this property will trigger replacement. String

ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

The following arguments are optional:

mongodbSettings Property Map
Configuration block for MongoDB settings. See below.
password String
Password to be used to login to the endpoint database.
pauseReplicationTasks Boolean
port Number
Port used by the endpoint database.
postgresSettings Property Map
Configuration block for Postgres settings. See below.
redisSettings Property Map
redshiftSettings Property Map
Configuration block for Redshift settings. See below.
s3Settings Property Map
(Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.
secretsManagerAccessRoleArn String

ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.

Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and secrets_manager_arn. Or you can specify clear-text values for username, password, server_name, and port. You can't specify both.

secretsManagerArn String
Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.
serverName String
Host name of the server.
serviceAccessRole String
ARN used by the service access IAM role for dynamodb endpoints.
sslMode String
SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full.
tags Map<String>
Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
username String
User name to be used to login to the endpoint database.

Outputs

All input properties are implicitly available as output properties. Additionally, the Endpoint resource produces the following output properties:

EndpointArn string
ARN for the endpoint.
Id string
The provider-assigned unique ID for this managed resource.
TagsAll Dictionary<string, string>
Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

EndpointArn string
ARN for the endpoint.
Id string
The provider-assigned unique ID for this managed resource.
TagsAll map[string]string
Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

endpointArn String
ARN for the endpoint.
id String
The provider-assigned unique ID for this managed resource.
tagsAll Map<String,String>
Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

endpointArn string
ARN for the endpoint.
id string
The provider-assigned unique ID for this managed resource.
tagsAll {[key: string]: string}
Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

endpoint_arn str
ARN for the endpoint.
id str
The provider-assigned unique ID for this managed resource.
tags_all Mapping[str, str]
Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

endpointArn String
ARN for the endpoint.
id String
The provider-assigned unique ID for this managed resource.
tagsAll Map<String>
Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

Look up Existing Endpoint Resource

Get an existing Endpoint resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: EndpointState, opts?: CustomResourceOptions): Endpoint
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        certificate_arn: Optional[str] = None,
        database_name: Optional[str] = None,
        elasticsearch_settings: Optional[EndpointElasticsearchSettingsArgs] = None,
        endpoint_arn: Optional[str] = None,
        endpoint_id: Optional[str] = None,
        endpoint_type: Optional[str] = None,
        engine_name: Optional[str] = None,
        extra_connection_attributes: Optional[str] = None,
        kafka_settings: Optional[EndpointKafkaSettingsArgs] = None,
        kinesis_settings: Optional[EndpointKinesisSettingsArgs] = None,
        kms_key_arn: Optional[str] = None,
        mongodb_settings: Optional[EndpointMongodbSettingsArgs] = None,
        password: Optional[str] = None,
        pause_replication_tasks: Optional[bool] = None,
        port: Optional[int] = None,
        postgres_settings: Optional[EndpointPostgresSettingsArgs] = None,
        redis_settings: Optional[EndpointRedisSettingsArgs] = None,
        redshift_settings: Optional[EndpointRedshiftSettingsArgs] = None,
        s3_settings: Optional[EndpointS3SettingsArgs] = None,
        secrets_manager_access_role_arn: Optional[str] = None,
        secrets_manager_arn: Optional[str] = None,
        server_name: Optional[str] = None,
        service_access_role: Optional[str] = None,
        ssl_mode: Optional[str] = None,
        tags: Optional[Mapping[str, str]] = None,
        tags_all: Optional[Mapping[str, str]] = None,
        username: Optional[str] = None) -> Endpoint
func GetEndpoint(ctx *Context, name string, id IDInput, state *EndpointState, opts ...ResourceOption) (*Endpoint, error)
public static Endpoint Get(string name, Input<string> id, EndpointState? state, CustomResourceOptions? opts = null)
public static Endpoint get(String name, Output<String> id, EndpointState state, CustomResourceOptions options)
resources:  _:    type: aws:dms:Endpoint    get:      id: ${id}
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
CertificateArn string
ARN for the certificate.
DatabaseName string
Name of the endpoint database.
ElasticsearchSettings EndpointElasticsearchSettings
Configuration block for OpenSearch settings. See below.
EndpointArn string
ARN for the endpoint.
EndpointId Changes to this property will trigger replacement. string
Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
EndpointType string
Type of endpoint. Valid values are source, target.
EngineName string
Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, aurora-serverless, aurora-postgresql-serverless, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, redshift-serverless, s3, sqlserver, neptune, sybase. Please note that some engine names are available only for the target endpoint type (e.g. redshift).
ExtraConnectionAttributes string
Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
KafkaSettings EndpointKafkaSettings
Configuration block for Kafka settings. See below.
KinesisSettings EndpointKinesisSettings
Configuration block for Kinesis settings. See below.
KmsKeyArn Changes to this property will trigger replacement. string

ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

The following arguments are optional:

MongodbSettings EndpointMongodbSettings
Configuration block for MongoDB settings. See below.
Password string
Password to be used to login to the endpoint database.
PauseReplicationTasks bool
Port int
Port used by the endpoint database.
PostgresSettings EndpointPostgresSettings
Configuration block for Postgres settings. See below.
RedisSettings EndpointRedisSettings
RedshiftSettings EndpointRedshiftSettings
Configuration block for Redshift settings. See below.
S3Settings EndpointS3Settings
(Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.
SecretsManagerAccessRoleArn string

ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.

Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and secrets_manager_arn. Or you can specify clear-text values for username, password, server_name, and port. You can't specify both.

SecretsManagerArn string
Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.
ServerName string
Host name of the server.
ServiceAccessRole string
ARN used by the service access IAM role for dynamodb endpoints.
SslMode string
SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full.
Tags Dictionary<string, string>
Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
TagsAll Dictionary<string, string>
Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

Username string
User name to be used to login to the endpoint database.
CertificateArn string
ARN for the certificate.
DatabaseName string
Name of the endpoint database.
ElasticsearchSettings EndpointElasticsearchSettingsArgs
Configuration block for OpenSearch settings. See below.
EndpointArn string
ARN for the endpoint.
EndpointId Changes to this property will trigger replacement. string
Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
EndpointType string
Type of endpoint. Valid values are source, target.
EngineName string
Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, aurora-serverless, aurora-postgresql-serverless, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, redshift-serverless, s3, sqlserver, neptune, sybase. Please note that some engine names are available only for the target endpoint type (e.g. redshift).
ExtraConnectionAttributes string
Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
KafkaSettings EndpointKafkaSettingsArgs
Configuration block for Kafka settings. See below.
KinesisSettings EndpointKinesisSettingsArgs
Configuration block for Kinesis settings. See below.
KmsKeyArn Changes to this property will trigger replacement. string

ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

The following arguments are optional:

MongodbSettings EndpointMongodbSettingsArgs
Configuration block for MongoDB settings. See below.
Password string
Password to be used to login to the endpoint database.
PauseReplicationTasks bool
Port int
Port used by the endpoint database.
PostgresSettings EndpointPostgresSettingsArgs
Configuration block for Postgres settings. See below.
RedisSettings EndpointRedisSettingsArgs
RedshiftSettings EndpointRedshiftSettingsArgs
Configuration block for Redshift settings. See below.
S3Settings EndpointS3SettingsArgs
(Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.
SecretsManagerAccessRoleArn string

ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.

Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and secrets_manager_arn. Or you can specify clear-text values for username, password, server_name, and port. You can't specify both.

SecretsManagerArn string
Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.
ServerName string
Host name of the server.
ServiceAccessRole string
ARN used by the service access IAM role for dynamodb endpoints.
SslMode string
SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full.
Tags map[string]string
Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
TagsAll map[string]string
Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

Username string
User name to be used to login to the endpoint database.
certificateArn String
ARN for the certificate.
databaseName String
Name of the endpoint database.
elasticsearchSettings EndpointElasticsearchSettings
Configuration block for OpenSearch settings. See below.
endpointArn String
ARN for the endpoint.
endpointId Changes to this property will trigger replacement. String
Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
endpointType String
Type of endpoint. Valid values are source, target.
engineName String
Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, aurora-serverless, aurora-postgresql-serverless, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, redshift-serverless, s3, sqlserver, neptune, sybase. Please note that some engine names are available only for the target endpoint type (e.g. redshift).
extraConnectionAttributes String
Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
kafkaSettings EndpointKafkaSettings
Configuration block for Kafka settings. See below.
kinesisSettings EndpointKinesisSettings
Configuration block for Kinesis settings. See below.
kmsKeyArn Changes to this property will trigger replacement. String

ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

The following arguments are optional:

mongodbSettings EndpointMongodbSettings
Configuration block for MongoDB settings. See below.
password String
Password to be used to login to the endpoint database.
pauseReplicationTasks Boolean
port Integer
Port used by the endpoint database.
postgresSettings EndpointPostgresSettings
Configuration block for Postgres settings. See below.
redisSettings EndpointRedisSettings
redshiftSettings EndpointRedshiftSettings
Configuration block for Redshift settings. See below.
s3Settings EndpointS3Settings
(Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.
secretsManagerAccessRoleArn String

ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.

Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and secrets_manager_arn. Or you can specify clear-text values for username, password, server_name, and port. You can't specify both.

secretsManagerArn String
Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.
serverName String
Host name of the server.
serviceAccessRole String
ARN used by the service access IAM role for dynamodb endpoints.
sslMode String
SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full.
tags Map<String,String>
Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
tagsAll Map<String,String>
Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

username String
User name to be used to login to the endpoint database.
certificateArn string
ARN for the certificate.
databaseName string
Name of the endpoint database.
elasticsearchSettings EndpointElasticsearchSettings
Configuration block for OpenSearch settings. See below.
endpointArn string
ARN for the endpoint.
endpointId Changes to this property will trigger replacement. string
Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
endpointType string
Type of endpoint. Valid values are source, target.
engineName string
Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, aurora-serverless, aurora-postgresql-serverless, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, redshift-serverless, s3, sqlserver, neptune, sybase. Please note that some engine names are available only for the target endpoint type (e.g. redshift).
extraConnectionAttributes string
Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
kafkaSettings EndpointKafkaSettings
Configuration block for Kafka settings. See below.
kinesisSettings EndpointKinesisSettings
Configuration block for Kinesis settings. See below.
kmsKeyArn Changes to this property will trigger replacement. string

ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

The following arguments are optional:

mongodbSettings EndpointMongodbSettings
Configuration block for MongoDB settings. See below.
password string
Password to be used to login to the endpoint database.
pauseReplicationTasks boolean
port number
Port used by the endpoint database.
postgresSettings EndpointPostgresSettings
Configuration block for Postgres settings. See below.
redisSettings EndpointRedisSettings
redshiftSettings EndpointRedshiftSettings
Configuration block for Redshift settings. See below.
s3Settings EndpointS3Settings
(Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.
secretsManagerAccessRoleArn string

ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.

Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and secrets_manager_arn. Or you can specify clear-text values for username, password, server_name, and port. You can't specify both.

secretsManagerArn string
Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.
serverName string
Host name of the server.
serviceAccessRole string
ARN used by the service access IAM role for dynamodb endpoints.
sslMode string
SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full.
tags {[key: string]: string}
Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
tagsAll {[key: string]: string}
Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

username string
User name to be used to login to the endpoint database.
certificate_arn str
ARN for the certificate.
database_name str
Name of the endpoint database.
elasticsearch_settings EndpointElasticsearchSettingsArgs
Configuration block for OpenSearch settings. See below.
endpoint_arn str
ARN for the endpoint.
endpoint_id Changes to this property will trigger replacement. str
Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
endpoint_type str
Type of endpoint. Valid values are source, target.
engine_name str
Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, aurora-serverless, aurora-postgresql-serverless, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, redshift-serverless, s3, sqlserver, neptune, sybase. Please note that some engine names are available only for the target endpoint type (e.g. redshift).
extra_connection_attributes str
Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
kafka_settings EndpointKafkaSettingsArgs
Configuration block for Kafka settings. See below.
kinesis_settings EndpointKinesisSettingsArgs
Configuration block for Kinesis settings. See below.
kms_key_arn Changes to this property will trigger replacement. str

ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

The following arguments are optional:

mongodb_settings EndpointMongodbSettingsArgs
Configuration block for MongoDB settings. See below.
password str
Password to be used to login to the endpoint database.
pause_replication_tasks bool
port int
Port used by the endpoint database.
postgres_settings EndpointPostgresSettingsArgs
Configuration block for Postgres settings. See below.
redis_settings EndpointRedisSettingsArgs
redshift_settings EndpointRedshiftSettingsArgs
Configuration block for Redshift settings. See below.
s3_settings EndpointS3SettingsArgs
(Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.
secrets_manager_access_role_arn str

ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.

Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and secrets_manager_arn. Or you can specify clear-text values for username, password, server_name, and port. You can't specify both.

secrets_manager_arn str
Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.
server_name str
Host name of the server.
service_access_role str
ARN used by the service access IAM role for dynamodb endpoints.
ssl_mode str
SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full.
tags Mapping[str, str]
Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
tags_all Mapping[str, str]
Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

username str
User name to be used to login to the endpoint database.
certificateArn String
ARN for the certificate.
databaseName String
Name of the endpoint database.
elasticsearchSettings Property Map
Configuration block for OpenSearch settings. See below.
endpointArn String
ARN for the endpoint.
endpointId Changes to this property will trigger replacement. String
Database endpoint identifier. Identifiers must contain from 1 to 255 alphanumeric characters or hyphens, begin with a letter, contain only ASCII letters, digits, and hyphens, not end with a hyphen, and not contain two consecutive hyphens.
endpointType String
Type of endpoint. Valid values are source, target.
engineName String
Type of engine for the endpoint. Valid values are aurora, aurora-postgresql, aurora-serverless, aurora-postgresql-serverless, azuredb, azure-sql-managed-instance, babelfish, db2, db2-zos, docdb, dynamodb, elasticsearch, kafka, kinesis, mariadb, mongodb, mysql, opensearch, oracle, postgres, redshift, redshift-serverless, s3, sqlserver, neptune, sybase. Please note that some engine names are available only for the target endpoint type (e.g. redshift).
extraConnectionAttributes String
Additional attributes associated with the connection. For available attributes for a source Endpoint, see Sources for data migration. For available attributes for a target Endpoint, see Targets for data migration.
kafkaSettings Property Map
Configuration block for Kafka settings. See below.
kinesisSettings Property Map
Configuration block for Kinesis settings. See below.
kmsKeyArn Changes to this property will trigger replacement. String

ARN for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for kms_key_arn, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region. To encrypt an S3 target with a KMS Key, use the parameter s3_settings.server_side_encryption_kms_key_id. When engine_name is redshift, kms_key_arn is the KMS Key for the Redshift target and the parameter redshift_settings.server_side_encryption_kms_key_id encrypts the S3 intermediate storage.

The following arguments are optional:

mongodbSettings Property Map
Configuration block for MongoDB settings. See below.
password String
Password to be used to login to the endpoint database.
pauseReplicationTasks Boolean
port Number
Port used by the endpoint database.
postgresSettings Property Map
Configuration block for Postgres settings. See below.
redisSettings Property Map
redshiftSettings Property Map
Configuration block for Redshift settings. See below.
s3Settings Property Map
(Deprecated, use the aws.dms.S3Endpoint resource instead) Configuration block for S3 settings. See below.
secretsManagerAccessRoleArn String

ARN of the IAM role that specifies AWS DMS as the trusted entity and has the required permissions to access the value in the Secrets Manager secret referred to by secrets_manager_arn. The role must allow the iam:PassRole action.

Note: You can specify one of two sets of values for these permissions. You can specify the values for this setting and secrets_manager_arn. Or you can specify clear-text values for username, password, server_name, and port. You can't specify both.

secretsManagerArn String
Full ARN, partial ARN, or friendly name of the Secrets Manager secret that contains the endpoint connection details. Supported only when engine_name is aurora, aurora-postgresql, mariadb, mongodb, mysql, oracle, postgres, redshift, or sqlserver.
serverName String
Host name of the server.
serviceAccessRole String
ARN used by the service access IAM role for dynamodb endpoints.
sslMode String
SSL mode to use for the connection. Valid values are none, require, verify-ca, verify-full.
tags Map<String>
Map of tags to assign to the resource. If configured with a provider default_tags configuration block present, tags with matching keys will overwrite those defined at the provider-level.
tagsAll Map<String>
Map of tags assigned to the resource, including those inherited from the provider default_tags configuration block.

Deprecated: Please use tags instead.

username String
User name to be used to login to the endpoint database.

Supporting Types

EndpointElasticsearchSettings
, EndpointElasticsearchSettingsArgs

EndpointUri
This property is required.
Changes to this property will trigger replacement.
string
Endpoint for the OpenSearch cluster.
ServiceAccessRoleArn
This property is required.
Changes to this property will trigger replacement.
string
ARN of the IAM Role with permissions to write to the OpenSearch cluster.
ErrorRetryDuration Changes to this property will trigger replacement. int
Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.
FullLoadErrorPercentage Changes to this property will trigger replacement. int
Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.
UseNewMappingType Changes to this property will trigger replacement. bool
Enable to migrate documents using the documentation type _doc. OpenSearch and Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false.
EndpointUri
This property is required.
Changes to this property will trigger replacement.
string
Endpoint for the OpenSearch cluster.
ServiceAccessRoleArn
This property is required.
Changes to this property will trigger replacement.
string
ARN of the IAM Role with permissions to write to the OpenSearch cluster.
ErrorRetryDuration Changes to this property will trigger replacement. int
Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.
FullLoadErrorPercentage Changes to this property will trigger replacement. int
Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.
UseNewMappingType Changes to this property will trigger replacement. bool
Enable to migrate documents using the documentation type _doc. OpenSearch and Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false.
endpointUri
This property is required.
Changes to this property will trigger replacement.
String
Endpoint for the OpenSearch cluster.
serviceAccessRoleArn
This property is required.
Changes to this property will trigger replacement.
String
ARN of the IAM Role with permissions to write to the OpenSearch cluster.
errorRetryDuration Changes to this property will trigger replacement. Integer
Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.
fullLoadErrorPercentage Changes to this property will trigger replacement. Integer
Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.
useNewMappingType Changes to this property will trigger replacement. Boolean
Enable to migrate documents using the documentation type _doc. OpenSearch and Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false.
endpointUri
This property is required.
Changes to this property will trigger replacement.
string
Endpoint for the OpenSearch cluster.
serviceAccessRoleArn
This property is required.
Changes to this property will trigger replacement.
string
ARN of the IAM Role with permissions to write to the OpenSearch cluster.
errorRetryDuration Changes to this property will trigger replacement. number
Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.
fullLoadErrorPercentage Changes to this property will trigger replacement. number
Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.
useNewMappingType Changes to this property will trigger replacement. boolean
Enable to migrate documents using the documentation type _doc. OpenSearch and Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false.
endpoint_uri
This property is required.
Changes to this property will trigger replacement.
str
Endpoint for the OpenSearch cluster.
service_access_role_arn
This property is required.
Changes to this property will trigger replacement.
str
ARN of the IAM Role with permissions to write to the OpenSearch cluster.
error_retry_duration Changes to this property will trigger replacement. int
Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.
full_load_error_percentage Changes to this property will trigger replacement. int
Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.
use_new_mapping_type Changes to this property will trigger replacement. bool
Enable to migrate documents using the documentation type _doc. OpenSearch and Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false.
endpointUri
This property is required.
Changes to this property will trigger replacement.
String
Endpoint for the OpenSearch cluster.
serviceAccessRoleArn
This property is required.
Changes to this property will trigger replacement.
String
ARN of the IAM Role with permissions to write to the OpenSearch cluster.
errorRetryDuration Changes to this property will trigger replacement. Number
Maximum number of seconds for which DMS retries failed API requests to the OpenSearch cluster. Default is 300.
fullLoadErrorPercentage Changes to this property will trigger replacement. Number
Maximum percentage of records that can fail to be written before a full load operation stops. Default is 10.
useNewMappingType Changes to this property will trigger replacement. Boolean
Enable to migrate documents using the documentation type _doc. OpenSearch and Elasticsearch clusters only support the _doc documentation type in versions 7.x and later. The default value is false.

EndpointKafkaSettings
, EndpointKafkaSettingsArgs

Broker This property is required. string
Kafka broker location. Specify in the form broker-hostname-or-ip:port.
IncludeControlDetails bool
Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.
IncludeNullAndEmpty bool
Include NULL and empty columns for records migrated to the endpoint. Default is false.
IncludePartitionValue bool
Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.
IncludeTableAlterOperations bool
Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.
IncludeTransactionDetails bool
Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.
MessageFormat string
Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).
MessageMaxBytes int
Maximum size in bytes for records created on the endpoint. Default is 1,000,000.
NoHexPrefix bool
Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.
PartitionIncludeSchemaTable bool
Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.
SaslMechanism string
For SASL/SSL authentication, AWS DMS supports the scram-sha-512 mechanism by default. AWS DMS versions 3.5.0 and later also support the PLAIN mechanism. To use the PLAIN mechanism, set this parameter to plain.
SaslPassword string
Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
SaslUsername string
Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
SecurityProtocol string
Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.
SslCaCertificateArn string
ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
SslClientCertificateArn string
ARN of the client certificate used to securely connect to a Kafka target endpoint.
SslClientKeyArn string
ARN for the client private key used to securely connect to a Kafka target endpoint.
SslClientKeyPassword string
Password for the client private key used to securely connect to a Kafka target endpoint.
Topic string
Kafka topic for migration. Default is kafka-default-topic.
Broker This property is required. string
Kafka broker location. Specify in the form broker-hostname-or-ip:port.
IncludeControlDetails bool
Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.
IncludeNullAndEmpty bool
Include NULL and empty columns for records migrated to the endpoint. Default is false.
IncludePartitionValue bool
Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.
IncludeTableAlterOperations bool
Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.
IncludeTransactionDetails bool
Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.
MessageFormat string
Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).
MessageMaxBytes int
Maximum size in bytes for records created on the endpoint. Default is 1,000,000.
NoHexPrefix bool
Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.
PartitionIncludeSchemaTable bool
Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.
SaslMechanism string
For SASL/SSL authentication, AWS DMS supports the scram-sha-512 mechanism by default. AWS DMS versions 3.5.0 and later also support the PLAIN mechanism. To use the PLAIN mechanism, set this parameter to plain.
SaslPassword string
Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
SaslUsername string
Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
SecurityProtocol string
Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.
SslCaCertificateArn string
ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
SslClientCertificateArn string
ARN of the client certificate used to securely connect to a Kafka target endpoint.
SslClientKeyArn string
ARN for the client private key used to securely connect to a Kafka target endpoint.
SslClientKeyPassword string
Password for the client private key used to securely connect to a Kafka target endpoint.
Topic string
Kafka topic for migration. Default is kafka-default-topic.
broker This property is required. String
Kafka broker location. Specify in the form broker-hostname-or-ip:port.
includeControlDetails Boolean
Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.
includeNullAndEmpty Boolean
Include NULL and empty columns for records migrated to the endpoint. Default is false.
includePartitionValue Boolean
Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.
includeTableAlterOperations Boolean
Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.
includeTransactionDetails Boolean
Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.
messageFormat String
Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).
messageMaxBytes Integer
Maximum size in bytes for records created on the endpoint. Default is 1,000,000.
noHexPrefix Boolean
Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.
partitionIncludeSchemaTable Boolean
Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.
saslMechanism String
For SASL/SSL authentication, AWS DMS supports the scram-sha-512 mechanism by default. AWS DMS versions 3.5.0 and later also support the PLAIN mechanism. To use the PLAIN mechanism, set this parameter to plain.
saslPassword String
Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
saslUsername String
Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
securityProtocol String
Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.
sslCaCertificateArn String
ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
sslClientCertificateArn String
ARN of the client certificate used to securely connect to a Kafka target endpoint.
sslClientKeyArn String
ARN for the client private key used to securely connect to a Kafka target endpoint.
sslClientKeyPassword String
Password for the client private key used to securely connect to a Kafka target endpoint.
topic String
Kafka topic for migration. Default is kafka-default-topic.
broker This property is required. string
Kafka broker location. Specify in the form broker-hostname-or-ip:port.
includeControlDetails boolean
Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.
includeNullAndEmpty boolean
Include NULL and empty columns for records migrated to the endpoint. Default is false.
includePartitionValue boolean
Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.
includeTableAlterOperations boolean
Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.
includeTransactionDetails boolean
Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.
messageFormat string
Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).
messageMaxBytes number
Maximum size in bytes for records created on the endpoint. Default is 1,000,000.
noHexPrefix boolean
Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.
partitionIncludeSchemaTable boolean
Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.
saslMechanism string
For SASL/SSL authentication, AWS DMS supports the scram-sha-512 mechanism by default. AWS DMS versions 3.5.0 and later also support the PLAIN mechanism. To use the PLAIN mechanism, set this parameter to plain.
saslPassword string
Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
saslUsername string
Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
securityProtocol string
Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.
sslCaCertificateArn string
ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
sslClientCertificateArn string
ARN of the client certificate used to securely connect to a Kafka target endpoint.
sslClientKeyArn string
ARN for the client private key used to securely connect to a Kafka target endpoint.
sslClientKeyPassword string
Password for the client private key used to securely connect to a Kafka target endpoint.
topic string
Kafka topic for migration. Default is kafka-default-topic.
broker This property is required. str
Kafka broker location. Specify in the form broker-hostname-or-ip:port.
include_control_details bool
Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.
include_null_and_empty bool
Include NULL and empty columns for records migrated to the endpoint. Default is false.
include_partition_value bool
Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.
include_table_alter_operations bool
Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.
include_transaction_details bool
Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.
message_format str
Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).
message_max_bytes int
Maximum size in bytes for records created on the endpoint. Default is 1,000,000.
no_hex_prefix bool
Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.
partition_include_schema_table bool
Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.
sasl_mechanism str
For SASL/SSL authentication, AWS DMS supports the scram-sha-512 mechanism by default. AWS DMS versions 3.5.0 and later also support the PLAIN mechanism. To use the PLAIN mechanism, set this parameter to plain.
sasl_password str
Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
sasl_username str
Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
security_protocol str
Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.
ssl_ca_certificate_arn str
ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
ssl_client_certificate_arn str
ARN of the client certificate used to securely connect to a Kafka target endpoint.
ssl_client_key_arn str
ARN for the client private key used to securely connect to a Kafka target endpoint.
ssl_client_key_password str
Password for the client private key used to securely connect to a Kafka target endpoint.
topic str
Kafka topic for migration. Default is kafka-default-topic.
broker This property is required. String
Kafka broker location. Specify in the form broker-hostname-or-ip:port.
includeControlDetails Boolean
Shows detailed control information for table definition, column definition, and table and column changes in the Kafka message output. Default is false.
includeNullAndEmpty Boolean
Include NULL and empty columns for records migrated to the endpoint. Default is false.
includePartitionValue Boolean
Shows the partition value within the Kafka message output unless the partition type is schema-table-type. Default is false.
includeTableAlterOperations Boolean
Includes any data definition language (DDL) operations that change the table in the control data, such as rename-table, drop-table, add-column, drop-column, and rename-column. Default is false.
includeTransactionDetails Boolean
Provides detailed transaction information from the source database. This information includes a commit timestamp, a log position, and values for transaction_id, previous transaction_id, and transaction_record_id (the record offset within a transaction). Default is false.
messageFormat String
Output format for the records created on the endpoint. Message format is JSON (default) or JSON_UNFORMATTED (a single line with no tab).
messageMaxBytes Number
Maximum size in bytes for records created on the endpoint. Default is 1,000,000.
noHexPrefix Boolean
Set this optional parameter to true to avoid adding a '0x' prefix to raw data in hexadecimal format. For example, by default, AWS DMS adds a '0x' prefix to the LOB column type in hexadecimal format moving from an Oracle source to a Kafka target. Use the no_hex_prefix endpoint setting to enable migration of RAW data type columns without adding the '0x' prefix.
partitionIncludeSchemaTable Boolean
Prefixes schema and table names to partition values, when the partition type is primary-key-type. Doing this increases data distribution among Kafka partitions. For example, suppose that a SysBench schema has thousands of tables and each table has only limited range for a primary key. In this case, the same primary key is sent from thousands of tables to the same partition, which causes throttling. Default is false.
saslMechanism String
For SASL/SSL authentication, AWS DMS supports the scram-sha-512 mechanism by default. AWS DMS versions 3.5.0 and later also support the PLAIN mechanism. To use the PLAIN mechanism, set this parameter to plain.
saslPassword String
Secure password you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
saslUsername String
Secure user name you created when you first set up your MSK cluster to validate a client identity and make an encrypted connection between server and client using SASL-SSL authentication.
securityProtocol String
Set secure connection to a Kafka target endpoint using Transport Layer Security (TLS). Options include ssl-encryption, ssl-authentication, and sasl-ssl. sasl-ssl requires sasl_username and sasl_password.
sslCaCertificateArn String
ARN for the private certificate authority (CA) cert that AWS DMS uses to securely connect to your Kafka target endpoint.
sslClientCertificateArn String
ARN of the client certificate used to securely connect to a Kafka target endpoint.
sslClientKeyArn String
ARN for the client private key used to securely connect to a Kafka target endpoint.
sslClientKeyPassword String
Password for the client private key used to securely connect to a Kafka target endpoint.
topic String
Kafka topic for migration. Default is kafka-default-topic.

EndpointKinesisSettings
, EndpointKinesisSettingsArgs

IncludeControlDetails bool
Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.
IncludeNullAndEmpty bool
Include NULL and empty columns in the target. Default is false.
IncludePartitionValue bool
Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.
IncludeTableAlterOperations bool
Includes any data definition language (DDL) operations that change the table in the control data. Default is false.
IncludeTransactionDetails bool
Provides detailed transaction information from the source database. Default is false.
MessageFormat Changes to this property will trigger replacement. string
Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).
PartitionIncludeSchemaTable bool
Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.
ServiceAccessRoleArn string
ARN of the IAM Role with permissions to write to the Kinesis data stream.
StreamArn string
ARN of the Kinesis data stream.
IncludeControlDetails bool
Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.
IncludeNullAndEmpty bool
Include NULL and empty columns in the target. Default is false.
IncludePartitionValue bool
Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.
IncludeTableAlterOperations bool
Includes any data definition language (DDL) operations that change the table in the control data. Default is false.
IncludeTransactionDetails bool
Provides detailed transaction information from the source database. Default is false.
MessageFormat Changes to this property will trigger replacement. string
Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).
PartitionIncludeSchemaTable bool
Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.
ServiceAccessRoleArn string
ARN of the IAM Role with permissions to write to the Kinesis data stream.
StreamArn string
ARN of the Kinesis data stream.
includeControlDetails Boolean
Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.
includeNullAndEmpty Boolean
Include NULL and empty columns in the target. Default is false.
includePartitionValue Boolean
Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.
includeTableAlterOperations Boolean
Includes any data definition language (DDL) operations that change the table in the control data. Default is false.
includeTransactionDetails Boolean
Provides detailed transaction information from the source database. Default is false.
messageFormat Changes to this property will trigger replacement. String
Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).
partitionIncludeSchemaTable Boolean
Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.
serviceAccessRoleArn String
ARN of the IAM Role with permissions to write to the Kinesis data stream.
streamArn String
ARN of the Kinesis data stream.
includeControlDetails boolean
Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.
includeNullAndEmpty boolean
Include NULL and empty columns in the target. Default is false.
includePartitionValue boolean
Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.
includeTableAlterOperations boolean
Includes any data definition language (DDL) operations that change the table in the control data. Default is false.
includeTransactionDetails boolean
Provides detailed transaction information from the source database. Default is false.
messageFormat Changes to this property will trigger replacement. string
Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).
partitionIncludeSchemaTable boolean
Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.
serviceAccessRoleArn string
ARN of the IAM Role with permissions to write to the Kinesis data stream.
streamArn string
ARN of the Kinesis data stream.
include_control_details bool
Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.
include_null_and_empty bool
Include NULL and empty columns in the target. Default is false.
include_partition_value bool
Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.
include_table_alter_operations bool
Includes any data definition language (DDL) operations that change the table in the control data. Default is false.
include_transaction_details bool
Provides detailed transaction information from the source database. Default is false.
message_format Changes to this property will trigger replacement. str
Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).
partition_include_schema_table bool
Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.
service_access_role_arn str
ARN of the IAM Role with permissions to write to the Kinesis data stream.
stream_arn str
ARN of the Kinesis data stream.
includeControlDetails Boolean
Shows detailed control information for table definition, column definition, and table and column changes in the Kinesis message output. Default is false.
includeNullAndEmpty Boolean
Include NULL and empty columns in the target. Default is false.
includePartitionValue Boolean
Shows the partition value within the Kinesis message output, unless the partition type is schema-table-type. Default is false.
includeTableAlterOperations Boolean
Includes any data definition language (DDL) operations that change the table in the control data. Default is false.
includeTransactionDetails Boolean
Provides detailed transaction information from the source database. Default is false.
messageFormat Changes to this property will trigger replacement. String
Output format for the records created. Default is json. Valid values are json and json-unformatted (a single line with no tab).
partitionIncludeSchemaTable Boolean
Prefixes schema and table names to partition values, when the partition type is primary-key-type. Default is false.
serviceAccessRoleArn String
ARN of the IAM Role with permissions to write to the Kinesis data stream.
streamArn String
ARN of the Kinesis data stream.

EndpointMongodbSettings
, EndpointMongodbSettingsArgs

AuthMechanism string
Authentication mechanism to access the MongoDB source endpoint. Default is default.
AuthSource string
Authentication database name. Not used when auth_type is no. Default is admin.
AuthType string
Authentication type to access the MongoDB source endpoint. Default is password.
DocsToInvestigate string
Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.
ExtractDocId string
Document ID. Use this setting when nesting_level is set to none. Default is false.
NestingLevel string
Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).
AuthMechanism string
Authentication mechanism to access the MongoDB source endpoint. Default is default.
AuthSource string
Authentication database name. Not used when auth_type is no. Default is admin.
AuthType string
Authentication type to access the MongoDB source endpoint. Default is password.
DocsToInvestigate string
Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.
ExtractDocId string
Document ID. Use this setting when nesting_level is set to none. Default is false.
NestingLevel string
Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).
authMechanism String
Authentication mechanism to access the MongoDB source endpoint. Default is default.
authSource String
Authentication database name. Not used when auth_type is no. Default is admin.
authType String
Authentication type to access the MongoDB source endpoint. Default is password.
docsToInvestigate String
Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.
extractDocId String
Document ID. Use this setting when nesting_level is set to none. Default is false.
nestingLevel String
Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).
authMechanism string
Authentication mechanism to access the MongoDB source endpoint. Default is default.
authSource string
Authentication database name. Not used when auth_type is no. Default is admin.
authType string
Authentication type to access the MongoDB source endpoint. Default is password.
docsToInvestigate string
Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.
extractDocId string
Document ID. Use this setting when nesting_level is set to none. Default is false.
nestingLevel string
Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).
auth_mechanism str
Authentication mechanism to access the MongoDB source endpoint. Default is default.
auth_source str
Authentication database name. Not used when auth_type is no. Default is admin.
auth_type str
Authentication type to access the MongoDB source endpoint. Default is password.
docs_to_investigate str
Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.
extract_doc_id str
Document ID. Use this setting when nesting_level is set to none. Default is false.
nesting_level str
Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).
authMechanism String
Authentication mechanism to access the MongoDB source endpoint. Default is default.
authSource String
Authentication database name. Not used when auth_type is no. Default is admin.
authType String
Authentication type to access the MongoDB source endpoint. Default is password.
docsToInvestigate String
Number of documents to preview to determine the document organization. Use this setting when nesting_level is set to one. Default is 1000.
extractDocId String
Document ID. Use this setting when nesting_level is set to none. Default is false.
nestingLevel String
Specifies either document or table mode. Default is none. Valid values are one (table mode) and none (document mode).

EndpointPostgresSettings
, EndpointPostgresSettingsArgs

AfterConnectScript string
For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
BabelfishDatabaseName string
The Babelfish for Aurora PostgreSQL database name for the endpoint.
CaptureDdls bool
To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts.
DatabaseMode string
Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints.
DdlArtifactsSchema string
Sets the schema in which the operational DDL database artifacts are created. Default is public.
ExecuteTimeout int
Sets the client statement timeout for the PostgreSQL instance, in seconds. Default value is 60.
FailTasksOnLobTruncation bool
When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. Default is false.
HeartbeatEnable bool
The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source.
HeartbeatFrequency int
Sets the WAL heartbeat frequency (in minutes). Default value is 5.
HeartbeatSchema string
Sets the schema in which the heartbeat artifacts are created. Default value is public.
MapBooleanAsBoolean bool
You can use PostgreSQL endpoint settings to map a boolean as a boolean from your PostgreSQL source to an Amazon Redshift target. Default value is false.
MapJsonbAsClob bool
Optional. When true, DMS migrates JSONB values as CLOB.
MapLongVarcharAs string
Optional. When true, DMS migrates LONG values as VARCHAR.
MaxFileSize int
Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is 32,768 KB.
PluginName string
Specifies the plugin to use to create a replication slot. Valid values: pglogical, test_decoding.
SlotName string
Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.
AfterConnectScript string
For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
BabelfishDatabaseName string
The Babelfish for Aurora PostgreSQL database name for the endpoint.
CaptureDdls bool
To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts.
DatabaseMode string
Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints.
DdlArtifactsSchema string
Sets the schema in which the operational DDL database artifacts are created. Default is public.
ExecuteTimeout int
Sets the client statement timeout for the PostgreSQL instance, in seconds. Default value is 60.
FailTasksOnLobTruncation bool
When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. Default is false.
HeartbeatEnable bool
The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source.
HeartbeatFrequency int
Sets the WAL heartbeat frequency (in minutes). Default value is 5.
HeartbeatSchema string
Sets the schema in which the heartbeat artifacts are created. Default value is public.
MapBooleanAsBoolean bool
You can use PostgreSQL endpoint settings to map a boolean as a boolean from your PostgreSQL source to an Amazon Redshift target. Default value is false.
MapJsonbAsClob bool
Optional. When true, DMS migrates JSONB values as CLOB.
MapLongVarcharAs string
Optional. When true, DMS migrates LONG values as VARCHAR.
MaxFileSize int
Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is 32,768 KB.
PluginName string
Specifies the plugin to use to create a replication slot. Valid values: pglogical, test_decoding.
SlotName string
Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.
afterConnectScript String
For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
babelfishDatabaseName String
The Babelfish for Aurora PostgreSQL database name for the endpoint.
captureDdls Boolean
To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts.
databaseMode String
Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints.
ddlArtifactsSchema String
Sets the schema in which the operational DDL database artifacts are created. Default is public.
executeTimeout Integer
Sets the client statement timeout for the PostgreSQL instance, in seconds. Default value is 60.
failTasksOnLobTruncation Boolean
When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. Default is false.
heartbeatEnable Boolean
The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source.
heartbeatFrequency Integer
Sets the WAL heartbeat frequency (in minutes). Default value is 5.
heartbeatSchema String
Sets the schema in which the heartbeat artifacts are created. Default value is public.
mapBooleanAsBoolean Boolean
You can use PostgreSQL endpoint settings to map a boolean as a boolean from your PostgreSQL source to an Amazon Redshift target. Default value is false.
mapJsonbAsClob Boolean
Optional. When true, DMS migrates JSONB values as CLOB.
mapLongVarcharAs String
Optional. When true, DMS migrates LONG values as VARCHAR.
maxFileSize Integer
Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is 32,768 KB.
pluginName String
Specifies the plugin to use to create a replication slot. Valid values: pglogical, test_decoding.
slotName String
Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.
afterConnectScript string
For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
babelfishDatabaseName string
The Babelfish for Aurora PostgreSQL database name for the endpoint.
captureDdls boolean
To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts.
databaseMode string
Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints.
ddlArtifactsSchema string
Sets the schema in which the operational DDL database artifacts are created. Default is public.
executeTimeout number
Sets the client statement timeout for the PostgreSQL instance, in seconds. Default value is 60.
failTasksOnLobTruncation boolean
When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. Default is false.
heartbeatEnable boolean
The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source.
heartbeatFrequency number
Sets the WAL heartbeat frequency (in minutes). Default value is 5.
heartbeatSchema string
Sets the schema in which the heartbeat artifacts are created. Default value is public.
mapBooleanAsBoolean boolean
You can use PostgreSQL endpoint settings to map a boolean as a boolean from your PostgreSQL source to an Amazon Redshift target. Default value is false.
mapJsonbAsClob boolean
Optional. When true, DMS migrates JSONB values as CLOB.
mapLongVarcharAs string
Optional. When true, DMS migrates LONG values as VARCHAR.
maxFileSize number
Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is 32,768 KB.
pluginName string
Specifies the plugin to use to create a replication slot. Valid values: pglogical, test_decoding.
slotName string
Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.
after_connect_script str
For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
babelfish_database_name str
The Babelfish for Aurora PostgreSQL database name for the endpoint.
capture_ddls bool
To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts.
database_mode str
Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints.
ddl_artifacts_schema str
Sets the schema in which the operational DDL database artifacts are created. Default is public.
execute_timeout int
Sets the client statement timeout for the PostgreSQL instance, in seconds. Default value is 60.
fail_tasks_on_lob_truncation bool
When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. Default is false.
heartbeat_enable bool
The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source.
heartbeat_frequency int
Sets the WAL heartbeat frequency (in minutes). Default value is 5.
heartbeat_schema str
Sets the schema in which the heartbeat artifacts are created. Default value is public.
map_boolean_as_boolean bool
You can use PostgreSQL endpoint settings to map a boolean as a boolean from your PostgreSQL source to an Amazon Redshift target. Default value is false.
map_jsonb_as_clob bool
Optional. When true, DMS migrates JSONB values as CLOB.
map_long_varchar_as str
Optional. When true, DMS migrates LONG values as VARCHAR.
max_file_size int
Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is 32,768 KB.
plugin_name str
Specifies the plugin to use to create a replication slot. Valid values: pglogical, test_decoding.
slot_name str
Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.
afterConnectScript String
For use with change data capture (CDC) only, this attribute has AWS DMS bypass foreign keys and user triggers to reduce the time it takes to bulk load data.
babelfishDatabaseName String
The Babelfish for Aurora PostgreSQL database name for the endpoint.
captureDdls Boolean
To capture DDL events, AWS DMS creates various artifacts in the PostgreSQL database when the task starts.
databaseMode String
Specifies the default behavior of the replication's handling of PostgreSQL-compatible endpoints that require some additional configuration, such as Babelfish endpoints.
ddlArtifactsSchema String
Sets the schema in which the operational DDL database artifacts are created. Default is public.
executeTimeout Number
Sets the client statement timeout for the PostgreSQL instance, in seconds. Default value is 60.
failTasksOnLobTruncation Boolean
When set to true, this value causes a task to fail if the actual size of a LOB column is greater than the specified LobMaxSize. Default is false.
heartbeatEnable Boolean
The write-ahead log (WAL) heartbeat feature mimics a dummy transaction. By doing this, it prevents idle logical replication slots from holding onto old WAL logs, which can result in storage full situations on the source.
heartbeatFrequency Number
Sets the WAL heartbeat frequency (in minutes). Default value is 5.
heartbeatSchema String
Sets the schema in which the heartbeat artifacts are created. Default value is public.
mapBooleanAsBoolean Boolean
You can use PostgreSQL endpoint settings to map a boolean as a boolean from your PostgreSQL source to an Amazon Redshift target. Default value is false.
mapJsonbAsClob Boolean
Optional. When true, DMS migrates JSONB values as CLOB.
mapLongVarcharAs String
Optional. When true, DMS migrates LONG values as VARCHAR.
maxFileSize Number
Specifies the maximum size (in KB) of any .csv file used to transfer data to PostgreSQL. Default is 32,768 KB.
pluginName String
Specifies the plugin to use to create a replication slot. Valid values: pglogical, test_decoding.
slotName String
Sets the name of a previously created logical replication slot for a CDC load of the PostgreSQL source instance.

EndpointRedisSettings
, EndpointRedisSettingsArgs

AuthType This property is required. string
The type of authentication to perform when connecting to a Redis target. Options include none, auth-token, and auth-role. The auth-token option requires an auth_password value to be provided. The auth-role option requires auth_user_name and auth_password values to be provided.
Port This property is required. int
Transmission Control Protocol (TCP) port for the endpoint.
ServerName This property is required. string
Fully qualified domain name of the endpoint.
AuthPassword string
The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.
AuthUserName string
The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.
SslCaCertificateArn string
The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
SslSecurityProtocol string
The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.
AuthType This property is required. string
The type of authentication to perform when connecting to a Redis target. Options include none, auth-token, and auth-role. The auth-token option requires an auth_password value to be provided. The auth-role option requires auth_user_name and auth_password values to be provided.
Port This property is required. int
Transmission Control Protocol (TCP) port for the endpoint.
ServerName This property is required. string
Fully qualified domain name of the endpoint.
AuthPassword string
The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.
AuthUserName string
The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.
SslCaCertificateArn string
The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
SslSecurityProtocol string
The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.
authType This property is required. String
The type of authentication to perform when connecting to a Redis target. Options include none, auth-token, and auth-role. The auth-token option requires an auth_password value to be provided. The auth-role option requires auth_user_name and auth_password values to be provided.
port This property is required. Integer
Transmission Control Protocol (TCP) port for the endpoint.
serverName This property is required. String
Fully qualified domain name of the endpoint.
authPassword String
The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.
authUserName String
The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.
sslCaCertificateArn String
The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
sslSecurityProtocol String
The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.
authType This property is required. string
The type of authentication to perform when connecting to a Redis target. Options include none, auth-token, and auth-role. The auth-token option requires an auth_password value to be provided. The auth-role option requires auth_user_name and auth_password values to be provided.
port This property is required. number
Transmission Control Protocol (TCP) port for the endpoint.
serverName This property is required. string
Fully qualified domain name of the endpoint.
authPassword string
The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.
authUserName string
The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.
sslCaCertificateArn string
The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
sslSecurityProtocol string
The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.
auth_type This property is required. str
The type of authentication to perform when connecting to a Redis target. Options include none, auth-token, and auth-role. The auth-token option requires an auth_password value to be provided. The auth-role option requires auth_user_name and auth_password values to be provided.
port This property is required. int
Transmission Control Protocol (TCP) port for the endpoint.
server_name This property is required. str
Fully qualified domain name of the endpoint.
auth_password str
The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.
auth_user_name str
The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.
ssl_ca_certificate_arn str
The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
ssl_security_protocol str
The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.
authType This property is required. String
The type of authentication to perform when connecting to a Redis target. Options include none, auth-token, and auth-role. The auth-token option requires an auth_password value to be provided. The auth-role option requires auth_user_name and auth_password values to be provided.
port This property is required. Number
Transmission Control Protocol (TCP) port for the endpoint.
serverName This property is required. String
Fully qualified domain name of the endpoint.
authPassword String
The password provided with the auth-role and auth-token options of the AuthType setting for a Redis target endpoint.
authUserName String
The username provided with the auth-role option of the AuthType setting for a Redis target endpoint.
sslCaCertificateArn String
The Amazon Resource Name (ARN) for the certificate authority (CA) that DMS uses to connect to your Redis target endpoint.
sslSecurityProtocol String
The plaintext option doesn't provide Transport Layer Security (TLS) encryption for traffic between endpoint and database. Options include plaintext, ssl-encryption. The default is ssl-encryption.

EndpointRedshiftSettings
, EndpointRedshiftSettingsArgs

BucketFolder string
Custom S3 Bucket Object prefix for intermediate storage.
BucketName string
Custom S3 Bucket name for intermediate storage.
EncryptionMode string
The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.
ServerSideEncryptionKmsKeyId string
ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
ServiceAccessRoleArn string
Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.
BucketFolder string
Custom S3 Bucket Object prefix for intermediate storage.
BucketName string
Custom S3 Bucket name for intermediate storage.
EncryptionMode string
The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.
ServerSideEncryptionKmsKeyId string
ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
ServiceAccessRoleArn string
Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.
bucketFolder String
Custom S3 Bucket Object prefix for intermediate storage.
bucketName String
Custom S3 Bucket name for intermediate storage.
encryptionMode String
The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.
serverSideEncryptionKmsKeyId String
ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
serviceAccessRoleArn String
Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.
bucketFolder string
Custom S3 Bucket Object prefix for intermediate storage.
bucketName string
Custom S3 Bucket name for intermediate storage.
encryptionMode string
The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.
serverSideEncryptionKmsKeyId string
ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
serviceAccessRoleArn string
Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.
bucket_folder str
Custom S3 Bucket Object prefix for intermediate storage.
bucket_name str
Custom S3 Bucket name for intermediate storage.
encryption_mode str
The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.
server_side_encryption_kms_key_id str
ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
service_access_role_arn str
Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.
bucketFolder String
Custom S3 Bucket Object prefix for intermediate storage.
bucketName String
Custom S3 Bucket name for intermediate storage.
encryptionMode String
The server-side encryption mode that you want to encrypt your intermediate .csv object files copied to S3. Defaults to SSE_S3. Valid values are SSE_S3 and SSE_KMS.
serverSideEncryptionKmsKeyId String
ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
serviceAccessRoleArn String
Amazon Resource Name (ARN) of the IAM Role with permissions to read from or write to the S3 Bucket for intermediate storage.

EndpointS3Settings
, EndpointS3SettingsArgs

AddColumnName bool
Whether to add column name information to the .csv output file. Default is false.
BucketFolder string
S3 object prefix.
BucketName string
S3 bucket name.
CannedAclForObjects string
Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
CdcInsertsAndUpdates bool
Whether to write insert and update operations to .csv or .parquet output files. Default is false.
CdcInsertsOnly bool
Whether to write insert operations to .csv or .parquet output files. Default is false.
CdcMaxBatchInterval int
Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.
CdcMinFileSize int
Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.
CdcPath string
Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
CompressionType string
Set to compress target files. Default is NONE. Valid values are GZIP and NONE.
CsvDelimiter string
Delimiter used to separate columns in the source files. Default is ,.
CsvNoSupValue string
String to use for all columns not included in the supplemental log.
CsvNullValue string
String to use as null when writing to the target.
CsvRowDelimiter string
Delimiter used to separate rows in the source files. Default is \n.
DataFormat string
Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.
DataPageSize int
Size of one data page in bytes. Default is 1048576 (1 MiB).
DatePartitionDelimiter string
Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.
DatePartitionEnabled bool
Partition S3 bucket folders based on transaction commit dates. Default is false.
DatePartitionSequence string
Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.
DictPageSizeLimit int
Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).
EnableStatistics bool
Whether to enable statistics for Parquet pages and row groups. Default is true.
EncodingType string
Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.
EncryptionMode string
Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3.
ExternalTableDefinition string
JSON document that describes how AWS DMS should interpret the data.
GlueCatalogGeneration bool
Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
IgnoreHeaderRows int
When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.
IncludeOpForFullLoad bool
Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
MaxFileSize int
Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).
ParquetTimestampInMillisecond bool
Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.
ParquetVersion string
Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.
PreserveTransactions bool
Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.
Rfc4180 bool
For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
RowGroupLength int
Number of rows in a row group. Default is 10000.
ServerSideEncryptionKmsKeyId string
ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
ServiceAccessRoleArn string
ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
TimestampColumnName string
Column to add with timestamp information to the endpoint data for an Amazon S3 target.
UseCsvNoSupValue bool
Whether to use csv_no_sup_value for columns not included in the supplemental log.
UseTaskStartTimeForFullLoadTimestamp bool
When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.
AddColumnName bool
Whether to add column name information to the .csv output file. Default is false.
BucketFolder string
S3 object prefix.
BucketName string
S3 bucket name.
CannedAclForObjects string
Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
CdcInsertsAndUpdates bool
Whether to write insert and update operations to .csv or .parquet output files. Default is false.
CdcInsertsOnly bool
Whether to write insert operations to .csv or .parquet output files. Default is false.
CdcMaxBatchInterval int
Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.
CdcMinFileSize int
Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.
CdcPath string
Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
CompressionType string
Set to compress target files. Default is NONE. Valid values are GZIP and NONE.
CsvDelimiter string
Delimiter used to separate columns in the source files. Default is ,.
CsvNoSupValue string
String to use for all columns not included in the supplemental log.
CsvNullValue string
String to use as null when writing to the target.
CsvRowDelimiter string
Delimiter used to separate rows in the source files. Default is \n.
DataFormat string
Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.
DataPageSize int
Size of one data page in bytes. Default is 1048576 (1 MiB).
DatePartitionDelimiter string
Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.
DatePartitionEnabled bool
Partition S3 bucket folders based on transaction commit dates. Default is false.
DatePartitionSequence string
Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.
DictPageSizeLimit int
Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).
EnableStatistics bool
Whether to enable statistics for Parquet pages and row groups. Default is true.
EncodingType string
Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.
EncryptionMode string
Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3.
ExternalTableDefinition string
JSON document that describes how AWS DMS should interpret the data.
GlueCatalogGeneration bool
Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
IgnoreHeaderRows int
When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.
IncludeOpForFullLoad bool
Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
MaxFileSize int
Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).
ParquetTimestampInMillisecond bool
Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.
ParquetVersion string
Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.
PreserveTransactions bool
Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.
Rfc4180 bool
For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
RowGroupLength int
Number of rows in a row group. Default is 10000.
ServerSideEncryptionKmsKeyId string
ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
ServiceAccessRoleArn string
ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
TimestampColumnName string
Column to add with timestamp information to the endpoint data for an Amazon S3 target.
UseCsvNoSupValue bool
Whether to use csv_no_sup_value for columns not included in the supplemental log.
UseTaskStartTimeForFullLoadTimestamp bool
When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.
addColumnName Boolean
Whether to add column name information to the .csv output file. Default is false.
bucketFolder String
S3 object prefix.
bucketName String
S3 bucket name.
cannedAclForObjects String
Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
cdcInsertsAndUpdates Boolean
Whether to write insert and update operations to .csv or .parquet output files. Default is false.
cdcInsertsOnly Boolean
Whether to write insert operations to .csv or .parquet output files. Default is false.
cdcMaxBatchInterval Integer
Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.
cdcMinFileSize Integer
Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.
cdcPath String
Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
compressionType String
Set to compress target files. Default is NONE. Valid values are GZIP and NONE.
csvDelimiter String
Delimiter used to separate columns in the source files. Default is ,.
csvNoSupValue String
String to use for all columns not included in the supplemental log.
csvNullValue String
String to use as null when writing to the target.
csvRowDelimiter String
Delimiter used to separate rows in the source files. Default is \n.
dataFormat String
Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.
dataPageSize Integer
Size of one data page in bytes. Default is 1048576 (1 MiB).
datePartitionDelimiter String
Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.
datePartitionEnabled Boolean
Partition S3 bucket folders based on transaction commit dates. Default is false.
datePartitionSequence String
Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.
dictPageSizeLimit Integer
Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).
enableStatistics Boolean
Whether to enable statistics for Parquet pages and row groups. Default is true.
encodingType String
Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.
encryptionMode String
Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3.
externalTableDefinition String
JSON document that describes how AWS DMS should interpret the data.
glueCatalogGeneration Boolean
Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
ignoreHeaderRows Integer
When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.
includeOpForFullLoad Boolean
Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
maxFileSize Integer
Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).
parquetTimestampInMillisecond Boolean
Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.
parquetVersion String
Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.
preserveTransactions Boolean
Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.
rfc4180 Boolean
For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
rowGroupLength Integer
Number of rows in a row group. Default is 10000.
serverSideEncryptionKmsKeyId String
ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
serviceAccessRoleArn String
ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
timestampColumnName String
Column to add with timestamp information to the endpoint data for an Amazon S3 target.
useCsvNoSupValue Boolean
Whether to use csv_no_sup_value for columns not included in the supplemental log.
useTaskStartTimeForFullLoadTimestamp Boolean
When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.
addColumnName boolean
Whether to add column name information to the .csv output file. Default is false.
bucketFolder string
S3 object prefix.
bucketName string
S3 bucket name.
cannedAclForObjects string
Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
cdcInsertsAndUpdates boolean
Whether to write insert and update operations to .csv or .parquet output files. Default is false.
cdcInsertsOnly boolean
Whether to write insert operations to .csv or .parquet output files. Default is false.
cdcMaxBatchInterval number
Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.
cdcMinFileSize number
Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.
cdcPath string
Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
compressionType string
Set to compress target files. Default is NONE. Valid values are GZIP and NONE.
csvDelimiter string
Delimiter used to separate columns in the source files. Default is ,.
csvNoSupValue string
String to use for all columns not included in the supplemental log.
csvNullValue string
String to use as null when writing to the target.
csvRowDelimiter string
Delimiter used to separate rows in the source files. Default is \n.
dataFormat string
Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.
dataPageSize number
Size of one data page in bytes. Default is 1048576 (1 MiB).
datePartitionDelimiter string
Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.
datePartitionEnabled boolean
Partition S3 bucket folders based on transaction commit dates. Default is false.
datePartitionSequence string
Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.
dictPageSizeLimit number
Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).
enableStatistics boolean
Whether to enable statistics for Parquet pages and row groups. Default is true.
encodingType string
Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.
encryptionMode string
Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3.
externalTableDefinition string
JSON document that describes how AWS DMS should interpret the data.
glueCatalogGeneration boolean
Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
ignoreHeaderRows number
When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.
includeOpForFullLoad boolean
Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
maxFileSize number
Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).
parquetTimestampInMillisecond boolean
Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.
parquetVersion string
Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.
preserveTransactions boolean
Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.
rfc4180 boolean
For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
rowGroupLength number
Number of rows in a row group. Default is 10000.
serverSideEncryptionKmsKeyId string
ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
serviceAccessRoleArn string
ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
timestampColumnName string
Column to add with timestamp information to the endpoint data for an Amazon S3 target.
useCsvNoSupValue boolean
Whether to use csv_no_sup_value for columns not included in the supplemental log.
useTaskStartTimeForFullLoadTimestamp boolean
When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.
add_column_name bool
Whether to add column name information to the .csv output file. Default is false.
bucket_folder str
S3 object prefix.
bucket_name str
S3 bucket name.
canned_acl_for_objects str
Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
cdc_inserts_and_updates bool
Whether to write insert and update operations to .csv or .parquet output files. Default is false.
cdc_inserts_only bool
Whether to write insert operations to .csv or .parquet output files. Default is false.
cdc_max_batch_interval int
Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.
cdc_min_file_size int
Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.
cdc_path str
Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
compression_type str
Set to compress target files. Default is NONE. Valid values are GZIP and NONE.
csv_delimiter str
Delimiter used to separate columns in the source files. Default is ,.
csv_no_sup_value str
String to use for all columns not included in the supplemental log.
csv_null_value str
String to use as null when writing to the target.
csv_row_delimiter str
Delimiter used to separate rows in the source files. Default is \n.
data_format str
Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.
data_page_size int
Size of one data page in bytes. Default is 1048576 (1 MiB).
date_partition_delimiter str
Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.
date_partition_enabled bool
Partition S3 bucket folders based on transaction commit dates. Default is false.
date_partition_sequence str
Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.
dict_page_size_limit int
Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).
enable_statistics bool
Whether to enable statistics for Parquet pages and row groups. Default is true.
encoding_type str
Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.
encryption_mode str
Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3.
external_table_definition str
JSON document that describes how AWS DMS should interpret the data.
glue_catalog_generation bool
Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
ignore_header_rows int
When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.
include_op_for_full_load bool
Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
max_file_size int
Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).
parquet_timestamp_in_millisecond bool
Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.
parquet_version str
Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.
preserve_transactions bool
Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.
rfc4180 bool
For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
row_group_length int
Number of rows in a row group. Default is 10000.
server_side_encryption_kms_key_id str
ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
service_access_role_arn str
ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
timestamp_column_name str
Column to add with timestamp information to the endpoint data for an Amazon S3 target.
use_csv_no_sup_value bool
Whether to use csv_no_sup_value for columns not included in the supplemental log.
use_task_start_time_for_full_load_timestamp bool
When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.
addColumnName Boolean
Whether to add column name information to the .csv output file. Default is false.
bucketFolder String
S3 object prefix.
bucketName String
S3 bucket name.
cannedAclForObjects String
Predefined (canned) access control list for objects created in an S3 bucket. Valid values include none, private, public-read, public-read-write, authenticated-read, aws-exec-read, bucket-owner-read, and bucket-owner-full-control. Default is none.
cdcInsertsAndUpdates Boolean
Whether to write insert and update operations to .csv or .parquet output files. Default is false.
cdcInsertsOnly Boolean
Whether to write insert operations to .csv or .parquet output files. Default is false.
cdcMaxBatchInterval Number
Maximum length of the interval, defined in seconds, after which to output a file to Amazon S3. Default is 60.
cdcMinFileSize Number
Minimum file size condition as defined in kilobytes to output a file to Amazon S3. Default is 32000. NOTE: Previously, this setting was measured in megabytes but now represents kilobytes. Update configurations accordingly.
cdcPath String
Folder path of CDC files. For an S3 source, this setting is required if a task captures change data; otherwise, it's optional. If cdc_path is set, AWS DMS reads CDC files from this path and replicates the data changes to the target endpoint. Supported in AWS DMS versions 3.4.2 and later.
compressionType String
Set to compress target files. Default is NONE. Valid values are GZIP and NONE.
csvDelimiter String
Delimiter used to separate columns in the source files. Default is ,.
csvNoSupValue String
String to use for all columns not included in the supplemental log.
csvNullValue String
String to use as null when writing to the target.
csvRowDelimiter String
Delimiter used to separate rows in the source files. Default is \n.
dataFormat String
Output format for the files that AWS DMS uses to create S3 objects. Valid values are csv and parquet. Default is csv.
dataPageSize Number
Size of one data page in bytes. Default is 1048576 (1 MiB).
datePartitionDelimiter String
Date separating delimiter to use during folder partitioning. Valid values are SLASH, UNDERSCORE, DASH, and NONE. Default is SLASH.
datePartitionEnabled Boolean
Partition S3 bucket folders based on transaction commit dates. Default is false.
datePartitionSequence String
Date format to use during folder partitioning. Use this parameter when date_partition_enabled is set to true. Valid values are YYYYMMDD, YYYYMMDDHH, YYYYMM, MMYYYYDD, and DDMMYYYY. Default is YYYYMMDD.
dictPageSizeLimit Number
Maximum size in bytes of an encoded dictionary page of a column. Default is 1048576 (1 MiB).
enableStatistics Boolean
Whether to enable statistics for Parquet pages and row groups. Default is true.
encodingType String
Type of encoding to use. Valid values are rle_dictionary, plain, and plain_dictionary. Default is rle_dictionary.
encryptionMode String
Server-side encryption mode that you want to encrypt your .csv or .parquet object files copied to S3. Valid values are SSE_S3 and SSE_KMS. Default is SSE_S3.
externalTableDefinition String
JSON document that describes how AWS DMS should interpret the data.
glueCatalogGeneration Boolean
Whether to integrate AWS Glue Data Catalog with an Amazon S3 target. See Using AWS Glue Data Catalog with an Amazon S3 target for AWS DMS for more information. Default is false.
ignoreHeaderRows Number
When this value is set to 1, DMS ignores the first row header in a .csv file. Default is 0.
includeOpForFullLoad Boolean
Whether to enable a full load to write INSERT operations to the .csv output files only to indicate how the rows were added to the source database. Default is false.
maxFileSize Number
Maximum size (in KB) of any .csv file to be created while migrating to an S3 target during full load. Valid values are from 1 to 1048576. Default is 1048576 (1 GB).
parquetTimestampInMillisecond Boolean
Specifies the precision of any TIMESTAMP column values written to an S3 object file in .parquet format. Default is false.
parquetVersion String
Version of the .parquet file format. Default is parquet-1-0. Valid values are parquet-1-0 and parquet-2-0.
preserveTransactions Boolean
Whether DMS saves the transaction order for a CDC load on the S3 target specified by cdc_path. Default is false.
rfc4180 Boolean
For an S3 source, whether each leading double quotation mark has to be followed by an ending double quotation mark. Default is true.
rowGroupLength Number
Number of rows in a row group. Default is 10000.
serverSideEncryptionKmsKeyId String
ARN or Id of KMS Key to use when encryption_mode is SSE_KMS.
serviceAccessRoleArn String
ARN of the IAM Role with permissions to read from or write to the S3 Bucket.
timestampColumnName String
Column to add with timestamp information to the endpoint data for an Amazon S3 target.
useCsvNoSupValue Boolean
Whether to use csv_no_sup_value for columns not included in the supplemental log.
useTaskStartTimeForFullLoadTimestamp Boolean
When set to true, uses the task start time as the timestamp column value instead of the time data is written to target. For full load, when set to true, each row of the timestamp column contains the task start time. For CDC loads, each row of the timestamp column contains the transaction commit time. When set to false, the full load timestamp in the timestamp column increments with the time data arrives at the target. Default is false.

Import

Using pulumi import, import endpoints using the endpoint_id. For example:

$ pulumi import aws:dms/endpoint:Endpoint test test-dms-endpoint-tf
Copy

To learn more about importing existing cloud resources, see Importing resources.

Package Details

Repository
AWS Classic pulumi/pulumi-aws
License
Apache-2.0
Notes
This Pulumi package is based on the aws Terraform Provider.