1. Packages
  2. Confluent Provider
  3. API Docs
  4. KafkaClusterConfig
Confluent v2.23.0 published on Tuesday, Apr 1, 2025 by Pulumi

confluentcloud.KafkaClusterConfig

Explore with Pulumi AI

General Availability

confluentcloud.KafkaClusterConfig provides a Kafka cluster config resource that enables updating configs on a Dedicated Kafka cluster on Confluent Cloud.

Example Usage

Option #1: Manage multiple Kafka clusters in the same Pulumi Stack

import * as pulumi from "@pulumi/pulumi";
import * as confluentcloud from "@pulumi/confluentcloud";

const orders = new confluentcloud.KafkaClusterConfig("orders", {
    kafkaCluster: {
        id: dedicated.id,
    },
    restEndpoint: dedicated.restEndpoint,
    config: {
        "auto.create.topics.enable": "true",
        "log.retention.ms": "604800123",
    },
    credentials: {
        key: app_manager_kafka_api_key.id,
        secret: app_manager_kafka_api_key.secret,
    },
});
Copy
import pulumi
import pulumi_confluentcloud as confluentcloud

orders = confluentcloud.KafkaClusterConfig("orders",
    kafka_cluster={
        "id": dedicated["id"],
    },
    rest_endpoint=dedicated["restEndpoint"],
    config={
        "auto.create.topics.enable": "true",
        "log.retention.ms": "604800123",
    },
    credentials={
        "key": app_manager_kafka_api_key["id"],
        "secret": app_manager_kafka_api_key["secret"],
    })
Copy
package main

import (
	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := confluentcloud.NewKafkaClusterConfig(ctx, "orders", &confluentcloud.KafkaClusterConfigArgs{
			KafkaCluster: &confluentcloud.KafkaClusterConfigKafkaClusterArgs{
				Id: pulumi.Any(dedicated.Id),
			},
			RestEndpoint: pulumi.Any(dedicated.RestEndpoint),
			Config: pulumi.StringMap{
				"auto.create.topics.enable": pulumi.String("true"),
				"log.retention.ms":          pulumi.String("604800123"),
			},
			Credentials: &confluentcloud.KafkaClusterConfigCredentialsArgs{
				Key:    pulumi.Any(app_manager_kafka_api_key.Id),
				Secret: pulumi.Any(app_manager_kafka_api_key.Secret),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Copy
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using ConfluentCloud = Pulumi.ConfluentCloud;

return await Deployment.RunAsync(() => 
{
    var orders = new ConfluentCloud.KafkaClusterConfig("orders", new()
    {
        KafkaCluster = new ConfluentCloud.Inputs.KafkaClusterConfigKafkaClusterArgs
        {
            Id = dedicated.Id,
        },
        RestEndpoint = dedicated.RestEndpoint,
        Config = 
        {
            { "auto.create.topics.enable", "true" },
            { "log.retention.ms", "604800123" },
        },
        Credentials = new ConfluentCloud.Inputs.KafkaClusterConfigCredentialsArgs
        {
            Key = app_manager_kafka_api_key.Id,
            Secret = app_manager_kafka_api_key.Secret,
        },
    });

});
Copy
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.confluentcloud.KafkaClusterConfig;
import com.pulumi.confluentcloud.KafkaClusterConfigArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterConfigKafkaClusterArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterConfigCredentialsArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var orders = new KafkaClusterConfig("orders", KafkaClusterConfigArgs.builder()
            .kafkaCluster(KafkaClusterConfigKafkaClusterArgs.builder()
                .id(dedicated.id())
                .build())
            .restEndpoint(dedicated.restEndpoint())
            .config(Map.ofEntries(
                Map.entry("auto.create.topics.enable", "true"),
                Map.entry("log.retention.ms", "604800123")
            ))
            .credentials(KafkaClusterConfigCredentialsArgs.builder()
                .key(app_manager_kafka_api_key.id())
                .secret(app_manager_kafka_api_key.secret())
                .build())
            .build());

    }
}
Copy
resources:
  orders:
    type: confluentcloud:KafkaClusterConfig
    properties:
      kafkaCluster:
        id: ${dedicated.id}
      restEndpoint: ${dedicated.restEndpoint}
      config:
        auto.create.topics.enable: 'true'
        log.retention.ms: '604800123'
      credentials:
        key: ${["app-manager-kafka-api-key"].id}
        secret: ${["app-manager-kafka-api-key"].secret}
Copy

Option #2: Manage a single Kafka cluster in the same Pulumi Stack

import * as pulumi from "@pulumi/pulumi";
import * as confluentcloud from "@pulumi/confluentcloud";

const orders = new confluentcloud.KafkaClusterConfig("orders", {config: {
    "auto.create.topics.enable": "true",
    "log.retention.ms": "604800123",
}});
Copy
import pulumi
import pulumi_confluentcloud as confluentcloud

orders = confluentcloud.KafkaClusterConfig("orders", config={
    "auto.create.topics.enable": "true",
    "log.retention.ms": "604800123",
})
Copy
package main

import (
	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		_, err := confluentcloud.NewKafkaClusterConfig(ctx, "orders", &confluentcloud.KafkaClusterConfigArgs{
			Config: pulumi.StringMap{
				"auto.create.topics.enable": pulumi.String("true"),
				"log.retention.ms":          pulumi.String("604800123"),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Copy
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using ConfluentCloud = Pulumi.ConfluentCloud;

return await Deployment.RunAsync(() => 
{
    var orders = new ConfluentCloud.KafkaClusterConfig("orders", new()
    {
        Config = 
        {
            { "auto.create.topics.enable", "true" },
            { "log.retention.ms", "604800123" },
        },
    });

});
Copy
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.confluentcloud.KafkaClusterConfig;
import com.pulumi.confluentcloud.KafkaClusterConfigArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        var orders = new KafkaClusterConfig("orders", KafkaClusterConfigArgs.builder()
            .config(Map.ofEntries(
                Map.entry("auto.create.topics.enable", "true"),
                Map.entry("log.retention.ms", "604800123")
            ))
            .build());

    }
}
Copy
resources:
  orders:
    type: confluentcloud:KafkaClusterConfig
    properties:
      config:
        auto.create.topics.enable: 'true'
        log.retention.ms: '604800123'
Copy

Create KafkaClusterConfig Resource

Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

Constructor syntax

new KafkaClusterConfig(name: string, args: KafkaClusterConfigArgs, opts?: CustomResourceOptions);
@overload
def KafkaClusterConfig(resource_name: str,
                       args: KafkaClusterConfigArgs,
                       opts: Optional[ResourceOptions] = None)

@overload
def KafkaClusterConfig(resource_name: str,
                       opts: Optional[ResourceOptions] = None,
                       config: Optional[Mapping[str, str]] = None,
                       credentials: Optional[KafkaClusterConfigCredentialsArgs] = None,
                       kafka_cluster: Optional[KafkaClusterConfigKafkaClusterArgs] = None,
                       rest_endpoint: Optional[str] = None)
func NewKafkaClusterConfig(ctx *Context, name string, args KafkaClusterConfigArgs, opts ...ResourceOption) (*KafkaClusterConfig, error)
public KafkaClusterConfig(string name, KafkaClusterConfigArgs args, CustomResourceOptions? opts = null)
public KafkaClusterConfig(String name, KafkaClusterConfigArgs args)
public KafkaClusterConfig(String name, KafkaClusterConfigArgs args, CustomResourceOptions options)
type: confluentcloud:KafkaClusterConfig
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

Parameters

name This property is required. string
The unique name of the resource.
args This property is required. KafkaClusterConfigArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name This property is required. str
The unique name of the resource.
args This property is required. KafkaClusterConfigArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name This property is required. string
The unique name of the resource.
args This property is required. KafkaClusterConfigArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name This property is required. string
The unique name of the resource.
args This property is required. KafkaClusterConfigArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name This property is required. String
The unique name of the resource.
args This property is required. KafkaClusterConfigArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.

Constructor example

The following reference example uses placeholder values for all input properties.

var kafkaClusterConfigResource = new ConfluentCloud.KafkaClusterConfig("kafkaClusterConfigResource", new()
{
    Config = 
    {
        { "string", "string" },
    },
    Credentials = new ConfluentCloud.Inputs.KafkaClusterConfigCredentialsArgs
    {
        Key = "string",
        Secret = "string",
    },
    KafkaCluster = new ConfluentCloud.Inputs.KafkaClusterConfigKafkaClusterArgs
    {
        Id = "string",
    },
    RestEndpoint = "string",
});
Copy
example, err := confluentcloud.NewKafkaClusterConfig(ctx, "kafkaClusterConfigResource", &confluentcloud.KafkaClusterConfigArgs{
	Config: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	Credentials: &confluentcloud.KafkaClusterConfigCredentialsArgs{
		Key:    pulumi.String("string"),
		Secret: pulumi.String("string"),
	},
	KafkaCluster: &confluentcloud.KafkaClusterConfigKafkaClusterArgs{
		Id: pulumi.String("string"),
	},
	RestEndpoint: pulumi.String("string"),
})
Copy
var kafkaClusterConfigResource = new KafkaClusterConfig("kafkaClusterConfigResource", KafkaClusterConfigArgs.builder()
    .config(Map.of("string", "string"))
    .credentials(KafkaClusterConfigCredentialsArgs.builder()
        .key("string")
        .secret("string")
        .build())
    .kafkaCluster(KafkaClusterConfigKafkaClusterArgs.builder()
        .id("string")
        .build())
    .restEndpoint("string")
    .build());
Copy
kafka_cluster_config_resource = confluentcloud.KafkaClusterConfig("kafkaClusterConfigResource",
    config={
        "string": "string",
    },
    credentials={
        "key": "string",
        "secret": "string",
    },
    kafka_cluster={
        "id": "string",
    },
    rest_endpoint="string")
Copy
const kafkaClusterConfigResource = new confluentcloud.KafkaClusterConfig("kafkaClusterConfigResource", {
    config: {
        string: "string",
    },
    credentials: {
        key: "string",
        secret: "string",
    },
    kafkaCluster: {
        id: "string",
    },
    restEndpoint: "string",
});
Copy
type: confluentcloud:KafkaClusterConfig
properties:
    config:
        string: string
    credentials:
        key: string
        secret: string
    kafkaCluster:
        id: string
    restEndpoint: string
Copy

KafkaClusterConfig Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

The KafkaClusterConfig resource accepts the following input properties:

Config This property is required. Dictionary<string, string>
The custom cluster settings to set:
Credentials Pulumi.ConfluentCloud.Inputs.KafkaClusterConfigCredentials
The Cluster API Credentials.
KafkaCluster Changes to this property will trigger replacement. Pulumi.ConfluentCloud.Inputs.KafkaClusterConfigKafkaCluster
RestEndpoint Changes to this property will trigger replacement. string
The REST endpoint of the Dedicated Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443.
Config This property is required. map[string]string
The custom cluster settings to set:
Credentials KafkaClusterConfigCredentialsArgs
The Cluster API Credentials.
KafkaCluster Changes to this property will trigger replacement. KafkaClusterConfigKafkaClusterArgs
RestEndpoint Changes to this property will trigger replacement. string
The REST endpoint of the Dedicated Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443.
config This property is required. Map<String,String>
The custom cluster settings to set:
credentials KafkaClusterConfigCredentials
The Cluster API Credentials.
kafkaCluster Changes to this property will trigger replacement. KafkaClusterConfigKafkaCluster
restEndpoint Changes to this property will trigger replacement. String
The REST endpoint of the Dedicated Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443.
config This property is required. {[key: string]: string}
The custom cluster settings to set:
credentials KafkaClusterConfigCredentials
The Cluster API Credentials.
kafkaCluster Changes to this property will trigger replacement. KafkaClusterConfigKafkaCluster
restEndpoint Changes to this property will trigger replacement. string
The REST endpoint of the Dedicated Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443.
config This property is required. Mapping[str, str]
The custom cluster settings to set:
credentials KafkaClusterConfigCredentialsArgs
The Cluster API Credentials.
kafka_cluster Changes to this property will trigger replacement. KafkaClusterConfigKafkaClusterArgs
rest_endpoint Changes to this property will trigger replacement. str
The REST endpoint of the Dedicated Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443.
config This property is required. Map<String>
The custom cluster settings to set:
credentials Property Map
The Cluster API Credentials.
kafkaCluster Changes to this property will trigger replacement. Property Map
restEndpoint Changes to this property will trigger replacement. String
The REST endpoint of the Dedicated Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443.

Outputs

All input properties are implicitly available as output properties. Additionally, the KafkaClusterConfig resource produces the following output properties:

Id string
The provider-assigned unique ID for this managed resource.
Id string
The provider-assigned unique ID for this managed resource.
id String
The provider-assigned unique ID for this managed resource.
id string
The provider-assigned unique ID for this managed resource.
id str
The provider-assigned unique ID for this managed resource.
id String
The provider-assigned unique ID for this managed resource.

Look up Existing KafkaClusterConfig Resource

Get an existing KafkaClusterConfig resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: KafkaClusterConfigState, opts?: CustomResourceOptions): KafkaClusterConfig
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        config: Optional[Mapping[str, str]] = None,
        credentials: Optional[KafkaClusterConfigCredentialsArgs] = None,
        kafka_cluster: Optional[KafkaClusterConfigKafkaClusterArgs] = None,
        rest_endpoint: Optional[str] = None) -> KafkaClusterConfig
func GetKafkaClusterConfig(ctx *Context, name string, id IDInput, state *KafkaClusterConfigState, opts ...ResourceOption) (*KafkaClusterConfig, error)
public static KafkaClusterConfig Get(string name, Input<string> id, KafkaClusterConfigState? state, CustomResourceOptions? opts = null)
public static KafkaClusterConfig get(String name, Output<String> id, KafkaClusterConfigState state, CustomResourceOptions options)
resources:
  _:
    type: confluentcloud:KafkaClusterConfig
    get:
      id: ${id}
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
Config Dictionary<string, string>
The custom cluster settings to set:
Credentials Pulumi.ConfluentCloud.Inputs.KafkaClusterConfigCredentials
The Cluster API Credentials.
KafkaCluster Changes to this property will trigger replacement. Pulumi.ConfluentCloud.Inputs.KafkaClusterConfigKafkaCluster
RestEndpoint Changes to this property will trigger replacement. string
The REST endpoint of the Dedicated Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443.
Config map[string]string
The custom cluster settings to set:
Credentials KafkaClusterConfigCredentialsArgs
The Cluster API Credentials.
KafkaCluster Changes to this property will trigger replacement. KafkaClusterConfigKafkaClusterArgs
RestEndpoint Changes to this property will trigger replacement. string
The REST endpoint of the Dedicated Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443.
config Map<String,String>
The custom cluster settings to set:
credentials KafkaClusterConfigCredentials
The Cluster API Credentials.
kafkaCluster Changes to this property will trigger replacement. KafkaClusterConfigKafkaCluster
restEndpoint Changes to this property will trigger replacement. String
The REST endpoint of the Dedicated Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443.
config {[key: string]: string}
The custom cluster settings to set:
credentials KafkaClusterConfigCredentials
The Cluster API Credentials.
kafkaCluster Changes to this property will trigger replacement. KafkaClusterConfigKafkaCluster
restEndpoint Changes to this property will trigger replacement. string
The REST endpoint of the Dedicated Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443.
config Mapping[str, str]
The custom cluster settings to set:
credentials KafkaClusterConfigCredentialsArgs
The Cluster API Credentials.
kafka_cluster Changes to this property will trigger replacement. KafkaClusterConfigKafkaClusterArgs
rest_endpoint Changes to this property will trigger replacement. str
The REST endpoint of the Dedicated Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443.
config Map<String>
The custom cluster settings to set:
credentials Property Map
The Cluster API Credentials.
kafkaCluster Changes to this property will trigger replacement. Property Map
restEndpoint Changes to this property will trigger replacement. String
The REST endpoint of the Dedicated Kafka cluster, for example, https://pkc-00000.us-central1.gcp.confluent.cloud:443.

Supporting Types

KafkaClusterConfigCredentials
, KafkaClusterConfigCredentialsArgs

Key This property is required. string
The Kafka API Key.
Secret This property is required. string

The Kafka API Secret.

Note: A Kafka API key consists of a key and a secret. Kafka API keys are required to interact with Kafka clusters in Confluent Cloud. Each Kafka API key is valid for one specific Kafka cluster.

Note: Use Option #2 to simplify the key rotation process. When using Option #1, to rotate a Kafka API key, create a new Kafka API key, update the credentials block in all configuration files to use the new Kafka API key, run pulumi up -target="confluent_kafka_cluster_config.orders", and remove the old Kafka API key. Alternatively, in case the old Kafka API Key was deleted already, you might need to run pulumi preview -refresh=false -target="confluent_kafka_cluster_config.orders" -out=rotate-kafka-api-key and pulumi up rotate-kafka-api-key instead.

Key This property is required. string
The Kafka API Key.
Secret This property is required. string

The Kafka API Secret.

Note: A Kafka API key consists of a key and a secret. Kafka API keys are required to interact with Kafka clusters in Confluent Cloud. Each Kafka API key is valid for one specific Kafka cluster.

Note: Use Option #2 to simplify the key rotation process. When using Option #1, to rotate a Kafka API key, create a new Kafka API key, update the credentials block in all configuration files to use the new Kafka API key, run pulumi up -target="confluent_kafka_cluster_config.orders", and remove the old Kafka API key. Alternatively, in case the old Kafka API Key was deleted already, you might need to run pulumi preview -refresh=false -target="confluent_kafka_cluster_config.orders" -out=rotate-kafka-api-key and pulumi up rotate-kafka-api-key instead.

key This property is required. String
The Kafka API Key.
secret This property is required. String

The Kafka API Secret.

Note: A Kafka API key consists of a key and a secret. Kafka API keys are required to interact with Kafka clusters in Confluent Cloud. Each Kafka API key is valid for one specific Kafka cluster.

Note: Use Option #2 to simplify the key rotation process. When using Option #1, to rotate a Kafka API key, create a new Kafka API key, update the credentials block in all configuration files to use the new Kafka API key, run pulumi up -target="confluent_kafka_cluster_config.orders", and remove the old Kafka API key. Alternatively, in case the old Kafka API Key was deleted already, you might need to run pulumi preview -refresh=false -target="confluent_kafka_cluster_config.orders" -out=rotate-kafka-api-key and pulumi up rotate-kafka-api-key instead.

key This property is required. string
The Kafka API Key.
secret This property is required. string

The Kafka API Secret.

Note: A Kafka API key consists of a key and a secret. Kafka API keys are required to interact with Kafka clusters in Confluent Cloud. Each Kafka API key is valid for one specific Kafka cluster.

Note: Use Option #2 to simplify the key rotation process. When using Option #1, to rotate a Kafka API key, create a new Kafka API key, update the credentials block in all configuration files to use the new Kafka API key, run pulumi up -target="confluent_kafka_cluster_config.orders", and remove the old Kafka API key. Alternatively, in case the old Kafka API Key was deleted already, you might need to run pulumi preview -refresh=false -target="confluent_kafka_cluster_config.orders" -out=rotate-kafka-api-key and pulumi up rotate-kafka-api-key instead.

key This property is required. str
The Kafka API Key.
secret This property is required. str

The Kafka API Secret.

Note: A Kafka API key consists of a key and a secret. Kafka API keys are required to interact with Kafka clusters in Confluent Cloud. Each Kafka API key is valid for one specific Kafka cluster.

Note: Use Option #2 to simplify the key rotation process. When using Option #1, to rotate a Kafka API key, create a new Kafka API key, update the credentials block in all configuration files to use the new Kafka API key, run pulumi up -target="confluent_kafka_cluster_config.orders", and remove the old Kafka API key. Alternatively, in case the old Kafka API Key was deleted already, you might need to run pulumi preview -refresh=false -target="confluent_kafka_cluster_config.orders" -out=rotate-kafka-api-key and pulumi up rotate-kafka-api-key instead.

key This property is required. String
The Kafka API Key.
secret This property is required. String

The Kafka API Secret.

Note: A Kafka API key consists of a key and a secret. Kafka API keys are required to interact with Kafka clusters in Confluent Cloud. Each Kafka API key is valid for one specific Kafka cluster.

Note: Use Option #2 to simplify the key rotation process. When using Option #1, to rotate a Kafka API key, create a new Kafka API key, update the credentials block in all configuration files to use the new Kafka API key, run pulumi up -target="confluent_kafka_cluster_config.orders", and remove the old Kafka API key. Alternatively, in case the old Kafka API Key was deleted already, you might need to run pulumi preview -refresh=false -target="confluent_kafka_cluster_config.orders" -out=rotate-kafka-api-key and pulumi up rotate-kafka-api-key instead.

KafkaClusterConfigKafkaCluster
, KafkaClusterConfigKafkaClusterArgs

Id
This property is required.
Changes to this property will trigger replacement.
string
The ID of the Dedicated Kafka cluster, for example, lkc-abc123.
Id
This property is required.
Changes to this property will trigger replacement.
string
The ID of the Dedicated Kafka cluster, for example, lkc-abc123.
id
This property is required.
Changes to this property will trigger replacement.
String
The ID of the Dedicated Kafka cluster, for example, lkc-abc123.
id
This property is required.
Changes to this property will trigger replacement.
string
The ID of the Dedicated Kafka cluster, for example, lkc-abc123.
id
This property is required.
Changes to this property will trigger replacement.
str
The ID of the Dedicated Kafka cluster, for example, lkc-abc123.
id
This property is required.
Changes to this property will trigger replacement.
String
The ID of the Dedicated Kafka cluster, for example, lkc-abc123.

Import

You can import a Kafka cluster config by using the Kafka cluster ID, for example:

Option #1: Manage multiple Kafka clusters in the same Pulumi Stack

$ export IMPORT_KAFKA_API_KEY="<kafka_api_key>"

$ export IMPORT_KAFKA_API_SECRET="<kafka_api_secret>"

$ export IMPORT_KAFKA_REST_ENDPOINT="<kafka_rest_endpoint>"

$ pulumi import confluentcloud:index/kafkaClusterConfig:KafkaClusterConfig test lkc-abc123
Copy

Option #2: Manage a single Kafka cluster in the same Pulumi Stack

$ pulumi import confluentcloud:index/kafkaClusterConfig:KafkaClusterConfig test lkc-abc123
Copy

> Warning: Do not forget to delete the terminal command history afterwards for security purposes.

To learn more about importing existing cloud resources, see Importing resources.

Package Details

Repository
Confluent Cloud pulumi/pulumi-confluentcloud
License
Apache-2.0
Notes
This Pulumi package is based on the confluent Terraform Provider.