confluentcloud.KafkaCluster
Explore with Pulumi AI
Example Usage
Example Kafka clusters on AWS
import * as pulumi from "@pulumi/pulumi";
import * as confluentcloud from "@pulumi/confluentcloud";
// Environment that owns the basic/standard/enterprise/dedicated clusters.
const development = new confluentcloud.Environment("development", {displayName: "Development"});
// Environment for the freight cluster. Declared here because the original
// snippet referenced an undeclared `staging` variable, which does not compile.
const staging = new confluentcloud.Environment("staging", {displayName: "Staging"});
// Basic tier: selected by the empty `basic` object.
const basic = new confluentcloud.KafkaCluster("basic", {
    displayName: "basic_kafka_cluster",
    availability: "SINGLE_ZONE",
    cloud: "AWS",
    region: "us-east-2",
    basic: {},
    environment: {
        id: development.id,
    },
});
// Standard tier: selected by the empty `standard` object.
const standard = new confluentcloud.KafkaCluster("standard", {
    displayName: "standard_kafka_cluster",
    availability: "SINGLE_ZONE",
    cloud: "AWS",
    region: "us-east-2",
    standard: {},
    environment: {
        id: development.id,
    },
});
// Enterprise tier: selected by a single empty `enterprises` entry.
const enterprise = new confluentcloud.KafkaCluster("enterprise", {
    enterprises: [{}],
    displayName: "enterprise_kafka_cluster",
    availability: "HIGH",
    cloud: "AWS",
    region: "us-east-2",
    environment: {
        id: development.id,
    },
});
// Dedicated tier: capacity is expressed in CKUs (`cku`).
const dedicated = new confluentcloud.KafkaCluster("dedicated", {
    displayName: "dedicated_kafka_cluster",
    availability: "MULTI_ZONE",
    cloud: "AWS",
    region: "us-east-2",
    dedicated: {
        cku: 2,
    },
    environment: {
        id: development.id,
    },
});
// Freight tier: selected by a single empty `freights` entry.
const freight = new confluentcloud.KafkaCluster("freight", {
    freights: [{}],
    displayName: "freight_kafka_cluster",
    availability: "HIGH",
    cloud: "AWS",
    region: "us-east-1",
    environment: {
        id: staging.id,
    },
});
import pulumi
import pulumi_confluentcloud as confluentcloud

# Environment that owns the basic/standard/enterprise/dedicated clusters.
development = confluentcloud.Environment("development", display_name="Development")
# Environment for the freight cluster. Declared here because the original
# snippet referenced an undefined `staging` name.
staging = confluentcloud.Environment("staging", display_name="Staging")
# Basic tier: selected by the empty `basic` dict.
basic = confluentcloud.KafkaCluster("basic",
    display_name="basic_kafka_cluster",
    availability="SINGLE_ZONE",
    cloud="AWS",
    region="us-east-2",
    basic={},
    environment={
        "id": development.id,
    })
# Standard tier: selected by the empty `standard` dict.
standard = confluentcloud.KafkaCluster("standard",
    display_name="standard_kafka_cluster",
    availability="SINGLE_ZONE",
    cloud="AWS",
    region="us-east-2",
    standard={},
    environment={
        "id": development.id,
    })
# Enterprise tier: selected by a single empty `enterprises` entry.
enterprise = confluentcloud.KafkaCluster("enterprise",
    enterprises=[{}],
    display_name="enterprise_kafka_cluster",
    availability="HIGH",
    cloud="AWS",
    region="us-east-2",
    environment={
        "id": development.id,
    })
# Dedicated tier: capacity is expressed in CKUs.
dedicated = confluentcloud.KafkaCluster("dedicated",
    display_name="dedicated_kafka_cluster",
    availability="MULTI_ZONE",
    cloud="AWS",
    region="us-east-2",
    dedicated={
        "cku": 2,
    },
    environment={
        "id": development.id,
    })
# Freight tier: selected by a single empty `freights` entry.
freight = confluentcloud.KafkaCluster("freight",
    freights=[{}],
    display_name="freight_kafka_cluster",
    availability="HIGH",
    cloud="AWS",
    region="us-east-1",
    environment={
        "id": staging.id,
    })
package main
import (
	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		development, err := confluentcloud.NewEnvironment(ctx, "development", &confluentcloud.EnvironmentArgs{
			DisplayName: pulumi.String("Development"),
		})
		if err != nil {
			return err
		}
		_, err = confluentcloud.NewKafkaCluster(ctx, "basic", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("basic_kafka_cluster"),
			Availability: pulumi.String("SINGLE_ZONE"),
			Cloud:        pulumi.String("AWS"),
			Region:       pulumi.String("us-east-2"),
			Basic:        &confluentcloud.KafkaClusterBasicArgs{},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		_, err = confluentcloud.NewKafkaCluster(ctx, "standard", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("standard_kafka_cluster"),
			Availability: pulumi.String("SINGLE_ZONE"),
			Cloud:        pulumi.String("AWS"),
			Region:       pulumi.String("us-east-2"),
			Standard:     &confluentcloud.KafkaClusterStandardArgs{},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		_, err = confluentcloud.NewKafkaCluster(ctx, "enterprise", &confluentcloud.KafkaClusterArgs{
			Enterprises: confluentcloud.KafkaClusterEnterpriseArray{
				&confluentcloud.KafkaClusterEnterpriseArgs{},
			},
			DisplayName:  pulumi.String("enterprise_kafka_cluster"),
			Availability: pulumi.String("HIGH"),
			Cloud:        pulumi.String("AWS"),
			Region:       pulumi.String("us-east-2"),
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		_, err = confluentcloud.NewKafkaCluster(ctx, "dedicated", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("dedicated_kafka_cluster"),
			Availability: pulumi.String("MULTI_ZONE"),
			Cloud:        pulumi.String("AWS"),
			Region:       pulumi.String("us-east-2"),
			Dedicated: &confluentcloud.KafkaClusterDedicatedArgs{
				Cku: pulumi.Int(2),
			},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		_, err = confluentcloud.NewKafkaCluster(ctx, "freight", &confluentcloud.KafkaClusterArgs{
			Freights: confluentcloud.KafkaClusterFreightArray{
				&confluentcloud.KafkaClusterFreightArgs{},
			},
			DisplayName:  pulumi.String("freight_kafka_cluster"),
			Availability: pulumi.String("HIGH"),
			Cloud:        pulumi.String("AWS"),
			Region:       pulumi.String("us-east-1"),
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: pulumi.Any(staging.Id),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using ConfluentCloud = Pulumi.ConfluentCloud;

return await Deployment.RunAsync(() => 
{
    // Environment that owns the basic/standard/enterprise/dedicated clusters.
    var development = new ConfluentCloud.Environment("development", new()
    {
        DisplayName = "Development",
    });
    // Environment for the freight cluster. Declared here because the original
    // snippet referenced an undeclared `staging` variable, which does not compile.
    var staging = new ConfluentCloud.Environment("staging", new()
    {
        DisplayName = "Staging",
    });
    // Basic tier: selected by an empty Basic args instance (the other language
    // examples pass an empty object here; `null` would leave the tier unset).
    var basic = new ConfluentCloud.KafkaCluster("basic", new()
    {
        DisplayName = "basic_kafka_cluster",
        Availability = "SINGLE_ZONE",
        Cloud = "AWS",
        Region = "us-east-2",
        Basic = new ConfluentCloud.Inputs.KafkaClusterBasicArgs(),
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });
    // Standard tier: selected by an empty Standard args instance.
    var standard = new ConfluentCloud.KafkaCluster("standard", new()
    {
        DisplayName = "standard_kafka_cluster",
        Availability = "SINGLE_ZONE",
        Cloud = "AWS",
        Region = "us-east-2",
        Standard = new ConfluentCloud.Inputs.KafkaClusterStandardArgs(),
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });
    // Enterprise tier: a single empty entry. Note: `new[] { null }` does not
    // compile in C# (no best type for the implicitly typed array).
    var enterprise = new ConfluentCloud.KafkaCluster("enterprise", new()
    {
        Enterprises = new[]
        {
            new ConfluentCloud.Inputs.KafkaClusterEnterpriseArgs(),
        },
        DisplayName = "enterprise_kafka_cluster",
        Availability = "HIGH",
        Cloud = "AWS",
        Region = "us-east-2",
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });
    // Dedicated tier: capacity is expressed in CKUs.
    var dedicated = new ConfluentCloud.KafkaCluster("dedicated", new()
    {
        DisplayName = "dedicated_kafka_cluster",
        Availability = "MULTI_ZONE",
        Cloud = "AWS",
        Region = "us-east-2",
        Dedicated = new ConfluentCloud.Inputs.KafkaClusterDedicatedArgs
        {
            Cku = 2,
        },
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });
    // Freight tier: a single empty entry.
    var freight = new ConfluentCloud.KafkaCluster("freight", new()
    {
        Freights = new[]
        {
            new ConfluentCloud.Inputs.KafkaClusterFreightArgs(),
        },
        DisplayName = "freight_kafka_cluster",
        Availability = "HIGH",
        Cloud = "AWS",
        Region = "us-east-1",
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = staging.Id,
        },
    });
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.confluentcloud.Environment;
import com.pulumi.confluentcloud.EnvironmentArgs;
import com.pulumi.confluentcloud.KafkaCluster;
import com.pulumi.confluentcloud.KafkaClusterArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterBasicArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterEnvironmentArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterStandardArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterEnterpriseArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterDedicatedArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterFreightArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Environment that owns the basic/standard/enterprise/dedicated clusters.
        var development = new Environment("development", EnvironmentArgs.builder()
            .displayName("Development")
            .build());
        // Environment for the freight cluster. Declared here because the
        // original snippet referenced an undeclared `staging` variable.
        var staging = new Environment("staging", EnvironmentArgs.builder()
            .displayName("Staging")
            .build());
        // Basic tier: the setter requires an args instance; a bare `.basic()`
        // call does not compile.
        var basic = new KafkaCluster("basic", KafkaClusterArgs.builder()
            .displayName("basic_kafka_cluster")
            .availability("SINGLE_ZONE")
            .cloud("AWS")
            .region("us-east-2")
            .basic(KafkaClusterBasicArgs.builder().build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());
        // Standard tier.
        var standard = new KafkaCluster("standard", KafkaClusterArgs.builder()
            .displayName("standard_kafka_cluster")
            .availability("SINGLE_ZONE")
            .cloud("AWS")
            .region("us-east-2")
            .standard(KafkaClusterStandardArgs.builder().build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());
        // Enterprise tier: a single empty entry selects the tier.
        var enterprise = new KafkaCluster("enterprise", KafkaClusterArgs.builder()
            .enterprises(KafkaClusterEnterpriseArgs.builder().build())
            .displayName("enterprise_kafka_cluster")
            .availability("HIGH")
            .cloud("AWS")
            .region("us-east-2")
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());
        // Dedicated tier: capacity is expressed in CKUs.
        var dedicated = new KafkaCluster("dedicated", KafkaClusterArgs.builder()
            .displayName("dedicated_kafka_cluster")
            .availability("MULTI_ZONE")
            .cloud("AWS")
            .region("us-east-2")
            .dedicated(KafkaClusterDedicatedArgs.builder()
                .cku(2)
                .build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());
        // Freight tier: a single empty entry selects the tier.
        var freight = new KafkaCluster("freight", KafkaClusterArgs.builder()
            .freights(KafkaClusterFreightArgs.builder().build())
            .displayName("freight_kafka_cluster")
            .availability("HIGH")
            .cloud("AWS")
            .region("us-east-1")
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(staging.id())
                .build())
            .build());
    }
}
resources:
  development:
    type: confluentcloud:Environment
    properties:
      displayName: Development
  # Declared so the freight cluster's ${staging.id} reference below resolves
  # (the original example referenced an undeclared resource).
  staging:
    type: confluentcloud:Environment
    properties:
      displayName: Staging
  # Basic tier: selected by the empty `basic` mapping.
  basic:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: basic_kafka_cluster
      availability: SINGLE_ZONE
      cloud: AWS
      region: us-east-2
      basic: {}
      environment:
        id: ${development.id}
  # Standard tier: selected by the empty `standard` mapping.
  standard:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: standard_kafka_cluster
      availability: SINGLE_ZONE
      cloud: AWS
      region: us-east-2
      standard: {}
      environment:
        id: ${development.id}
  # Enterprise tier: selected by a single empty `enterprises` entry.
  enterprise:
    type: confluentcloud:KafkaCluster
    properties:
      enterprises:
        - {}
      displayName: enterprise_kafka_cluster
      availability: HIGH
      cloud: AWS
      region: us-east-2
      environment:
        id: ${development.id}
  # Dedicated tier: capacity is expressed in CKUs.
  dedicated:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: dedicated_kafka_cluster
      availability: MULTI_ZONE
      cloud: AWS
      region: us-east-2
      dedicated:
        cku: 2
      environment:
        id: ${development.id}
  # Freight tier: selected by a single empty `freights` entry.
  freight:
    type: confluentcloud:KafkaCluster
    properties:
      freights:
        - {}
      displayName: freight_kafka_cluster
      availability: HIGH
      cloud: AWS
      region: us-east-1
      environment:
        id: ${staging.id}
Example Kafka clusters on Azure
import * as pulumi from "@pulumi/pulumi";
import * as confluentcloud from "@pulumi/confluentcloud";
// Environment that owns all of the Azure clusters in this example.
const development = new confluentcloud.Environment("development", {displayName: "Development"});
// Basic tier: selected by the empty `basic` object.
const basic = new confluentcloud.KafkaCluster("basic", {
    displayName: "basic_kafka_cluster",
    availability: "SINGLE_ZONE",
    cloud: "AZURE",
    region: "centralus",
    basic: {},
    environment: {
        id: development.id,
    },
});
// Standard tier: selected by the empty `standard` object.
const standard = new confluentcloud.KafkaCluster("standard", {
    displayName: "standard_kafka_cluster",
    availability: "SINGLE_ZONE",
    cloud: "AZURE",
    region: "centralus",
    standard: {},
    environment: {
        id: development.id,
    },
});
// Enterprise tier: selected by a single empty `enterprises` entry.
const enterprise = new confluentcloud.KafkaCluster("enterprise", {
    enterprises: [{}],
    displayName: "enterprise_kafka_cluster",
    availability: "HIGH",
    cloud: "AZURE",
    region: "centralus",
    environment: {
        id: development.id,
    },
});
// Dedicated tier: capacity is expressed in CKUs (`cku`).
const dedicated = new confluentcloud.KafkaCluster("dedicated", {
    displayName: "dedicated_kafka_cluster",
    availability: "MULTI_ZONE",
    cloud: "AZURE",
    region: "centralus",
    dedicated: {
        cku: 2,
    },
    environment: {
        id: development.id,
    },
});
import pulumi
import pulumi_confluentcloud as confluentcloud

# Environment that owns all of the Azure clusters in this example.
development = confluentcloud.Environment("development", display_name="Development")
# Basic tier: selected by the empty `basic` dict.
basic = confluentcloud.KafkaCluster("basic",
    display_name="basic_kafka_cluster",
    availability="SINGLE_ZONE",
    cloud="AZURE",
    region="centralus",
    basic={},
    environment={
        "id": development.id,
    })
# Standard tier: selected by the empty `standard` dict.
standard = confluentcloud.KafkaCluster("standard",
    display_name="standard_kafka_cluster",
    availability="SINGLE_ZONE",
    cloud="AZURE",
    region="centralus",
    standard={},
    environment={
        "id": development.id,
    })
# Enterprise tier: selected by a single empty `enterprises` entry.
enterprise = confluentcloud.KafkaCluster("enterprise",
    enterprises=[{}],
    display_name="enterprise_kafka_cluster",
    availability="HIGH",
    cloud="AZURE",
    region="centralus",
    environment={
        "id": development.id,
    })
# Dedicated tier: capacity is expressed in CKUs.
dedicated = confluentcloud.KafkaCluster("dedicated",
    display_name="dedicated_kafka_cluster",
    availability="MULTI_ZONE",
    cloud="AZURE",
    region="centralus",
    dedicated={
        "cku": 2,
    },
    environment={
        "id": development.id,
    })
package main

import (
	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Environment that owns all of the Azure clusters in this example.
		development, err := confluentcloud.NewEnvironment(ctx, "development", &confluentcloud.EnvironmentArgs{
			DisplayName: pulumi.String("Development"),
		})
		if err != nil {
			return err
		}
		// Basic tier: selected by the empty Basic args struct.
		_, err = confluentcloud.NewKafkaCluster(ctx, "basic", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("basic_kafka_cluster"),
			Availability: pulumi.String("SINGLE_ZONE"),
			Cloud:        pulumi.String("AZURE"),
			Region:       pulumi.String("centralus"),
			Basic:        &confluentcloud.KafkaClusterBasicArgs{},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		// Standard tier: selected by the empty Standard args struct.
		_, err = confluentcloud.NewKafkaCluster(ctx, "standard", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("standard_kafka_cluster"),
			Availability: pulumi.String("SINGLE_ZONE"),
			Cloud:        pulumi.String("AZURE"),
			Region:       pulumi.String("centralus"),
			Standard:     &confluentcloud.KafkaClusterStandardArgs{},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		// Enterprise tier: selected by a single empty Enterprises entry.
		_, err = confluentcloud.NewKafkaCluster(ctx, "enterprise", &confluentcloud.KafkaClusterArgs{
			Enterprises: confluentcloud.KafkaClusterEnterpriseArray{
				&confluentcloud.KafkaClusterEnterpriseArgs{},
			},
			DisplayName:  pulumi.String("enterprise_kafka_cluster"),
			Availability: pulumi.String("HIGH"),
			Cloud:        pulumi.String("AZURE"),
			Region:       pulumi.String("centralus"),
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		// Dedicated tier: capacity is expressed in CKUs.
		_, err = confluentcloud.NewKafkaCluster(ctx, "dedicated", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("dedicated_kafka_cluster"),
			Availability: pulumi.String("MULTI_ZONE"),
			Cloud:        pulumi.String("AZURE"),
			Region:       pulumi.String("centralus"),
			Dedicated: &confluentcloud.KafkaClusterDedicatedArgs{
				Cku: pulumi.Int(2),
			},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using ConfluentCloud = Pulumi.ConfluentCloud;

return await Deployment.RunAsync(() => 
{
    // Environment that owns all of the Azure clusters in this example.
    var development = new ConfluentCloud.Environment("development", new()
    {
        DisplayName = "Development",
    });
    // Basic tier: selected by an empty Basic args instance (the other language
    // examples pass an empty object here; `null` would leave the tier unset).
    var basic = new ConfluentCloud.KafkaCluster("basic", new()
    {
        DisplayName = "basic_kafka_cluster",
        Availability = "SINGLE_ZONE",
        Cloud = "AZURE",
        Region = "centralus",
        Basic = new ConfluentCloud.Inputs.KafkaClusterBasicArgs(),
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });
    // Standard tier: selected by an empty Standard args instance.
    var standard = new ConfluentCloud.KafkaCluster("standard", new()
    {
        DisplayName = "standard_kafka_cluster",
        Availability = "SINGLE_ZONE",
        Cloud = "AZURE",
        Region = "centralus",
        Standard = new ConfluentCloud.Inputs.KafkaClusterStandardArgs(),
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });
    // Enterprise tier: a single empty entry. Note: `new[] { null }` does not
    // compile in C# (no best type for the implicitly typed array).
    var enterprise = new ConfluentCloud.KafkaCluster("enterprise", new()
    {
        Enterprises = new[]
        {
            new ConfluentCloud.Inputs.KafkaClusterEnterpriseArgs(),
        },
        DisplayName = "enterprise_kafka_cluster",
        Availability = "HIGH",
        Cloud = "AZURE",
        Region = "centralus",
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });
    // Dedicated tier: capacity is expressed in CKUs.
    var dedicated = new ConfluentCloud.KafkaCluster("dedicated", new()
    {
        DisplayName = "dedicated_kafka_cluster",
        Availability = "MULTI_ZONE",
        Cloud = "AZURE",
        Region = "centralus",
        Dedicated = new ConfluentCloud.Inputs.KafkaClusterDedicatedArgs
        {
            Cku = 2,
        },
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.confluentcloud.Environment;
import com.pulumi.confluentcloud.EnvironmentArgs;
import com.pulumi.confluentcloud.KafkaCluster;
import com.pulumi.confluentcloud.KafkaClusterArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterBasicArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterEnvironmentArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterStandardArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterEnterpriseArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterDedicatedArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Environment that owns all of the Azure clusters in this example.
        var development = new Environment("development", EnvironmentArgs.builder()
            .displayName("Development")
            .build());
        // Basic tier: the setter requires an args instance; a bare `.basic()`
        // call does not compile.
        var basic = new KafkaCluster("basic", KafkaClusterArgs.builder()
            .displayName("basic_kafka_cluster")
            .availability("SINGLE_ZONE")
            .cloud("AZURE")
            .region("centralus")
            .basic(KafkaClusterBasicArgs.builder().build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());
        // Standard tier.
        var standard = new KafkaCluster("standard", KafkaClusterArgs.builder()
            .displayName("standard_kafka_cluster")
            .availability("SINGLE_ZONE")
            .cloud("AZURE")
            .region("centralus")
            .standard(KafkaClusterStandardArgs.builder().build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());
        // Enterprise tier: a single empty entry selects the tier.
        var enterprise = new KafkaCluster("enterprise", KafkaClusterArgs.builder()
            .enterprises(KafkaClusterEnterpriseArgs.builder().build())
            .displayName("enterprise_kafka_cluster")
            .availability("HIGH")
            .cloud("AZURE")
            .region("centralus")
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());
        // Dedicated tier: capacity is expressed in CKUs.
        var dedicated = new KafkaCluster("dedicated", KafkaClusterArgs.builder()
            .displayName("dedicated_kafka_cluster")
            .availability("MULTI_ZONE")
            .cloud("AZURE")
            .region("centralus")
            .dedicated(KafkaClusterDedicatedArgs.builder()
                .cku(2)
                .build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());
    }
}
resources:
  # Environment that owns all of the Azure clusters in this example.
  development:
    type: confluentcloud:Environment
    properties:
      displayName: Development
  # Basic tier: selected by the empty `basic` mapping.
  basic:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: basic_kafka_cluster
      availability: SINGLE_ZONE
      cloud: AZURE
      region: centralus
      basic: {}
      environment:
        id: ${development.id}
  # Standard tier: selected by the empty `standard` mapping.
  standard:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: standard_kafka_cluster
      availability: SINGLE_ZONE
      cloud: AZURE
      region: centralus
      standard: {}
      environment:
        id: ${development.id}
  # Enterprise tier: selected by a single empty `enterprises` entry.
  enterprise:
    type: confluentcloud:KafkaCluster
    properties:
      enterprises:
        - {}
      displayName: enterprise_kafka_cluster
      availability: HIGH
      cloud: AZURE
      region: centralus
      environment:
        id: ${development.id}
  # Dedicated tier: capacity is expressed in CKUs.
  dedicated:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: dedicated_kafka_cluster
      availability: MULTI_ZONE
      cloud: AZURE
      region: centralus
      dedicated:
        cku: 2
      environment:
        id: ${development.id}
Example Kafka clusters on GCP
import * as pulumi from "@pulumi/pulumi";
import * as confluentcloud from "@pulumi/confluentcloud";
// Environment that owns all of the GCP clusters in this example.
const development = new confluentcloud.Environment("development", {displayName: "Development"});
// Basic tier: selected by the empty `basic` object.
const basic = new confluentcloud.KafkaCluster("basic", {
    displayName: "basic_kafka_cluster",
    availability: "SINGLE_ZONE",
    cloud: "GCP",
    region: "us-central1",
    basic: {},
    environment: {
        id: development.id,
    },
});
// Standard tier: selected by the empty `standard` object.
const standard = new confluentcloud.KafkaCluster("standard", {
    displayName: "standard_kafka_cluster",
    availability: "SINGLE_ZONE",
    cloud: "GCP",
    region: "us-central1",
    standard: {},
    environment: {
        id: development.id,
    },
});
// Dedicated tier: capacity is expressed in CKUs (`cku`).
const dedicated = new confluentcloud.KafkaCluster("dedicated", {
    displayName: "dedicated_kafka_cluster",
    availability: "MULTI_ZONE",
    cloud: "GCP",
    region: "us-central1",
    dedicated: {
        cku: 2,
    },
    environment: {
        id: development.id,
    },
});
import pulumi
import pulumi_confluentcloud as confluentcloud

# Environment that owns all of the GCP clusters in this example.
development = confluentcloud.Environment("development", display_name="Development")
# Basic tier: selected by the empty `basic` dict.
basic = confluentcloud.KafkaCluster("basic",
    display_name="basic_kafka_cluster",
    availability="SINGLE_ZONE",
    cloud="GCP",
    region="us-central1",
    basic={},
    environment={
        "id": development.id,
    })
# Standard tier: selected by the empty `standard` dict.
standard = confluentcloud.KafkaCluster("standard",
    display_name="standard_kafka_cluster",
    availability="SINGLE_ZONE",
    cloud="GCP",
    region="us-central1",
    standard={},
    environment={
        "id": development.id,
    })
# Dedicated tier: capacity is expressed in CKUs.
dedicated = confluentcloud.KafkaCluster("dedicated",
    display_name="dedicated_kafka_cluster",
    availability="MULTI_ZONE",
    cloud="GCP",
    region="us-central1",
    dedicated={
        "cku": 2,
    },
    environment={
        "id": development.id,
    })
package main

import (
	"github.com/pulumi/pulumi-confluentcloud/sdk/v2/go/confluentcloud"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		// Environment that owns all of the GCP clusters in this example.
		development, err := confluentcloud.NewEnvironment(ctx, "development", &confluentcloud.EnvironmentArgs{
			DisplayName: pulumi.String("Development"),
		})
		if err != nil {
			return err
		}
		// Basic tier: selected by the empty Basic args struct.
		_, err = confluentcloud.NewKafkaCluster(ctx, "basic", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("basic_kafka_cluster"),
			Availability: pulumi.String("SINGLE_ZONE"),
			Cloud:        pulumi.String("GCP"),
			Region:       pulumi.String("us-central1"),
			Basic:        &confluentcloud.KafkaClusterBasicArgs{},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		// Standard tier: selected by the empty Standard args struct.
		_, err = confluentcloud.NewKafkaCluster(ctx, "standard", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("standard_kafka_cluster"),
			Availability: pulumi.String("SINGLE_ZONE"),
			Cloud:        pulumi.String("GCP"),
			Region:       pulumi.String("us-central1"),
			Standard:     &confluentcloud.KafkaClusterStandardArgs{},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		// Dedicated tier: capacity is expressed in CKUs.
		_, err = confluentcloud.NewKafkaCluster(ctx, "dedicated", &confluentcloud.KafkaClusterArgs{
			DisplayName:  pulumi.String("dedicated_kafka_cluster"),
			Availability: pulumi.String("MULTI_ZONE"),
			Cloud:        pulumi.String("GCP"),
			Region:       pulumi.String("us-central1"),
			Dedicated: &confluentcloud.KafkaClusterDedicatedArgs{
				Cku: pulumi.Int(2),
			},
			Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
				Id: development.ID(),
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using ConfluentCloud = Pulumi.ConfluentCloud;

return await Deployment.RunAsync(() => 
{
    // Environment that owns all of the GCP clusters in this example.
    var development = new ConfluentCloud.Environment("development", new()
    {
        DisplayName = "Development",
    });
    // Basic tier: selected by an empty Basic args instance (the other language
    // examples pass an empty object here; `null` would leave the tier unset).
    var basic = new ConfluentCloud.KafkaCluster("basic", new()
    {
        DisplayName = "basic_kafka_cluster",
        Availability = "SINGLE_ZONE",
        Cloud = "GCP",
        Region = "us-central1",
        Basic = new ConfluentCloud.Inputs.KafkaClusterBasicArgs(),
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });
    // Standard tier: selected by an empty Standard args instance.
    var standard = new ConfluentCloud.KafkaCluster("standard", new()
    {
        DisplayName = "standard_kafka_cluster",
        Availability = "SINGLE_ZONE",
        Cloud = "GCP",
        Region = "us-central1",
        Standard = new ConfluentCloud.Inputs.KafkaClusterStandardArgs(),
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });
    // Dedicated tier: capacity is expressed in CKUs.
    var dedicated = new ConfluentCloud.KafkaCluster("dedicated", new()
    {
        DisplayName = "dedicated_kafka_cluster",
        Availability = "MULTI_ZONE",
        Cloud = "GCP",
        Region = "us-central1",
        Dedicated = new ConfluentCloud.Inputs.KafkaClusterDedicatedArgs
        {
            Cku = 2,
        },
        Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
        {
            Id = development.Id,
        },
    });
});
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.confluentcloud.Environment;
import com.pulumi.confluentcloud.EnvironmentArgs;
import com.pulumi.confluentcloud.KafkaCluster;
import com.pulumi.confluentcloud.KafkaClusterArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterBasicArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterEnvironmentArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterStandardArgs;
import com.pulumi.confluentcloud.inputs.KafkaClusterDedicatedArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        // Environment that owns all of the GCP clusters in this example.
        var development = new Environment("development", EnvironmentArgs.builder()
            .displayName("Development")
            .build());
        // Basic tier: the setter requires an args instance; a bare `.basic()`
        // call does not compile.
        var basic = new KafkaCluster("basic", KafkaClusterArgs.builder()
            .displayName("basic_kafka_cluster")
            .availability("SINGLE_ZONE")
            .cloud("GCP")
            .region("us-central1")
            .basic(KafkaClusterBasicArgs.builder().build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());
        // Standard tier.
        var standard = new KafkaCluster("standard", KafkaClusterArgs.builder()
            .displayName("standard_kafka_cluster")
            .availability("SINGLE_ZONE")
            .cloud("GCP")
            .region("us-central1")
            .standard(KafkaClusterStandardArgs.builder().build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());
        // Dedicated tier: capacity is expressed in CKUs.
        var dedicated = new KafkaCluster("dedicated", KafkaClusterArgs.builder()
            .displayName("dedicated_kafka_cluster")
            .availability("MULTI_ZONE")
            .cloud("GCP")
            .region("us-central1")
            .dedicated(KafkaClusterDedicatedArgs.builder()
                .cku(2)
                .build())
            .environment(KafkaClusterEnvironmentArgs.builder()
                .id(development.id())
                .build())
            .build());
    }
}
resources:
  # Environment that owns all of the GCP clusters in this example.
  development:
    type: confluentcloud:Environment
    properties:
      displayName: Development
  # Basic tier: selected by the empty `basic` mapping.
  basic:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: basic_kafka_cluster
      availability: SINGLE_ZONE
      cloud: GCP
      region: us-central1
      basic: {}
      environment:
        id: ${development.id}
  # Standard tier: selected by the empty `standard` mapping.
  standard:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: standard_kafka_cluster
      availability: SINGLE_ZONE
      cloud: GCP
      region: us-central1
      standard: {}
      environment:
        id: ${development.id}
  # Dedicated tier: capacity is expressed in CKUs.
  dedicated:
    type: confluentcloud:KafkaCluster
    properties:
      displayName: dedicated_kafka_cluster
      availability: MULTI_ZONE
      cloud: GCP
      region: us-central1
      dedicated:
        cku: 2
      environment:
        id: ${development.id}
Getting Started
The following end-to-end examples might help to get started with confluentcloud.KafkaCluster resource:
- basic-kafka-acls: Basic Kafka cluster with authorization using ACLs
- basic-kafka-acls-with-alias: Basic Kafka cluster with authorization using ACLs
- standard-kafka-acls: Standard Kafka cluster with authorization using ACLs
- standard-kafka-rbac: Standard Kafka cluster with authorization using RBAC
- dedicated-public-kafka-acls: Dedicated Kafka cluster that is accessible over the public internet with authorization using ACLs
- dedicated-public-kafka-rbac: Dedicated Kafka cluster that is accessible over the public internet with authorization using RBAC
- dedicated-privatelink-aws-kafka-acls: Dedicated Kafka cluster on AWS that is accessible via PrivateLink connections with authorization using ACLs
- dedicated-privatelink-aws-kafka-rbac: Dedicated Kafka cluster on AWS that is accessible via PrivateLink connections with authorization using RBAC
- dedicated-privatelink-azure-kafka-rbac: Dedicated Kafka cluster on Azure that is accessible via PrivateLink connections with authorization using RBAC
- dedicated-privatelink-azure-kafka-acls: Dedicated Kafka cluster on Azure that is accessible via PrivateLink connections with authorization using ACLs
- dedicated-private-service-connect-gcp-kafka-acls: Dedicated Kafka cluster on GCP that is accessible via Private Service Connect connections with authorization using ACLs
- dedicated-private-service-connect-gcp-kafka-rbac: Dedicated Kafka cluster on GCP that is accessible via Private Service Connect connections with authorization using RBAC
- dedicated-vnet-peering-azure-kafka-acls: Dedicated Kafka cluster on Azure that is accessible via VNet Peering connections with authorization using ACLs
- dedicated-vnet-peering-azure-kafka-rbac: Dedicated Kafka cluster on Azure that is accessible via VNet Peering connections with authorization using RBAC
- dedicated-vpc-peering-aws-kafka-acls: Dedicated Kafka cluster on AWS that is accessible via VPC Peering connections with authorization using ACLs
- dedicated-vpc-peering-aws-kafka-rbac: Dedicated Kafka cluster on AWS that is accessible via VPC Peering connections with authorization using RBAC
- dedicated-vpc-peering-gcp-kafka-acls: Dedicated Kafka cluster on GCP that is accessible via VPC Peering connections with authorization using ACLs
- dedicated-vpc-peering-gcp-kafka-rbac: Dedicated Kafka cluster on GCP that is accessible via VPC Peering connections with authorization using RBAC
- dedicated-transit-gateway-attachment-aws-kafka-acls: Dedicated Kafka cluster on AWS that is accessible via Transit Gateway Endpoint with authorization using ACLs
- dedicated-transit-gateway-attachment-aws-kafka-rbac: Dedicated Kafka cluster on AWS that is accessible via Transit Gateway Endpoint with authorization using RBAC
- enterprise-privatelinkattachment-aws-kafka-acls: Enterprise Kafka cluster on AWS that is accessible via PrivateLink connections with authorization using ACLs
- enterprise-privatelinkattachment-azure-kafka-acls: Enterprise Kafka cluster on Azure that is accessible via PrivateLink connections with authorization using ACLs
Create KafkaCluster Resource
Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.
Constructor syntax
new KafkaCluster(name: string, args: KafkaClusterArgs, opts?: CustomResourceOptions);
@overload
def KafkaCluster(resource_name: str,
                 args: KafkaClusterArgs,
                 opts: Optional[ResourceOptions] = None)
@overload
def KafkaCluster(resource_name: str,
                 opts: Optional[ResourceOptions] = None,
                 availability: Optional[str] = None,
                 cloud: Optional[str] = None,
                 environment: Optional[KafkaClusterEnvironmentArgs] = None,
                 region: Optional[str] = None,
                 basic: Optional[KafkaClusterBasicArgs] = None,
                 byok_key: Optional[KafkaClusterByokKeyArgs] = None,
                 dedicated: Optional[KafkaClusterDedicatedArgs] = None,
                 display_name: Optional[str] = None,
                 enterprises: Optional[Sequence[KafkaClusterEnterpriseArgs]] = None,
                 freights: Optional[Sequence[KafkaClusterFreightArgs]] = None,
                 network: Optional[KafkaClusterNetworkArgs] = None,
                 standard: Optional[KafkaClusterStandardArgs] = None)
func NewKafkaCluster(ctx *Context, name string, args KafkaClusterArgs, opts ...ResourceOption) (*KafkaCluster, error)
public KafkaCluster(string name, KafkaClusterArgs args, CustomResourceOptions? opts = null)
public KafkaCluster(String name, KafkaClusterArgs args)
public KafkaCluster(String name, KafkaClusterArgs args, CustomResourceOptions options)
type: confluentcloud:KafkaCluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.
Parameters
- name string
- The unique name of the resource.
- args KafkaClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- resource_name str
- The unique name of the resource.
- args KafkaClusterArgs
- The arguments to resource properties.
- opts ResourceOptions
- Bag of options to control resource's behavior.
- ctx Context
- Context object for the current deployment.
- name string
- The unique name of the resource.
- args KafkaClusterArgs
- The arguments to resource properties.
- opts ResourceOption
- Bag of options to control resource's behavior.
- name string
- The unique name of the resource.
- args KafkaClusterArgs
- The arguments to resource properties.
- opts CustomResourceOptions
- Bag of options to control resource's behavior.
- name String
- The unique name of the resource.
- args KafkaClusterArgs
- The arguments to resource properties.
- options CustomResourceOptions
- Bag of options to control resource's behavior.
Constructor example
The following reference example uses placeholder values for all input properties.
var kafkaClusterResource = new ConfluentCloud.KafkaCluster("kafkaClusterResource", new()
{
    Availability = "string",
    Cloud = "string",
    Environment = new ConfluentCloud.Inputs.KafkaClusterEnvironmentArgs
    {
        Id = "string",
    },
    Region = "string",
    Basic = null,
    ByokKey = new ConfluentCloud.Inputs.KafkaClusterByokKeyArgs
    {
        Id = "string",
    },
    Dedicated = new ConfluentCloud.Inputs.KafkaClusterDedicatedArgs
    {
        Cku = 0,
        EncryptionKey = "string",
        Zones = new[]
        {
            "string",
        },
    },
    DisplayName = "string",
    Enterprises = new[]
    {
        null,
    },
    Freights = new[]
    {
        new ConfluentCloud.Inputs.KafkaClusterFreightArgs
        {
            Zones = new[]
            {
                "string",
            },
        },
    },
    Network = new ConfluentCloud.Inputs.KafkaClusterNetworkArgs
    {
        Id = "string",
    },
    Standard = null,
});
example, err := confluentcloud.NewKafkaCluster(ctx, "kafkaClusterResource", &confluentcloud.KafkaClusterArgs{
	Availability: pulumi.String("string"),
	Cloud:        pulumi.String("string"),
	Environment: &confluentcloud.KafkaClusterEnvironmentArgs{
		Id: pulumi.String("string"),
	},
	Region: pulumi.String("string"),
	Basic:  &confluentcloud.KafkaClusterBasicArgs{},
	ByokKey: &confluentcloud.KafkaClusterByokKeyArgs{
		Id: pulumi.String("string"),
	},
	Dedicated: &confluentcloud.KafkaClusterDedicatedArgs{
		Cku:           pulumi.Int(0),
		EncryptionKey: pulumi.String("string"),
		Zones: pulumi.StringArray{
			pulumi.String("string"),
		},
	},
	DisplayName: pulumi.String("string"),
	Enterprises: confluentcloud.KafkaClusterEnterpriseArray{
		&confluentcloud.KafkaClusterEnterpriseArgs{},
	},
	Freights: confluentcloud.KafkaClusterFreightArray{
		&confluentcloud.KafkaClusterFreightArgs{
			Zones: pulumi.StringArray{
				pulumi.String("string"),
			},
		},
	},
	Network: &confluentcloud.KafkaClusterNetworkArgs{
		Id: pulumi.String("string"),
	},
	Standard: &confluentcloud.KafkaClusterStandardArgs{},
})
var kafkaClusterResource = new KafkaCluster("kafkaClusterResource", KafkaClusterArgs.builder()
    .availability("string")
    .cloud("string")
    .environment(KafkaClusterEnvironmentArgs.builder()
        .id("string")
        .build())
    .region("string")
    .basic()
    .byokKey(KafkaClusterByokKeyArgs.builder()
        .id("string")
        .build())
    .dedicated(KafkaClusterDedicatedArgs.builder()
        .cku(0)
        .encryptionKey("string")
        .zones("string")
        .build())
    .displayName("string")
    .enterprises()
    .freights(KafkaClusterFreightArgs.builder()
        .zones("string")
        .build())
    .network(KafkaClusterNetworkArgs.builder()
        .id("string")
        .build())
    .standard()
    .build());
kafka_cluster_resource = confluentcloud.KafkaCluster("kafkaClusterResource",
    availability="string",
    cloud="string",
    environment={
        "id": "string",
    },
    region="string",
    basic={},
    byok_key={
        "id": "string",
    },
    dedicated={
        "cku": 0,
        "encryption_key": "string",
        "zones": ["string"],
    },
    display_name="string",
    enterprises=[{}],
    freights=[{
        "zones": ["string"],
    }],
    network={
        "id": "string",
    },
    standard={})
const kafkaClusterResource = new confluentcloud.KafkaCluster("kafkaClusterResource", {
    availability: "string",
    cloud: "string",
    environment: {
        id: "string",
    },
    region: "string",
    basic: {},
    byokKey: {
        id: "string",
    },
    dedicated: {
        cku: 0,
        encryptionKey: "string",
        zones: ["string"],
    },
    displayName: "string",
    enterprises: [{}],
    freights: [{
        zones: ["string"],
    }],
    network: {
        id: "string",
    },
    standard: {},
});
type: confluentcloud:KafkaCluster
properties:
    availability: string
    basic: {}
    byokKey:
        id: string
    cloud: string
    dedicated:
        cku: 0
        encryptionKey: string
        zones:
            - string
    displayName: string
    enterprises:
        - {}
    environment:
        id: string
    freights:
        - zones:
            - string
    network:
        id: string
    region: string
    standard: {}
KafkaCluster Resource Properties
To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.
Inputs
In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.
The KafkaCluster resource accepts the following input properties:
- Availability string
- The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
- Cloud string
- The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
- Environment
Pulumi.Confluent Cloud. Inputs. Kafka Cluster Environment 
- Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
- Region string
- The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
- Basic
Pulumi.Confluent Cloud. Inputs. Kafka Cluster Basic 
- The configuration of the Basic Kafka cluster.
- ByokKey Pulumi.Confluent Cloud. Inputs. Kafka Cluster Byok Key 
- Dedicated
Pulumi.Confluent Cloud. Inputs. Kafka Cluster Dedicated 
- (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
- DisplayName string
- The name of the Kafka cluster.
- Enterprises
List<Pulumi.Confluent Cloud. Inputs. Kafka Cluster Enterprise> 
- The configuration of the Enterprise Kafka cluster.
- Freights
List<Pulumi.Confluent Cloud. Inputs. Kafka Cluster Freight> 
- The configuration of the Freight Kafka cluster.
- Network
Pulumi.Confluent Cloud. Inputs. Kafka Cluster Network 
- Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
- Standard
Pulumi.Confluent Cloud. Inputs. Kafka Cluster Standard 
- The configuration of the Standard Kafka cluster.
- Availability string
- The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
- Cloud string
- The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
- Environment
KafkaCluster Environment Args 
- Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
- Region string
- The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
- Basic
KafkaCluster Basic Args 
- The configuration of the Basic Kafka cluster.
- ByokKey KafkaCluster Byok Key Args 
- Dedicated
KafkaCluster Dedicated Args 
- (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
- DisplayName string
- The name of the Kafka cluster.
- Enterprises
[]KafkaCluster Enterprise Args 
- The configuration of the Enterprise Kafka cluster.
- Freights
[]KafkaCluster Freight Args 
- The configuration of the Freight Kafka cluster.
- Network
KafkaCluster Network Args 
- Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
- Standard
KafkaCluster Standard Args 
- The configuration of the Standard Kafka cluster.
- availability String
- The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
- cloud String
- The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
- environment
KafkaCluster Environment 
- Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
- region String
- The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
- basic
KafkaCluster Basic 
- The configuration of the Basic Kafka cluster.
- byokKey KafkaCluster Byok Key 
- dedicated
KafkaCluster Dedicated 
- (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
- displayName String
- The name of the Kafka cluster.
- enterprises
List<KafkaCluster Enterprise> 
- The configuration of the Enterprise Kafka cluster.
- freights
List<KafkaCluster Freight> 
- The configuration of the Freight Kafka cluster.
- network
KafkaCluster Network 
- Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
- standard
KafkaCluster Standard 
- The configuration of the Standard Kafka cluster.
- availability string
- The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
- cloud string
- The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
- environment
KafkaCluster Environment 
- Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
- region string
- The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
- basic
KafkaCluster Basic 
- The configuration of the Basic Kafka cluster.
- byokKey KafkaCluster Byok Key 
- dedicated
KafkaCluster Dedicated 
- (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
- displayName string
- The name of the Kafka cluster.
- enterprises
KafkaCluster Enterprise[] 
- The configuration of the Enterprise Kafka cluster.
- freights
KafkaCluster Freight[] 
- The configuration of the Freight Kafka cluster.
- network
KafkaCluster Network 
- Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
- standard
KafkaCluster Standard 
- The configuration of the Standard Kafka cluster.
- availability str
- The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
- cloud str
- The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
- environment
KafkaCluster Environment Args 
- Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
- region str
- The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
- basic
KafkaCluster Basic Args 
- The configuration of the Basic Kafka cluster.
- byok_key KafkaCluster Byok Key Args 
- dedicated
KafkaCluster Dedicated Args 
- (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
- display_name str
- The name of the Kafka cluster.
- enterprises
Sequence[KafkaCluster Enterprise Args] 
- The configuration of the Enterprise Kafka cluster.
- freights
Sequence[KafkaCluster Freight Args] 
- The configuration of the Freight Kafka cluster.
- network
KafkaCluster Network Args 
- Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
- standard
KafkaCluster Standard Args 
- The configuration of the Standard Kafka cluster.
- availability String
- The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
- cloud String
- The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
- environment Property Map
- Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
- region String
- The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
- basic Property Map
- The configuration of the Basic Kafka cluster.
- byokKey Property Map
- dedicated Property Map
- (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
- displayName String
- The name of the Kafka cluster.
- enterprises List<Property Map>
- The configuration of the Enterprise Kafka cluster.
- freights List<Property Map>
- The configuration of the Freight Kafka cluster.
- network Property Map
- Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
- standard Property Map
- The configuration of the Standard Kafka cluster.
Outputs
All input properties are implicitly available as output properties. Additionally, the KafkaCluster resource produces the following output properties:
- ApiVersion string
- (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
- BootstrapEndpoint string
- (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
- Id string
- The provider-assigned unique ID for this managed resource.
- Kind string
- (Required String) A kind of the Kafka cluster, for example, Cluster.
- RbacCrn string
- (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
- RestEndpoint string
- (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
- ApiVersion string
- (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
- BootstrapEndpoint string
- (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
- Id string
- The provider-assigned unique ID for this managed resource.
- Kind string
- (Required String) A kind of the Kafka cluster, for example, Cluster.
- RbacCrn string
- (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
- RestEndpoint string
- (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
- apiVersion String
- (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
- bootstrapEndpoint String
- (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
- id String
- The provider-assigned unique ID for this managed resource.
- kind String
- (Required String) A kind of the Kafka cluster, for example, Cluster.
- rbacCrn String
- (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
- restEndpoint String
- (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
- apiVersion string
- (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
- bootstrapEndpoint string
- (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
- id string
- The provider-assigned unique ID for this managed resource.
- kind string
- (Required String) A kind of the Kafka cluster, for example, Cluster.
- rbacCrn string
- (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
- restEndpoint string
- (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
- api_version str
- (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
- bootstrap_endpoint str
- (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
- id str
- The provider-assigned unique ID for this managed resource.
- kind str
- (Required String) A kind of the Kafka cluster, for example, Cluster.
- rbac_crn str
- (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
- rest_endpoint str
- (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
- apiVersion String
- (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
- bootstrapEndpoint String
- (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
- id String
- The provider-assigned unique ID for this managed resource.
- kind String
- (Required String) A kind of the Kafka cluster, for example, Cluster.
- rbacCrn String
- (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
- restEndpoint String
- (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
Look up Existing KafkaCluster Resource
Get an existing KafkaCluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.
public static get(name: string, id: Input<ID>, state?: KafkaClusterState, opts?: CustomResourceOptions): KafkaCluster
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        api_version: Optional[str] = None,
        availability: Optional[str] = None,
        basic: Optional[KafkaClusterBasicArgs] = None,
        bootstrap_endpoint: Optional[str] = None,
        byok_key: Optional[KafkaClusterByokKeyArgs] = None,
        cloud: Optional[str] = None,
        dedicated: Optional[KafkaClusterDedicatedArgs] = None,
        display_name: Optional[str] = None,
        enterprises: Optional[Sequence[KafkaClusterEnterpriseArgs]] = None,
        environment: Optional[KafkaClusterEnvironmentArgs] = None,
        freights: Optional[Sequence[KafkaClusterFreightArgs]] = None,
        kind: Optional[str] = None,
        network: Optional[KafkaClusterNetworkArgs] = None,
        rbac_crn: Optional[str] = None,
        region: Optional[str] = None,
        rest_endpoint: Optional[str] = None,
        standard: Optional[KafkaClusterStandardArgs] = None) -> KafkaCluster
func GetKafkaCluster(ctx *Context, name string, id IDInput, state *KafkaClusterState, opts ...ResourceOption) (*KafkaCluster, error)
public static KafkaCluster Get(string name, Input<string> id, KafkaClusterState? state, CustomResourceOptions? opts = null)
public static KafkaCluster get(String name, Output<String> id, KafkaClusterState state, CustomResourceOptions options)
resources:
  _:
    type: confluentcloud:KafkaCluster
    get:
      id: ${id}
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- resource_name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- name
- The unique name of the resulting resource.
- id
- The unique provider ID of the resource to lookup.
- state
- Any extra arguments used during the lookup.
- opts
- A bag of options that control this resource's behavior.
- ApiVersion string
- (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
- Availability string
- The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
- Basic
Pulumi.Confluent Cloud. Inputs. Kafka Cluster Basic 
- The configuration of the Basic Kafka cluster.
- BootstrapEndpoint string
- (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
- ByokKey Pulumi.Confluent Cloud. Inputs. Kafka Cluster Byok Key 
- Cloud string
- The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
- Dedicated
Pulumi.Confluent Cloud. Inputs. Kafka Cluster Dedicated 
- (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
- DisplayName string
- The name of the Kafka cluster.
- Enterprises
List<Pulumi.Confluent Cloud. Inputs. Kafka Cluster Enterprise> 
- The configuration of the Enterprise Kafka cluster.
- Environment
Pulumi.Confluent Cloud. Inputs. Kafka Cluster Environment 
- Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
- Freights
List<Pulumi.Confluent Cloud. Inputs. Kafka Cluster Freight> 
- The configuration of the Freight Kafka cluster.
- Kind string
- (Required String) A kind of the Kafka cluster, for example, Cluster.
- Network
Pulumi.Confluent Cloud. Inputs. Kafka Cluster Network 
- Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
- RbacCrn string
- (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
- Region string
- The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
- RestEndpoint string
- (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
- Standard
Pulumi.Confluent Cloud. Inputs. Kafka Cluster Standard 
- The configuration of the Standard Kafka cluster.
- ApiVersion string
- (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
- Availability string
- The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
- Basic
KafkaCluster Basic Args 
- The configuration of the Basic Kafka cluster.
- BootstrapEndpoint string
- (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
- ByokKey KafkaCluster Byok Key Args 
- Cloud string
- The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
- Dedicated
KafkaCluster Dedicated Args 
- (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
- DisplayName string
- The name of the Kafka cluster.
- Enterprises
[]KafkaCluster Enterprise Args 
- The configuration of the Enterprise Kafka cluster.
- Environment
KafkaCluster Environment Args 
- Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
- Freights
[]KafkaCluster Freight Args 
- The configuration of the Freight Kafka cluster.
- Kind string
- (Required String) A kind of the Kafka cluster, for example, Cluster.
- Network
KafkaCluster Network Args 
- Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
- RbacCrn string
- (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
- Region string
- The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
- RestEndpoint string
- (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
- Standard
KafkaCluster Standard Args 
- The configuration of the Standard Kafka cluster.
- apiVersion String
- (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
- availability String
- The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
- basic
KafkaCluster Basic 
- The configuration of the Basic Kafka cluster.
- bootstrapEndpoint String
- (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
- byokKey KafkaCluster Byok Key 
- cloud String
- The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
- dedicated
KafkaCluster Dedicated 
- (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
- displayName String
- The name of the Kafka cluster.
- enterprises
List<KafkaCluster Enterprise> 
- The configuration of the Enterprise Kafka cluster.
- environment
KafkaCluster Environment 
- Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
- freights
List<KafkaCluster Freight> 
- The configuration of the Freight Kafka cluster.
- kind String
- (Required String) A kind of the Kafka cluster, for example, Cluster.
- network
KafkaCluster Network 
- Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
- rbacCrn String
- (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
- region String
- The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
- restEndpoint String
- (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
- standard
KafkaCluster Standard 
- The configuration of the Standard Kafka cluster.
- apiVersion string
- (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
- availability string
- The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
- basic
KafkaCluster Basic 
- The configuration of the Basic Kafka cluster.
- bootstrapEndpoint string
- (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
- byokKey KafkaCluster Byok Key 
- cloud string
- The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
- dedicated
KafkaCluster Dedicated 
- (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
- displayName string
- The name of the Kafka cluster.
- enterprises
KafkaCluster Enterprise[] 
- The configuration of the Enterprise Kafka cluster.
- environment
KafkaCluster Environment 
- Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
- freights
KafkaCluster Freight[] 
- The configuration of the Freight Kafka cluster.
- kind string
- (Required String) A kind of the Kafka cluster, for example, Cluster.
- network
KafkaCluster Network 
- Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
- rbacCrn string
- (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
- region string
- The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
- restEndpoint string
- (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
- standard
KafkaCluster Standard 
- The configuration of the Standard Kafka cluster.
- api_version str
- (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
- availability str
- The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
- basic
KafkaCluster Basic Args 
- The configuration of the Basic Kafka cluster.
- bootstrap_endpoint str
- (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
- byok_key KafkaCluster Byok Key Args 
- cloud str
- The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
- dedicated
KafkaCluster Dedicated Args 
- (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
- display_name str
- The name of the Kafka cluster.
- enterprises
Sequence[KafkaCluster Enterprise Args] 
- The configuration of the Enterprise Kafka cluster.
- environment
KafkaCluster Environment Args 
- Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
- freights
Sequence[KafkaCluster Freight Args] 
- The configuration of the Freight Kafka cluster.
- kind str
- (Required String) A kind of the Kafka cluster, for example, Cluster.
- network
KafkaCluster Network Args 
- Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
- rbac_crn str
- (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
- region str
- The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
- rest_endpoint str
- (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
- standard
KafkaCluster Standard Args 
- The configuration of the Standard Kafka cluster.
- apiVersion String
- (Required String) An API Version of the schema version of the Kafka cluster, for example, cmk/v2.
- availability String
- The availability zone configuration of the Kafka cluster. Accepted values are: SINGLE_ZONE, MULTI_ZONE, LOW, and HIGH.
- basic Property Map
- The configuration of the Basic Kafka cluster.
- bootstrapEndpoint String
- (Required String) The bootstrap endpoint used by Kafka clients to connect to the Kafka cluster. (e.g., SASL_SSL://pkc-00000.us-central1.gcp.confluent.cloud:9092).
- byokKey Property Map
- cloud String
- The cloud service provider that runs the Kafka cluster. Accepted values are: AWS, AZURE, and GCP.
- dedicated Property Map
- (Optional Configuration Block) The configuration of the Dedicated Kafka cluster. It supports the following:
- displayName String
- The name of the Kafka cluster.
- enterprises List<Property Map>
- The configuration of the Enterprise Kafka cluster.
- environment Property Map
- Environment objects represent an isolated namespace for your Confluent resources for organizational purposes.
- freights List<Property Map>
- The configuration of the Freight Kafka cluster.
- kind String
- (Required String) A kind of the Kafka cluster, for example, Cluster.
- network Property Map
- Network represents a network (VPC) in Confluent Cloud. All Networks exist within Confluent-managed cloud provider accounts.
- rbacCrn String
- (Required String) The Confluent Resource Name of the Kafka cluster, for example, crn://confluent.cloud/organization=1111aaaa-11aa-11aa-11aa-111111aaaaaa/environment=env-abc123/cloud-cluster=lkc-abc123.
- region String
- The cloud service provider region where the Kafka cluster is running, for example, us-west-2. See Cloud Providers and Regions for a full list of options for AWS, Azure, and GCP.
- restEndpoint String
- (Required String) The REST endpoint of the Kafka cluster (e.g., https://pkc-00000.us-central1.gcp.confluent.cloud:443).
- standard Property Map
- The configuration of the Standard Kafka cluster.
Supporting Types
KafkaClusterByokKey, KafkaClusterByokKeyArgs        
- Id string
- The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
- Id string
- The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
- id String
- The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
- id string
- The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
- id str
- The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
- id String
- The ID of the Confluent key that is used to encrypt the data in the Kafka cluster, for example, cck-lye5m.
KafkaClusterDedicated, KafkaClusterDedicatedArgs      
- Cku int
- The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1, whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more. Note: Exactly one of the basic, standard, dedicated, enterprise, or freight configuration blocks must be specified. Note: The freight Kafka cluster type is currently available only on AWS. Note: The enterprise Kafka cluster type is currently available only on AWS and Azure. !> Warning: You can only upgrade clusters from basic to standard. Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.
- EncryptionKey string
- The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
- Zones List<string>
- (Required List of String) The list of zones the cluster is in. On AWS, zones are AWS AZ IDs, for example, use1-az3.
- Cku int
- The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1, whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more. Note: Exactly one of the basic, standard, dedicated, enterprise, or freight configuration blocks must be specified. Note: The freight Kafka cluster type is currently available only on AWS. Note: The enterprise Kafka cluster type is currently available only on AWS and Azure. !> Warning: You can only upgrade clusters from basic to standard. Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.
- EncryptionKey string
- The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
- Zones []string
- (Required List of String) The list of zones the cluster is in. On AWS, zones are AWS AZ IDs, for example, use1-az3.
- cku Integer
- The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1, whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more. Note: Exactly one of the basic, standard, dedicated, enterprise, or freight configuration blocks must be specified. Note: The freight Kafka cluster type is currently available only on AWS. Note: The enterprise Kafka cluster type is currently available only on AWS and Azure. !> Warning: You can only upgrade clusters from basic to standard. Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.
- encryptionKey String
- The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
- zones List<String>
- (Required List of String) The list of zones the cluster is in. On AWS, zones are AWS AZ IDs, for example, use1-az3.
- cku number
- The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1, whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more. Note: Exactly one of the basic, standard, dedicated, enterprise, or freight configuration blocks must be specified. Note: The freight Kafka cluster type is currently available only on AWS. Note: The enterprise Kafka cluster type is currently available only on AWS and Azure. !> Warning: You can only upgrade clusters from basic to standard. Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.
- encryptionKey string
- The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
- zones string[]
- (Required List of String) The list of zones the cluster is in. On AWS, zones are AWS AZ IDs, for example, use1-az3.
- cku int
- The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1, whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more. Note: Exactly one of the basic, standard, dedicated, enterprise, or freight configuration blocks must be specified. Note: The freight Kafka cluster type is currently available only on AWS. Note: The enterprise Kafka cluster type is currently available only on AWS and Azure. !> Warning: You can only upgrade clusters from basic to standard. Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.
- encryption_key str
- The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
- zones Sequence[str]
- (Required List of String) The list of zones the cluster is in. On AWS, zones are AWS AZ IDs, for example, use1-az3.
- cku Number
- The number of Confluent Kafka Units (CKUs) for Dedicated cluster types. The minimum number of CKUs for SINGLE_ZONE dedicated clusters is 1, whereas MULTI_ZONE dedicated clusters must have 2 CKUs or more. Note: Exactly one of the basic, standard, dedicated, enterprise, or freight configuration blocks must be specified. Note: The freight Kafka cluster type is currently available only on AWS. Note: The enterprise Kafka cluster type is currently available only on AWS and Azure. !> Warning: You can only upgrade clusters from basic to standard. Note: Currently, provisioning of a Dedicated Kafka cluster takes around 25 minutes on average but might take up to 24 hours. If you can't wait for the pulumi up step to finish, you can exit it and import the cluster by using the pulumi import command once it has been provisioned. When the cluster is provisioned, you will receive an email notification, and you can also follow updates on the Target Environment web page of the Confluent Cloud website.
- encryptionKey String
- The ID of the encryption key that is used to encrypt the data in the Kafka cluster.
- zones List<String>
- (Required List of String) The list of zones the cluster is in. On AWS, zones are AWS AZ IDs, for example, use1-az3.
KafkaClusterEnvironment, KafkaClusterEnvironmentArgs      
- Id string
- The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
- Id string
- The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
- id String
- The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
- id string
- The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
- id str
- The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
- id String
- The ID of the Environment that the Kafka cluster belongs to, for example, env-abc123.
KafkaClusterFreight, KafkaClusterFreightArgs      
KafkaClusterNetwork, KafkaClusterNetworkArgs      
- Id string
- The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
- Id string
- The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
- id String
- The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
- id string
- The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
- id str
- The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
- id String
- The ID of the Network that the Kafka cluster belongs to, for example, n-abc123.
Import
You can import a Kafka cluster by using Environment ID and Kafka cluster ID, in the format <Environment ID>/<Kafka cluster ID>, e.g.
$ export CONFLUENT_CLOUD_API_KEY="<cloud_api_key>"
$ export CONFLUENT_CLOUD_API_SECRET="<cloud_api_secret>"
$ pulumi import confluentcloud:index/kafkaCluster:KafkaCluster my_kafka env-abc123/lkc-abc123
!> Warning: Do not forget to delete terminal command history afterwards for security purposes.
To learn more about importing existing cloud resources, see Importing resources.
Package Details
- Repository
- Confluent Cloud pulumi/pulumi-confluentcloud
- License
- Apache-2.0
- Notes
- This Pulumi package is based on the confluent Terraform Provider.