1. Packages
  2. Yandex
  3. API Docs
  4. MdbClickhouseCluster
Yandex v0.13.0 published on Tuesday, Feb 22, 2022 by Pulumi

yandex.MdbClickhouseCluster

Explore with Pulumi AI

Manages a ClickHouse cluster within the Yandex.Cloud. For more information, see the official documentation.

Example Usage

Example of creating a Single Node ClickHouse.

using Pulumi;
using Yandex = Pulumi.Yandex;

/// <summary>
/// Example stack: provisions a single-node Yandex.Cloud managed ClickHouse
/// cluster together with the VPC network and subnet its host is placed in.
/// </summary>
class MyStack : Stack
{
    public MyStack()
    {
        // Network and a single subnet in ru-central1-a that will hold the
        // cluster's only host.
        var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
        {
        });
        var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.5.0.0/24",
            },
            Zone = "ru-central1-a",
        });
        var fooMdbClickhouseCluster = new Yandex.MdbClickhouseCluster("fooMdbClickhouseCluster", new Yandex.MdbClickhouseClusterArgs
        {
            Clickhouse = new Yandex.Inputs.MdbClickhouseClusterClickhouseArgs
            {
                // Server-level ClickHouse configuration (compression,
                // graphite rollup, Kafka/RabbitMQ integration, log retention).
                Config = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigArgs
                {
                    BackgroundPoolSize = 16,
                    BackgroundSchedulePoolSize = 16,
                    Compression = 
                    {
                        
                        {
                            { "method", "LZ4" },
                            { "minPartSize", 1024 },
                            { "minPartSizeRatio", 0.5 },
                        },
                        
                        {
                            { "method", "ZSTD" },
                            { "minPartSize", 2048 },
                            { "minPartSizeRatio", 0.7 },
                        },
                    },
                    GeobaseUri = "",
                    GraphiteRollup = 
                    {
                        
                        {
                            { "name", "rollup1" },
                            { "pattern", 
                            {
                                
                                {
                                    { "function", "func1" },
                                    { "regexp", "abc" },
                                    { "retention", 
                                    {
                                        
                                        {
                                            { "age", 1000 },
                                            { "precision", 3 },
                                        },
                                    } },
                                },
                            } },
                        },
                        
                        {
                            { "name", "rollup2" },
                            { "pattern", 
                            {
                                
                                {
                                    { "function", "func2" },
                                    { "retention", 
                                    {
                                        
                                        {
                                            { "age", 2000 },
                                            { "precision", 5 },
                                        },
                                    } },
                                },
                            } },
                        },
                    },
                    Kafka = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaArgs
                    {
                        SaslMechanism = "SASL_MECHANISM_GSSAPI",
                        SaslPassword = "pass1",
                        SaslUsername = "user1",
                        SecurityProtocol = "SECURITY_PROTOCOL_PLAINTEXT",
                    },
                    KafkaTopic = 
                    {
                        
                        {
                            { "name", "topic1" },
                            { "settings", 
                            {
                                { "saslMechanism", "SASL_MECHANISM_SCRAM_SHA_256" },
                                { "saslPassword", "pass2" },
                                { "saslUsername", "user2" },
                                { "securityProtocol", "SECURITY_PROTOCOL_SSL" },
                            } },
                        },
                        
                        {
                            { "name", "topic2" },
                            { "settings", 
                            {
                                { "saslMechanism", "SASL_MECHANISM_PLAIN" },
                                { "securityProtocol", "SECURITY_PROTOCOL_SASL_PLAINTEXT" },
                            } },
                        },
                    },
                    KeepAliveTimeout = 3000,
                    LogLevel = "TRACE",
                    MarkCacheSize = 5368709120,
                    MaxConcurrentQueries = 50,
                    MaxConnections = 100,
                    MaxPartitionSizeToDrop = 53687091200,
                    MaxTableSizeToDrop = 53687091200,
                    MergeTree = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigMergeTreeArgs
                    {
                        MaxBytesToMergeAtMinSpaceInPool = 1048576,
                        MaxReplicatedMergesInQueue = 16,
                        NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge = 8,
                        PartsToDelayInsert = 150,
                        PartsToThrowInsert = 300,
                        ReplicatedDeduplicationWindow = 100,
                        ReplicatedDeduplicationWindowSeconds = 604800,
                    },
                    MetricLogEnabled = true,
                    MetricLogRetentionSize = 536870912,
                    MetricLogRetentionTime = 2592000,
                    PartLogRetentionSize = 536870912,
                    PartLogRetentionTime = 2592000,
                    QueryLogRetentionSize = 1073741824,
                    QueryLogRetentionTime = 2592000,
                    QueryThreadLogEnabled = true,
                    QueryThreadLogRetentionSize = 536870912,
                    QueryThreadLogRetentionTime = 2592000,
                    Rabbitmq = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigRabbitmqArgs
                    {
                        Password = "rabbit_pass",
                        Username = "rabbit_user",
                    },
                    TextLogEnabled = true,
                    TextLogLevel = "TRACE",
                    TextLogRetentionSize = 536870912,
                    TextLogRetentionTime = 2592000,
                    Timezone = "UTC",
                    TraceLogEnabled = true,
                    TraceLogRetentionSize = 536870912,
                    TraceLogRetentionTime = 2592000,
                    UncompressedCacheSize = 8589934592,
                },
                Resources = new Yandex.Inputs.MdbClickhouseClusterClickhouseResourcesArgs
                {
                    DiskSize = 32,
                    DiskTypeId = "network-ssd",
                    ResourcePresetId = "s2.micro",
                },
            },
            CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
            {
                Enabled = false,
            },
            Databases = 
            {
                new Yandex.Inputs.MdbClickhouseClusterDatabaseArgs
                {
                    Name = "db_name",
                },
            },
            Environment = "PRESTABLE",
            FormatSchemas = 
            {
                new Yandex.Inputs.MdbClickhouseClusterFormatSchemaArgs
                {
                    Name = "test_schema",
                    Type = "FORMAT_SCHEMA_TYPE_CAPNPROTO",
                    Uri = "https://storage.yandexcloud.net/ch-data/schema.proto",
                },
            },
            Hosts = 
            {
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    SubnetId = fooVpcSubnet.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-a",
                },
            },
            MaintenanceWindow = new Yandex.Inputs.MdbClickhouseClusterMaintenanceWindowArgs
            {
                Type = "ANYTIME",
            },
            MlModels = 
            {
                new Yandex.Inputs.MdbClickhouseClusterMlModelArgs
                {
                    Name = "test_model",
                    Type = "ML_MODEL_TYPE_CATBOOST",
                    Uri = "https://storage.yandexcloud.net/ch-data/train.csv",
                },
            },
            NetworkId = fooVpcNetwork.Id,
            ServiceAccountId = "your_service_account_id",
            Users = 
            {
                new Yandex.Inputs.MdbClickhouseClusterUserArgs
                {
                    Name = "user",
                    Password = "your_password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
                        {
                            DatabaseName = "db_name",
                        },
                    },
                    Quotas = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                        {
                            Errors = 1000,
                            IntervalDuration = 3600000,
                            Queries = 10000,
                        },
                        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                        {
                            // Fixed: the quota property is `Errors` (plural),
                            // matching the first quota entry above; `Error`
                            // does not exist on MdbClickhouseClusterUserQuotaArgs.
                            Errors = 5000,
                            IntervalDuration = 79800000,
                            Queries = 50000,
                        },
                    },
                    Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
                    {
                        MaxMemoryUsageForUser = 1000000000,
                        OutputFormatJsonQuote64bitIntegers = true,
                        ReadOverflowMode = "throw",
                    },
                },
            },
        });
    }

}
Copy
package main

import (
	"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

// main provisions a single-node Yandex.Cloud managed ClickHouse cluster
// together with the VPC network and subnet its host is placed in.
//
// Fixes relative to the generated example:
//   - every nested input struct literal is now qualified with the `yandex.`
//     package prefix (the unqualified names did not compile), and
//   - the second user quota uses the `Errors` field (plural) wrapped in
//     pulumi.Int, matching the first quota entry.
func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
		if err != nil {
			return err
		}
		fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.5.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-a"),
		})
		if err != nil {
			return err
		}
		_, err = yandex.NewMdbClickhouseCluster(ctx, "fooMdbClickhouseCluster", &yandex.MdbClickhouseClusterArgs{
			Clickhouse: &yandex.MdbClickhouseClusterClickhouseArgs{
				// Server-level ClickHouse configuration (compression,
				// graphite rollup, Kafka/RabbitMQ integration, log retention).
				Config: &yandex.MdbClickhouseClusterClickhouseConfigArgs{
					BackgroundPoolSize:         pulumi.Int(16),
					BackgroundSchedulePoolSize: pulumi.Int(16),
					Compression: []interface{}{
						map[string]interface{}{
							"method":           "LZ4",
							"minPartSize":      1024,
							"minPartSizeRatio": 0.5,
						},
						map[string]interface{}{
							"method":           "ZSTD",
							"minPartSize":      2048,
							"minPartSizeRatio": 0.7,
						},
					},
					GeobaseUri: pulumi.String(""),
					GraphiteRollup: []interface{}{
						map[string]interface{}{
							"name": "rollup1",
							"pattern": []map[string]interface{}{
								map[string]interface{}{
									"function": "func1",
									"regexp":   "abc",
									"retention": []map[string]interface{}{
										map[string]interface{}{
											"age":       1000,
											"precision": 3,
										},
									},
								},
							},
						},
						map[string]interface{}{
							"name": "rollup2",
							"pattern": []map[string]interface{}{
								map[string]interface{}{
									"function": "func2",
									"retention": []map[string]interface{}{
										map[string]interface{}{
											"age":       2000,
											"precision": 5,
										},
									},
								},
							},
						},
					},
					Kafka: &yandex.MdbClickhouseClusterClickhouseConfigKafkaArgs{
						SaslMechanism:    pulumi.String("SASL_MECHANISM_GSSAPI"),
						SaslPassword:     pulumi.String("pass1"),
						SaslUsername:     pulumi.String("user1"),
						SecurityProtocol: pulumi.String("SECURITY_PROTOCOL_PLAINTEXT"),
					},
					KafkaTopic: []interface{}{
						map[string]interface{}{
							"name": "topic1",
							"settings": map[string]interface{}{
								"saslMechanism":    "SASL_MECHANISM_SCRAM_SHA_256",
								"saslPassword":     "pass2",
								"saslUsername":     "user2",
								"securityProtocol": "SECURITY_PROTOCOL_SSL",
							},
						},
						map[string]interface{}{
							"name": "topic2",
							"settings": map[string]interface{}{
								"saslMechanism":    "SASL_MECHANISM_PLAIN",
								"securityProtocol": "SECURITY_PROTOCOL_SASL_PLAINTEXT",
							},
						},
					},
					KeepAliveTimeout:       pulumi.Int(3000),
					LogLevel:               pulumi.String("TRACE"),
					MarkCacheSize:          pulumi.Int(5368709120),
					MaxConcurrentQueries:   pulumi.Int(50),
					MaxConnections:         pulumi.Int(100),
					MaxPartitionSizeToDrop: pulumi.Int(53687091200),
					MaxTableSizeToDrop:     pulumi.Int(53687091200),
					MergeTree: &yandex.MdbClickhouseClusterClickhouseConfigMergeTreeArgs{
						MaxBytesToMergeAtMinSpaceInPool:                pulumi.Int(1048576),
						MaxReplicatedMergesInQueue:                     pulumi.Int(16),
						NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: pulumi.Int(8),
						PartsToDelayInsert:                             pulumi.Int(150),
						PartsToThrowInsert:                             pulumi.Int(300),
						ReplicatedDeduplicationWindow:                  pulumi.Int(100),
						ReplicatedDeduplicationWindowSeconds:           pulumi.Int(604800),
					},
					MetricLogEnabled:            pulumi.Bool(true),
					MetricLogRetentionSize:      pulumi.Int(536870912),
					MetricLogRetentionTime:      pulumi.Int(2592000),
					PartLogRetentionSize:        pulumi.Int(536870912),
					PartLogRetentionTime:        pulumi.Int(2592000),
					QueryLogRetentionSize:       pulumi.Int(1073741824),
					QueryLogRetentionTime:       pulumi.Int(2592000),
					QueryThreadLogEnabled:       pulumi.Bool(true),
					QueryThreadLogRetentionSize: pulumi.Int(536870912),
					QueryThreadLogRetentionTime: pulumi.Int(2592000),
					Rabbitmq: &yandex.MdbClickhouseClusterClickhouseConfigRabbitmqArgs{
						Password: pulumi.String("rabbit_pass"),
						Username: pulumi.String("rabbit_user"),
					},
					TextLogEnabled:        pulumi.Bool(true),
					TextLogLevel:          pulumi.String("TRACE"),
					TextLogRetentionSize:  pulumi.Int(536870912),
					TextLogRetentionTime:  pulumi.Int(2592000),
					Timezone:              pulumi.String("UTC"),
					TraceLogEnabled:       pulumi.Bool(true),
					TraceLogRetentionSize: pulumi.Int(536870912),
					TraceLogRetentionTime: pulumi.Int(2592000),
					UncompressedCacheSize: pulumi.Int(8589934592),
				},
				Resources: &yandex.MdbClickhouseClusterClickhouseResourcesArgs{
					DiskSize:         pulumi.Int(32),
					DiskTypeId:       pulumi.String("network-ssd"),
					ResourcePresetId: pulumi.String("s2.micro"),
				},
			},
			CloudStorage: &yandex.MdbClickhouseClusterCloudStorageArgs{
				Enabled: pulumi.Bool(false),
			},
			Databases: yandex.MdbClickhouseClusterDatabaseArray{
				&yandex.MdbClickhouseClusterDatabaseArgs{
					Name: pulumi.String("db_name"),
				},
			},
			Environment: pulumi.String("PRESTABLE"),
			FormatSchemas: yandex.MdbClickhouseClusterFormatSchemaArray{
				&yandex.MdbClickhouseClusterFormatSchemaArgs{
					Name: pulumi.String("test_schema"),
					Type: pulumi.String("FORMAT_SCHEMA_TYPE_CAPNPROTO"),
					Uri:  pulumi.String("https://storage.yandexcloud.net/ch-data/schema.proto"),
				},
			},
			Hosts: yandex.MdbClickhouseClusterHostArray{
				&yandex.MdbClickhouseClusterHostArgs{
					SubnetId: fooVpcSubnet.ID(),
					Type:     pulumi.String("CLICKHOUSE"),
					Zone:     pulumi.String("ru-central1-a"),
				},
			},
			MaintenanceWindow: &yandex.MdbClickhouseClusterMaintenanceWindowArgs{
				Type: pulumi.String("ANYTIME"),
			},
			MlModels: yandex.MdbClickhouseClusterMlModelArray{
				&yandex.MdbClickhouseClusterMlModelArgs{
					Name: pulumi.String("test_model"),
					Type: pulumi.String("ML_MODEL_TYPE_CATBOOST"),
					Uri:  pulumi.String("https://storage.yandexcloud.net/ch-data/train.csv"),
				},
			},
			NetworkId:        fooVpcNetwork.ID(),
			ServiceAccountId: pulumi.String("your_service_account_id"),
			Users: yandex.MdbClickhouseClusterUserArray{
				&yandex.MdbClickhouseClusterUserArgs{
					Name:     pulumi.String("user"),
					Password: pulumi.String("your_password"),
					Permissions: yandex.MdbClickhouseClusterUserPermissionArray{
						&yandex.MdbClickhouseClusterUserPermissionArgs{
							DatabaseName: pulumi.String("db_name"),
						},
					},
					Quotas: yandex.MdbClickhouseClusterUserQuotaArray{
						&yandex.MdbClickhouseClusterUserQuotaArgs{
							Errors:           pulumi.Int(1000),
							IntervalDuration: pulumi.Int(3600000),
							Queries:          pulumi.Int(10000),
						},
						&yandex.MdbClickhouseClusterUserQuotaArgs{
							// Fixed: field is `Errors` (plural) and takes a
							// pulumi.Int input, as in the first quota above.
							Errors:           pulumi.Int(5000),
							IntervalDuration: pulumi.Int(79800000),
							Queries:          pulumi.Int(50000),
						},
					},
					Settings: &yandex.MdbClickhouseClusterUserSettingsArgs{
						MaxMemoryUsageForUser:              pulumi.Int(1000000000),
						OutputFormatJsonQuote64bitIntegers: pulumi.Bool(true),
						ReadOverflowMode:                   pulumi.String("throw"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Copy

Coming soon!

import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";

// Example: single-node Yandex.Cloud managed ClickHouse cluster, plus the
// VPC network and subnet its host is placed in.
const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.5.0.0/24"],
    zone: "ru-central1-a",
});
const fooMdbClickhouseCluster = new yandex.MdbClickhouseCluster("foo", {
    clickhouse: {
        // Server-level ClickHouse configuration (compression, graphite
        // rollup, Kafka/RabbitMQ integration, log retention).
        config: {
            backgroundPoolSize: 16,
            backgroundSchedulePoolSize: 16,
            compressions: [
                {
                    method: "LZ4",
                    minPartSize: 1024,
                    minPartSizeRatio: 0.5,
                },
                {
                    method: "ZSTD",
                    minPartSize: 2048,
                    minPartSizeRatio: 0.7,
                },
            ],
            geobaseUri: "",
            graphiteRollups: [
                {
                    name: "rollup1",
                    patterns: [{
                        function: "func1",
                        regexp: "abc",
                        retentions: [{
                            age: 1000,
                            precision: 3,
                        }],
                    }],
                },
                {
                    name: "rollup2",
                    patterns: [{
                        function: "func2",
                        retentions: [{
                            age: 2000,
                            precision: 5,
                        }],
                    }],
                },
            ],
            kafka: {
                saslMechanism: "SASL_MECHANISM_GSSAPI",
                saslPassword: "pass1",
                saslUsername: "user1",
                securityProtocol: "SECURITY_PROTOCOL_PLAINTEXT",
            },
            kafkaTopics: [
                {
                    name: "topic1",
                    settings: {
                        saslMechanism: "SASL_MECHANISM_SCRAM_SHA_256",
                        saslPassword: "pass2",
                        saslUsername: "user2",
                        securityProtocol: "SECURITY_PROTOCOL_SSL",
                    },
                },
                {
                    name: "topic2",
                    settings: {
                        saslMechanism: "SASL_MECHANISM_PLAIN",
                        securityProtocol: "SECURITY_PROTOCOL_SASL_PLAINTEXT",
                    },
                },
            ],
            keepAliveTimeout: 3000,
            logLevel: "TRACE",
            markCacheSize: 5368709120,
            maxConcurrentQueries: 50,
            maxConnections: 100,
            maxPartitionSizeToDrop: 53687091200,
            maxTableSizeToDrop: 53687091200,
            mergeTree: {
                maxBytesToMergeAtMinSpaceInPool: 1048576,
                maxReplicatedMergesInQueue: 16,
                numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: 8,
                partsToDelayInsert: 150,
                partsToThrowInsert: 300,
                replicatedDeduplicationWindow: 100,
                replicatedDeduplicationWindowSeconds: 604800,
            },
            metricLogEnabled: true,
            metricLogRetentionSize: 536870912,
            metricLogRetentionTime: 2592000,
            partLogRetentionSize: 536870912,
            partLogRetentionTime: 2592000,
            queryLogRetentionSize: 1073741824,
            queryLogRetentionTime: 2592000,
            queryThreadLogEnabled: true,
            queryThreadLogRetentionSize: 536870912,
            queryThreadLogRetentionTime: 2592000,
            rabbitmq: {
                password: "rabbit_pass",
                username: "rabbit_user",
            },
            textLogEnabled: true,
            textLogLevel: "TRACE",
            textLogRetentionSize: 536870912,
            textLogRetentionTime: 2592000,
            timezone: "UTC",
            traceLogEnabled: true,
            traceLogRetentionSize: 536870912,
            traceLogRetentionTime: 2592000,
            uncompressedCacheSize: 8589934592,
        },
        resources: {
            diskSize: 32,
            diskTypeId: "network-ssd",
            resourcePresetId: "s2.micro",
        },
    },
    cloudStorage: {
        enabled: false,
    },
    databases: [{
        name: "db_name",
    }],
    environment: "PRESTABLE",
    formatSchemas: [{
        name: "test_schema",
        type: "FORMAT_SCHEMA_TYPE_CAPNPROTO",
        uri: "https://storage.yandexcloud.net/ch-data/schema.proto",
    }],
    hosts: [{
        subnetId: fooVpcSubnet.id,
        type: "CLICKHOUSE",
        zone: "ru-central1-a",
    }],
    maintenanceWindow: {
        type: "ANYTIME",
    },
    mlModels: [{
        name: "test_model",
        type: "ML_MODEL_TYPE_CATBOOST",
        uri: "https://storage.yandexcloud.net/ch-data/train.csv",
    }],
    networkId: fooVpcNetwork.id,
    serviceAccountId: "your_service_account_id",
    users: [{
        name: "user",
        password: "your_password",
        permissions: [{
            databaseName: "db_name",
        }],
        quotas: [
            {
                errors: 1000,
                intervalDuration: 3600000,
                queries: 10000,
            },
            {
                // Fixed: the quota property is `errors` (plural), matching
                // the first quota entry above.
                errors: 5000,
                intervalDuration: 79800000,
                queries: 50000,
            },
        ],
        settings: {
            maxMemoryUsageForUser: 1000000000,
            outputFormatJsonQuote64bitIntegers: true,
            readOverflowMode: "throw",
        },
    }],
});
Copy
import pulumi
import pulumi_yandex as yandex

# Example: single-node Yandex.Cloud managed ClickHouse cluster, plus the
# VPC network and subnet its host is placed in.
foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.5.0.0/24"],
    zone="ru-central1-a")
foo_mdb_clickhouse_cluster = yandex.MdbClickhouseCluster("fooMdbClickhouseCluster",
    clickhouse=yandex.MdbClickhouseClusterClickhouseArgs(
        # Server-level ClickHouse configuration (compression, graphite
        # rollup, Kafka/RabbitMQ integration, log retention).
        config=yandex.MdbClickhouseClusterClickhouseConfigArgs(
            background_pool_size=16,
            background_schedule_pool_size=16,
            compression=[
                {
                    "method": "LZ4",
                    "minPartSize": 1024,
                    "minPartSizeRatio": 0.5,
                },
                {
                    "method": "ZSTD",
                    "minPartSize": 2048,
                    "minPartSizeRatio": 0.7,
                },
            ],
            geobase_uri="",
            graphite_rollup=[
                {
                    "name": "rollup1",
                    "pattern": [{
                        "function": "func1",
                        "regexp": "abc",
                        "retention": [{
                            "age": 1000,
                            "precision": 3,
                        }],
                    }],
                },
                {
                    "name": "rollup2",
                    "pattern": [{
                        "function": "func2",
                        "retention": [{
                            "age": 2000,
                            "precision": 5,
                        }],
                    }],
                },
            ],
            kafka=yandex.MdbClickhouseClusterClickhouseConfigKafkaArgs(
                sasl_mechanism="SASL_MECHANISM_GSSAPI",
                sasl_password="pass1",
                sasl_username="user1",
                security_protocol="SECURITY_PROTOCOL_PLAINTEXT",
            ),
            kafka_topic=[
                {
                    "name": "topic1",
                    "settings": {
                        "saslMechanism": "SASL_MECHANISM_SCRAM_SHA_256",
                        "saslPassword": "pass2",
                        "saslUsername": "user2",
                        "securityProtocol": "SECURITY_PROTOCOL_SSL",
                    },
                },
                {
                    "name": "topic2",
                    "settings": {
                        "saslMechanism": "SASL_MECHANISM_PLAIN",
                        "securityProtocol": "SECURITY_PROTOCOL_SASL_PLAINTEXT",
                    },
                },
            ],
            keep_alive_timeout=3000,
            log_level="TRACE",
            mark_cache_size=5368709120,
            max_concurrent_queries=50,
            max_connections=100,
            max_partition_size_to_drop=53687091200,
            max_table_size_to_drop=53687091200,
            merge_tree=yandex.MdbClickhouseClusterClickhouseConfigMergeTreeArgs(
                max_bytes_to_merge_at_min_space_in_pool=1048576,
                max_replicated_merges_in_queue=16,
                number_of_free_entries_in_pool_to_lower_max_size_of_merge=8,
                parts_to_delay_insert=150,
                parts_to_throw_insert=300,
                replicated_deduplication_window=100,
                replicated_deduplication_window_seconds=604800,
            ),
            metric_log_enabled=True,
            metric_log_retention_size=536870912,
            metric_log_retention_time=2592000,
            part_log_retention_size=536870912,
            part_log_retention_time=2592000,
            query_log_retention_size=1073741824,
            query_log_retention_time=2592000,
            query_thread_log_enabled=True,
            query_thread_log_retention_size=536870912,
            query_thread_log_retention_time=2592000,
            rabbitmq=yandex.MdbClickhouseClusterClickhouseConfigRabbitmqArgs(
                password="rabbit_pass",
                username="rabbit_user",
            ),
            text_log_enabled=True,
            text_log_level="TRACE",
            text_log_retention_size=536870912,
            text_log_retention_time=2592000,
            timezone="UTC",
            trace_log_enabled=True,
            trace_log_retention_size=536870912,
            trace_log_retention_time=2592000,
            uncompressed_cache_size=8589934592,
        ),
        resources=yandex.MdbClickhouseClusterClickhouseResourcesArgs(
            disk_size=32,
            disk_type_id="network-ssd",
            resource_preset_id="s2.micro",
        ),
    ),
    cloud_storage=yandex.MdbClickhouseClusterCloudStorageArgs(
        enabled=False,
    ),
    databases=[yandex.MdbClickhouseClusterDatabaseArgs(
        name="db_name",
    )],
    environment="PRESTABLE",
    format_schemas=[yandex.MdbClickhouseClusterFormatSchemaArgs(
        name="test_schema",
        type="FORMAT_SCHEMA_TYPE_CAPNPROTO",
        uri="https://storage.yandexcloud.net/ch-data/schema.proto",
    )],
    hosts=[yandex.MdbClickhouseClusterHostArgs(
        subnet_id=foo_vpc_subnet.id,
        type="CLICKHOUSE",
        zone="ru-central1-a",
    )],
    maintenance_window=yandex.MdbClickhouseClusterMaintenanceWindowArgs(
        type="ANYTIME",
    ),
    ml_models=[yandex.MdbClickhouseClusterMlModelArgs(
        name="test_model",
        type="ML_MODEL_TYPE_CATBOOST",
        uri="https://storage.yandexcloud.net/ch-data/train.csv",
    )],
    network_id=foo_vpc_network.id,
    service_account_id="your_service_account_id",
    users=[yandex.MdbClickhouseClusterUserArgs(
        name="user",
        password="your_password",
        permissions=[yandex.MdbClickhouseClusterUserPermissionArgs(
            database_name="db_name",
        )],
        quotas=[
            yandex.MdbClickhouseClusterUserQuotaArgs(
                errors=1000,
                interval_duration=3600000,
                queries=10000,
            ),
            yandex.MdbClickhouseClusterUserQuotaArgs(
                # Fixed: the keyword is `errors` (plural), matching the
                # first quota entry above; `error` is not a valid argument.
                errors=5000,
                interval_duration=79800000,
                queries=50000,
            ),
        ],
        settings=yandex.MdbClickhouseClusterUserSettingsArgs(
            max_memory_usage_for_user=1000000000,
            output_format_json_quote64bit_integers=True,
            read_overflow_mode="throw",
        ),
    )])
Copy

Coming soon!

Example of creating a HA ClickHouse Cluster.

using Pulumi;
using Yandex = Pulumi.Yandex;

class MyStack : Stack
{
    public MyStack()
    {
        var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
        {
        });
        var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.1.0.0/24",
            },
            Zone = "ru-central1-a",
        });
        var bar = new Yandex.VpcSubnet("bar", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.2.0.0/24",
            },
            Zone = "ru-central1-b",
        });
        var baz = new Yandex.VpcSubnet("baz", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.3.0.0/24",
            },
            Zone = "ru-central1-c",
        });
        var fooMdbClickhouseCluster = new Yandex.MdbClickhouseCluster("fooMdbClickhouseCluster", new Yandex.MdbClickhouseClusterArgs
        {
            Clickhouse = new Yandex.Inputs.MdbClickhouseClusterClickhouseArgs
            {
                Resources = new Yandex.Inputs.MdbClickhouseClusterClickhouseResourcesArgs
                {
                    DiskSize = 16,
                    DiskTypeId = "network-ssd",
                    ResourcePresetId = "s2.micro",
                },
            },
            CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
            {
                Enabled = false,
            },
            Databases = 
            {
                new Yandex.Inputs.MdbClickhouseClusterDatabaseArgs
                {
                    Name = "db_name",
                },
            },
            Environment = "PRESTABLE",
            Hosts = 
            {
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    SubnetId = fooVpcSubnet.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-a",
                },
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    SubnetId = bar.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-b",
                },
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    SubnetId = fooVpcSubnet.Id,
                    Type = "ZOOKEEPER",
                    Zone = "ru-central1-a",
                },
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    SubnetId = bar.Id,
                    Type = "ZOOKEEPER",
                    Zone = "ru-central1-b",
                },
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    SubnetId = baz.Id,
                    Type = "ZOOKEEPER",
                    Zone = "ru-central1-c",
                },
            },
            NetworkId = fooVpcNetwork.Id,
            Users = 
            {
                new Yandex.Inputs.MdbClickhouseClusterUserArgs
                {
                    Name = "user",
                    Password = "password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
                        {
                            DatabaseName = "db_name",
                        },
                    },
                    Quotas = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                        {
                            Errors = 1000,
                            IntervalDuration = 3600000,
                            Queries = 10000,
                        },
                        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                        {
                            Errors = 5000,
                            IntervalDuration = 79800000,
                            Queries = 50000,
                        },
                    },
                    Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
                    {
                        MaxMemoryUsageForUser = 1000000000,
                        OutputFormatJsonQuote64bitIntegers = true,
                        ReadOverflowMode = "throw",
                    },
                },
            },
            Zookeeper = new Yandex.Inputs.MdbClickhouseClusterZookeeperArgs
            {
                Resources = new Yandex.Inputs.MdbClickhouseClusterZookeeperResourcesArgs
                {
                    DiskSize = 10,
                    DiskTypeId = "network-ssd",
                    ResourcePresetId = "s2.micro",
                },
            },
        });
    }

}
Copy
package main

import (
	"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
		if err != nil {
			return err
		}
		fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.1.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-a"),
		})
		if err != nil {
			return err
		}
		bar, err := yandex.NewVpcSubnet(ctx, "bar", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.2.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-b"),
		})
		if err != nil {
			return err
		}
		baz, err := yandex.NewVpcSubnet(ctx, "baz", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.3.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-c"),
		})
		if err != nil {
			return err
		}
		_, err = yandex.NewMdbClickhouseCluster(ctx, "fooMdbClickhouseCluster", &yandex.MdbClickhouseClusterArgs{
			Clickhouse: &MdbClickhouseClusterClickhouseArgs{
				Resources: &MdbClickhouseClusterClickhouseResourcesArgs{
					DiskSize:         pulumi.Int(16),
					DiskTypeId:       pulumi.String("network-ssd"),
					ResourcePresetId: pulumi.String("s2.micro"),
				},
			},
			CloudStorage: &MdbClickhouseClusterCloudStorageArgs{
				Enabled: pulumi.Bool(false),
			},
			Databases: MdbClickhouseClusterDatabaseArray{
				&MdbClickhouseClusterDatabaseArgs{
					Name: pulumi.String("db_name"),
				},
			},
			Environment: pulumi.String("PRESTABLE"),
			Hosts: MdbClickhouseClusterHostArray{
				&MdbClickhouseClusterHostArgs{
					SubnetId: fooVpcSubnet.ID(),
					Type:     pulumi.String("CLICKHOUSE"),
					Zone:     pulumi.String("ru-central1-a"),
				},
				&MdbClickhouseClusterHostArgs{
					SubnetId: bar.ID(),
					Type:     pulumi.String("CLICKHOUSE"),
					Zone:     pulumi.String("ru-central1-b"),
				},
				&MdbClickhouseClusterHostArgs{
					SubnetId: fooVpcSubnet.ID(),
					Type:     pulumi.String("ZOOKEEPER"),
					Zone:     pulumi.String("ru-central1-a"),
				},
				&MdbClickhouseClusterHostArgs{
					SubnetId: bar.ID(),
					Type:     pulumi.String("ZOOKEEPER"),
					Zone:     pulumi.String("ru-central1-b"),
				},
				&MdbClickhouseClusterHostArgs{
					SubnetId: baz.ID(),
					Type:     pulumi.String("ZOOKEEPER"),
					Zone:     pulumi.String("ru-central1-c"),
				},
			},
			NetworkId: fooVpcNetwork.ID(),
			Users: MdbClickhouseClusterUserArray{
				&MdbClickhouseClusterUserArgs{
					Name:     pulumi.String("user"),
					Password: pulumi.String("password"),
					Permissions: MdbClickhouseClusterUserPermissionArray{
						&MdbClickhouseClusterUserPermissionArgs{
							DatabaseName: pulumi.String("db_name"),
						},
					},
					Quotas: MdbClickhouseClusterUserQuotaArray{
						&MdbClickhouseClusterUserQuotaArgs{
							Errors:           pulumi.Int(1000),
							IntervalDuration: pulumi.Int(3600000),
							Queries:          pulumi.Int(10000),
						},
						&MdbClickhouseClusterUserQuotaArgs{
							Errors:           pulumi.Int(5000),
							IntervalDuration: pulumi.Int(79800000),
							Queries:          pulumi.Int(50000),
						},
					},
					Settings: &MdbClickhouseClusterUserSettingsArgs{
						MaxMemoryUsageForUser:              pulumi.Int(1000000000),
						OutputFormatJsonQuote64bitIntegers: pulumi.Bool(true),
						ReadOverflowMode:                   pulumi.String("throw"),
					},
				},
			},
			Zookeeper: &MdbClickhouseClusterZookeeperArgs{
				Resources: &MdbClickhouseClusterZookeeperResourcesArgs{
					DiskSize:         pulumi.Int(10),
					DiskTypeId:       pulumi.String("network-ssd"),
					ResourcePresetId: pulumi.String("s2.micro"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Copy

Coming soon!

import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";

const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.1.0.0/24"],
    zone: "ru-central1-a",
});
const bar = new yandex.VpcSubnet("bar", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.2.0.0/24"],
    zone: "ru-central1-b",
});
const baz = new yandex.VpcSubnet("baz", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.3.0.0/24"],
    zone: "ru-central1-c",
});
const fooMdbClickhouseCluster = new yandex.MdbClickhouseCluster("foo", {
    clickhouse: {
        resources: {
            diskSize: 16,
            diskTypeId: "network-ssd",
            resourcePresetId: "s2.micro",
        },
    },
    cloudStorage: {
        enabled: false,
    },
    databases: [{
        name: "db_name",
    }],
    environment: "PRESTABLE",
    hosts: [
        {
            subnetId: fooVpcSubnet.id,
            type: "CLICKHOUSE",
            zone: "ru-central1-a",
        },
        {
            subnetId: bar.id,
            type: "CLICKHOUSE",
            zone: "ru-central1-b",
        },
        {
            subnetId: fooVpcSubnet.id,
            type: "ZOOKEEPER",
            zone: "ru-central1-a",
        },
        {
            subnetId: bar.id,
            type: "ZOOKEEPER",
            zone: "ru-central1-b",
        },
        {
            subnetId: baz.id,
            type: "ZOOKEEPER",
            zone: "ru-central1-c",
        },
    ],
    networkId: fooVpcNetwork.id,
    users: [{
        name: "user",
        password: "password",
        permissions: [{
            databaseName: "db_name",
        }],
        quotas: [
            {
                errors: 1000,
                intervalDuration: 3600000,
                queries: 10000,
            },
            {
                errors: 5000,
                intervalDuration: 79800000,
                queries: 50000,
            },
        ],
        settings: {
            maxMemoryUsageForUser: 1000000000,
            outputFormatJsonQuote64bitIntegers: true,
            readOverflowMode: "throw",
        },
    }],
    zookeeper: {
        resources: {
            diskSize: 10,
            diskTypeId: "network-ssd",
            resourcePresetId: "s2.micro",
        },
    },
});
Copy
import pulumi
import pulumi_yandex as yandex

foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.1.0.0/24"],
    zone="ru-central1-a")
bar = yandex.VpcSubnet("bar",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.2.0.0/24"],
    zone="ru-central1-b")
baz = yandex.VpcSubnet("baz",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.3.0.0/24"],
    zone="ru-central1-c")
foo_mdb_clickhouse_cluster = yandex.MdbClickhouseCluster("fooMdbClickhouseCluster",
    clickhouse=yandex.MdbClickhouseClusterClickhouseArgs(
        resources=yandex.MdbClickhouseClusterClickhouseResourcesArgs(
            disk_size=16,
            disk_type_id="network-ssd",
            resource_preset_id="s2.micro",
        ),
    ),
    cloud_storage=yandex.MdbClickhouseClusterCloudStorageArgs(
        enabled=False,
    ),
    databases=[yandex.MdbClickhouseClusterDatabaseArgs(
        name="db_name",
    )],
    environment="PRESTABLE",
    hosts=[
        yandex.MdbClickhouseClusterHostArgs(
            subnet_id=foo_vpc_subnet.id,
            type="CLICKHOUSE",
            zone="ru-central1-a",
        ),
        yandex.MdbClickhouseClusterHostArgs(
            subnet_id=bar.id,
            type="CLICKHOUSE",
            zone="ru-central1-b",
        ),
        yandex.MdbClickhouseClusterHostArgs(
            subnet_id=foo_vpc_subnet.id,
            type="ZOOKEEPER",
            zone="ru-central1-a",
        ),
        yandex.MdbClickhouseClusterHostArgs(
            subnet_id=bar.id,
            type="ZOOKEEPER",
            zone="ru-central1-b",
        ),
        yandex.MdbClickhouseClusterHostArgs(
            subnet_id=baz.id,
            type="ZOOKEEPER",
            zone="ru-central1-c",
        ),
    ],
    network_id=foo_vpc_network.id,
    users=[yandex.MdbClickhouseClusterUserArgs(
        name="user",
        password="password",
        permissions=[yandex.MdbClickhouseClusterUserPermissionArgs(
            database_name="db_name",
        )],
        quotas=[
            yandex.MdbClickhouseClusterUserQuotaArgs(
                errors=1000,
                interval_duration=3600000,
                queries=10000,
            ),
            yandex.MdbClickhouseClusterUserQuotaArgs(
                errors=5000,
                interval_duration=79800000,
                queries=50000,
            ),
        ],
        settings=yandex.MdbClickhouseClusterUserSettingsArgs(
            max_memory_usage_for_user=1000000000,
            output_format_json_quote64bit_integers=True,
            read_overflow_mode="throw",
        ),
    )],
    zookeeper=yandex.MdbClickhouseClusterZookeeperArgs(
        resources=yandex.MdbClickhouseClusterZookeeperResourcesArgs(
            disk_size=10,
            disk_type_id="network-ssd",
            resource_preset_id="s2.micro",
        ),
    ))
Copy

Coming soon!

Example of creating a sharded ClickHouse Cluster.

using Pulumi;
using Yandex = Pulumi.Yandex;

class MyStack : Stack
{
    public MyStack()
    {
        var fooVpcNetwork = new Yandex.VpcNetwork("fooVpcNetwork", new Yandex.VpcNetworkArgs
        {
        });
        var fooVpcSubnet = new Yandex.VpcSubnet("fooVpcSubnet", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.1.0.0/24",
            },
            Zone = "ru-central1-a",
        });
        var bar = new Yandex.VpcSubnet("bar", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.2.0.0/24",
            },
            Zone = "ru-central1-b",
        });
        var baz = new Yandex.VpcSubnet("baz", new Yandex.VpcSubnetArgs
        {
            NetworkId = fooVpcNetwork.Id,
            V4CidrBlocks = 
            {
                "10.3.0.0/24",
            },
            Zone = "ru-central1-c",
        });
        var fooMdbClickhouseCluster = new Yandex.MdbClickhouseCluster("fooMdbClickhouseCluster", new Yandex.MdbClickhouseClusterArgs
        {
            Clickhouse = new Yandex.Inputs.MdbClickhouseClusterClickhouseArgs
            {
                Resources = new Yandex.Inputs.MdbClickhouseClusterClickhouseResourcesArgs
                {
                    DiskSize = 16,
                    DiskTypeId = "network-ssd",
                    ResourcePresetId = "s2.micro",
                },
            },
            CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
            {
                Enabled = false,
            },
            Databases = 
            {
                new Yandex.Inputs.MdbClickhouseClusterDatabaseArgs
                {
                    Name = "db_name",
                },
            },
            Environment = "PRODUCTION",
            Hosts = 
            {
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    ShardName = "shard1",
                    SubnetId = fooVpcSubnet.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-a",
                },
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    ShardName = "shard1",
                    SubnetId = bar.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-b",
                },
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    ShardName = "shard2",
                    SubnetId = bar.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-b",
                },
                new Yandex.Inputs.MdbClickhouseClusterHostArgs
                {
                    ShardName = "shard2",
                    SubnetId = baz.Id,
                    Type = "CLICKHOUSE",
                    Zone = "ru-central1-c",
                },
            },
            NetworkId = fooVpcNetwork.Id,
            ShardGroups = 
            {
                new Yandex.Inputs.MdbClickhouseClusterShardGroupArgs
                {
                    Description = "Cluster configuration that contains only shard1",
                    Name = "single_shard_group",
                    ShardNames = 
                    {
                        "shard1",
                    },
                },
            },
            Users = 
            {
                new Yandex.Inputs.MdbClickhouseClusterUserArgs
                {
                    Name = "user",
                    Password = "password",
                    Permissions = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
                        {
                            DatabaseName = "db_name",
                        },
                    },
                    Quotas = 
                    {
                        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                        {
                            Errors = 1000,
                            IntervalDuration = 3600000,
                            Queries = 10000,
                        },
                        new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                        {
                            Errors = 5000,
                            IntervalDuration = 79800000,
                            Queries = 50000,
                        },
                    },
                    Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
                    {
                        MaxMemoryUsageForUser = 1000000000,
                        OutputFormatJsonQuote64bitIntegers = true,
                        ReadOverflowMode = "throw",
                    },
                },
            },
            Zookeeper = new Yandex.Inputs.MdbClickhouseClusterZookeeperArgs
            {
                Resources = new Yandex.Inputs.MdbClickhouseClusterZookeeperResourcesArgs
                {
                    DiskSize = 10,
                    DiskTypeId = "network-ssd",
                    ResourcePresetId = "s2.micro",
                },
            },
        });
    }

}
Copy
package main

import (
	"github.com/pulumi/pulumi-yandex/sdk/go/yandex"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		fooVpcNetwork, err := yandex.NewVpcNetwork(ctx, "fooVpcNetwork", nil)
		if err != nil {
			return err
		}
		fooVpcSubnet, err := yandex.NewVpcSubnet(ctx, "fooVpcSubnet", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.1.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-a"),
		})
		if err != nil {
			return err
		}
		bar, err := yandex.NewVpcSubnet(ctx, "bar", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.2.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-b"),
		})
		if err != nil {
			return err
		}
		baz, err := yandex.NewVpcSubnet(ctx, "baz", &yandex.VpcSubnetArgs{
			NetworkId: fooVpcNetwork.ID(),
			V4CidrBlocks: pulumi.StringArray{
				pulumi.String("10.3.0.0/24"),
			},
			Zone: pulumi.String("ru-central1-c"),
		})
		if err != nil {
			return err
		}
		_, err = yandex.NewMdbClickhouseCluster(ctx, "fooMdbClickhouseCluster", &yandex.MdbClickhouseClusterArgs{
			Clickhouse: &MdbClickhouseClusterClickhouseArgs{
				Resources: &MdbClickhouseClusterClickhouseResourcesArgs{
					DiskSize:         pulumi.Int(16),
					DiskTypeId:       pulumi.String("network-ssd"),
					ResourcePresetId: pulumi.String("s2.micro"),
				},
			},
			CloudStorage: &MdbClickhouseClusterCloudStorageArgs{
				Enabled: pulumi.Bool(false),
			},
			Databases: MdbClickhouseClusterDatabaseArray{
				&MdbClickhouseClusterDatabaseArgs{
					Name: pulumi.String("db_name"),
				},
			},
			Environment: pulumi.String("PRODUCTION"),
			Hosts: MdbClickhouseClusterHostArray{
				&MdbClickhouseClusterHostArgs{
					ShardName: pulumi.String("shard1"),
					SubnetId:  fooVpcSubnet.ID(),
					Type:      pulumi.String("CLICKHOUSE"),
					Zone:      pulumi.String("ru-central1-a"),
				},
				&MdbClickhouseClusterHostArgs{
					ShardName: pulumi.String("shard1"),
					SubnetId:  bar.ID(),
					Type:      pulumi.String("CLICKHOUSE"),
					Zone:      pulumi.String("ru-central1-b"),
				},
				&MdbClickhouseClusterHostArgs{
					ShardName: pulumi.String("shard2"),
					SubnetId:  bar.ID(),
					Type:      pulumi.String("CLICKHOUSE"),
					Zone:      pulumi.String("ru-central1-b"),
				},
				&MdbClickhouseClusterHostArgs{
					ShardName: pulumi.String("shard2"),
					SubnetId:  baz.ID(),
					Type:      pulumi.String("CLICKHOUSE"),
					Zone:      pulumi.String("ru-central1-c"),
				},
			},
			NetworkId: fooVpcNetwork.ID(),
			ShardGroups: MdbClickhouseClusterShardGroupArray{
				&MdbClickhouseClusterShardGroupArgs{
					Description: pulumi.String("Cluster configuration that contains only shard1"),
					Name:        pulumi.String("single_shard_group"),
					ShardNames: pulumi.StringArray{
						pulumi.String("shard1"),
					},
				},
			},
			Users: MdbClickhouseClusterUserArray{
				&MdbClickhouseClusterUserArgs{
					Name:     pulumi.String("user"),
					Password: pulumi.String("password"),
					Permissions: MdbClickhouseClusterUserPermissionArray{
						&MdbClickhouseClusterUserPermissionArgs{
							DatabaseName: pulumi.String("db_name"),
						},
					},
					Quotas: MdbClickhouseClusterUserQuotaArray{
						&MdbClickhouseClusterUserQuotaArgs{
							Errors:           pulumi.Int(1000),
							IntervalDuration: pulumi.Int(3600000),
							Queries:          pulumi.Int(10000),
						},
						&MdbClickhouseClusterUserQuotaArgs{
							Errors:           pulumi.Int(5000),
							IntervalDuration: pulumi.Int(79800000),
							Queries:          pulumi.Int(50000),
						},
					},
					Settings: &MdbClickhouseClusterUserSettingsArgs{
						MaxMemoryUsageForUser:              pulumi.Int(1000000000),
						OutputFormatJsonQuote64bitIntegers: pulumi.Bool(true),
						ReadOverflowMode:                   pulumi.String("throw"),
					},
				},
			},
			Zookeeper: &MdbClickhouseClusterZookeeperArgs{
				Resources: &MdbClickhouseClusterZookeeperResourcesArgs{
					DiskSize:         pulumi.Int(10),
					DiskTypeId:       pulumi.String("network-ssd"),
					ResourcePresetId: pulumi.String("s2.micro"),
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Copy

Coming soon!

import * as pulumi from "@pulumi/pulumi";
import * as yandex from "@pulumi/yandex";

const fooVpcNetwork = new yandex.VpcNetwork("foo", {});
const fooVpcSubnet = new yandex.VpcSubnet("foo", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.1.0.0/24"],
    zone: "ru-central1-a",
});
const bar = new yandex.VpcSubnet("bar", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.2.0.0/24"],
    zone: "ru-central1-b",
});
const baz = new yandex.VpcSubnet("baz", {
    networkId: fooVpcNetwork.id,
    v4CidrBlocks: ["10.3.0.0/24"],
    zone: "ru-central1-c",
});
const fooMdbClickhouseCluster = new yandex.MdbClickhouseCluster("foo", {
    clickhouse: {
        resources: {
            diskSize: 16,
            diskTypeId: "network-ssd",
            resourcePresetId: "s2.micro",
        },
    },
    cloudStorage: {
        enabled: false,
    },
    databases: [{
        name: "db_name",
    }],
    environment: "PRODUCTION",
    hosts: [
        {
            shardName: "shard1",
            subnetId: fooVpcSubnet.id,
            type: "CLICKHOUSE",
            zone: "ru-central1-a",
        },
        {
            shardName: "shard1",
            subnetId: bar.id,
            type: "CLICKHOUSE",
            zone: "ru-central1-b",
        },
        {
            shardName: "shard2",
            subnetId: bar.id,
            type: "CLICKHOUSE",
            zone: "ru-central1-b",
        },
        {
            shardName: "shard2",
            subnetId: baz.id,
            type: "CLICKHOUSE",
            zone: "ru-central1-c",
        },
    ],
    networkId: fooVpcNetwork.id,
    shardGroups: [{
        description: "Cluster configuration that contains only shard1",
        name: "single_shard_group",
        shardNames: ["shard1"],
    }],
    users: [{
        name: "user",
        password: "password",
        permissions: [{
            databaseName: "db_name",
        }],
        quotas: [
            {
                errors: 1000,
                intervalDuration: 3600000,
                queries: 10000,
            },
            {
                errors: 5000,
                intervalDuration: 79800000,
                queries: 50000,
            },
        ],
        settings: {
            maxMemoryUsageForUser: 1000000000,
            outputFormatJsonQuote64bitIntegers: true,
            readOverflowMode: "throw",
        },
    }],
    zookeeper: {
        resources: {
            diskSize: 10,
            diskTypeId: "network-ssd",
            resourcePresetId: "s2.micro",
        },
    },
});
Copy
import pulumi
import pulumi_yandex as yandex

foo_vpc_network = yandex.VpcNetwork("fooVpcNetwork")
foo_vpc_subnet = yandex.VpcSubnet("fooVpcSubnet",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.1.0.0/24"],
    zone="ru-central1-a")
bar = yandex.VpcSubnet("bar",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.2.0.0/24"],
    zone="ru-central1-b")
baz = yandex.VpcSubnet("baz",
    network_id=foo_vpc_network.id,
    v4_cidr_blocks=["10.3.0.0/24"],
    zone="ru-central1-c")
foo_mdb_clickhouse_cluster = yandex.MdbClickhouseCluster("fooMdbClickhouseCluster",
    clickhouse=yandex.MdbClickhouseClusterClickhouseArgs(
        resources=yandex.MdbClickhouseClusterClickhouseResourcesArgs(
            disk_size=16,
            disk_type_id="network-ssd",
            resource_preset_id="s2.micro",
        ),
    ),
    cloud_storage=yandex.MdbClickhouseClusterCloudStorageArgs(
        enabled=False,
    ),
    databases=[yandex.MdbClickhouseClusterDatabaseArgs(
        name="db_name",
    )],
    environment="PRODUCTION",
    hosts=[
        yandex.MdbClickhouseClusterHostArgs(
            shard_name="shard1",
            subnet_id=foo_vpc_subnet.id,
            type="CLICKHOUSE",
            zone="ru-central1-a",
        ),
        yandex.MdbClickhouseClusterHostArgs(
            shard_name="shard1",
            subnet_id=bar.id,
            type="CLICKHOUSE",
            zone="ru-central1-b",
        ),
        yandex.MdbClickhouseClusterHostArgs(
            shard_name="shard2",
            subnet_id=bar.id,
            type="CLICKHOUSE",
            zone="ru-central1-b",
        ),
        yandex.MdbClickhouseClusterHostArgs(
            shard_name="shard2",
            subnet_id=baz.id,
            type="CLICKHOUSE",
            zone="ru-central1-c",
        ),
    ],
    network_id=foo_vpc_network.id,
    shard_groups=[yandex.MdbClickhouseClusterShardGroupArgs(
        description="Cluster configuration that contains only shard1",
        name="single_shard_group",
        shard_names=["shard1"],
    )],
    users=[yandex.MdbClickhouseClusterUserArgs(
        name="user",
        password="password",
        permissions=[yandex.MdbClickhouseClusterUserPermissionArgs(
            database_name="db_name",
        )],
        quotas=[
            yandex.MdbClickhouseClusterUserQuotaArgs(
                errors=1000,
                interval_duration=3600000,
                queries=10000,
            ),
            yandex.MdbClickhouseClusterUserQuotaArgs(
                errors=5000,
                interval_duration=79800000,
                queries=50000,
            ),
        ],
        settings=yandex.MdbClickhouseClusterUserSettingsArgs(
            max_memory_usage_for_user=1000000000,
            output_format_json_quote64bit_integers=True,
            read_overflow_mode="throw",
        ),
    )],
    zookeeper=yandex.MdbClickhouseClusterZookeeperArgs(
        resources=yandex.MdbClickhouseClusterZookeeperResourcesArgs(
            disk_size=10,
            disk_type_id="network-ssd",
            resource_preset_id="s2.micro",
        ),
    ))
Copy

Coming soon!

Create MdbClickhouseCluster Resource

Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

Constructor syntax

new MdbClickhouseCluster(name: string, args: MdbClickhouseClusterArgs, opts?: CustomResourceOptions);
@overload
def MdbClickhouseCluster(resource_name: str,
                         args: MdbClickhouseClusterArgs,
                         opts: Optional[ResourceOptions] = None)

@overload
def MdbClickhouseCluster(resource_name: str,
                         opts: Optional[ResourceOptions] = None,
                         environment: Optional[str] = None,
                         network_id: Optional[str] = None,
                         hosts: Optional[Sequence[MdbClickhouseClusterHostArgs]] = None,
                         clickhouse: Optional[MdbClickhouseClusterClickhouseArgs] = None,
                         databases: Optional[Sequence[MdbClickhouseClusterDatabaseArgs]] = None,
                         ml_models: Optional[Sequence[MdbClickhouseClusterMlModelArgs]] = None,
                         access: Optional[MdbClickhouseClusterAccessArgs] = None,
                         deletion_protection: Optional[bool] = None,
                         description: Optional[str] = None,
                         cloud_storage: Optional[MdbClickhouseClusterCloudStorageArgs] = None,
                         folder_id: Optional[str] = None,
                         format_schemas: Optional[Sequence[MdbClickhouseClusterFormatSchemaArgs]] = None,
                         backup_window_start: Optional[MdbClickhouseClusterBackupWindowStartArgs] = None,
                         labels: Optional[Mapping[str, str]] = None,
                         maintenance_window: Optional[MdbClickhouseClusterMaintenanceWindowArgs] = None,
                         copy_schema_on_new_hosts: Optional[bool] = None,
                         name: Optional[str] = None,
                         admin_password: Optional[str] = None,
                         security_group_ids: Optional[Sequence[str]] = None,
                         service_account_id: Optional[str] = None,
                         shard_groups: Optional[Sequence[MdbClickhouseClusterShardGroupArgs]] = None,
                         sql_database_management: Optional[bool] = None,
                         sql_user_management: Optional[bool] = None,
                         users: Optional[Sequence[MdbClickhouseClusterUserArgs]] = None,
                         version: Optional[str] = None,
                         zookeeper: Optional[MdbClickhouseClusterZookeeperArgs] = None)
func NewMdbClickhouseCluster(ctx *Context, name string, args MdbClickhouseClusterArgs, opts ...ResourceOption) (*MdbClickhouseCluster, error)
public MdbClickhouseCluster(string name, MdbClickhouseClusterArgs args, CustomResourceOptions? opts = null)
public MdbClickhouseCluster(String name, MdbClickhouseClusterArgs args)
public MdbClickhouseCluster(String name, MdbClickhouseClusterArgs args, CustomResourceOptions options)
type: yandex:MdbClickhouseCluster
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

Parameters

name (required) — string
The unique name of the resource.
args (required) — MdbClickhouseClusterArgs
The arguments to resource properties.
opts — CustomResourceOptions
Bag of options to control resource's behavior.
resource_name (required) — str
The unique name of the resource.
args (required) — MdbClickhouseClusterArgs
The arguments to resource properties.
opts — ResourceOptions
Bag of options to control resource's behavior.
ctx — Context
Context object for the current deployment.
name (required) — string
The unique name of the resource.
args (required) — MdbClickhouseClusterArgs
The arguments to resource properties.
opts — ResourceOption
Bag of options to control resource's behavior.
name (required) — string
The unique name of the resource.
args (required) — MdbClickhouseClusterArgs
The arguments to resource properties.
opts — CustomResourceOptions
Bag of options to control resource's behavior.
name (required) — String
The unique name of the resource.
args (required) — MdbClickhouseClusterArgs
The arguments to resource properties.
options — CustomResourceOptions
Bag of options to control resource's behavior.

Constructor example

The following reference example uses placeholder values for all input properties.

var mdbClickhouseClusterResource = new Yandex.MdbClickhouseCluster("mdbClickhouseClusterResource", new()
{
    Environment = "string",
    NetworkId = "string",
    Hosts = new[]
    {
        new Yandex.Inputs.MdbClickhouseClusterHostArgs
        {
            Type = "string",
            Zone = "string",
            AssignPublicIp = false,
            Fqdn = "string",
            ShardName = "string",
            SubnetId = "string",
        },
    },
    Clickhouse = new Yandex.Inputs.MdbClickhouseClusterClickhouseArgs
    {
        Resources = new Yandex.Inputs.MdbClickhouseClusterClickhouseResourcesArgs
        {
            DiskSize = 0,
            DiskTypeId = "string",
            ResourcePresetId = "string",
        },
        Config = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigArgs
        {
            BackgroundPoolSize = 0,
            BackgroundSchedulePoolSize = 0,
            Compressions = new[]
            {
                new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigCompressionArgs
                {
                    Method = "string",
                    MinPartSize = 0,
                    MinPartSizeRatio = 0,
                },
            },
            GeobaseUri = "string",
            GraphiteRollups = new[]
            {
                new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs
                {
                    Name = "string",
                    Patterns = new[]
                    {
                        new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs
                        {
                            Function = "string",
                            Regexp = "string",
                            Retentions = new[]
                            {
                                new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs
                                {
                                    Age = 0,
                                    Precision = 0,
                                },
                            },
                        },
                    },
                },
            },
            Kafka = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaArgs
            {
                SaslMechanism = "string",
                SaslPassword = "string",
                SaslUsername = "string",
                SecurityProtocol = "string",
            },
            KafkaTopics = new[]
            {
                new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs
                {
                    Name = "string",
                    Settings = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs
                    {
                        SaslMechanism = "string",
                        SaslPassword = "string",
                        SaslUsername = "string",
                        SecurityProtocol = "string",
                    },
                },
            },
            KeepAliveTimeout = 0,
            LogLevel = "string",
            MarkCacheSize = 0,
            MaxConcurrentQueries = 0,
            MaxConnections = 0,
            MaxPartitionSizeToDrop = 0,
            MaxTableSizeToDrop = 0,
            MergeTree = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigMergeTreeArgs
            {
                MaxBytesToMergeAtMinSpaceInPool = 0,
                MaxReplicatedMergesInQueue = 0,
                NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge = 0,
                PartsToDelayInsert = 0,
                PartsToThrowInsert = 0,
                ReplicatedDeduplicationWindow = 0,
                ReplicatedDeduplicationWindowSeconds = 0,
            },
            MetricLogEnabled = false,
            MetricLogRetentionSize = 0,
            MetricLogRetentionTime = 0,
            PartLogRetentionSize = 0,
            PartLogRetentionTime = 0,
            QueryLogRetentionSize = 0,
            QueryLogRetentionTime = 0,
            QueryThreadLogEnabled = false,
            QueryThreadLogRetentionSize = 0,
            QueryThreadLogRetentionTime = 0,
            Rabbitmq = new Yandex.Inputs.MdbClickhouseClusterClickhouseConfigRabbitmqArgs
            {
                Password = "string",
                Username = "string",
            },
            TextLogEnabled = false,
            TextLogLevel = "string",
            TextLogRetentionSize = 0,
            TextLogRetentionTime = 0,
            Timezone = "string",
            TraceLogEnabled = false,
            TraceLogRetentionSize = 0,
            TraceLogRetentionTime = 0,
            UncompressedCacheSize = 0,
        },
    },
    Databases = new[]
    {
        new Yandex.Inputs.MdbClickhouseClusterDatabaseArgs
        {
            Name = "string",
        },
    },
    MlModels = new[]
    {
        new Yandex.Inputs.MdbClickhouseClusterMlModelArgs
        {
            Name = "string",
            Type = "string",
            Uri = "string",
        },
    },
    Access = new Yandex.Inputs.MdbClickhouseClusterAccessArgs
    {
        DataLens = false,
        Metrika = false,
        Serverless = false,
        WebSql = false,
    },
    DeletionProtection = false,
    Description = "string",
    CloudStorage = new Yandex.Inputs.MdbClickhouseClusterCloudStorageArgs
    {
        Enabled = false,
    },
    FolderId = "string",
    FormatSchemas = new[]
    {
        new Yandex.Inputs.MdbClickhouseClusterFormatSchemaArgs
        {
            Name = "string",
            Type = "string",
            Uri = "string",
        },
    },
    BackupWindowStart = new Yandex.Inputs.MdbClickhouseClusterBackupWindowStartArgs
    {
        Hours = 0,
        Minutes = 0,
    },
    Labels = 
    {
        { "string", "string" },
    },
    MaintenanceWindow = new Yandex.Inputs.MdbClickhouseClusterMaintenanceWindowArgs
    {
        Type = "string",
        Day = "string",
        Hour = 0,
    },
    CopySchemaOnNewHosts = false,
    Name = "string",
    AdminPassword = "string",
    SecurityGroupIds = new[]
    {
        "string",
    },
    ServiceAccountId = "string",
    ShardGroups = new[]
    {
        new Yandex.Inputs.MdbClickhouseClusterShardGroupArgs
        {
            Name = "string",
            ShardNames = new[]
            {
                "string",
            },
            Description = "string",
        },
    },
    SqlDatabaseManagement = false,
    SqlUserManagement = false,
    Users = new[]
    {
        new Yandex.Inputs.MdbClickhouseClusterUserArgs
        {
            Name = "string",
            Password = "string",
            Permissions = new[]
            {
                new Yandex.Inputs.MdbClickhouseClusterUserPermissionArgs
                {
                    DatabaseName = "string",
                },
            },
            Quotas = new[]
            {
                new Yandex.Inputs.MdbClickhouseClusterUserQuotaArgs
                {
                    IntervalDuration = 0,
                    Errors = 0,
                    ExecutionTime = 0,
                    Queries = 0,
                    ReadRows = 0,
                    ResultRows = 0,
                },
            },
            Settings = new Yandex.Inputs.MdbClickhouseClusterUserSettingsArgs
            {
                AddHttpCorsHeader = false,
                AllowDdl = false,
                Compile = false,
                CompileExpressions = false,
                ConnectTimeout = 0,
                CountDistinctImplementation = "string",
                DistinctOverflowMode = "string",
                DistributedAggregationMemoryEfficient = false,
                DistributedDdlTaskTimeout = 0,
                DistributedProductMode = "string",
                EmptyResultForAggregationByEmptySet = false,
                EnableHttpCompression = false,
                FallbackToStaleReplicasForDistributedQueries = false,
                ForceIndexByDate = false,
                ForcePrimaryKey = false,
                GroupByOverflowMode = "string",
                GroupByTwoLevelThreshold = 0,
                GroupByTwoLevelThresholdBytes = 0,
                HttpConnectionTimeout = 0,
                HttpHeadersProgressInterval = 0,
                HttpReceiveTimeout = 0,
                HttpSendTimeout = 0,
                InputFormatDefaultsForOmittedFields = false,
                InputFormatValuesInterpretExpressions = false,
                InsertQuorum = 0,
                InsertQuorumTimeout = 0,
                JoinOverflowMode = "string",
                JoinUseNulls = false,
                JoinedSubqueryRequiresAlias = false,
                LowCardinalityAllowInNativeFormat = false,
                MaxAstDepth = 0,
                MaxAstElements = 0,
                MaxBlockSize = 0,
                MaxBytesBeforeExternalGroupBy = 0,
                MaxBytesBeforeExternalSort = 0,
                MaxBytesInDistinct = 0,
                MaxBytesInJoin = 0,
                MaxBytesInSet = 0,
                MaxBytesToRead = 0,
                MaxBytesToSort = 0,
                MaxBytesToTransfer = 0,
                MaxColumnsToRead = 0,
                MaxExecutionTime = 0,
                MaxExpandedAstElements = 0,
                MaxInsertBlockSize = 0,
                MaxMemoryUsage = 0,
                MaxMemoryUsageForUser = 0,
                MaxNetworkBandwidth = 0,
                MaxNetworkBandwidthForUser = 0,
                MaxQuerySize = 0,
                MaxReplicaDelayForDistributedQueries = 0,
                MaxResultBytes = 0,
                MaxResultRows = 0,
                MaxRowsInDistinct = 0,
                MaxRowsInJoin = 0,
                MaxRowsInSet = 0,
                MaxRowsToGroupBy = 0,
                MaxRowsToRead = 0,
                MaxRowsToSort = 0,
                MaxRowsToTransfer = 0,
                MaxTemporaryColumns = 0,
                MaxTemporaryNonConstColumns = 0,
                MaxThreads = 0,
                MergeTreeMaxBytesToUseCache = 0,
                MergeTreeMaxRowsToUseCache = 0,
                MergeTreeMinBytesForConcurrentRead = 0,
                MergeTreeMinRowsForConcurrentRead = 0,
                MinBytesToUseDirectIo = 0,
                MinCountToCompile = 0,
                MinCountToCompileExpression = 0,
                MinExecutionSpeed = 0,
                MinExecutionSpeedBytes = 0,
                MinInsertBlockSizeBytes = 0,
                MinInsertBlockSizeRows = 0,
                OutputFormatJsonQuote64bitIntegers = false,
                OutputFormatJsonQuoteDenormals = false,
                Priority = 0,
                QuotaMode = "string",
                ReadOverflowMode = "string",
                Readonly = 0,
                ReceiveTimeout = 0,
                ReplicationAlterPartitionsSync = 0,
                ResultOverflowMode = "string",
                SelectSequentialConsistency = false,
                SendProgressInHttpHeaders = false,
                SendTimeout = 0,
                SetOverflowMode = "string",
                SkipUnavailableShards = false,
                SortOverflowMode = "string",
                TimeoutOverflowMode = "string",
                TransferOverflowMode = "string",
                TransformNullIn = false,
                UseUncompressedCache = false,
            },
        },
    },
    Version = "string",
    Zookeeper = new Yandex.Inputs.MdbClickhouseClusterZookeeperArgs
    {
        Resources = new Yandex.Inputs.MdbClickhouseClusterZookeeperResourcesArgs
        {
            DiskSize = 0,
            DiskTypeId = "string",
            ResourcePresetId = "string",
        },
    },
});
Copy
example, err := yandex.NewMdbClickhouseCluster(ctx, "mdbClickhouseClusterResource", &yandex.MdbClickhouseClusterArgs{
	Environment: pulumi.String("string"),
	NetworkId:   pulumi.String("string"),
	Hosts: yandex.MdbClickhouseClusterHostArray{
		&yandex.MdbClickhouseClusterHostArgs{
			Type:           pulumi.String("string"),
			Zone:           pulumi.String("string"),
			AssignPublicIp: pulumi.Bool(false),
			Fqdn:           pulumi.String("string"),
			ShardName:      pulumi.String("string"),
			SubnetId:       pulumi.String("string"),
		},
	},
	Clickhouse: &yandex.MdbClickhouseClusterClickhouseArgs{
		Resources: &yandex.MdbClickhouseClusterClickhouseResourcesArgs{
			DiskSize:         pulumi.Int(0),
			DiskTypeId:       pulumi.String("string"),
			ResourcePresetId: pulumi.String("string"),
		},
		Config: &yandex.MdbClickhouseClusterClickhouseConfigArgs{
			BackgroundPoolSize:         pulumi.Int(0),
			BackgroundSchedulePoolSize: pulumi.Int(0),
			Compressions: yandex.MdbClickhouseClusterClickhouseConfigCompressionArray{
				&yandex.MdbClickhouseClusterClickhouseConfigCompressionArgs{
					Method:           pulumi.String("string"),
					MinPartSize:      pulumi.Int(0),
					MinPartSizeRatio: pulumi.Float64(0),
				},
			},
			GeobaseUri: pulumi.String("string"),
			GraphiteRollups: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArray{
				&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs{
					Name: pulumi.String("string"),
					Patterns: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArray{
						&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs{
							Function: pulumi.String("string"),
							Regexp:   pulumi.String("string"),
							Retentions: yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArray{
								&yandex.MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs{
									Age:       pulumi.Int(0),
									Precision: pulumi.Int(0),
								},
							},
						},
					},
				},
			},
			Kafka: &yandex.MdbClickhouseClusterClickhouseConfigKafkaArgs{
				SaslMechanism:    pulumi.String("string"),
				SaslPassword:     pulumi.String("string"),
				SaslUsername:     pulumi.String("string"),
				SecurityProtocol: pulumi.String("string"),
			},
			KafkaTopics: yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArray{
				&yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicArgs{
					Name: pulumi.String("string"),
					Settings: &yandex.MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs{
						SaslMechanism:    pulumi.String("string"),
						SaslPassword:     pulumi.String("string"),
						SaslUsername:     pulumi.String("string"),
						SecurityProtocol: pulumi.String("string"),
					},
				},
			},
			KeepAliveTimeout:       pulumi.Int(0),
			LogLevel:               pulumi.String("string"),
			MarkCacheSize:          pulumi.Int(0),
			MaxConcurrentQueries:   pulumi.Int(0),
			MaxConnections:         pulumi.Int(0),
			MaxPartitionSizeToDrop: pulumi.Int(0),
			MaxTableSizeToDrop:     pulumi.Int(0),
			MergeTree: &yandex.MdbClickhouseClusterClickhouseConfigMergeTreeArgs{
				MaxBytesToMergeAtMinSpaceInPool:                pulumi.Int(0),
				MaxReplicatedMergesInQueue:                     pulumi.Int(0),
				NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: pulumi.Int(0),
				PartsToDelayInsert:                             pulumi.Int(0),
				PartsToThrowInsert:                             pulumi.Int(0),
				ReplicatedDeduplicationWindow:                  pulumi.Int(0),
				ReplicatedDeduplicationWindowSeconds:           pulumi.Int(0),
			},
			MetricLogEnabled:            pulumi.Bool(false),
			MetricLogRetentionSize:      pulumi.Int(0),
			MetricLogRetentionTime:      pulumi.Int(0),
			PartLogRetentionSize:        pulumi.Int(0),
			PartLogRetentionTime:        pulumi.Int(0),
			QueryLogRetentionSize:       pulumi.Int(0),
			QueryLogRetentionTime:       pulumi.Int(0),
			QueryThreadLogEnabled:       pulumi.Bool(false),
			QueryThreadLogRetentionSize: pulumi.Int(0),
			QueryThreadLogRetentionTime: pulumi.Int(0),
			Rabbitmq: &yandex.MdbClickhouseClusterClickhouseConfigRabbitmqArgs{
				Password: pulumi.String("string"),
				Username: pulumi.String("string"),
			},
			TextLogEnabled:        pulumi.Bool(false),
			TextLogLevel:          pulumi.String("string"),
			TextLogRetentionSize:  pulumi.Int(0),
			TextLogRetentionTime:  pulumi.Int(0),
			Timezone:              pulumi.String("string"),
			TraceLogEnabled:       pulumi.Bool(false),
			TraceLogRetentionSize: pulumi.Int(0),
			TraceLogRetentionTime: pulumi.Int(0),
			UncompressedCacheSize: pulumi.Int(0),
		},
	},
	Databases: yandex.MdbClickhouseClusterDatabaseArray{
		&yandex.MdbClickhouseClusterDatabaseArgs{
			Name: pulumi.String("string"),
		},
	},
	MlModels: yandex.MdbClickhouseClusterMlModelArray{
		&yandex.MdbClickhouseClusterMlModelArgs{
			Name: pulumi.String("string"),
			Type: pulumi.String("string"),
			Uri:  pulumi.String("string"),
		},
	},
	Access: &yandex.MdbClickhouseClusterAccessArgs{
		DataLens:   pulumi.Bool(false),
		Metrika:    pulumi.Bool(false),
		Serverless: pulumi.Bool(false),
		WebSql:     pulumi.Bool(false),
	},
	DeletionProtection: pulumi.Bool(false),
	Description:        pulumi.String("string"),
	CloudStorage: &yandex.MdbClickhouseClusterCloudStorageArgs{
		Enabled: pulumi.Bool(false),
	},
	FolderId: pulumi.String("string"),
	FormatSchemas: yandex.MdbClickhouseClusterFormatSchemaArray{
		&yandex.MdbClickhouseClusterFormatSchemaArgs{
			Name: pulumi.String("string"),
			Type: pulumi.String("string"),
			Uri:  pulumi.String("string"),
		},
	},
	BackupWindowStart: &yandex.MdbClickhouseClusterBackupWindowStartArgs{
		Hours:   pulumi.Int(0),
		Minutes: pulumi.Int(0),
	},
	Labels: pulumi.StringMap{
		"string": pulumi.String("string"),
	},
	MaintenanceWindow: &yandex.MdbClickhouseClusterMaintenanceWindowArgs{
		Type: pulumi.String("string"),
		Day:  pulumi.String("string"),
		Hour: pulumi.Int(0),
	},
	CopySchemaOnNewHosts: pulumi.Bool(false),
	Name:                 pulumi.String("string"),
	AdminPassword:        pulumi.String("string"),
	SecurityGroupIds: pulumi.StringArray{
		pulumi.String("string"),
	},
	ServiceAccountId: pulumi.String("string"),
	ShardGroups: yandex.MdbClickhouseClusterShardGroupArray{
		&yandex.MdbClickhouseClusterShardGroupArgs{
			Name: pulumi.String("string"),
			ShardNames: pulumi.StringArray{
				pulumi.String("string"),
			},
			Description: pulumi.String("string"),
		},
	},
	SqlDatabaseManagement: pulumi.Bool(false),
	SqlUserManagement:     pulumi.Bool(false),
	Users: yandex.MdbClickhouseClusterUserArray{
		&yandex.MdbClickhouseClusterUserArgs{
			Name:     pulumi.String("string"),
			Password: pulumi.String("string"),
			Permissions: yandex.MdbClickhouseClusterUserPermissionArray{
				&yandex.MdbClickhouseClusterUserPermissionArgs{
					DatabaseName: pulumi.String("string"),
				},
			},
			Quotas: yandex.MdbClickhouseClusterUserQuotaArray{
				&yandex.MdbClickhouseClusterUserQuotaArgs{
					IntervalDuration: pulumi.Int(0),
					Errors:           pulumi.Int(0),
					ExecutionTime:    pulumi.Int(0),
					Queries:          pulumi.Int(0),
					ReadRows:         pulumi.Int(0),
					ResultRows:       pulumi.Int(0),
				},
			},
			Settings: &yandex.MdbClickhouseClusterUserSettingsArgs{
				AddHttpCorsHeader:                            pulumi.Bool(false),
				AllowDdl:                                     pulumi.Bool(false),
				Compile:                                      pulumi.Bool(false),
				CompileExpressions:                           pulumi.Bool(false),
				ConnectTimeout:                               pulumi.Int(0),
				CountDistinctImplementation:                  pulumi.String("string"),
				DistinctOverflowMode:                         pulumi.String("string"),
				DistributedAggregationMemoryEfficient:        pulumi.Bool(false),
				DistributedDdlTaskTimeout:                    pulumi.Int(0),
				DistributedProductMode:                       pulumi.String("string"),
				EmptyResultForAggregationByEmptySet:          pulumi.Bool(false),
				EnableHttpCompression:                        pulumi.Bool(false),
				FallbackToStaleReplicasForDistributedQueries: pulumi.Bool(false),
				ForceIndexByDate:                             pulumi.Bool(false),
				ForcePrimaryKey:                              pulumi.Bool(false),
				GroupByOverflowMode:                          pulumi.String("string"),
				GroupByTwoLevelThreshold:                     pulumi.Int(0),
				GroupByTwoLevelThresholdBytes:                pulumi.Int(0),
				HttpConnectionTimeout:                        pulumi.Int(0),
				HttpHeadersProgressInterval:                  pulumi.Int(0),
				HttpReceiveTimeout:                           pulumi.Int(0),
				HttpSendTimeout:                              pulumi.Int(0),
				InputFormatDefaultsForOmittedFields:          pulumi.Bool(false),
				InputFormatValuesInterpretExpressions:        pulumi.Bool(false),
				InsertQuorum:                                 pulumi.Int(0),
				InsertQuorumTimeout:                          pulumi.Int(0),
				JoinOverflowMode:                             pulumi.String("string"),
				JoinUseNulls:                                 pulumi.Bool(false),
				JoinedSubqueryRequiresAlias:                  pulumi.Bool(false),
				LowCardinalityAllowInNativeFormat:            pulumi.Bool(false),
				MaxAstDepth:                                  pulumi.Int(0),
				MaxAstElements:                               pulumi.Int(0),
				MaxBlockSize:                                 pulumi.Int(0),
				MaxBytesBeforeExternalGroupBy:                pulumi.Int(0),
				MaxBytesBeforeExternalSort:                   pulumi.Int(0),
				MaxBytesInDistinct:                           pulumi.Int(0),
				MaxBytesInJoin:                               pulumi.Int(0),
				MaxBytesInSet:                                pulumi.Int(0),
				MaxBytesToRead:                               pulumi.Int(0),
				MaxBytesToSort:                               pulumi.Int(0),
				MaxBytesToTransfer:                           pulumi.Int(0),
				MaxColumnsToRead:                             pulumi.Int(0),
				MaxExecutionTime:                             pulumi.Int(0),
				MaxExpandedAstElements:                       pulumi.Int(0),
				MaxInsertBlockSize:                           pulumi.Int(0),
				MaxMemoryUsage:                               pulumi.Int(0),
				MaxMemoryUsageForUser:                        pulumi.Int(0),
				MaxNetworkBandwidth:                          pulumi.Int(0),
				MaxNetworkBandwidthForUser:                   pulumi.Int(0),
				MaxQuerySize:                                 pulumi.Int(0),
				MaxReplicaDelayForDistributedQueries:         pulumi.Int(0),
				MaxResultBytes:                               pulumi.Int(0),
				MaxResultRows:                                pulumi.Int(0),
				MaxRowsInDistinct:                            pulumi.Int(0),
				MaxRowsInJoin:                                pulumi.Int(0),
				MaxRowsInSet:                                 pulumi.Int(0),
				MaxRowsToGroupBy:                             pulumi.Int(0),
				MaxRowsToRead:                                pulumi.Int(0),
				MaxRowsToSort:                                pulumi.Int(0),
				MaxRowsToTransfer:                            pulumi.Int(0),
				MaxTemporaryColumns:                          pulumi.Int(0),
				MaxTemporaryNonConstColumns:                  pulumi.Int(0),
				MaxThreads:                                   pulumi.Int(0),
				MergeTreeMaxBytesToUseCache:                  pulumi.Int(0),
				MergeTreeMaxRowsToUseCache:                   pulumi.Int(0),
				MergeTreeMinBytesForConcurrentRead:           pulumi.Int(0),
				MergeTreeMinRowsForConcurrentRead:            pulumi.Int(0),
				MinBytesToUseDirectIo:                        pulumi.Int(0),
				MinCountToCompile:                            pulumi.Int(0),
				MinCountToCompileExpression:                  pulumi.Int(0),
				MinExecutionSpeed:                            pulumi.Int(0),
				MinExecutionSpeedBytes:                       pulumi.Int(0),
				MinInsertBlockSizeBytes:                      pulumi.Int(0),
				MinInsertBlockSizeRows:                       pulumi.Int(0),
				OutputFormatJsonQuote64bitIntegers:           pulumi.Bool(false),
				OutputFormatJsonQuoteDenormals:               pulumi.Bool(false),
				Priority:                                     pulumi.Int(0),
				QuotaMode:                                    pulumi.String("string"),
				ReadOverflowMode:                             pulumi.String("string"),
				Readonly:                                     pulumi.Int(0),
				ReceiveTimeout:                               pulumi.Int(0),
				ReplicationAlterPartitionsSync:               pulumi.Int(0),
				ResultOverflowMode:                           pulumi.String("string"),
				SelectSequentialConsistency:                  pulumi.Bool(false),
				SendProgressInHttpHeaders:                    pulumi.Bool(false),
				SendTimeout:                                  pulumi.Int(0),
				SetOverflowMode:                              pulumi.String("string"),
				SkipUnavailableShards:                        pulumi.Bool(false),
				SortOverflowMode:                             pulumi.String("string"),
				TimeoutOverflowMode:                          pulumi.String("string"),
				TransferOverflowMode:                         pulumi.String("string"),
				TransformNullIn:                              pulumi.Bool(false),
				UseUncompressedCache:                         pulumi.Bool(false),
			},
		},
	},
	Version: pulumi.String("string"),
	Zookeeper: &yandex.MdbClickhouseClusterZookeeperArgs{
		Resources: &yandex.MdbClickhouseClusterZookeeperResourcesArgs{
			DiskSize:         pulumi.Int(0),
			DiskTypeId:       pulumi.String("string"),
			ResourcePresetId: pulumi.String("string"),
		},
	},
})
Copy
var mdbClickhouseClusterResource = new MdbClickhouseCluster("mdbClickhouseClusterResource", MdbClickhouseClusterArgs.builder()
    .environment("string")
    .networkId("string")
    .hosts(MdbClickhouseClusterHostArgs.builder()
        .type("string")
        .zone("string")
        .assignPublicIp(false)
        .fqdn("string")
        .shardName("string")
        .subnetId("string")
        .build())
    .clickhouse(MdbClickhouseClusterClickhouseArgs.builder()
        .resources(MdbClickhouseClusterClickhouseResourcesArgs.builder()
            .diskSize(0)
            .diskTypeId("string")
            .resourcePresetId("string")
            .build())
        .config(MdbClickhouseClusterClickhouseConfigArgs.builder()
            .backgroundPoolSize(0)
            .backgroundSchedulePoolSize(0)
            .compressions(MdbClickhouseClusterClickhouseConfigCompressionArgs.builder()
                .method("string")
                .minPartSize(0)
                .minPartSizeRatio(0)
                .build())
            .geobaseUri("string")
            .graphiteRollups(MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs.builder()
                .name("string")
                .patterns(MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs.builder()
                    .function("string")
                    .regexp("string")
                    .retentions(MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs.builder()
                        .age(0)
                        .precision(0)
                        .build())
                    .build())
                .build())
            .kafka(MdbClickhouseClusterClickhouseConfigKafkaArgs.builder()
                .saslMechanism("string")
                .saslPassword("string")
                .saslUsername("string")
                .securityProtocol("string")
                .build())
            .kafkaTopics(MdbClickhouseClusterClickhouseConfigKafkaTopicArgs.builder()
                .name("string")
                .settings(MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs.builder()
                    .saslMechanism("string")
                    .saslPassword("string")
                    .saslUsername("string")
                    .securityProtocol("string")
                    .build())
                .build())
            .keepAliveTimeout(0)
            .logLevel("string")
            .markCacheSize(0)
            .maxConcurrentQueries(0)
            .maxConnections(0)
            .maxPartitionSizeToDrop(0)
            .maxTableSizeToDrop(0)
            .mergeTree(MdbClickhouseClusterClickhouseConfigMergeTreeArgs.builder()
                .maxBytesToMergeAtMinSpaceInPool(0)
                .maxReplicatedMergesInQueue(0)
                .numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge(0)
                .partsToDelayInsert(0)
                .partsToThrowInsert(0)
                .replicatedDeduplicationWindow(0)
                .replicatedDeduplicationWindowSeconds(0)
                .build())
            .metricLogEnabled(false)
            .metricLogRetentionSize(0)
            .metricLogRetentionTime(0)
            .partLogRetentionSize(0)
            .partLogRetentionTime(0)
            .queryLogRetentionSize(0)
            .queryLogRetentionTime(0)
            .queryThreadLogEnabled(false)
            .queryThreadLogRetentionSize(0)
            .queryThreadLogRetentionTime(0)
            .rabbitmq(MdbClickhouseClusterClickhouseConfigRabbitmqArgs.builder()
                .password("string")
                .username("string")
                .build())
            .textLogEnabled(false)
            .textLogLevel("string")
            .textLogRetentionSize(0)
            .textLogRetentionTime(0)
            .timezone("string")
            .traceLogEnabled(false)
            .traceLogRetentionSize(0)
            .traceLogRetentionTime(0)
            .uncompressedCacheSize(0)
            .build())
        .build())
    .databases(MdbClickhouseClusterDatabaseArgs.builder()
        .name("string")
        .build())
    .mlModels(MdbClickhouseClusterMlModelArgs.builder()
        .name("string")
        .type("string")
        .uri("string")
        .build())
    .access(MdbClickhouseClusterAccessArgs.builder()
        .dataLens(false)
        .metrika(false)
        .serverless(false)
        .webSql(false)
        .build())
    .deletionProtection(false)
    .description("string")
    .cloudStorage(MdbClickhouseClusterCloudStorageArgs.builder()
        .enabled(false)
        .build())
    .folderId("string")
    .formatSchemas(MdbClickhouseClusterFormatSchemaArgs.builder()
        .name("string")
        .type("string")
        .uri("string")
        .build())
    .backupWindowStart(MdbClickhouseClusterBackupWindowStartArgs.builder()
        .hours(0)
        .minutes(0)
        .build())
    .labels(Map.of("string", "string"))
    .maintenanceWindow(MdbClickhouseClusterMaintenanceWindowArgs.builder()
        .type("string")
        .day("string")
        .hour(0)
        .build())
    .copySchemaOnNewHosts(false)
    .name("string")
    .adminPassword("string")
    .securityGroupIds("string")
    .serviceAccountId("string")
    .shardGroups(MdbClickhouseClusterShardGroupArgs.builder()
        .name("string")
        .shardNames("string")
        .description("string")
        .build())
    .sqlDatabaseManagement(false)
    .sqlUserManagement(false)
    .users(MdbClickhouseClusterUserArgs.builder()
        .name("string")
        .password("string")
        .permissions(MdbClickhouseClusterUserPermissionArgs.builder()
            .databaseName("string")
            .build())
        .quotas(MdbClickhouseClusterUserQuotaArgs.builder()
            .intervalDuration(0)
            .errors(0)
            .executionTime(0)
            .queries(0)
            .readRows(0)
            .resultRows(0)
            .build())
        .settings(MdbClickhouseClusterUserSettingsArgs.builder()
            .addHttpCorsHeader(false)
            .allowDdl(false)
            .compile(false)
            .compileExpressions(false)
            .connectTimeout(0)
            .countDistinctImplementation("string")
            .distinctOverflowMode("string")
            .distributedAggregationMemoryEfficient(false)
            .distributedDdlTaskTimeout(0)
            .distributedProductMode("string")
            .emptyResultForAggregationByEmptySet(false)
            .enableHttpCompression(false)
            .fallbackToStaleReplicasForDistributedQueries(false)
            .forceIndexByDate(false)
            .forcePrimaryKey(false)
            .groupByOverflowMode("string")
            .groupByTwoLevelThreshold(0)
            .groupByTwoLevelThresholdBytes(0)
            .httpConnectionTimeout(0)
            .httpHeadersProgressInterval(0)
            .httpReceiveTimeout(0)
            .httpSendTimeout(0)
            .inputFormatDefaultsForOmittedFields(false)
            .inputFormatValuesInterpretExpressions(false)
            .insertQuorum(0)
            .insertQuorumTimeout(0)
            .joinOverflowMode("string")
            .joinUseNulls(false)
            .joinedSubqueryRequiresAlias(false)
            .lowCardinalityAllowInNativeFormat(false)
            .maxAstDepth(0)
            .maxAstElements(0)
            .maxBlockSize(0)
            .maxBytesBeforeExternalGroupBy(0)
            .maxBytesBeforeExternalSort(0)
            .maxBytesInDistinct(0)
            .maxBytesInJoin(0)
            .maxBytesInSet(0)
            .maxBytesToRead(0)
            .maxBytesToSort(0)
            .maxBytesToTransfer(0)
            .maxColumnsToRead(0)
            .maxExecutionTime(0)
            .maxExpandedAstElements(0)
            .maxInsertBlockSize(0)
            .maxMemoryUsage(0)
            .maxMemoryUsageForUser(0)
            .maxNetworkBandwidth(0)
            .maxNetworkBandwidthForUser(0)
            .maxQuerySize(0)
            .maxReplicaDelayForDistributedQueries(0)
            .maxResultBytes(0)
            .maxResultRows(0)
            .maxRowsInDistinct(0)
            .maxRowsInJoin(0)
            .maxRowsInSet(0)
            .maxRowsToGroupBy(0)
            .maxRowsToRead(0)
            .maxRowsToSort(0)
            .maxRowsToTransfer(0)
            .maxTemporaryColumns(0)
            .maxTemporaryNonConstColumns(0)
            .maxThreads(0)
            .mergeTreeMaxBytesToUseCache(0)
            .mergeTreeMaxRowsToUseCache(0)
            .mergeTreeMinBytesForConcurrentRead(0)
            .mergeTreeMinRowsForConcurrentRead(0)
            .minBytesToUseDirectIo(0)
            .minCountToCompile(0)
            .minCountToCompileExpression(0)
            .minExecutionSpeed(0)
            .minExecutionSpeedBytes(0)
            .minInsertBlockSizeBytes(0)
            .minInsertBlockSizeRows(0)
            .outputFormatJsonQuote64bitIntegers(false)
            .outputFormatJsonQuoteDenormals(false)
            .priority(0)
            .quotaMode("string")
            .readOverflowMode("string")
            .readonly(0)
            .receiveTimeout(0)
            .replicationAlterPartitionsSync(0)
            .resultOverflowMode("string")
            .selectSequentialConsistency(false)
            .sendProgressInHttpHeaders(false)
            .sendTimeout(0)
            .setOverflowMode("string")
            .skipUnavailableShards(false)
            .sortOverflowMode("string")
            .timeoutOverflowMode("string")
            .transferOverflowMode("string")
            .transformNullIn(false)
            .useUncompressedCache(false)
            .build())
        .build())
    .version("string")
    .zookeeper(MdbClickhouseClusterZookeeperArgs.builder()
        .resources(MdbClickhouseClusterZookeeperResourcesArgs.builder()
            .diskSize(0)
            .diskTypeId("string")
            .resourcePresetId("string")
            .build())
        .build())
    .build());
Copy
mdb_clickhouse_cluster_resource = yandex.MdbClickhouseCluster("mdbClickhouseClusterResource",
    environment="string",
    network_id="string",
    hosts=[{
        "type": "string",
        "zone": "string",
        "assign_public_ip": False,
        "fqdn": "string",
        "shard_name": "string",
        "subnet_id": "string",
    }],
    clickhouse={
        "resources": {
            "disk_size": 0,
            "disk_type_id": "string",
            "resource_preset_id": "string",
        },
        "config": {
            "background_pool_size": 0,
            "background_schedule_pool_size": 0,
            "compressions": [{
                "method": "string",
                "min_part_size": 0,
                "min_part_size_ratio": 0,
            }],
            "geobase_uri": "string",
            "graphite_rollups": [{
                "name": "string",
                "patterns": [{
                    "function": "string",
                    "regexp": "string",
                    "retentions": [{
                        "age": 0,
                        "precision": 0,
                    }],
                }],
            }],
            "kafka": {
                "sasl_mechanism": "string",
                "sasl_password": "string",
                "sasl_username": "string",
                "security_protocol": "string",
            },
            "kafka_topics": [{
                "name": "string",
                "settings": {
                    "sasl_mechanism": "string",
                    "sasl_password": "string",
                    "sasl_username": "string",
                    "security_protocol": "string",
                },
            }],
            "keep_alive_timeout": 0,
            "log_level": "string",
            "mark_cache_size": 0,
            "max_concurrent_queries": 0,
            "max_connections": 0,
            "max_partition_size_to_drop": 0,
            "max_table_size_to_drop": 0,
            "merge_tree": {
                "max_bytes_to_merge_at_min_space_in_pool": 0,
                "max_replicated_merges_in_queue": 0,
                "number_of_free_entries_in_pool_to_lower_max_size_of_merge": 0,
                "parts_to_delay_insert": 0,
                "parts_to_throw_insert": 0,
                "replicated_deduplication_window": 0,
                "replicated_deduplication_window_seconds": 0,
            },
            "metric_log_enabled": False,
            "metric_log_retention_size": 0,
            "metric_log_retention_time": 0,
            "part_log_retention_size": 0,
            "part_log_retention_time": 0,
            "query_log_retention_size": 0,
            "query_log_retention_time": 0,
            "query_thread_log_enabled": False,
            "query_thread_log_retention_size": 0,
            "query_thread_log_retention_time": 0,
            "rabbitmq": {
                "password": "string",
                "username": "string",
            },
            "text_log_enabled": False,
            "text_log_level": "string",
            "text_log_retention_size": 0,
            "text_log_retention_time": 0,
            "timezone": "string",
            "trace_log_enabled": False,
            "trace_log_retention_size": 0,
            "trace_log_retention_time": 0,
            "uncompressed_cache_size": 0,
        },
    },
    databases=[{
        "name": "string",
    }],
    ml_models=[{
        "name": "string",
        "type": "string",
        "uri": "string",
    }],
    access={
        "data_lens": False,
        "metrika": False,
        "serverless": False,
        "web_sql": False,
    },
    deletion_protection=False,
    description="string",
    cloud_storage={
        "enabled": False,
    },
    folder_id="string",
    format_schemas=[{
        "name": "string",
        "type": "string",
        "uri": "string",
    }],
    backup_window_start={
        "hours": 0,
        "minutes": 0,
    },
    labels={
        "string": "string",
    },
    maintenance_window={
        "type": "string",
        "day": "string",
        "hour": 0,
    },
    copy_schema_on_new_hosts=False,
    name="string",
    admin_password="string",
    security_group_ids=["string"],
    service_account_id="string",
    shard_groups=[{
        "name": "string",
        "shard_names": ["string"],
        "description": "string",
    }],
    sql_database_management=False,
    sql_user_management=False,
    users=[{
        "name": "string",
        "password": "string",
        "permissions": [{
            "database_name": "string",
        }],
        "quotas": [{
            "interval_duration": 0,
            "errors": 0,
            "execution_time": 0,
            "queries": 0,
            "read_rows": 0,
            "result_rows": 0,
        }],
        "settings": {
            "add_http_cors_header": False,
            "allow_ddl": False,
            "compile": False,
            "compile_expressions": False,
            "connect_timeout": 0,
            "count_distinct_implementation": "string",
            "distinct_overflow_mode": "string",
            "distributed_aggregation_memory_efficient": False,
            "distributed_ddl_task_timeout": 0,
            "distributed_product_mode": "string",
            "empty_result_for_aggregation_by_empty_set": False,
            "enable_http_compression": False,
            "fallback_to_stale_replicas_for_distributed_queries": False,
            "force_index_by_date": False,
            "force_primary_key": False,
            "group_by_overflow_mode": "string",
            "group_by_two_level_threshold": 0,
            "group_by_two_level_threshold_bytes": 0,
            "http_connection_timeout": 0,
            "http_headers_progress_interval": 0,
            "http_receive_timeout": 0,
            "http_send_timeout": 0,
            "input_format_defaults_for_omitted_fields": False,
            "input_format_values_interpret_expressions": False,
            "insert_quorum": 0,
            "insert_quorum_timeout": 0,
            "join_overflow_mode": "string",
            "join_use_nulls": False,
            "joined_subquery_requires_alias": False,
            "low_cardinality_allow_in_native_format": False,
            "max_ast_depth": 0,
            "max_ast_elements": 0,
            "max_block_size": 0,
            "max_bytes_before_external_group_by": 0,
            "max_bytes_before_external_sort": 0,
            "max_bytes_in_distinct": 0,
            "max_bytes_in_join": 0,
            "max_bytes_in_set": 0,
            "max_bytes_to_read": 0,
            "max_bytes_to_sort": 0,
            "max_bytes_to_transfer": 0,
            "max_columns_to_read": 0,
            "max_execution_time": 0,
            "max_expanded_ast_elements": 0,
            "max_insert_block_size": 0,
            "max_memory_usage": 0,
            "max_memory_usage_for_user": 0,
            "max_network_bandwidth": 0,
            "max_network_bandwidth_for_user": 0,
            "max_query_size": 0,
            "max_replica_delay_for_distributed_queries": 0,
            "max_result_bytes": 0,
            "max_result_rows": 0,
            "max_rows_in_distinct": 0,
            "max_rows_in_join": 0,
            "max_rows_in_set": 0,
            "max_rows_to_group_by": 0,
            "max_rows_to_read": 0,
            "max_rows_to_sort": 0,
            "max_rows_to_transfer": 0,
            "max_temporary_columns": 0,
            "max_temporary_non_const_columns": 0,
            "max_threads": 0,
            "merge_tree_max_bytes_to_use_cache": 0,
            "merge_tree_max_rows_to_use_cache": 0,
            "merge_tree_min_bytes_for_concurrent_read": 0,
            "merge_tree_min_rows_for_concurrent_read": 0,
            "min_bytes_to_use_direct_io": 0,
            "min_count_to_compile": 0,
            "min_count_to_compile_expression": 0,
            "min_execution_speed": 0,
            "min_execution_speed_bytes": 0,
            "min_insert_block_size_bytes": 0,
            "min_insert_block_size_rows": 0,
            "output_format_json_quote64bit_integers": False,
            "output_format_json_quote_denormals": False,
            "priority": 0,
            "quota_mode": "string",
            "read_overflow_mode": "string",
            "readonly": 0,
            "receive_timeout": 0,
            "replication_alter_partitions_sync": 0,
            "result_overflow_mode": "string",
            "select_sequential_consistency": False,
            "send_progress_in_http_headers": False,
            "send_timeout": 0,
            "set_overflow_mode": "string",
            "skip_unavailable_shards": False,
            "sort_overflow_mode": "string",
            "timeout_overflow_mode": "string",
            "transfer_overflow_mode": "string",
            "transform_null_in": False,
            "use_uncompressed_cache": False,
        },
    }],
    version="string",
    zookeeper={
        "resources": {
            "disk_size": 0,
            "disk_type_id": "string",
            "resource_preset_id": "string",
        },
    })
Copy
const mdbClickhouseClusterResource = new yandex.MdbClickhouseCluster("mdbClickhouseClusterResource", {
    environment: "string",
    networkId: "string",
    hosts: [{
        type: "string",
        zone: "string",
        assignPublicIp: false,
        fqdn: "string",
        shardName: "string",
        subnetId: "string",
    }],
    clickhouse: {
        resources: {
            diskSize: 0,
            diskTypeId: "string",
            resourcePresetId: "string",
        },
        config: {
            backgroundPoolSize: 0,
            backgroundSchedulePoolSize: 0,
            compressions: [{
                method: "string",
                minPartSize: 0,
                minPartSizeRatio: 0,
            }],
            geobaseUri: "string",
            graphiteRollups: [{
                name: "string",
                patterns: [{
                    "function": "string",
                    regexp: "string",
                    retentions: [{
                        age: 0,
                        precision: 0,
                    }],
                }],
            }],
            kafka: {
                saslMechanism: "string",
                saslPassword: "string",
                saslUsername: "string",
                securityProtocol: "string",
            },
            kafkaTopics: [{
                name: "string",
                settings: {
                    saslMechanism: "string",
                    saslPassword: "string",
                    saslUsername: "string",
                    securityProtocol: "string",
                },
            }],
            keepAliveTimeout: 0,
            logLevel: "string",
            markCacheSize: 0,
            maxConcurrentQueries: 0,
            maxConnections: 0,
            maxPartitionSizeToDrop: 0,
            maxTableSizeToDrop: 0,
            mergeTree: {
                maxBytesToMergeAtMinSpaceInPool: 0,
                maxReplicatedMergesInQueue: 0,
                numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: 0,
                partsToDelayInsert: 0,
                partsToThrowInsert: 0,
                replicatedDeduplicationWindow: 0,
                replicatedDeduplicationWindowSeconds: 0,
            },
            metricLogEnabled: false,
            metricLogRetentionSize: 0,
            metricLogRetentionTime: 0,
            partLogRetentionSize: 0,
            partLogRetentionTime: 0,
            queryLogRetentionSize: 0,
            queryLogRetentionTime: 0,
            queryThreadLogEnabled: false,
            queryThreadLogRetentionSize: 0,
            queryThreadLogRetentionTime: 0,
            rabbitmq: {
                password: "string",
                username: "string",
            },
            textLogEnabled: false,
            textLogLevel: "string",
            textLogRetentionSize: 0,
            textLogRetentionTime: 0,
            timezone: "string",
            traceLogEnabled: false,
            traceLogRetentionSize: 0,
            traceLogRetentionTime: 0,
            uncompressedCacheSize: 0,
        },
    },
    databases: [{
        name: "string",
    }],
    mlModels: [{
        name: "string",
        type: "string",
        uri: "string",
    }],
    access: {
        dataLens: false,
        metrika: false,
        serverless: false,
        webSql: false,
    },
    deletionProtection: false,
    description: "string",
    cloudStorage: {
        enabled: false,
    },
    folderId: "string",
    formatSchemas: [{
        name: "string",
        type: "string",
        uri: "string",
    }],
    backupWindowStart: {
        hours: 0,
        minutes: 0,
    },
    labels: {
        string: "string",
    },
    maintenanceWindow: {
        type: "string",
        day: "string",
        hour: 0,
    },
    copySchemaOnNewHosts: false,
    name: "string",
    adminPassword: "string",
    securityGroupIds: ["string"],
    serviceAccountId: "string",
    shardGroups: [{
        name: "string",
        shardNames: ["string"],
        description: "string",
    }],
    sqlDatabaseManagement: false,
    sqlUserManagement: false,
    users: [{
        name: "string",
        password: "string",
        permissions: [{
            databaseName: "string",
        }],
        quotas: [{
            intervalDuration: 0,
            errors: 0,
            executionTime: 0,
            queries: 0,
            readRows: 0,
            resultRows: 0,
        }],
        settings: {
            addHttpCorsHeader: false,
            allowDdl: false,
            compile: false,
            compileExpressions: false,
            connectTimeout: 0,
            countDistinctImplementation: "string",
            distinctOverflowMode: "string",
            distributedAggregationMemoryEfficient: false,
            distributedDdlTaskTimeout: 0,
            distributedProductMode: "string",
            emptyResultForAggregationByEmptySet: false,
            enableHttpCompression: false,
            fallbackToStaleReplicasForDistributedQueries: false,
            forceIndexByDate: false,
            forcePrimaryKey: false,
            groupByOverflowMode: "string",
            groupByTwoLevelThreshold: 0,
            groupByTwoLevelThresholdBytes: 0,
            httpConnectionTimeout: 0,
            httpHeadersProgressInterval: 0,
            httpReceiveTimeout: 0,
            httpSendTimeout: 0,
            inputFormatDefaultsForOmittedFields: false,
            inputFormatValuesInterpretExpressions: false,
            insertQuorum: 0,
            insertQuorumTimeout: 0,
            joinOverflowMode: "string",
            joinUseNulls: false,
            joinedSubqueryRequiresAlias: false,
            lowCardinalityAllowInNativeFormat: false,
            maxAstDepth: 0,
            maxAstElements: 0,
            maxBlockSize: 0,
            maxBytesBeforeExternalGroupBy: 0,
            maxBytesBeforeExternalSort: 0,
            maxBytesInDistinct: 0,
            maxBytesInJoin: 0,
            maxBytesInSet: 0,
            maxBytesToRead: 0,
            maxBytesToSort: 0,
            maxBytesToTransfer: 0,
            maxColumnsToRead: 0,
            maxExecutionTime: 0,
            maxExpandedAstElements: 0,
            maxInsertBlockSize: 0,
            maxMemoryUsage: 0,
            maxMemoryUsageForUser: 0,
            maxNetworkBandwidth: 0,
            maxNetworkBandwidthForUser: 0,
            maxQuerySize: 0,
            maxReplicaDelayForDistributedQueries: 0,
            maxResultBytes: 0,
            maxResultRows: 0,
            maxRowsInDistinct: 0,
            maxRowsInJoin: 0,
            maxRowsInSet: 0,
            maxRowsToGroupBy: 0,
            maxRowsToRead: 0,
            maxRowsToSort: 0,
            maxRowsToTransfer: 0,
            maxTemporaryColumns: 0,
            maxTemporaryNonConstColumns: 0,
            maxThreads: 0,
            mergeTreeMaxBytesToUseCache: 0,
            mergeTreeMaxRowsToUseCache: 0,
            mergeTreeMinBytesForConcurrentRead: 0,
            mergeTreeMinRowsForConcurrentRead: 0,
            minBytesToUseDirectIo: 0,
            minCountToCompile: 0,
            minCountToCompileExpression: 0,
            minExecutionSpeed: 0,
            minExecutionSpeedBytes: 0,
            minInsertBlockSizeBytes: 0,
            minInsertBlockSizeRows: 0,
            outputFormatJsonQuote64bitIntegers: false,
            outputFormatJsonQuoteDenormals: false,
            priority: 0,
            quotaMode: "string",
            readOverflowMode: "string",
            readonly: 0,
            receiveTimeout: 0,
            replicationAlterPartitionsSync: 0,
            resultOverflowMode: "string",
            selectSequentialConsistency: false,
            sendProgressInHttpHeaders: false,
            sendTimeout: 0,
            setOverflowMode: "string",
            skipUnavailableShards: false,
            sortOverflowMode: "string",
            timeoutOverflowMode: "string",
            transferOverflowMode: "string",
            transformNullIn: false,
            useUncompressedCache: false,
        },
    }],
    version: "string",
    zookeeper: {
        resources: {
            diskSize: 0,
            diskTypeId: "string",
            resourcePresetId: "string",
        },
    },
});
Copy
type: yandex:MdbClickhouseCluster
properties:
    access:
        dataLens: false
        metrika: false
        serverless: false
        webSql: false
    adminPassword: string
    backupWindowStart:
        hours: 0
        minutes: 0
    clickhouse:
        config:
            backgroundPoolSize: 0
            backgroundSchedulePoolSize: 0
            compressions:
                - method: string
                  minPartSize: 0
                  minPartSizeRatio: 0
            geobaseUri: string
            graphiteRollups:
                - name: string
                  patterns:
                    - function: string
                      regexp: string
                      retentions:
                        - age: 0
                          precision: 0
            kafka:
                saslMechanism: string
                saslPassword: string
                saslUsername: string
                securityProtocol: string
            kafkaTopics:
                - name: string
                  settings:
                    saslMechanism: string
                    saslPassword: string
                    saslUsername: string
                    securityProtocol: string
            keepAliveTimeout: 0
            logLevel: string
            markCacheSize: 0
            maxConcurrentQueries: 0
            maxConnections: 0
            maxPartitionSizeToDrop: 0
            maxTableSizeToDrop: 0
            mergeTree:
                maxBytesToMergeAtMinSpaceInPool: 0
                maxReplicatedMergesInQueue: 0
                numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge: 0
                partsToDelayInsert: 0
                partsToThrowInsert: 0
                replicatedDeduplicationWindow: 0
                replicatedDeduplicationWindowSeconds: 0
            metricLogEnabled: false
            metricLogRetentionSize: 0
            metricLogRetentionTime: 0
            partLogRetentionSize: 0
            partLogRetentionTime: 0
            queryLogRetentionSize: 0
            queryLogRetentionTime: 0
            queryThreadLogEnabled: false
            queryThreadLogRetentionSize: 0
            queryThreadLogRetentionTime: 0
            rabbitmq:
                password: string
                username: string
            textLogEnabled: false
            textLogLevel: string
            textLogRetentionSize: 0
            textLogRetentionTime: 0
            timezone: string
            traceLogEnabled: false
            traceLogRetentionSize: 0
            traceLogRetentionTime: 0
            uncompressedCacheSize: 0
        resources:
            diskSize: 0
            diskTypeId: string
            resourcePresetId: string
    cloudStorage:
        enabled: false
    copySchemaOnNewHosts: false
    databases:
        - name: string
    deletionProtection: false
    description: string
    environment: string
    folderId: string
    formatSchemas:
        - name: string
          type: string
          uri: string
    hosts:
        - assignPublicIp: false
          fqdn: string
          shardName: string
          subnetId: string
          type: string
          zone: string
    labels:
        string: string
    maintenanceWindow:
        day: string
        hour: 0
        type: string
    mlModels:
        - name: string
          type: string
          uri: string
    name: string
    networkId: string
    securityGroupIds:
        - string
    serviceAccountId: string
    shardGroups:
        - description: string
          name: string
          shardNames:
            - string
    sqlDatabaseManagement: false
    sqlUserManagement: false
    users:
        - name: string
          password: string
          permissions:
            - databaseName: string
          quotas:
            - errors: 0
              executionTime: 0
              intervalDuration: 0
              queries: 0
              readRows: 0
              resultRows: 0
          settings:
            addHttpCorsHeader: false
            allowDdl: false
            compile: false
            compileExpressions: false
            connectTimeout: 0
            countDistinctImplementation: string
            distinctOverflowMode: string
            distributedAggregationMemoryEfficient: false
            distributedDdlTaskTimeout: 0
            distributedProductMode: string
            emptyResultForAggregationByEmptySet: false
            enableHttpCompression: false
            fallbackToStaleReplicasForDistributedQueries: false
            forceIndexByDate: false
            forcePrimaryKey: false
            groupByOverflowMode: string
            groupByTwoLevelThreshold: 0
            groupByTwoLevelThresholdBytes: 0
            httpConnectionTimeout: 0
            httpHeadersProgressInterval: 0
            httpReceiveTimeout: 0
            httpSendTimeout: 0
            inputFormatDefaultsForOmittedFields: false
            inputFormatValuesInterpretExpressions: false
            insertQuorum: 0
            insertQuorumTimeout: 0
            joinOverflowMode: string
            joinUseNulls: false
            joinedSubqueryRequiresAlias: false
            lowCardinalityAllowInNativeFormat: false
            maxAstDepth: 0
            maxAstElements: 0
            maxBlockSize: 0
            maxBytesBeforeExternalGroupBy: 0
            maxBytesBeforeExternalSort: 0
            maxBytesInDistinct: 0
            maxBytesInJoin: 0
            maxBytesInSet: 0
            maxBytesToRead: 0
            maxBytesToSort: 0
            maxBytesToTransfer: 0
            maxColumnsToRead: 0
            maxExecutionTime: 0
            maxExpandedAstElements: 0
            maxInsertBlockSize: 0
            maxMemoryUsage: 0
            maxMemoryUsageForUser: 0
            maxNetworkBandwidth: 0
            maxNetworkBandwidthForUser: 0
            maxQuerySize: 0
            maxReplicaDelayForDistributedQueries: 0
            maxResultBytes: 0
            maxResultRows: 0
            maxRowsInDistinct: 0
            maxRowsInJoin: 0
            maxRowsInSet: 0
            maxRowsToGroupBy: 0
            maxRowsToRead: 0
            maxRowsToSort: 0
            maxRowsToTransfer: 0
            maxTemporaryColumns: 0
            maxTemporaryNonConstColumns: 0
            maxThreads: 0
            mergeTreeMaxBytesToUseCache: 0
            mergeTreeMaxRowsToUseCache: 0
            mergeTreeMinBytesForConcurrentRead: 0
            mergeTreeMinRowsForConcurrentRead: 0
            minBytesToUseDirectIo: 0
            minCountToCompile: 0
            minCountToCompileExpression: 0
            minExecutionSpeed: 0
            minExecutionSpeedBytes: 0
            minInsertBlockSizeBytes: 0
            minInsertBlockSizeRows: 0
            outputFormatJsonQuote64bitIntegers: false
            outputFormatJsonQuoteDenormals: false
            priority: 0
            quotaMode: string
            readOverflowMode: string
            readonly: 0
            receiveTimeout: 0
            replicationAlterPartitionsSync: 0
            resultOverflowMode: string
            selectSequentialConsistency: false
            sendProgressInHttpHeaders: false
            sendTimeout: 0
            setOverflowMode: string
            skipUnavailableShards: false
            sortOverflowMode: string
            timeoutOverflowMode: string
            transferOverflowMode: string
            transformNullIn: false
            useUncompressedCache: false
    version: string
    zookeeper:
        resources:
            diskSize: 0
            diskTypeId: string
            resourcePresetId: string
Copy

MdbClickhouseCluster Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

The MdbClickhouseCluster resource accepts the following input properties:

Clickhouse This property is required. MdbClickhouseClusterClickhouse
Configuration of the ClickHouse subcluster. The structure is documented below.
Environment This property is required. string
Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
Hosts This property is required. List<MdbClickhouseClusterHost>
A host of the ClickHouse cluster. The structure is documented below.
NetworkId This property is required. string
ID of the network, to which the ClickHouse cluster belongs.
Access MdbClickhouseClusterAccess
Access policy to the ClickHouse cluster. The structure is documented below.
AdminPassword string
A password used to authorize as user admin when sql_user_management enabled.
BackupWindowStart MdbClickhouseClusterBackupWindowStart
Time to start the daily backup, in the UTC timezone. The structure is documented below.
CloudStorage MdbClickhouseClusterCloudStorage
CopySchemaOnNewHosts bool
Whether to copy schema on new ClickHouse hosts.
Databases List<MdbClickhouseClusterDatabase>
A database of the ClickHouse cluster. The structure is documented below.
DeletionProtection bool
Inhibits deletion of the cluster. Can be either true or false.
Description string
Description of the ClickHouse cluster.
FolderId string
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
FormatSchemas List<MdbClickhouseClusterFormatSchema>
A set of protobuf or capnproto format schemas. The structure is documented below.
Labels Dictionary<string, string>
A set of key/value label pairs to assign to the ClickHouse cluster.
MaintenanceWindow MdbClickhouseClusterMaintenanceWindow
MlModels List<MdbClickhouseClusterMlModel>
A group of machine learning models. The structure is documented below.
Name string
Name of the ClickHouse cluster. Provided by the client when the cluster is created.
SecurityGroupIds List<string>
A set of ids of security groups assigned to hosts of the cluster.
ServiceAccountId string
ID of the service account used for access to Yandex Object Storage.
ShardGroups List<MdbClickhouseClusterShardGroup>
A group of clickhouse shards. The structure is documented below.
SqlDatabaseManagement bool
Grants admin user database management permission.
SqlUserManagement bool
Enables admin user with user management permission.
Users List<MdbClickhouseClusterUser>
A user of the ClickHouse cluster. The structure is documented below.
Version string
Version of the ClickHouse server software.
Zookeeper MdbClickhouseClusterZookeeper
Configuration of the ZooKeeper subcluster. The structure is documented below.
Clickhouse This property is required. MdbClickhouseClusterClickhouseArgs
Configuration of the ClickHouse subcluster. The structure is documented below.
Environment This property is required. string
Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
Hosts This property is required. []MdbClickhouseClusterHostArgs
A host of the ClickHouse cluster. The structure is documented below.
NetworkId This property is required. string
ID of the network, to which the ClickHouse cluster belongs.
Access MdbClickhouseClusterAccessArgs
Access policy to the ClickHouse cluster. The structure is documented below.
AdminPassword string
A password used to authorize as user admin when sql_user_management enabled.
BackupWindowStart MdbClickhouseClusterBackupWindowStartArgs
Time to start the daily backup, in the UTC timezone. The structure is documented below.
CloudStorage MdbClickhouseClusterCloudStorageArgs
CopySchemaOnNewHosts bool
Whether to copy schema on new ClickHouse hosts.
Databases []MdbClickhouseClusterDatabaseArgs
A database of the ClickHouse cluster. The structure is documented below.
DeletionProtection bool
Inhibits deletion of the cluster. Can be either true or false.
Description string
Description of the ClickHouse cluster.
FolderId string
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
FormatSchemas []MdbClickhouseClusterFormatSchemaArgs
A set of protobuf or capnproto format schemas. The structure is documented below.
Labels map[string]string
A set of key/value label pairs to assign to the ClickHouse cluster.
MaintenanceWindow MdbClickhouseClusterMaintenanceWindowArgs
MlModels []MdbClickhouseClusterMlModelArgs
A group of machine learning models. The structure is documented below.
Name string
Name of the ClickHouse cluster. Provided by the client when the cluster is created.
SecurityGroupIds []string
A set of ids of security groups assigned to hosts of the cluster.
ServiceAccountId string
ID of the service account used for access to Yandex Object Storage.
ShardGroups []MdbClickhouseClusterShardGroupArgs
A group of clickhouse shards. The structure is documented below.
SqlDatabaseManagement bool
Grants admin user database management permission.
SqlUserManagement bool
Enables admin user with user management permission.
Users []MdbClickhouseClusterUserArgs
A user of the ClickHouse cluster. The structure is documented below.
Version string
Version of the ClickHouse server software.
Zookeeper MdbClickhouseClusterZookeeperArgs
Configuration of the ZooKeeper subcluster. The structure is documented below.
clickhouse This property is required. MdbClickhouseClusterClickhouse
Configuration of the ClickHouse subcluster. The structure is documented below.
environment This property is required. String
Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
hosts This property is required. List<MdbClickhouseClusterHost>
A host of the ClickHouse cluster. The structure is documented below.
networkId This property is required. String
ID of the network, to which the ClickHouse cluster belongs.
access MdbClickhouseClusterAccess
Access policy to the ClickHouse cluster. The structure is documented below.
adminPassword String
A password used to authorize as user admin when sql_user_management enabled.
backupWindowStart MdbClickhouseClusterBackupWindowStart
Time to start the daily backup, in the UTC timezone. The structure is documented below.
cloudStorage MdbClickhouseClusterCloudStorage
copySchemaOnNewHosts Boolean
Whether to copy schema on new ClickHouse hosts.
databases List<MdbClickhouseClusterDatabase>
A database of the ClickHouse cluster. The structure is documented below.
deletionProtection Boolean
Inhibits deletion of the cluster. Can be either true or false.
description String
Description of the ClickHouse cluster.
folderId String
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
formatSchemas List<MdbClickhouseClusterFormatSchema>
A set of protobuf or capnproto format schemas. The structure is documented below.
labels Map<String,String>
A set of key/value label pairs to assign to the ClickHouse cluster.
maintenanceWindow MdbClickhouseClusterMaintenanceWindow
mlModels List<MdbClickhouseClusterMlModel>
A group of machine learning models. The structure is documented below.
name String
Name of the ClickHouse cluster. Provided by the client when the cluster is created.
securityGroupIds List<String>
A set of ids of security groups assigned to hosts of the cluster.
serviceAccountId String
ID of the service account used for access to Yandex Object Storage.
shardGroups List<MdbClickhouseClusterShardGroup>
A group of clickhouse shards. The structure is documented below.
sqlDatabaseManagement Boolean
Grants admin user database management permission.
sqlUserManagement Boolean
Enables admin user with user management permission.
users List<MdbClickhouseClusterUser>
A user of the ClickHouse cluster. The structure is documented below.
version String
Version of the ClickHouse server software.
zookeeper MdbClickhouseClusterZookeeper
Configuration of the ZooKeeper subcluster. The structure is documented below.
clickhouse This property is required. MdbClickhouseClusterClickhouse
Configuration of the ClickHouse subcluster. The structure is documented below.
environment This property is required. string
Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
hosts This property is required. MdbClickhouseClusterHost[]
A host of the ClickHouse cluster. The structure is documented below.
networkId This property is required. string
ID of the network, to which the ClickHouse cluster belongs.
access MdbClickhouseClusterAccess
Access policy to the ClickHouse cluster. The structure is documented below.
adminPassword string
A password used to authorize as user admin when sql_user_management enabled.
backupWindowStart MdbClickhouseClusterBackupWindowStart
Time to start the daily backup, in the UTC timezone. The structure is documented below.
cloudStorage MdbClickhouseClusterCloudStorage
copySchemaOnNewHosts boolean
Whether to copy schema on new ClickHouse hosts.
databases MdbClickhouseClusterDatabase[]
A database of the ClickHouse cluster. The structure is documented below.
deletionProtection boolean
Inhibits deletion of the cluster. Can be either true or false.
description string
Description of the ClickHouse cluster.
folderId string
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
formatSchemas MdbClickhouseClusterFormatSchema[]
A set of protobuf or capnproto format schemas. The structure is documented below.
labels {[key: string]: string}
A set of key/value label pairs to assign to the ClickHouse cluster.
maintenanceWindow MdbClickhouseClusterMaintenanceWindow
mlModels MdbClickhouseClusterMlModel[]
A group of machine learning models. The structure is documented below.
name string
Name of the ClickHouse cluster. Provided by the client when the cluster is created.
securityGroupIds string[]
A set of ids of security groups assigned to hosts of the cluster.
serviceAccountId string
ID of the service account used for access to Yandex Object Storage.
shardGroups MdbClickhouseClusterShardGroup[]
A group of clickhouse shards. The structure is documented below.
sqlDatabaseManagement boolean
Grants admin user database management permission.
sqlUserManagement boolean
Enables admin user with user management permission.
users MdbClickhouseClusterUser[]
A user of the ClickHouse cluster. The structure is documented below.
version string
Version of the ClickHouse server software.
zookeeper MdbClickhouseClusterZookeeper
Configuration of the ZooKeeper subcluster. The structure is documented below.
clickhouse This property is required. MdbClickhouseClusterClickhouseArgs
Configuration of the ClickHouse subcluster. The structure is documented below.
environment This property is required. str
Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
hosts This property is required. Sequence[MdbClickhouseClusterHostArgs]
A host of the ClickHouse cluster. The structure is documented below.
network_id This property is required. str
ID of the network, to which the ClickHouse cluster belongs.
access MdbClickhouseClusterAccessArgs
Access policy to the ClickHouse cluster. The structure is documented below.
admin_password str
A password used to authorize as user admin when sql_user_management enabled.
backup_window_start MdbClickhouseClusterBackupWindowStartArgs
Time to start the daily backup, in the UTC timezone. The structure is documented below.
cloud_storage MdbClickhouseClusterCloudStorageArgs
copy_schema_on_new_hosts bool
Whether to copy schema on new ClickHouse hosts.
databases Sequence[MdbClickhouseClusterDatabaseArgs]
A database of the ClickHouse cluster. The structure is documented below.
deletion_protection bool
Inhibits deletion of the cluster. Can be either true or false.
description str
Description of the ClickHouse cluster.
folder_id str
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
format_schemas Sequence[MdbClickhouseClusterFormatSchemaArgs]
A set of protobuf or capnproto format schemas. The structure is documented below.
labels Mapping[str, str]
A set of key/value label pairs to assign to the ClickHouse cluster.
maintenance_window MdbClickhouseClusterMaintenanceWindowArgs
ml_models Sequence[MdbClickhouseClusterMlModelArgs]
A group of machine learning models. The structure is documented below.
name str
Name of the ClickHouse cluster. Provided by the client when the cluster is created.
security_group_ids Sequence[str]
A set of ids of security groups assigned to hosts of the cluster.
service_account_id str
ID of the service account used for access to Yandex Object Storage.
shard_groups Sequence[MdbClickhouseClusterShardGroupArgs]
A group of clickhouse shards. The structure is documented below.
sql_database_management bool
Grants admin user database management permission.
sql_user_management bool
Enables admin user with user management permission.
users Sequence[MdbClickhouseClusterUserArgs]
A user of the ClickHouse cluster. The structure is documented below.
version str
Version of the ClickHouse server software.
zookeeper MdbClickhouseClusterZookeeperArgs
Configuration of the ZooKeeper subcluster. The structure is documented below.
clickhouse This property is required. Property Map
Configuration of the ClickHouse subcluster. The structure is documented below.
environment This property is required. String
Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
hosts This property is required. List<Property Map>
A host of the ClickHouse cluster. The structure is documented below.
networkId This property is required. String
ID of the network, to which the ClickHouse cluster belongs.
access Property Map
Access policy to the ClickHouse cluster. The structure is documented below.
adminPassword String
A password used to authorize as user admin when sql_user_management enabled.
backupWindowStart Property Map
Time to start the daily backup, in the UTC timezone. The structure is documented below.
cloudStorage Property Map
copySchemaOnNewHosts Boolean
Whether to copy schema on new ClickHouse hosts.
databases List<Property Map>
A database of the ClickHouse cluster. The structure is documented below.
deletionProtection Boolean
Inhibits deletion of the cluster. Can be either true or false.
description String
Description of the ClickHouse cluster.
folderId String
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
formatSchemas List<Property Map>
A set of protobuf or capnproto format schemas. The structure is documented below.
labels Map<String>
A set of key/value label pairs to assign to the ClickHouse cluster.
maintenanceWindow Property Map
mlModels List<Property Map>
A group of machine learning models. The structure is documented below.
name String
Name of the ClickHouse cluster. Provided by the client when the cluster is created.
securityGroupIds List<String>
A set of ids of security groups assigned to hosts of the cluster.
serviceAccountId String
ID of the service account used for access to Yandex Object Storage.
shardGroups List<Property Map>
A group of clickhouse shards. The structure is documented below.
sqlDatabaseManagement Boolean
Grants admin user database management permission.
sqlUserManagement Boolean
Enables admin user with user management permission.
users List<Property Map>
A user of the ClickHouse cluster. The structure is documented below.
version String
Version of the ClickHouse server software.
zookeeper Property Map
Configuration of the ZooKeeper subcluster. The structure is documented below.

Outputs

All input properties are implicitly available as output properties. Additionally, the MdbClickhouseCluster resource produces the following output properties:

CreatedAt string
Timestamp of cluster creation.
Health string
Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.
Id string
The provider-assigned unique ID for this managed resource.
Status string
Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
CreatedAt string
Timestamp of cluster creation.
Health string
Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.
Id string
The provider-assigned unique ID for this managed resource.
Status string
Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
createdAt String
Timestamp of cluster creation.
health String
Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.
id String
The provider-assigned unique ID for this managed resource.
status String
Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
createdAt string
Timestamp of cluster creation.
health string
Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.
id string
The provider-assigned unique ID for this managed resource.
status string
Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
created_at str
Timestamp of cluster creation.
health str
Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.
id str
The provider-assigned unique ID for this managed resource.
status str
Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
createdAt String
Timestamp of cluster creation.
health String
Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.
id String
The provider-assigned unique ID for this managed resource.
status String
Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.

Look up Existing MdbClickhouseCluster Resource

Get an existing MdbClickhouseCluster resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: MdbClickhouseClusterState, opts?: CustomResourceOptions): MdbClickhouseCluster
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        access: Optional[MdbClickhouseClusterAccessArgs] = None,
        admin_password: Optional[str] = None,
        backup_window_start: Optional[MdbClickhouseClusterBackupWindowStartArgs] = None,
        clickhouse: Optional[MdbClickhouseClusterClickhouseArgs] = None,
        cloud_storage: Optional[MdbClickhouseClusterCloudStorageArgs] = None,
        copy_schema_on_new_hosts: Optional[bool] = None,
        created_at: Optional[str] = None,
        databases: Optional[Sequence[MdbClickhouseClusterDatabaseArgs]] = None,
        deletion_protection: Optional[bool] = None,
        description: Optional[str] = None,
        environment: Optional[str] = None,
        folder_id: Optional[str] = None,
        format_schemas: Optional[Sequence[MdbClickhouseClusterFormatSchemaArgs]] = None,
        health: Optional[str] = None,
        hosts: Optional[Sequence[MdbClickhouseClusterHostArgs]] = None,
        labels: Optional[Mapping[str, str]] = None,
        maintenance_window: Optional[MdbClickhouseClusterMaintenanceWindowArgs] = None,
        ml_models: Optional[Sequence[MdbClickhouseClusterMlModelArgs]] = None,
        name: Optional[str] = None,
        network_id: Optional[str] = None,
        security_group_ids: Optional[Sequence[str]] = None,
        service_account_id: Optional[str] = None,
        shard_groups: Optional[Sequence[MdbClickhouseClusterShardGroupArgs]] = None,
        sql_database_management: Optional[bool] = None,
        sql_user_management: Optional[bool] = None,
        status: Optional[str] = None,
        users: Optional[Sequence[MdbClickhouseClusterUserArgs]] = None,
        version: Optional[str] = None,
        zookeeper: Optional[MdbClickhouseClusterZookeeperArgs] = None) -> MdbClickhouseCluster
func GetMdbClickhouseCluster(ctx *Context, name string, id IDInput, state *MdbClickhouseClusterState, opts ...ResourceOption) (*MdbClickhouseCluster, error)
public static MdbClickhouseCluster Get(string name, Input<string> id, MdbClickhouseClusterState? state, CustomResourceOptions? opts = null)
public static MdbClickhouseCluster get(String name, Output<String> id, MdbClickhouseClusterState state, CustomResourceOptions options)
resources:  _:    type: yandex:MdbClickhouseCluster    get:      id: ${id}
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
Access MdbClickhouseClusterAccess
Access policy to the ClickHouse cluster. The structure is documented below.
AdminPassword string
A password used to authorize as user admin when sql_user_management enabled.
BackupWindowStart MdbClickhouseClusterBackupWindowStart
Time to start the daily backup, in the UTC timezone. The structure is documented below.
Clickhouse MdbClickhouseClusterClickhouse
Configuration of the ClickHouse subcluster. The structure is documented below.
CloudStorage MdbClickhouseClusterCloudStorage
CopySchemaOnNewHosts bool
Whether to copy schema on new ClickHouse hosts.
CreatedAt string
Timestamp of cluster creation.
Databases List<MdbClickhouseClusterDatabase>
A database of the ClickHouse cluster. The structure is documented below.
DeletionProtection bool
Inhibits deletion of the cluster. Can be either true or false.
Description string
Description of the ClickHouse cluster.
Environment string
Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
FolderId string
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
FormatSchemas List<MdbClickhouseClusterFormatSchema>
A set of protobuf or capnproto format schemas. The structure is documented below.
Health string
Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.
Hosts List<MdbClickhouseClusterHost>
A host of the ClickHouse cluster. The structure is documented below.
Labels Dictionary<string, string>
A set of key/value label pairs to assign to the ClickHouse cluster.
MaintenanceWindow MdbClickhouseClusterMaintenanceWindow
MlModels List<MdbClickhouseClusterMlModel>
A group of machine learning models. The structure is documented below.
Name string
Name of the ClickHouse cluster. Provided by the client when the cluster is created.
NetworkId string
ID of the network, to which the ClickHouse cluster belongs.
SecurityGroupIds List<string>
A set of ids of security groups assigned to hosts of the cluster.
ServiceAccountId string
ID of the service account used for access to Yandex Object Storage.
ShardGroups List<MdbClickhouseClusterShardGroup>
A group of clickhouse shards. The structure is documented below.
SqlDatabaseManagement bool
Grants admin user database management permission.
SqlUserManagement bool
Enables admin user with user management permission.
Status string
Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
Users List<MdbClickhouseClusterUser>
A user of the ClickHouse cluster. The structure is documented below.
Version string
Version of the ClickHouse server software.
Zookeeper MdbClickhouseClusterZookeeper
Configuration of the ZooKeeper subcluster. The structure is documented below.
Access MdbClickhouseClusterAccessArgs
Access policy to the ClickHouse cluster. The structure is documented below.
AdminPassword string
A password used to authorize as user admin when sql_user_management enabled.
BackupWindowStart MdbClickhouseClusterBackupWindowStartArgs
Time to start the daily backup, in the UTC timezone. The structure is documented below.
Clickhouse MdbClickhouseClusterClickhouseArgs
Configuration of the ClickHouse subcluster. The structure is documented below.
CloudStorage MdbClickhouseClusterCloudStorageArgs
CopySchemaOnNewHosts bool
Whether to copy schema on new ClickHouse hosts.
CreatedAt string
Timestamp of cluster creation.
Databases []MdbClickhouseClusterDatabaseArgs
A database of the ClickHouse cluster. The structure is documented below.
DeletionProtection bool
Inhibits deletion of the cluster. Can be either true or false.
Description string
Description of the ClickHouse cluster.
Environment string
Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
FolderId string
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
FormatSchemas []MdbClickhouseClusterFormatSchemaArgs
A set of protobuf or capnproto format schemas. The structure is documented below.
Health string
Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.
Hosts []MdbClickhouseClusterHostArgs
A host of the ClickHouse cluster. The structure is documented below.
Labels map[string]string
A set of key/value label pairs to assign to the ClickHouse cluster.
MaintenanceWindow MdbClickhouseClusterMaintenanceWindowArgs
MlModels []MdbClickhouseClusterMlModelArgs
A group of machine learning models. The structure is documented below.
Name string
Name of the ClickHouse cluster. Provided by the client when the cluster is created.
NetworkId string
ID of the network, to which the ClickHouse cluster belongs.
SecurityGroupIds []string
A set of ids of security groups assigned to hosts of the cluster.
ServiceAccountId string
ID of the service account used for access to Yandex Object Storage.
ShardGroups []MdbClickhouseClusterShardGroupArgs
A group of clickhouse shards. The structure is documented below.
SqlDatabaseManagement bool
Grants admin user database management permission.
SqlUserManagement bool
Enables admin user with user management permission.
Status string
Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
Users []MdbClickhouseClusterUserArgs
A user of the ClickHouse cluster. The structure is documented below.
Version string
Version of the ClickHouse server software.
Zookeeper MdbClickhouseClusterZookeeperArgs
Configuration of the ZooKeeper subcluster. The structure is documented below.
access MdbClickhouseClusterAccess
Access policy to the ClickHouse cluster. The structure is documented below.
adminPassword String
A password used to authorize as user admin when sql_user_management enabled.
backupWindowStart MdbClickhouseClusterBackupWindowStart
Time to start the daily backup, in the UTC timezone. The structure is documented below.
clickhouse MdbClickhouseClusterClickhouse
Configuration of the ClickHouse subcluster. The structure is documented below.
cloudStorage MdbClickhouseClusterCloudStorage
copySchemaOnNewHosts Boolean
Whether to copy schema on new ClickHouse hosts.
createdAt String
Timestamp of cluster creation.
databases List<MdbClickhouseClusterDatabase>
A database of the ClickHouse cluster. The structure is documented below.
deletionProtection Boolean
Inhibits deletion of the cluster. Can be either true or false.
description String
Description of the ClickHouse cluster.
environment String
Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
folderId String
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
formatSchemas List<MdbClickhouseClusterFormatSchema>
A set of protobuf or capnproto format schemas. The structure is documented below.
health String
Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.
hosts List<MdbClickhouseClusterHost>
A host of the ClickHouse cluster. The structure is documented below.
labels Map<String,String>
A set of key/value label pairs to assign to the ClickHouse cluster.
maintenanceWindow MdbClickhouseClusterMaintenanceWindow
mlModels List<MdbClickhouseClusterMlModel>
A group of machine learning models. The structure is documented below.
name String
Name of the ClickHouse cluster. Provided by the client when the cluster is created.
networkId String
ID of the network, to which the ClickHouse cluster belongs.
securityGroupIds List<String>
A set of ids of security groups assigned to hosts of the cluster.
serviceAccountId String
ID of the service account used for access to Yandex Object Storage.
shardGroups List<MdbClickhouseClusterShardGroup>
A group of clickhouse shards. The structure is documented below.
sqlDatabaseManagement Boolean
Grants admin user database management permission.
sqlUserManagement Boolean
Enables admin user with user management permission.
status String
Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
users List<MdbClickhouseClusterUser>
A user of the ClickHouse cluster. The structure is documented below.
version String
Version of the ClickHouse server software.
zookeeper MdbClickhouseClusterZookeeper
Configuration of the ZooKeeper subcluster. The structure is documented below.
access MdbClickhouseClusterAccess
Access policy to the ClickHouse cluster. The structure is documented below.
adminPassword string
A password used to authorize as user admin when sql_user_management enabled.
backupWindowStart MdbClickhouseClusterBackupWindowStart
Time to start the daily backup, in the UTC timezone. The structure is documented below.
clickhouse MdbClickhouseClusterClickhouse
Configuration of the ClickHouse subcluster. The structure is documented below.
cloudStorage MdbClickhouseClusterCloudStorage
copySchemaOnNewHosts boolean
Whether to copy schema on new ClickHouse hosts.
createdAt string
Timestamp of cluster creation.
databases MdbClickhouseClusterDatabase[]
A database of the ClickHouse cluster. The structure is documented below.
deletionProtection boolean
Inhibits deletion of the cluster. Can be either true or false.
description string
Description of the ClickHouse cluster.
environment string
Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
folderId string
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
formatSchemas MdbClickhouseClusterFormatSchema[]
A set of protobuf or capnproto format schemas. The structure is documented below.
health string
Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.
hosts MdbClickhouseClusterHost[]
A host of the ClickHouse cluster. The structure is documented below.
labels {[key: string]: string}
A set of key/value label pairs to assign to the ClickHouse cluster.
maintenanceWindow MdbClickhouseClusterMaintenanceWindow
mlModels MdbClickhouseClusterMlModel[]
A group of machine learning models. The structure is documented below.
name string
Name of the ClickHouse cluster. Provided by the client when the cluster is created.
networkId string
ID of the network, to which the ClickHouse cluster belongs.
securityGroupIds string[]
A set of ids of security groups assigned to hosts of the cluster.
serviceAccountId string
ID of the service account used for access to Yandex Object Storage.
shardGroups MdbClickhouseClusterShardGroup[]
A group of clickhouse shards. The structure is documented below.
sqlDatabaseManagement boolean
Grants admin user database management permission.
sqlUserManagement boolean
Enables admin user with user management permission.
status string
Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
users MdbClickhouseClusterUser[]
A user of the ClickHouse cluster. The structure is documented below.
version string
Version of the ClickHouse server software.
zookeeper MdbClickhouseClusterZookeeper
Configuration of the ZooKeeper subcluster. The structure is documented below.
access MdbClickhouseClusterAccessArgs
Access policy to the ClickHouse cluster. The structure is documented below.
admin_password str
A password used to authorize as user admin when sql_user_management enabled.
backup_window_start MdbClickhouseClusterBackupWindowStartArgs
Time to start the daily backup, in the UTC timezone. The structure is documented below.
clickhouse MdbClickhouseClusterClickhouseArgs
Configuration of the ClickHouse subcluster. The structure is documented below.
cloud_storage MdbClickhouseClusterCloudStorageArgs
copy_schema_on_new_hosts bool
Whether to copy schema on new ClickHouse hosts.
created_at str
Timestamp of cluster creation.
databases Sequence[MdbClickhouseClusterDatabaseArgs]
A database of the ClickHouse cluster. The structure is documented below.
deletion_protection bool
Inhibits deletion of the cluster. Can be either true or false.
description str
Description of the ClickHouse cluster.
environment str
Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
folder_id str
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
format_schemas Sequence[MdbClickhouseClusterFormatSchemaArgs]
A set of protobuf or capnproto format schemas. The structure is documented below.
health str
Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.
hosts Sequence[MdbClickhouseClusterHostArgs]
A host of the ClickHouse cluster. The structure is documented below.
labels Mapping[str, str]
A set of key/value label pairs to assign to the ClickHouse cluster.
maintenance_window MdbClickhouseClusterMaintenanceWindowArgs
ml_models Sequence[MdbClickhouseClusterMlModelArgs]
A group of machine learning models. The structure is documented below.
name str
Name of the ClickHouse cluster. Provided by the client when the cluster is created.
network_id str
ID of the network, to which the ClickHouse cluster belongs.
security_group_ids Sequence[str]
A set of ids of security groups assigned to hosts of the cluster.
service_account_id str
ID of the service account used for access to Yandex Object Storage.
shard_groups Sequence[MdbClickhouseClusterShardGroupArgs]
A group of clickhouse shards. The structure is documented below.
sql_database_management bool
Grants admin user database management permission.
sql_user_management bool
Enables admin user with user management permission.
status str
Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
users Sequence[MdbClickhouseClusterUserArgs]
A user of the ClickHouse cluster. The structure is documented below.
version str
Version of the ClickHouse server software.
zookeeper MdbClickhouseClusterZookeeperArgs
Configuration of the ZooKeeper subcluster. The structure is documented below.
access Property Map
Access policy to the ClickHouse cluster. The structure is documented below.
adminPassword String
A password used to authorize as user admin when sql_user_management enabled.
backupWindowStart Property Map
Time to start the daily backup, in the UTC timezone. The structure is documented below.
clickhouse Property Map
Configuration of the ClickHouse subcluster. The structure is documented below.
cloudStorage Property Map
copySchemaOnNewHosts Boolean
Whether to copy schema on new ClickHouse hosts.
createdAt String
Timestamp of cluster creation.
databases List<Property Map>
A database of the ClickHouse cluster. The structure is documented below.
deletionProtection Boolean
Inhibits deletion of the cluster. Can be either true or false.
description String
Description of the ClickHouse cluster.
environment String
Deployment environment of the ClickHouse cluster. Can be either PRESTABLE or PRODUCTION.
folderId String
The ID of the folder that the resource belongs to. If it is not provided, the default provider folder is used.
formatSchemas List<Property Map>
A set of protobuf or capnproto format schemas. The structure is documented below.
health String
Aggregated health of the cluster. Can be ALIVE, DEGRADED, DEAD or HEALTH_UNKNOWN. For more information see health field of JSON representation in the official documentation.
hosts List<Property Map>
A host of the ClickHouse cluster. The structure is documented below.
labels Map<String>
A set of key/value label pairs to assign to the ClickHouse cluster.
maintenanceWindow Property Map
mlModels List<Property Map>
A group of machine learning models. The structure is documented below.
name String
Name of the ClickHouse cluster. Provided by the client when the cluster is created.
networkId String
ID of the network, to which the ClickHouse cluster belongs.
securityGroupIds List<String>
A set of ids of security groups assigned to hosts of the cluster.
serviceAccountId String
ID of the service account used for access to Yandex Object Storage.
shardGroups List<Property Map>
A group of clickhouse shards. The structure is documented below.
sqlDatabaseManagement Boolean
Grants admin user database management permission.
sqlUserManagement Boolean
Enables admin user with user management permission.
status String
Status of the cluster. Can be CREATING, STARTING, RUNNING, UPDATING, STOPPING, STOPPED, ERROR or STATUS_UNKNOWN. For more information see status field of JSON representation in the official documentation.
users List<Property Map>
A user of the ClickHouse cluster. The structure is documented below.
version String
Version of the ClickHouse server software.
zookeeper Property Map
Configuration of the ZooKeeper subcluster. The structure is documented below.

Supporting Types

MdbClickhouseClusterAccess
, MdbClickhouseClusterAccessArgs

DataLens bool
Allow access for DataLens. Can be either true or false.
Metrika bool
Allow access for Yandex.Metrika. Can be either true or false.
Serverless bool
Allow access for Serverless. Can be either true or false.
WebSql bool
Allow access for Web SQL. Can be either true or false.
DataLens bool
Allow access for DataLens. Can be either true or false.
Metrika bool
Allow access for Yandex.Metrika. Can be either true or false.
Serverless bool
Allow access for Serverless. Can be either true or false.
WebSql bool
Allow access for Web SQL. Can be either true or false.
dataLens Boolean
Allow access for DataLens. Can be either true or false.
metrika Boolean
Allow access for Yandex.Metrika. Can be either true or false.
serverless Boolean
Allow access for Serverless. Can be either true or false.
webSql Boolean
Allow access for Web SQL. Can be either true or false.
dataLens boolean
Allow access for DataLens. Can be either true or false.
metrika boolean
Allow access for Yandex.Metrika. Can be either true or false.
serverless boolean
Allow access for Serverless. Can be either true or false.
webSql boolean
Allow access for Web SQL. Can be either true or false.
data_lens bool
Allow access for DataLens. Can be either true or false.
metrika bool
Allow access for Yandex.Metrika. Can be either true or false.
serverless bool
Allow access for Serverless. Can be either true or false.
web_sql bool
Allow access for Web SQL. Can be either true or false.
dataLens Boolean
Allow access for DataLens. Can be either true or false.
metrika Boolean
Allow access for Yandex.Metrika. Can be either true or false.
serverless Boolean
Allow access for Serverless. Can be either true or false.
webSql Boolean
Allow access for Web SQL. Can be either true or false.

MdbClickhouseClusterBackupWindowStart
, MdbClickhouseClusterBackupWindowStartArgs

Hours int
The hour at which backup will be started.
Minutes int
The minute at which backup will be started.
Hours int
The hour at which backup will be started.
Minutes int
The minute at which backup will be started.
hours Integer
The hour at which backup will be started.
minutes Integer
The minute at which backup will be started.
hours number
The hour at which backup will be started.
minutes number
The minute at which backup will be started.
hours int
The hour at which backup will be started.
minutes int
The minute at which backup will be started.
hours Number
The hour at which backup will be started.
minutes Number
The minute at which backup will be started.

MdbClickhouseClusterClickhouse
, MdbClickhouseClusterClickhouseArgs

Resources This property is required. MdbClickhouseClusterClickhouseResources
Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
Config MdbClickhouseClusterClickhouseConfig
Main ClickHouse cluster configuration.
Resources This property is required. MdbClickhouseClusterClickhouseResources
Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
Config MdbClickhouseClusterClickhouseConfig
Main ClickHouse cluster configuration.
resources This property is required. MdbClickhouseClusterClickhouseResources
Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
config MdbClickhouseClusterClickhouseConfig
Main ClickHouse cluster configuration.
resources This property is required. MdbClickhouseClusterClickhouseResources
Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
config MdbClickhouseClusterClickhouseConfig
Main ClickHouse cluster configuration.
resources This property is required. MdbClickhouseClusterClickhouseResources
Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
config MdbClickhouseClusterClickhouseConfig
Main ClickHouse cluster configuration.
resources This property is required. Property Map
Resources allocated to hosts of the ClickHouse subcluster. The structure is documented below.
config Property Map
Main ClickHouse cluster configuration.

MdbClickhouseClusterClickhouseConfig
, MdbClickhouseClusterClickhouseConfigArgs

BackgroundPoolSize int
BackgroundSchedulePoolSize int
Compressions List<MdbClickhouseClusterClickhouseConfigCompression>
Data compression configuration. The structure is documented below.
GeobaseUri string
GraphiteRollups List<MdbClickhouseClusterClickhouseConfigGraphiteRollup>
Graphite rollup configuration. The structure is documented below.
Kafka MdbClickhouseClusterClickhouseConfigKafka
Kafka connection configuration. The structure is documented below.
KafkaTopics List<MdbClickhouseClusterClickhouseConfigKafkaTopic>
Kafka topic connection configuration. The structure is documented below.
KeepAliveTimeout int
LogLevel string
MarkCacheSize int
MaxConcurrentQueries int
MaxConnections int
MaxPartitionSizeToDrop int
MaxTableSizeToDrop int
MergeTree MdbClickhouseClusterClickhouseConfigMergeTree
MergeTree engine configuration. The structure is documented below.
MetricLogEnabled bool
MetricLogRetentionSize int
MetricLogRetentionTime int
PartLogRetentionSize int
PartLogRetentionTime int
QueryLogRetentionSize int
QueryLogRetentionTime int
QueryThreadLogEnabled bool
QueryThreadLogRetentionSize int
QueryThreadLogRetentionTime int
Rabbitmq MdbClickhouseClusterClickhouseConfigRabbitmq
RabbitMQ connection configuration. The structure is documented below.
TextLogEnabled bool
TextLogLevel string
TextLogRetentionSize int
TextLogRetentionTime int
Timezone string
TraceLogEnabled bool
TraceLogRetentionSize int
TraceLogRetentionTime int
UncompressedCacheSize int
BackgroundPoolSize int
BackgroundSchedulePoolSize int
Compressions []MdbClickhouseClusterClickhouseConfigCompression
Data compression configuration. The structure is documented below.
GeobaseUri string
GraphiteRollups []MdbClickhouseClusterClickhouseConfigGraphiteRollup
Graphite rollup configuration. The structure is documented below.
Kafka MdbClickhouseClusterClickhouseConfigKafka
Kafka connection configuration. The structure is documented below.
KafkaTopics []MdbClickhouseClusterClickhouseConfigKafkaTopic
Kafka topic connection configuration. The structure is documented below.
KeepAliveTimeout int
LogLevel string
MarkCacheSize int
MaxConcurrentQueries int
MaxConnections int
MaxPartitionSizeToDrop int
MaxTableSizeToDrop int
MergeTree MdbClickhouseClusterClickhouseConfigMergeTree
MergeTree engine configuration. The structure is documented below.
MetricLogEnabled bool
MetricLogRetentionSize int
MetricLogRetentionTime int
PartLogRetentionSize int
PartLogRetentionTime int
QueryLogRetentionSize int
QueryLogRetentionTime int
QueryThreadLogEnabled bool
QueryThreadLogRetentionSize int
QueryThreadLogRetentionTime int
Rabbitmq MdbClickhouseClusterClickhouseConfigRabbitmq
RabbitMQ connection configuration. The structure is documented below.
TextLogEnabled bool
TextLogLevel string
TextLogRetentionSize int
TextLogRetentionTime int
Timezone string
TraceLogEnabled bool
TraceLogRetentionSize int
TraceLogRetentionTime int
UncompressedCacheSize int
backgroundPoolSize Integer
backgroundSchedulePoolSize Integer
compressions List<MdbClickhouseClusterClickhouseConfigCompression>
Data compression configuration. The structure is documented below.
geobaseUri String
graphiteRollups List<MdbClickhouseClusterClickhouseConfigGraphiteRollup>
Graphite rollup configuration. The structure is documented below.
kafka MdbClickhouseClusterClickhouseConfigKafka
Kafka connection configuration. The structure is documented below.
kafkaTopics List<MdbClickhouseClusterClickhouseConfigKafkaTopic>
Kafka topic connection configuration. The structure is documented below.
keepAliveTimeout Integer
logLevel String
markCacheSize Integer
maxConcurrentQueries Integer
maxConnections Integer
maxPartitionSizeToDrop Integer
maxTableSizeToDrop Integer
mergeTree MdbClickhouseClusterClickhouseConfigMergeTree
MergeTree engine configuration. The structure is documented below.
metricLogEnabled Boolean
metricLogRetentionSize Integer
metricLogRetentionTime Integer
partLogRetentionSize Integer
partLogRetentionTime Integer
queryLogRetentionSize Integer
queryLogRetentionTime Integer
queryThreadLogEnabled Boolean
queryThreadLogRetentionSize Integer
queryThreadLogRetentionTime Integer
rabbitmq MdbClickhouseClusterClickhouseConfigRabbitmq
RabbitMQ connection configuration. The structure is documented below.
textLogEnabled Boolean
textLogLevel String
textLogRetentionSize Integer
textLogRetentionTime Integer
timezone String
traceLogEnabled Boolean
traceLogRetentionSize Integer
traceLogRetentionTime Integer
uncompressedCacheSize Integer
backgroundPoolSize number
backgroundSchedulePoolSize number
compressions MdbClickhouseClusterClickhouseConfigCompression[]
Data compression configuration. The structure is documented below.
geobaseUri string
graphiteRollups MdbClickhouseClusterClickhouseConfigGraphiteRollup[]
Graphite rollup configuration. The structure is documented below.
kafka MdbClickhouseClusterClickhouseConfigKafka
Kafka connection configuration. The structure is documented below.
kafkaTopics MdbClickhouseClusterClickhouseConfigKafkaTopic[]
Kafka topic connection configuration. The structure is documented below.
keepAliveTimeout number
logLevel string
markCacheSize number
maxConcurrentQueries number
maxConnections number
maxPartitionSizeToDrop number
maxTableSizeToDrop number
mergeTree MdbClickhouseClusterClickhouseConfigMergeTree
MergeTree engine configuration. The structure is documented below.
metricLogEnabled boolean
metricLogRetentionSize number
metricLogRetentionTime number
partLogRetentionSize number
partLogRetentionTime number
queryLogRetentionSize number
queryLogRetentionTime number
queryThreadLogEnabled boolean
queryThreadLogRetentionSize number
queryThreadLogRetentionTime number
rabbitmq MdbClickhouseClusterClickhouseConfigRabbitmq
RabbitMQ connection configuration. The structure is documented below.
textLogEnabled boolean
textLogLevel string
textLogRetentionSize number
textLogRetentionTime number
timezone string
traceLogEnabled boolean
traceLogRetentionSize number
traceLogRetentionTime number
uncompressedCacheSize number
background_pool_size int
background_schedule_pool_size int
compressions Sequence[MdbClickhouseClusterClickhouseConfigCompression]
Data compression configuration. The structure is documented below.
geobase_uri str
graphite_rollups Sequence[MdbClickhouseClusterClickhouseConfigGraphiteRollup]
Graphite rollup configuration. The structure is documented below.
kafka MdbClickhouseClusterClickhouseConfigKafka
Kafka connection configuration. The structure is documented below.
kafka_topics Sequence[MdbClickhouseClusterClickhouseConfigKafkaTopic]
Kafka topic connection configuration. The structure is documented below.
keep_alive_timeout int
log_level str
mark_cache_size int
max_concurrent_queries int
max_connections int
max_partition_size_to_drop int
max_table_size_to_drop int
merge_tree MdbClickhouseClusterClickhouseConfigMergeTree
MergeTree engine configuration. The structure is documented below.
metric_log_enabled bool
metric_log_retention_size int
metric_log_retention_time int
part_log_retention_size int
part_log_retention_time int
query_log_retention_size int
query_log_retention_time int
query_thread_log_enabled bool
query_thread_log_retention_size int
query_thread_log_retention_time int
rabbitmq MdbClickhouseClusterClickhouseConfigRabbitmq
RabbitMQ connection configuration. The structure is documented below.
text_log_enabled bool
text_log_level str
text_log_retention_size int
text_log_retention_time int
timezone str
trace_log_enabled bool
trace_log_retention_size int
trace_log_retention_time int
uncompressed_cache_size int
backgroundPoolSize Number
backgroundSchedulePoolSize Number
compressions List<Property Map>
Data compression configuration. The structure is documented below.
geobaseUri String
graphiteRollups List<Property Map>
Graphite rollup configuration. The structure is documented below.
kafka Property Map
Kafka connection configuration. The structure is documented below.
kafkaTopics List<Property Map>
Kafka topic connection configuration. The structure is documented below.
keepAliveTimeout Number
logLevel String
markCacheSize Number
maxConcurrentQueries Number
maxConnections Number
maxPartitionSizeToDrop Number
maxTableSizeToDrop Number
mergeTree Property Map
MergeTree engine configuration. The structure is documented below.
metricLogEnabled Boolean
metricLogRetentionSize Number
metricLogRetentionTime Number
partLogRetentionSize Number
partLogRetentionTime Number
queryLogRetentionSize Number
queryLogRetentionTime Number
queryThreadLogEnabled Boolean
queryThreadLogRetentionSize Number
queryThreadLogRetentionTime Number
rabbitmq Property Map
RabbitMQ connection configuration. The structure is documented below.
textLogEnabled Boolean
textLogLevel String
textLogRetentionSize Number
textLogRetentionTime Number
timezone String
traceLogEnabled Boolean
traceLogRetentionSize Number
traceLogRetentionTime Number
uncompressedCacheSize Number

MdbClickhouseClusterClickhouseConfigCompression
, MdbClickhouseClusterClickhouseConfigCompressionArgs

Method This property is required. string
Method: Compression method. Two methods are available: LZ4 and zstd.
MinPartSize This property is required. int
Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
MinPartSizeRatio This property is required. double
Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
Method This property is required. string
Method: Compression method. Two methods are available: LZ4 and zstd.
MinPartSize This property is required. int
Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
MinPartSizeRatio This property is required. float64
Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
method This property is required. String
Method: Compression method. Two methods are available: LZ4 and zstd.
minPartSize This property is required. Integer
Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
minPartSizeRatio This property is required. Double
Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
method This property is required. string
Method: Compression method. Two methods are available: LZ4 and zstd.
minPartSize This property is required. number
Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
minPartSizeRatio This property is required. number
Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
method This property is required. str
Method: Compression method. Two methods are available: LZ4 and zstd.
min_part_size This property is required. int
Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
min_part_size_ratio This property is required. float
Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.
method This property is required. String
Method: Compression method. Two methods are available: LZ4 and zstd.
minPartSize This property is required. Number
Min part size: Minimum size (in bytes) of a data part in a table. ClickHouse only applies the rule to tables with data parts greater than or equal to the Min part size value.
minPartSizeRatio This property is required. Number
Min part size ratio: Minimum table part size to total table size ratio. ClickHouse only applies the rule to tables in which this ratio is greater than or equal to the Min part size ratio value.

MdbClickhouseClusterClickhouseConfigGraphiteRollup
, MdbClickhouseClusterClickhouseConfigGraphiteRollupArgs

Name This property is required. string
Graphite rollup configuration name.
Patterns List<MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern>
Set of thinning rules.
Name This property is required. string
Graphite rollup configuration name.
Patterns []MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern
Set of thinning rules.
name This property is required. String
Graphite rollup configuration name.
patterns List<MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern>
Set of thinning rules.
name This property is required. string
Graphite rollup configuration name.
patterns MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern[]
Set of thinning rules.
name This property is required. str
Graphite rollup configuration name.
patterns Sequence[MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern]
Set of thinning rules.
name This property is required. String
Graphite rollup configuration name.
patterns List<Property Map>
Set of thinning rules.

MdbClickhouseClusterClickhouseConfigGraphiteRollupPattern
, MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternArgs

Function This property is required. string
Aggregation function name.
Regexp string
Regular expression that the metric name must match.
Retentions List<MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention>
Retain parameters.
Function This property is required. string
Aggregation function name.
Regexp string
Regular expression that the metric name must match.
Retentions []MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention
Retain parameters.
function This property is required. String
Aggregation function name.
regexp String
Regular expression that the metric name must match.
retentions List<MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention>
Retain parameters.
function This property is required. string
Aggregation function name.
regexp string
Regular expression that the metric name must match.
retentions MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention[]
Retain parameters.
function This property is required. str
Aggregation function name.
regexp str
Regular expression that the metric name must match.
retentions Sequence[MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention]
Retain parameters.
function This property is required. String
Aggregation function name.
regexp String
Regular expression that the metric name must match.
retentions List<Property Map>
Retain parameters.

MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetention
, MdbClickhouseClusterClickhouseConfigGraphiteRollupPatternRetentionArgs

Age This property is required. int
Minimum data age in seconds.
Precision This property is required. int
Accuracy of determining the age of the data in seconds.
Age This property is required. int
Minimum data age in seconds.
Precision This property is required. int
Accuracy of determining the age of the data in seconds.
age This property is required. Integer
Minimum data age in seconds.
precision This property is required. Integer
Accuracy of determining the age of the data in seconds.
age This property is required. number
Minimum data age in seconds.
precision This property is required. number
Accuracy of determining the age of the data in seconds.
age This property is required. int
Minimum data age in seconds.
precision This property is required. int
Accuracy of determining the age of the data in seconds.
age This property is required. Number
Minimum data age in seconds.
precision This property is required. Number
Accuracy of determining the age of the data in seconds.

MdbClickhouseClusterClickhouseConfigKafka
, MdbClickhouseClusterClickhouseConfigKafkaArgs

SaslMechanism string
SASL mechanism used in kafka authentication.
SaslPassword string
User password on kafka server.
SaslUsername string
Username on kafka server.
SecurityProtocol string
Security protocol used to connect to kafka server.
SaslMechanism string
SASL mechanism used in kafka authentication.
SaslPassword string
User password on kafka server.
SaslUsername string
Username on kafka server.
SecurityProtocol string
Security protocol used to connect to kafka server.
saslMechanism String
SASL mechanism used in kafka authentication.
saslPassword String
User password on kafka server.
saslUsername String
Username on kafka server.
securityProtocol String
Security protocol used to connect to kafka server.
saslMechanism string
SASL mechanism used in kafka authentication.
saslPassword string
User password on kafka server.
saslUsername string
Username on kafka server.
securityProtocol string
Security protocol used to connect to kafka server.
sasl_mechanism str
SASL mechanism used in kafka authentication.
sasl_password str
User password on kafka server.
sasl_username str
Username on kafka server.
security_protocol str
Security protocol used to connect to kafka server.
saslMechanism String
SASL mechanism used in kafka authentication.
saslPassword String
User password on kafka server.
saslUsername String
Username on kafka server.
securityProtocol String
Security protocol used to connect to kafka server.

MdbClickhouseClusterClickhouseConfigKafkaTopic
, MdbClickhouseClusterClickhouseConfigKafkaTopicArgs

Name This property is required. string
Graphite rollup configuration name.
Settings MdbClickhouseClusterClickhouseConfigKafkaTopicSettings
Kafka connection settings, same as in the kafka block.
Name This property is required. string
Graphite rollup configuration name.
Settings MdbClickhouseClusterClickhouseConfigKafkaTopicSettings
Kafka connection settings, same as in the kafka block.
name This property is required. String
Graphite rollup configuration name.
settings MdbClickhouseClusterClickhouseConfigKafkaTopicSettings
Kafka connection settings, same as in the kafka block.
name This property is required. string
Graphite rollup configuration name.
settings MdbClickhouseClusterClickhouseConfigKafkaTopicSettings
Kafka connection settings, same as in the kafka block.
name This property is required. str
Graphite rollup configuration name.
settings MdbClickhouseClusterClickhouseConfigKafkaTopicSettings
Kafka connection settings, same as in the kafka block.
name This property is required. String
Graphite rollup configuration name.
settings Property Map
Kafka connection settings, same as in the kafka block.

MdbClickhouseClusterClickhouseConfigKafkaTopicSettings
, MdbClickhouseClusterClickhouseConfigKafkaTopicSettingsArgs

SaslMechanism string
SASL mechanism used in kafka authentication.
SaslPassword string
User password on kafka server.
SaslUsername string
Username on kafka server.
SecurityProtocol string
Security protocol used to connect to kafka server.
SaslMechanism string
SASL mechanism used in kafka authentication.
SaslPassword string
User password on kafka server.
SaslUsername string
Username on kafka server.
SecurityProtocol string
Security protocol used to connect to kafka server.
saslMechanism String
SASL mechanism used in kafka authentication.
saslPassword String
User password on kafka server.
saslUsername String
Username on kafka server.
securityProtocol String
Security protocol used to connect to kafka server.
saslMechanism string
SASL mechanism used in kafka authentication.
saslPassword string
User password on kafka server.
saslUsername string
Username on kafka server.
securityProtocol string
Security protocol used to connect to kafka server.
sasl_mechanism str
SASL mechanism used in kafka authentication.
sasl_password str
User password on kafka server.
sasl_username str
Username on kafka server.
security_protocol str
Security protocol used to connect to kafka server.
saslMechanism String
SASL mechanism used in kafka authentication.
saslPassword String
User password on kafka server.
saslUsername String
Username on kafka server.
securityProtocol String
Security protocol used to connect to kafka server.

MdbClickhouseClusterClickhouseConfigMergeTree
, MdbClickhouseClusterClickhouseConfigMergeTreeArgs

MaxBytesToMergeAtMinSpaceInPool int
Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.
MaxReplicatedMergesInQueue int
Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge int
Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
PartsToDelayInsert int
Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
PartsToThrowInsert int
Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
ReplicatedDeduplicationWindow int
Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
ReplicatedDeduplicationWindowSeconds int
Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
MaxBytesToMergeAtMinSpaceInPool int
Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.
MaxReplicatedMergesInQueue int
Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
NumberOfFreeEntriesInPoolToLowerMaxSizeOfMerge int
Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
PartsToDelayInsert int
Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
PartsToThrowInsert int
Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
ReplicatedDeduplicationWindow int
Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
ReplicatedDeduplicationWindowSeconds int
Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
maxBytesToMergeAtMinSpaceInPool Integer
Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.
maxReplicatedMergesInQueue Integer
Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge Integer
Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
partsToDelayInsert Integer
Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
partsToThrowInsert Integer
Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
replicatedDeduplicationWindow Integer
Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
replicatedDeduplicationWindowSeconds Integer
Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
maxBytesToMergeAtMinSpaceInPool number
Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.
maxReplicatedMergesInQueue number
Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge number
Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
partsToDelayInsert number
Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
partsToThrowInsert number
Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
replicatedDeduplicationWindow number
Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
replicatedDeduplicationWindowSeconds number
Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
max_bytes_to_merge_at_min_space_in_pool int
Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.
max_replicated_merges_in_queue int
Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
number_of_free_entries_in_pool_to_lower_max_size_of_merge int
Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
parts_to_delay_insert int
Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
parts_to_throw_insert int
Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
replicated_deduplication_window int
Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
replicated_deduplication_window_seconds int
Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).
maxBytesToMergeAtMinSpaceInPool Number
Max bytes to merge at min space in pool: Maximum total size of a data part to merge when the number of free threads in the background pool is minimum.
maxReplicatedMergesInQueue Number
Max replicated merges in queue: Maximum number of merge tasks that can be in the ReplicatedMergeTree queue at the same time.
numberOfFreeEntriesInPoolToLowerMaxSizeOfMerge Number
Number of free entries in pool to lower max size of merge: Threshold value of free entries in the pool. If the number of entries in the pool falls below this value, ClickHouse reduces the maximum size of a data part to merge. This helps handle small merges faster, rather than filling the pool with lengthy merges.
partsToDelayInsert Number
Parts to delay insert: Number of active data parts in a table, on exceeding which ClickHouse starts to artificially reduce the rate of inserting data into the table.
partsToThrowInsert Number
Parts to throw insert: Threshold value of active data parts in a table, on exceeding which ClickHouse throws the 'Too many parts ...' exception.
replicatedDeduplicationWindow Number
Replicated deduplication window: Number of recent hash blocks that ZooKeeper will store (the old ones will be deleted).
replicatedDeduplicationWindowSeconds Number
Replicated deduplication window seconds: Time during which ZooKeeper stores the hash blocks (the old ones will be deleted).

MdbClickhouseClusterClickhouseConfigRabbitmq
, MdbClickhouseClusterClickhouseConfigRabbitmqArgs

Password string
RabbitMQ user password.
Username string
RabbitMQ username.
Password string
RabbitMQ user password.
Username string
RabbitMQ username.
password String
RabbitMQ user password.
username String
RabbitMQ username.
password string
RabbitMQ user password.
username string
RabbitMQ username.
password str
RabbitMQ user password.
username str
RabbitMQ username.
password String
RabbitMQ user password.
username String
RabbitMQ username.

MdbClickhouseClusterClickhouseResources
, MdbClickhouseClusterClickhouseResourcesArgs

DiskSize This property is required. int
Volume of the storage available to a ZooKeeper host, in gigabytes.
DiskTypeId This property is required. string
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
ResourcePresetId This property is required. string
DiskSize This property is required. int
Volume of the storage available to a ZooKeeper host, in gigabytes.
DiskTypeId This property is required. string
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
ResourcePresetId This property is required. string
diskSize This property is required. Integer
Volume of the storage available to a ZooKeeper host, in gigabytes.
diskTypeId This property is required. String
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
resourcePresetId This property is required. String
diskSize This property is required. number
Volume of the storage available to a ZooKeeper host, in gigabytes.
diskTypeId This property is required. string
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
resourcePresetId This property is required. string
disk_size This property is required. int
Volume of the storage available to a ZooKeeper host, in gigabytes.
disk_type_id This property is required. str
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
resource_preset_id This property is required. str
diskSize This property is required. Number
Volume of the storage available to a ZooKeeper host, in gigabytes.
diskTypeId This property is required. String
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
resourcePresetId This property is required. String

MdbClickhouseClusterCloudStorage
, MdbClickhouseClusterCloudStorageArgs

Enabled This property is required. bool
Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
Enabled This property is required. bool
Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
enabled This property is required. Boolean
Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
enabled This property is required. boolean
Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
enabled This property is required. bool
Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.
enabled This property is required. Boolean
Whether to use Yandex Object Storage for storing ClickHouse data. Can be either true or false.

MdbClickhouseClusterDatabase
, MdbClickhouseClusterDatabaseArgs

Name This property is required. string
Graphite rollup configuration name.
Name This property is required. string
Graphite rollup configuration name.
name This property is required. String
Graphite rollup configuration name.
name This property is required. string
Graphite rollup configuration name.
name This property is required. str
Graphite rollup configuration name.
name This property is required. String
Graphite rollup configuration name.

MdbClickhouseClusterFormatSchema
, MdbClickhouseClusterFormatSchemaArgs

Name This property is required. string
Graphite rollup configuration name.
Type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
Uri This property is required. string
Model file URL. You can only use models stored in Yandex Object Storage.
Name This property is required. string
Graphite rollup configuration name.
Type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
Uri This property is required. string
Model file URL. You can only use models stored in Yandex Object Storage.
name This property is required. String
Graphite rollup configuration name.
type This property is required. String
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
uri This property is required. String
Model file URL. You can only use models stored in Yandex Object Storage.
name This property is required. string
Graphite rollup configuration name.
type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
uri This property is required. string
Model file URL. You can only use models stored in Yandex Object Storage.
name This property is required. str
Graphite rollup configuration name.
type This property is required. str
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
uri This property is required. str
Model file URL. You can only use models stored in Yandex Object Storage.
name This property is required. String
Graphite rollup configuration name.
type This property is required. String
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
uri This property is required. String
Model file URL. You can only use models stored in Yandex Object Storage.

MdbClickhouseClusterHost
, MdbClickhouseClusterHostArgs

Type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
Zone This property is required. string
The availability zone where the ClickHouse host will be created. For more information see the official documentation.
AssignPublicIp bool
Sets whether the host should get a public IP address on creation. Can be either true or false.
Fqdn string
The fully qualified domain name of the host.
ShardName string
The name of the shard to which the host belongs.
SubnetId string
The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
Type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
Zone This property is required. string
The availability zone where the ClickHouse host will be created. For more information see the official documentation.
AssignPublicIp bool
Sets whether the host should get a public IP address on creation. Can be either true or false.
Fqdn string
The fully qualified domain name of the host.
ShardName string
The name of the shard to which the host belongs.
SubnetId string
The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
type This property is required. String
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
zone This property is required. String
The availability zone where the ClickHouse host will be created. For more information see the official documentation.
assignPublicIp Boolean
Sets whether the host should get a public IP address on creation. Can be either true or false.
fqdn String
The fully qualified domain name of the host.
shardName String
The name of the shard to which the host belongs.
subnetId String
The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
zone This property is required. string
The availability zone where the ClickHouse host will be created. For more information see the official documentation.
assignPublicIp boolean
Sets whether the host should get a public IP address on creation. Can be either true or false.
fqdn string
The fully qualified domain name of the host.
shardName string
The name of the shard to which the host belongs.
subnetId string
The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
type This property is required. str
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
zone This property is required. str
The availability zone where the ClickHouse host will be created. For more information see the official documentation.
assign_public_ip bool
Sets whether the host should get a public IP address on creation. Can be either true or false.
fqdn str
The fully qualified domain name of the host.
shard_name str
The name of the shard to which the host belongs.
subnet_id str
The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.
type This property is required. String
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
zone This property is required. String
The availability zone where the ClickHouse host will be created. For more information see the official documentation.
assignPublicIp Boolean
Sets whether the host should get a public IP address on creation. Can be either true or false.
fqdn String
The fully qualified domain name of the host.
shardName String
The name of the shard to which the host belongs.
subnetId String
The ID of the subnet, to which the host belongs. The subnet must be a part of the network to which the cluster belongs.

MdbClickhouseClusterMaintenanceWindow
, MdbClickhouseClusterMaintenanceWindowArgs

Type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
Day string
Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
Hour int
Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
Type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
Day string
Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
Hour int
Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
type This property is required. String
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
day String
Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
hour Integer
Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
day string
Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
hour number
Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
type This property is required. str
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
day str
Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
hour int
Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.
type This property is required. String
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
day String
Day of week for maintenance window if window type is weekly. Possible values: MON, TUE, WED, THU, FRI, SAT, SUN.
hour Number
Hour of day in UTC time zone (1-24) for maintenance window if window type is weekly.

MdbClickhouseClusterMlModel
, MdbClickhouseClusterMlModelArgs

Name This property is required. string
Graphite rollup configuration name.
Type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
Uri This property is required. string
Model file URL. You can only use models stored in Yandex Object Storage.
Name This property is required. string
Graphite rollup configuration name.
Type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
Uri This property is required. string
Model file URL. You can only use models stored in Yandex Object Storage.
name This property is required. String
Graphite rollup configuration name.
type This property is required. String
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
uri This property is required. String
Model file URL. You can only use models stored in Yandex Object Storage.
name This property is required. string
Graphite rollup configuration name.
type This property is required. string
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
uri This property is required. string
Model file URL. You can only use models stored in Yandex Object Storage.
name This property is required. str
Graphite rollup configuration name.
type This property is required. str
Type of maintenance window. Can be either ANYTIME or WEEKLY. A day and hour of window need to be specified with weekly window.
uri This property is required. str
Model file URL. You can only use models stored in Yandex Object Storage.
name This property is required. String
The name of the ml model.
type This property is required. String
Type of the model.
uri This property is required. String
Model file URL. You can only use models stored in Yandex Object Storage.

MdbClickhouseClusterShardGroup
, MdbClickhouseClusterShardGroupArgs

Name This property is required. string
The name of the shard group.
ShardNames This property is required. List<string>
List of shards names that belong to the shard group.
Description string
Description of the shard group.
Name This property is required. string
The name of the shard group.
ShardNames This property is required. []string
List of shards names that belong to the shard group.
Description string
Description of the shard group.
name This property is required. String
The name of the shard group.
shardNames This property is required. List<String>
List of shards names that belong to the shard group.
description String
Description of the shard group.
name This property is required. string
The name of the shard group.
shardNames This property is required. string[]
List of shards names that belong to the shard group.
description string
Description of the shard group.
name This property is required. str
The name of the shard group.
shard_names This property is required. Sequence[str]
List of shards names that belong to the shard group.
description str
Description of the shard group.
name This property is required. String
The name of the shard group.
shardNames This property is required. List<String>
List of shards names that belong to the shard group.
description String
Description of the shard group.

MdbClickhouseClusterUser
, MdbClickhouseClusterUserArgs

Name This property is required. string
The name of the user.
Password This property is required. string
The password of the user.
Permissions List<MdbClickhouseClusterUserPermission>
Set of permissions granted to the user. The structure is documented below.
Quotas List<MdbClickhouseClusterUserQuota>
Set of user quotas. The structure is documented below.
Settings MdbClickhouseClusterUserSettings
Custom settings for the user. The structure is documented below.
Name This property is required. string
The name of the user.
Password This property is required. string
The password of the user.
Permissions []MdbClickhouseClusterUserPermission
Set of permissions granted to the user. The structure is documented below.
Quotas []MdbClickhouseClusterUserQuota
Set of user quotas. The structure is documented below.
Settings MdbClickhouseClusterUserSettings
Custom settings for the user. The structure is documented below.
name This property is required. String
The name of the user.
password This property is required. String
The password of the user.
permissions List<MdbClickhouseClusterUserPermission>
Set of permissions granted to the user. The structure is documented below.
quotas List<MdbClickhouseClusterUserQuota>
Set of user quotas. The structure is documented below.
settings MdbClickhouseClusterUserSettings
Custom settings for the user. The structure is documented below.
name This property is required. string
The name of the user.
password This property is required. string
The password of the user.
permissions MdbClickhouseClusterUserPermission[]
Set of permissions granted to the user. The structure is documented below.
quotas MdbClickhouseClusterUserQuota[]
Set of user quotas. The structure is documented below.
settings MdbClickhouseClusterUserSettings
Custom settings for the user. The structure is documented below.
name This property is required. str
The name of the user.
password This property is required. str
The password of the user.
permissions Sequence[MdbClickhouseClusterUserPermission]
Set of permissions granted to the user. The structure is documented below.
quotas Sequence[MdbClickhouseClusterUserQuota]
Set of user quotas. The structure is documented below.
settings MdbClickhouseClusterUserSettings
Custom settings for the user. The structure is documented below.
name This property is required. String
The name of the user.
password This property is required. String
The password of the user.
permissions List<Property Map>
Set of permissions granted to the user. The structure is documented below.
quotas List<Property Map>
Set of user quotas. The structure is documented below.
settings Property Map
Custom settings for the user. The structure is documented below.

MdbClickhouseClusterUserPermission
, MdbClickhouseClusterUserPermissionArgs

DatabaseName This property is required. string
The name of the database that the permission grants access to.
DatabaseName This property is required. string
The name of the database that the permission grants access to.
databaseName This property is required. String
The name of the database that the permission grants access to.
databaseName This property is required. string
The name of the database that the permission grants access to.
database_name This property is required. str
The name of the database that the permission grants access to.
databaseName This property is required. String
The name of the database that the permission grants access to.

MdbClickhouseClusterUserQuota
, MdbClickhouseClusterUserQuotaArgs

IntervalDuration This property is required. int
Duration of interval for quota in milliseconds.
Errors int
The number of queries that threw exception.
ExecutionTime int
The total query execution time, in milliseconds (wall time).
Queries int
The total number of queries.
ReadRows int
The total number of source rows read from tables for running the query, on all remote servers.
ResultRows int
The total number of rows given as the result.
IntervalDuration This property is required. int
Duration of interval for quota in milliseconds.
Errors int
The number of queries that threw exception.
ExecutionTime int
The total query execution time, in milliseconds (wall time).
Queries int
The total number of queries.
ReadRows int
The total number of source rows read from tables for running the query, on all remote servers.
ResultRows int
The total number of rows given as the result.
intervalDuration This property is required. Integer
Duration of interval for quota in milliseconds.
errors Integer
The number of queries that threw exception.
executionTime Integer
The total query execution time, in milliseconds (wall time).
queries Integer
The total number of queries.
readRows Integer
The total number of source rows read from tables for running the query, on all remote servers.
resultRows Integer
The total number of rows given as the result.
intervalDuration This property is required. number
Duration of interval for quota in milliseconds.
errors number
The number of queries that threw exception.
executionTime number
The total query execution time, in milliseconds (wall time).
queries number
The total number of queries.
readRows number
The total number of source rows read from tables for running the query, on all remote servers.
resultRows number
The total number of rows given as the result.
interval_duration This property is required. int
Duration of interval for quota in milliseconds.
errors int
The number of queries that threw exception.
execution_time int
The total query execution time, in milliseconds (wall time).
queries int
The total number of queries.
read_rows int
The total number of source rows read from tables for running the query, on all remote servers.
result_rows int
The total number of rows given as the result.
intervalDuration This property is required. Number
Duration of interval for quota in milliseconds.
errors Number
The number of queries that threw exception.
executionTime Number
The total query execution time, in milliseconds (wall time).
queries Number
The total number of queries.
readRows Number
The total number of source rows read from tables for running the query, on all remote servers.
resultRows Number
The total number of rows given as the result.

MdbClickhouseClusterUserSettings
, MdbClickhouseClusterUserSettingsArgs

AddHttpCorsHeader bool
Include CORS headers in HTTP responses.
AllowDdl bool
Allows or denies DDL queries.
Compile bool
Enable compilation of queries.
CompileExpressions bool
Turn on expression compilation.
ConnectTimeout int
Connect timeout in milliseconds on the socket used for communicating with the client.
CountDistinctImplementation string
Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
DistinctOverflowMode string
Sets behaviour on overflow when using DISTINCT. Possible values:
DistributedAggregationMemoryEfficient bool
Determine the behavior of distributed subqueries.
DistributedDdlTaskTimeout int
Timeout for DDL queries, in milliseconds.
DistributedProductMode string
Changes the behaviour of distributed subqueries.
EmptyResultForAggregationByEmptySet bool
Allows returning an empty result.
EnableHttpCompression bool
Enables or disables data compression in the response to an HTTP request.
FallbackToStaleReplicasForDistributedQueries bool
Forces a query to an out-of-date replica if updated data is not available.
ForceIndexByDate bool
Disables query execution if the index can’t be used by date.
ForcePrimaryKey bool
Disables query execution if indexing by the primary key is not possible.
GroupByOverflowMode string
Sets behaviour on overflow while GROUP BY operation. Possible values:
GroupByTwoLevelThreshold int
Sets the threshold of the number of keys, after that the two-level aggregation should be used.
GroupByTwoLevelThresholdBytes int
Sets the threshold of the number of bytes, after that the two-level aggregation should be used.
HttpConnectionTimeout int
Timeout for HTTP connection in milliseconds.
HttpHeadersProgressInterval int
Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.
HttpReceiveTimeout int
Timeout for HTTP connection in milliseconds.
HttpSendTimeout int
Timeout for HTTP connection in milliseconds.
InputFormatDefaultsForOmittedFields bool
When performing INSERT queries, replace omitted input column values with default values of the respective columns.
InputFormatValuesInterpretExpressions bool
Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
InsertQuorum int
Enables the quorum writes.
InsertQuorumTimeout int
Write to a quorum timeout in milliseconds.
JoinOverflowMode string
Sets behaviour on overflow in JOIN. Possible values:
JoinUseNulls bool
Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
JoinedSubqueryRequiresAlias bool
Require aliases for subselects and table functions in FROM that more than one table is present.
LowCardinalityAllowInNativeFormat bool
Allows or restricts using the LowCardinality data type with the Native format.
MaxAstDepth int
Maximum abstract syntax tree depth.
MaxAstElements int
Maximum abstract syntax tree elements.
MaxBlockSize int
A recommendation for what size of the block (in a count of rows) to load from tables.
MaxBytesBeforeExternalGroupBy int
Limit in bytes for using memory for GROUP BY before using swap on disk.
MaxBytesBeforeExternalSort int
This setting is equivalent of the max_bytes_before_external_group_by setting, except for it is for sort operation (ORDER BY), not aggregation.
MaxBytesInDistinct int
Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
MaxBytesInJoin int
Limit on maximum size of the hash table for JOIN, in bytes.
MaxBytesInSet int
Limit on the number of bytes in the set resulting from the execution of the IN section.
MaxBytesToRead int
Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
MaxBytesToSort int
Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
MaxBytesToTransfer int
Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
MaxColumnsToRead int
Limits the maximum number of columns that can be read from a table in a single query.
MaxExecutionTime int
Limits the maximum query execution time in milliseconds.
MaxExpandedAstElements int
Maximum abstract syntax tree depth after expansion of aliases.
MaxInsertBlockSize int
The size of blocks (in a count of rows) to form for insertion into a table.
MaxMemoryUsage int
Limits the maximum memory usage (in bytes) for processing queries on a single server.
MaxMemoryUsageForUser int
Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.
MaxNetworkBandwidth int
Limits the speed of the data exchange over the network in bytes per second.
MaxNetworkBandwidthForUser int
Limits the speed of the data exchange over the network in bytes per second.
MaxQuerySize int
The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
MaxReplicaDelayForDistributedQueries int
Disables lagging replicas for distributed queries.
MaxResultBytes int
Limits the number of bytes in the result.
MaxResultRows int
Limits the number of rows in the result.
MaxRowsInDistinct int
Limits the maximum number of different rows when using DISTINCT.
MaxRowsInJoin int
Limit on maximum size of the hash table for JOIN, in rows.
MaxRowsInSet int
Limit on the number of rows in the set resulting from the execution of the IN section.
MaxRowsToGroupBy int
Limits the maximum number of unique keys received from aggregation function.
MaxRowsToRead int
Limits the maximum number of rows that can be read from a table when running a query.
MaxRowsToSort int
Limits the maximum number of rows that can be read from a table for sorting.
MaxRowsToTransfer int
Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
MaxTemporaryColumns int
Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
MaxTemporaryNonConstColumns int
Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
MaxThreads int
The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
MergeTreeMaxBytesToUseCache int
If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
MergeTreeMaxRowsToUseCache int
If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
MergeTreeMinBytesForConcurrentRead int
If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
MergeTreeMinRowsForConcurrentRead int
If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.
MinBytesToUseDirectIo int
The minimum data volume required for using direct I/O access to the storage disk.
MinCountToCompile int
How many times to potentially use a compiled chunk of code before running compilation.
MinCountToCompileExpression int
A query waits for expression compilation process to complete prior to continuing execution.
MinExecutionSpeed int
Minimal execution speed in rows per second.
MinExecutionSpeedBytes int
Minimal execution speed in bytes per second.
MinInsertBlockSizeBytes int
Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
MinInsertBlockSizeRows int
Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
OutputFormatJsonQuote64bitIntegers bool
If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
OutputFormatJsonQuoteDenormals bool
Enables +nan, -nan, +inf, -inf outputs in JSON output format.
Priority int
Query priority.
QuotaMode string
Quota accounting mode.
ReadOverflowMode string
Sets behaviour on overflow while read. Possible values:
Readonly int
Restricts permissions for reading data, write data and change settings queries.
ReceiveTimeout int
Receive timeout in milliseconds on the socket used for communicating with the client.
ReplicationAlterPartitionsSync int
For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
ResultOverflowMode string
Sets behaviour on overflow in result. Possible values:
SelectSequentialConsistency bool
Enables or disables sequential consistency for SELECT queries.
SendProgressInHttpHeaders bool
Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
SendTimeout int
Send timeout in milliseconds on the socket used for communicating with the client.
SetOverflowMode string
Sets behaviour on overflow in the set resulting. Possible values:
SkipUnavailableShards bool
Enables or disables silently skipping of unavailable shards.
SortOverflowMode string
Sets behaviour on overflow while sort. Possible values:
TimeoutOverflowMode string
Sets behaviour on overflow. Possible values:
TransferOverflowMode string
Sets behaviour on overflow. Possible values:
TransformNullIn bool
Enables equality of NULL values for IN operator.
UseUncompressedCache bool
Whether to use a cache of uncompressed blocks.
AddHttpCorsHeader bool
Include CORS headers in HTTP responses.
AllowDdl bool
Allows or denies DDL queries.
Compile bool
Enable compilation of queries.
CompileExpressions bool
Turn on expression compilation.
ConnectTimeout int
Connect timeout in milliseconds on the socket used for communicating with the client.
CountDistinctImplementation string
Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
DistinctOverflowMode string
Sets behaviour on overflow when using DISTINCT. Possible values:
DistributedAggregationMemoryEfficient bool
Determine the behavior of distributed subqueries.
DistributedDdlTaskTimeout int
Timeout for DDL queries, in milliseconds.
DistributedProductMode string
Changes the behaviour of distributed subqueries.
EmptyResultForAggregationByEmptySet bool
Allows returning an empty result.
EnableHttpCompression bool
Enables or disables data compression in the response to an HTTP request.
FallbackToStaleReplicasForDistributedQueries bool
Forces a query to an out-of-date replica if updated data is not available.
ForceIndexByDate bool
Disables query execution if the index can’t be used by date.
ForcePrimaryKey bool
Disables query execution if indexing by the primary key is not possible.
GroupByOverflowMode string
Sets behaviour on overflow while GROUP BY operation. Possible values:
GroupByTwoLevelThreshold int
Sets the threshold of the number of keys, after that the two-level aggregation should be used.
GroupByTwoLevelThresholdBytes int
Sets the threshold of the number of bytes, after that the two-level aggregation should be used.
HttpConnectionTimeout int
Timeout for HTTP connection in milliseconds.
HttpHeadersProgressInterval int
Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.
HttpReceiveTimeout int
Timeout for HTTP connection in milliseconds.
HttpSendTimeout int
Timeout for HTTP connection in milliseconds.
InputFormatDefaultsForOmittedFields bool
When performing INSERT queries, replace omitted input column values with default values of the respective columns.
InputFormatValuesInterpretExpressions bool
Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
InsertQuorum int
Enables the quorum writes.
InsertQuorumTimeout int
Write to a quorum timeout in milliseconds.
JoinOverflowMode string
Sets behaviour on overflow in JOIN. Possible values:
JoinUseNulls bool
Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
JoinedSubqueryRequiresAlias bool
Require aliases for subselects and table functions in FROM that more than one table is present.
LowCardinalityAllowInNativeFormat bool
Allows or restricts using the LowCardinality data type with the Native format.
MaxAstDepth int
Maximum abstract syntax tree depth.
MaxAstElements int
Maximum abstract syntax tree elements.
MaxBlockSize int
A recommendation for what size of the block (in a count of rows) to load from tables.
MaxBytesBeforeExternalGroupBy int
Limit in bytes for using memory for GROUP BY before using swap on disk.
MaxBytesBeforeExternalSort int
This setting is equivalent of the max_bytes_before_external_group_by setting, except for it is for sort operation (ORDER BY), not aggregation.
MaxBytesInDistinct int
Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
MaxBytesInJoin int
Limit on maximum size of the hash table for JOIN, in bytes.
MaxBytesInSet int
Limit on the number of bytes in the set resulting from the execution of the IN section.
MaxBytesToRead int
Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
MaxBytesToSort int
Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
MaxBytesToTransfer int
Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
MaxColumnsToRead int
Limits the maximum number of columns that can be read from a table in a single query.
MaxExecutionTime int
Limits the maximum query execution time in milliseconds.
MaxExpandedAstElements int
Maximum abstract syntax tree depth after expansion of aliases.
MaxInsertBlockSize int
The size of blocks (in a count of rows) to form for insertion into a table.
MaxMemoryUsage int
Limits the maximum memory usage (in bytes) for processing queries on a single server.
MaxMemoryUsageForUser int
Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.
MaxNetworkBandwidth int
Limits the speed of the data exchange over the network in bytes per second.
MaxNetworkBandwidthForUser int
Limits the speed of the data exchange over the network in bytes per second.
MaxQuerySize int
The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
MaxReplicaDelayForDistributedQueries int
Disables lagging replicas for distributed queries.
MaxResultBytes int
Limits the number of bytes in the result.
MaxResultRows int
Limits the number of rows in the result.
MaxRowsInDistinct int
Limits the maximum number of different rows when using DISTINCT.
MaxRowsInJoin int
Limit on maximum size of the hash table for JOIN, in rows.
MaxRowsInSet int
Limit on the number of rows in the set resulting from the execution of the IN section.
MaxRowsToGroupBy int
Limits the maximum number of unique keys received from aggregation function.
MaxRowsToRead int
Limits the maximum number of rows that can be read from a table when running a query.
MaxRowsToSort int
Limits the maximum number of rows that can be read from a table for sorting.
MaxRowsToTransfer int
Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
MaxTemporaryColumns int
Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
MaxTemporaryNonConstColumns int
Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
MaxThreads int
The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
MergeTreeMaxBytesToUseCache int
If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
MergeTreeMaxRowsToUseCache int
If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
MergeTreeMinBytesForConcurrentRead int
If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
MergeTreeMinRowsForConcurrentRead int
If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.
MinBytesToUseDirectIo int
The minimum data volume required for using direct I/O access to the storage disk.
MinCountToCompile int
How many times to potentially use a compiled chunk of code before running compilation.
MinCountToCompileExpression int
A query waits for expression compilation process to complete prior to continuing execution.
MinExecutionSpeed int
Minimal execution speed in rows per second.
MinExecutionSpeedBytes int
Minimal execution speed in bytes per second.
MinInsertBlockSizeBytes int
Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
MinInsertBlockSizeRows int
Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
OutputFormatJsonQuote64bitIntegers bool
If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
OutputFormatJsonQuoteDenormals bool
Enables +nan, -nan, +inf, -inf outputs in JSON output format.
Priority int
Query priority.
QuotaMode string
Quota accounting mode.
ReadOverflowMode string
Sets behaviour on overflow while read. Possible values:
Readonly int
Restricts permissions for reading data, write data and change settings queries.
ReceiveTimeout int
Receive timeout in milliseconds on the socket used for communicating with the client.
ReplicationAlterPartitionsSync int
For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
ResultOverflowMode string
Sets behaviour on overflow in result. Possible values:
SelectSequentialConsistency bool
Enables or disables sequential consistency for SELECT queries.
SendProgressInHttpHeaders bool
Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
SendTimeout int
Send timeout in milliseconds on the socket used for communicating with the client.
SetOverflowMode string
Sets behaviour on overflow in the set resulting. Possible values:
SkipUnavailableShards bool
Enables or disables silently skipping of unavailable shards.
SortOverflowMode string
Sets behaviour on overflow while sort. Possible values:
TimeoutOverflowMode string
Sets behaviour on overflow. Possible values:
TransferOverflowMode string
Sets behaviour on overflow. Possible values:
TransformNullIn bool
Enables equality of NULL values for IN operator.
UseUncompressedCache bool
Whether to use a cache of uncompressed blocks.
addHttpCorsHeader Boolean
Include CORS headers in HTTP responses.
allowDdl Boolean
Allows or denies DDL queries.
compile Boolean
Enable compilation of queries.
compileExpressions Boolean
Turn on expression compilation.
connectTimeout Integer
Connect timeout in milliseconds on the socket used for communicating with the client.
countDistinctImplementation String
Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
distinctOverflowMode String
Sets behaviour on overflow when using DISTINCT. Possible values:
distributedAggregationMemoryEfficient Boolean
Determine the behavior of distributed subqueries.
distributedDdlTaskTimeout Integer
Timeout for DDL queries, in milliseconds.
distributedProductMode String
Changes the behaviour of distributed subqueries.
emptyResultForAggregationByEmptySet Boolean
Allows returning an empty result.
enableHttpCompression Boolean
Enables or disables data compression in the response to an HTTP request.
fallbackToStaleReplicasForDistributedQueries Boolean
Forces a query to an out-of-date replica if updated data is not available.
forceIndexByDate Boolean
Disables query execution if the index can’t be used by date.
forcePrimaryKey Boolean
Disables query execution if indexing by the primary key is not possible.
groupByOverflowMode String
Sets behaviour on overflow while GROUP BY operation. Possible values:
groupByTwoLevelThreshold Integer
Sets the threshold of the number of keys, after that the two-level aggregation should be used.
groupByTwoLevelThresholdBytes Integer
Sets the threshold of the number of bytes, after that the two-level aggregation should be used.
httpConnectionTimeout Integer
Timeout for HTTP connection in milliseconds.
httpHeadersProgressInterval Integer
Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.
httpReceiveTimeout Integer
Timeout for HTTP connection in milliseconds.
httpSendTimeout Integer
Timeout for HTTP connection in milliseconds.
inputFormatDefaultsForOmittedFields Boolean
When performing INSERT queries, replace omitted input column values with default values of the respective columns.
inputFormatValuesInterpretExpressions Boolean
Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
insertQuorum Integer
Enables the quorum writes.
insertQuorumTimeout Integer
Write to a quorum timeout in milliseconds.
joinOverflowMode String
Sets behaviour on overflow in JOIN. Possible values:
joinUseNulls Boolean
Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
joinedSubqueryRequiresAlias Boolean
Require aliases for subselects and table functions in FROM that more than one table is present.
lowCardinalityAllowInNativeFormat Boolean
Allows or restricts using the LowCardinality data type with the Native format.
maxAstDepth Integer
Maximum abstract syntax tree depth.
maxAstElements Integer
Maximum abstract syntax tree elements.
maxBlockSize Integer
A recommendation for what size of the block (in a count of rows) to load from tables.
maxBytesBeforeExternalGroupBy Integer
Limit in bytes for using memory for GROUP BY before using swap on disk.
maxBytesBeforeExternalSort Integer
This setting is equivalent of the max_bytes_before_external_group_by setting, except for it is for sort operation (ORDER BY), not aggregation.
maxBytesInDistinct Integer
Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
maxBytesInJoin Integer
Limit on maximum size of the hash table for JOIN, in bytes.
maxBytesInSet Integer
Limit on the number of bytes in the set resulting from the execution of the IN section.
maxBytesToRead Integer
Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
maxBytesToSort Integer
Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
maxBytesToTransfer Integer
Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
maxColumnsToRead Integer
Limits the maximum number of columns that can be read from a table in a single query.
maxExecutionTime Integer
Limits the maximum query execution time in milliseconds.
maxExpandedAstElements Integer
Maximum abstract syntax tree depth after expansion of aliases.
maxInsertBlockSize Integer
The size of blocks (in a count of rows) to form for insertion into a table.
maxMemoryUsage Integer
Limits the maximum memory usage (in bytes) for processing queries on a single server.
maxMemoryUsageForUser Integer
Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.
maxNetworkBandwidth Integer
Limits the speed of the data exchange over the network in bytes per second.
maxNetworkBandwidthForUser Integer
Limits the speed of the data exchange over the network in bytes per second.
maxQuerySize Integer
The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
maxReplicaDelayForDistributedQueries Integer
Disables lagging replicas for distributed queries.
maxResultBytes Integer
Limits the number of bytes in the result.
maxResultRows Integer
Limits the number of rows in the result.
maxRowsInDistinct Integer
Limits the maximum number of different rows when using DISTINCT.
maxRowsInJoin Integer
Limit on maximum size of the hash table for JOIN, in rows.
maxRowsInSet Integer
Limit on the number of rows in the set resulting from the execution of the IN section.
maxRowsToGroupBy Integer
Limits the maximum number of unique keys received from aggregation function.
maxRowsToRead Integer
Limits the maximum number of rows that can be read from a table when running a query.
maxRowsToSort Integer
Limits the maximum number of rows that can be read from a table for sorting.
maxRowsToTransfer Integer
Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
maxTemporaryColumns Integer
Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
maxTemporaryNonConstColumns Integer
Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
maxThreads Integer
The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
mergeTreeMaxBytesToUseCache Integer
If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
mergeTreeMaxRowsToUseCache Integer
If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
mergeTreeMinBytesForConcurrentRead Integer
If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
mergeTreeMinRowsForConcurrentRead Integer
If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.
minBytesToUseDirectIo Integer
The minimum data volume required for using direct I/O access to the storage disk.
minCountToCompile Integer
How many times to potentially use a compiled chunk of code before running compilation.
minCountToCompileExpression Integer
A query waits for expression compilation process to complete prior to continuing execution.
minExecutionSpeed Integer
Minimal execution speed in rows per second.
minExecutionSpeedBytes Integer
Minimal execution speed in bytes per second.
minInsertBlockSizeBytes Integer
Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
minInsertBlockSizeRows Integer
Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
outputFormatJsonQuote64bitIntegers Boolean
If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
outputFormatJsonQuoteDenormals Boolean
Enables +nan, -nan, +inf, -inf outputs in JSON output format.
priority Integer
Query priority.
quotaMode String
Quota accounting mode.
readOverflowMode String
Sets behaviour on overflow while read. Possible values:
readonly Integer
Restricts permissions for reading data, writing data, and changing settings queries.
receiveTimeout Integer
Receive timeout in milliseconds on the socket used for communicating with the client.
replicationAlterPartitionsSync Integer
For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
resultOverflowMode String
Sets behaviour on overflow in result. Possible values:
selectSequentialConsistency Boolean
Enables or disables sequential consistency for SELECT queries.
sendProgressInHttpHeaders Boolean
Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
sendTimeout Integer
Send timeout in milliseconds on the socket used for communicating with the client.
setOverflowMode String
Sets behaviour on overflow in the resulting set. Possible values:
skipUnavailableShards Boolean
Enables or disables silently skipping of unavailable shards.
sortOverflowMode String
Sets behaviour on overflow while sort. Possible values:
timeoutOverflowMode String
Sets behaviour on overflow. Possible values:
transferOverflowMode String
Sets behaviour on overflow. Possible values:
transformNullIn Boolean
Enables equality of NULL values for IN operator.
useUncompressedCache Boolean
Whether to use a cache of uncompressed blocks.
addHttpCorsHeader boolean
Include CORS headers in HTTP responses.
allowDdl boolean
Allows or denies DDL queries.
compile boolean
Enable compilation of queries.
compileExpressions boolean
Turn on expression compilation.
connectTimeout number
Connect timeout in milliseconds on the socket used for communicating with the client.
countDistinctImplementation string
Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
distinctOverflowMode string
Sets behaviour on overflow when using DISTINCT. Possible values:
distributedAggregationMemoryEfficient boolean
Determine the behavior of distributed subqueries.
distributedDdlTaskTimeout number
Timeout for DDL queries, in milliseconds.
distributedProductMode string
Changes the behaviour of distributed subqueries.
emptyResultForAggregationByEmptySet boolean
Allows returning an empty result.
enableHttpCompression boolean
Enables or disables data compression in the response to an HTTP request.
fallbackToStaleReplicasForDistributedQueries boolean
Forces a query to an out-of-date replica if updated data is not available.
forceIndexByDate boolean
Disables query execution if the index can’t be used by date.
forcePrimaryKey boolean
Disables query execution if indexing by the primary key is not possible.
groupByOverflowMode string
Sets behaviour on overflow while GROUP BY operation. Possible values:
groupByTwoLevelThreshold number
Sets the threshold of the number of keys, after that the two-level aggregation should be used.
groupByTwoLevelThresholdBytes number
Sets the threshold of the number of bytes, after that the two-level aggregation should be used.
httpConnectionTimeout number
Timeout for HTTP connection in milliseconds.
httpHeadersProgressInterval number
Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.
httpReceiveTimeout number
Timeout for HTTP connection in milliseconds.
httpSendTimeout number
Timeout for HTTP connection in milliseconds.
inputFormatDefaultsForOmittedFields boolean
When performing INSERT queries, replace omitted input column values with default values of the respective columns.
inputFormatValuesInterpretExpressions boolean
Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
insertQuorum number
Enables the quorum writes.
insertQuorumTimeout number
Write to a quorum timeout in milliseconds.
joinOverflowMode string
Sets behaviour on overflow in JOIN. Possible values:
joinUseNulls boolean
Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
joinedSubqueryRequiresAlias boolean
Require aliases for subselects and table functions in FROM when more than one table is present.
lowCardinalityAllowInNativeFormat boolean
Allows or restricts using the LowCardinality data type with the Native format.
maxAstDepth number
Maximum abstract syntax tree depth.
maxAstElements number
Maximum abstract syntax tree elements.
maxBlockSize number
A recommendation for what size of the block (in a count of rows) to load from tables.
maxBytesBeforeExternalGroupBy number
Limit in bytes for using memory for GROUP BY before using swap on disk.
maxBytesBeforeExternalSort number
This setting is equivalent to the max_bytes_before_external_group_by setting, except that it applies to the sort operation (ORDER BY), not aggregation.
maxBytesInDistinct number
Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
maxBytesInJoin number
Limit on maximum size of the hash table for JOIN, in bytes.
maxBytesInSet number
Limit on the number of bytes in the set resulting from the execution of the IN section.
maxBytesToRead number
Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
maxBytesToSort number
Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
maxBytesToTransfer number
Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
maxColumnsToRead number
Limits the maximum number of columns that can be read from a table in a single query.
maxExecutionTime number
Limits the maximum query execution time in milliseconds.
maxExpandedAstElements number
Maximum abstract syntax tree elements after expansion of aliases.
maxInsertBlockSize number
The size of blocks (in a count of rows) to form for insertion into a table.
maxMemoryUsage number
Limits the maximum memory usage (in bytes) for processing queries on a single server.
maxMemoryUsageForUser number
Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.
maxNetworkBandwidth number
Limits the speed of the data exchange over the network in bytes per second.
maxNetworkBandwidthForUser number
Limits the speed of the data exchange over the network in bytes per second.
maxQuerySize number
The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
maxReplicaDelayForDistributedQueries number
Disables lagging replicas for distributed queries.
maxResultBytes number
Limits the number of bytes in the result.
maxResultRows number
Limits the number of rows in the result.
maxRowsInDistinct number
Limits the maximum number of different rows when using DISTINCT.
maxRowsInJoin number
Limit on maximum size of the hash table for JOIN, in rows.
maxRowsInSet number
Limit on the number of rows in the set resulting from the execution of the IN section.
maxRowsToGroupBy number
Limits the maximum number of unique keys received from aggregation function.
maxRowsToRead number
Limits the maximum number of rows that can be read from a table when running a query.
maxRowsToSort number
Limits the maximum number of rows that can be read from a table for sorting.
maxRowsToTransfer number
Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
maxTemporaryColumns number
Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
maxTemporaryNonConstColumns number
Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
maxThreads number
The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
mergeTreeMaxBytesToUseCache number
If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
mergeTreeMaxRowsToUseCache number
If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
mergeTreeMinBytesForConcurrentRead number
If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
mergeTreeMinRowsForConcurrentRead number
If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.
minBytesToUseDirectIo number
The minimum data volume required for using direct I/O access to the storage disk.
minCountToCompile number
How many times to potentially use a compiled chunk of code before running compilation.
minCountToCompileExpression number
A query waits for expression compilation process to complete prior to continuing execution.
minExecutionSpeed number
Minimal execution speed in rows per second.
minExecutionSpeedBytes number
Minimal execution speed in bytes per second.
minInsertBlockSizeBytes number
Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
minInsertBlockSizeRows number
Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
outputFormatJsonQuote64bitIntegers boolean
If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
outputFormatJsonQuoteDenormals boolean
Enables +nan, -nan, +inf, -inf outputs in JSON output format.
priority number
Query priority.
quotaMode string
Quota accounting mode.
readOverflowMode string
Sets behaviour on overflow while read. Possible values:
readonly number
Restricts permissions for reading data, writing data, and changing settings queries.
receiveTimeout number
Receive timeout in milliseconds on the socket used for communicating with the client.
replicationAlterPartitionsSync number
For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
resultOverflowMode string
Sets behaviour on overflow in result. Possible values:
selectSequentialConsistency boolean
Enables or disables sequential consistency for SELECT queries.
sendProgressInHttpHeaders boolean
Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
sendTimeout number
Send timeout in milliseconds on the socket used for communicating with the client.
setOverflowMode string
Sets behaviour on overflow in the resulting set. Possible values:
skipUnavailableShards boolean
Enables or disables silently skipping of unavailable shards.
sortOverflowMode string
Sets behaviour on overflow while sort. Possible values:
timeoutOverflowMode string
Sets behaviour on overflow. Possible values:
transferOverflowMode string
Sets behaviour on overflow. Possible values:
transformNullIn boolean
Enables equality of NULL values for IN operator.
useUncompressedCache boolean
Whether to use a cache of uncompressed blocks.
add_http_cors_header bool
Include CORS headers in HTTP responses.
allow_ddl bool
Allows or denies DDL queries.
compile bool
Enable compilation of queries.
compile_expressions bool
Turn on expression compilation.
connect_timeout int
Connect timeout in milliseconds on the socket used for communicating with the client.
count_distinct_implementation str
Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
distinct_overflow_mode str
Sets behaviour on overflow when using DISTINCT. Possible values:
distributed_aggregation_memory_efficient bool
Determine the behavior of distributed subqueries.
distributed_ddl_task_timeout int
Timeout for DDL queries, in milliseconds.
distributed_product_mode str
Changes the behaviour of distributed subqueries.
empty_result_for_aggregation_by_empty_set bool
Allows returning an empty result.
enable_http_compression bool
Enables or disables data compression in the response to an HTTP request.
fallback_to_stale_replicas_for_distributed_queries bool
Forces a query to an out-of-date replica if updated data is not available.
force_index_by_date bool
Disables query execution if the index can’t be used by date.
force_primary_key bool
Disables query execution if indexing by the primary key is not possible.
group_by_overflow_mode str
Sets behaviour on overflow while GROUP BY operation. Possible values:
group_by_two_level_threshold int
Sets the threshold of the number of keys, after that the two-level aggregation should be used.
group_by_two_level_threshold_bytes int
Sets the threshold of the number of bytes, after that the two-level aggregation should be used.
http_connection_timeout int
Timeout for HTTP connection in milliseconds.
http_headers_progress_interval int
Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.
http_receive_timeout int
Timeout for HTTP connection in milliseconds.
http_send_timeout int
Timeout for HTTP connection in milliseconds.
input_format_defaults_for_omitted_fields bool
When performing INSERT queries, replace omitted input column values with default values of the respective columns.
input_format_values_interpret_expressions bool
Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
insert_quorum int
Enables the quorum writes.
insert_quorum_timeout int
Write to a quorum timeout in milliseconds.
join_overflow_mode str
Sets behaviour on overflow in JOIN. Possible values:
join_use_nulls bool
Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
joined_subquery_requires_alias bool
Require aliases for subselects and table functions in FROM when more than one table is present.
low_cardinality_allow_in_native_format bool
Allows or restricts using the LowCardinality data type with the Native format.
max_ast_depth int
Maximum abstract syntax tree depth.
max_ast_elements int
Maximum abstract syntax tree elements.
max_block_size int
A recommendation for what size of the block (in a count of rows) to load from tables.
max_bytes_before_external_group_by int
Limit in bytes for using memory for GROUP BY before using swap on disk.
max_bytes_before_external_sort int
This setting is equivalent to the max_bytes_before_external_group_by setting, except that it applies to the sort operation (ORDER BY), not aggregation.
max_bytes_in_distinct int
Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
max_bytes_in_join int
Limit on maximum size of the hash table for JOIN, in bytes.
max_bytes_in_set int
Limit on the number of bytes in the set resulting from the execution of the IN section.
max_bytes_to_read int
Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
max_bytes_to_sort int
Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
max_bytes_to_transfer int
Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
max_columns_to_read int
Limits the maximum number of columns that can be read from a table in a single query.
max_execution_time int
Limits the maximum query execution time in milliseconds.
max_expanded_ast_elements int
Maximum abstract syntax tree elements after expansion of aliases.
max_insert_block_size int
The size of blocks (in a count of rows) to form for insertion into a table.
max_memory_usage int
Limits the maximum memory usage (in bytes) for processing queries on a single server.
max_memory_usage_for_user int
Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.
max_network_bandwidth int
Limits the speed of the data exchange over the network in bytes per second.
max_network_bandwidth_for_user int
Limits the speed of the data exchange over the network in bytes per second.
max_query_size int
The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
max_replica_delay_for_distributed_queries int
Disables lagging replicas for distributed queries.
max_result_bytes int
Limits the number of bytes in the result.
max_result_rows int
Limits the number of rows in the result.
max_rows_in_distinct int
Limits the maximum number of different rows when using DISTINCT.
max_rows_in_join int
Limit on maximum size of the hash table for JOIN, in rows.
max_rows_in_set int
Limit on the number of rows in the set resulting from the execution of the IN section.
max_rows_to_group_by int
Limits the maximum number of unique keys received from aggregation function.
max_rows_to_read int
Limits the maximum number of rows that can be read from a table when running a query.
max_rows_to_sort int
Limits the maximum number of rows that can be read from a table for sorting.
max_rows_to_transfer int
Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
max_temporary_columns int
Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
max_temporary_non_const_columns int
Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
max_threads int
The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
merge_tree_max_bytes_to_use_cache int
If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
merge_tree_max_rows_to_use_cache int
If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
merge_tree_min_bytes_for_concurrent_read int
If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
merge_tree_min_rows_for_concurrent_read int
If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.
min_bytes_to_use_direct_io int
The minimum data volume required for using direct I/O access to the storage disk.
min_count_to_compile int
How many times to potentially use a compiled chunk of code before running compilation.
min_count_to_compile_expression int
A query waits for expression compilation process to complete prior to continuing execution.
min_execution_speed int
Minimal execution speed in rows per second.
min_execution_speed_bytes int
Minimal execution speed in bytes per second.
min_insert_block_size_bytes int
Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
min_insert_block_size_rows int
Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
output_format_json_quote64bit_integers bool
If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
output_format_json_quote_denormals bool
Enables +nan, -nan, +inf, -inf outputs in JSON output format.
priority int
Query priority.
quota_mode str
Quota accounting mode.
read_overflow_mode str
Sets behaviour on overflow while read. Possible values:
readonly int
Restricts permissions for reading data, writing data, and changing settings queries.
receive_timeout int
Receive timeout in milliseconds on the socket used for communicating with the client.
replication_alter_partitions_sync int
For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
result_overflow_mode str
Sets behaviour on overflow in result. Possible values:
select_sequential_consistency bool
Enables or disables sequential consistency for SELECT queries.
send_progress_in_http_headers bool
Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
send_timeout int
Send timeout in milliseconds on the socket used for communicating with the client.
set_overflow_mode str
Sets behaviour on overflow in the resulting set. Possible values:
skip_unavailable_shards bool
Enables or disables silently skipping of unavailable shards.
sort_overflow_mode str
Sets behaviour on overflow while sort. Possible values:
timeout_overflow_mode str
Sets behaviour on overflow. Possible values:
transfer_overflow_mode str
Sets behaviour on overflow. Possible values:
transform_null_in bool
Enables equality of NULL values for IN operator.
use_uncompressed_cache bool
Whether to use a cache of uncompressed blocks.
addHttpCorsHeader Boolean
Include CORS headers in HTTP responses.
allowDdl Boolean
Allows or denies DDL queries.
compile Boolean
Enable compilation of queries.
compileExpressions Boolean
Turn on expression compilation.
connectTimeout Number
Connect timeout in milliseconds on the socket used for communicating with the client.
countDistinctImplementation String
Specifies which of the uniq* functions should be used to perform the COUNT(DISTINCT …) construction.
distinctOverflowMode String
Sets behaviour on overflow when using DISTINCT. Possible values:
distributedAggregationMemoryEfficient Boolean
Determine the behavior of distributed subqueries.
distributedDdlTaskTimeout Number
Timeout for DDL queries, in milliseconds.
distributedProductMode String
Changes the behaviour of distributed subqueries.
emptyResultForAggregationByEmptySet Boolean
Allows returning an empty result.
enableHttpCompression Boolean
Enables or disables data compression in the response to an HTTP request.
fallbackToStaleReplicasForDistributedQueries Boolean
Forces a query to an out-of-date replica if updated data is not available.
forceIndexByDate Boolean
Disables query execution if the index can’t be used by date.
forcePrimaryKey Boolean
Disables query execution if indexing by the primary key is not possible.
groupByOverflowMode String
Sets behaviour on overflow while GROUP BY operation. Possible values:
groupByTwoLevelThreshold Number
Sets the threshold of the number of keys, after that the two-level aggregation should be used.
groupByTwoLevelThresholdBytes Number
Sets the threshold of the number of bytes, after that the two-level aggregation should be used.
httpConnectionTimeout Number
Timeout for HTTP connection in milliseconds.
httpHeadersProgressInterval Number
Sets minimal interval between notifications about request process in HTTP header X-ClickHouse-Progress.
httpReceiveTimeout Number
Timeout for HTTP connection in milliseconds.
httpSendTimeout Number
Timeout for HTTP connection in milliseconds.
inputFormatDefaultsForOmittedFields Boolean
When performing INSERT queries, replace omitted input column values with default values of the respective columns.
inputFormatValuesInterpretExpressions Boolean
Enables or disables the full SQL parser if the fast stream parser can’t parse the data.
insertQuorum Number
Enables the quorum writes.
insertQuorumTimeout Number
Write to a quorum timeout in milliseconds.
joinOverflowMode String
Sets behaviour on overflow in JOIN. Possible values:
joinUseNulls Boolean
Sets the type of JOIN behaviour. When merging tables, empty cells may appear. ClickHouse fills them differently based on this setting.
joinedSubqueryRequiresAlias Boolean
Require aliases for subselects and table functions in FROM when more than one table is present.
lowCardinalityAllowInNativeFormat Boolean
Allows or restricts using the LowCardinality data type with the Native format.
maxAstDepth Number
Maximum abstract syntax tree depth.
maxAstElements Number
Maximum abstract syntax tree elements.
maxBlockSize Number
A recommendation for what size of the block (in a count of rows) to load from tables.
maxBytesBeforeExternalGroupBy Number
Limit in bytes for using memory for GROUP BY before using swap on disk.
maxBytesBeforeExternalSort Number
This setting is equivalent to the max_bytes_before_external_group_by setting, except that it applies to the sort operation (ORDER BY), not aggregation.
maxBytesInDistinct Number
Limits the maximum size of a hash table in bytes (uncompressed data) when using DISTINCT.
maxBytesInJoin Number
Limit on maximum size of the hash table for JOIN, in bytes.
maxBytesInSet Number
Limit on the number of bytes in the set resulting from the execution of the IN section.
maxBytesToRead Number
Limits the maximum number of bytes (uncompressed data) that can be read from a table when running a query.
maxBytesToSort Number
Limits the maximum number of bytes (uncompressed data) that can be read from a table for sorting.
maxBytesToTransfer Number
Limits the maximum number of bytes (uncompressed data) that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
maxColumnsToRead Number
Limits the maximum number of columns that can be read from a table in a single query.
maxExecutionTime Number
Limits the maximum query execution time in milliseconds.
maxExpandedAstElements Number
Maximum abstract syntax tree elements after expansion of aliases.
maxInsertBlockSize Number
The size of blocks (in a count of rows) to form for insertion into a table.
maxMemoryUsage Number
Limits the maximum memory usage (in bytes) for processing queries on a single server.
maxMemoryUsageForUser Number
Limits the maximum memory usage (in bytes) for processing of user's queries on a single server.
maxNetworkBandwidth Number
Limits the speed of the data exchange over the network in bytes per second.
maxNetworkBandwidthForUser Number
Limits the speed of the data exchange over the network in bytes per second.
maxQuerySize Number
The maximum part of a query that can be taken to RAM for parsing with the SQL parser.
maxReplicaDelayForDistributedQueries Number
Disables lagging replicas for distributed queries.
maxResultBytes Number
Limits the number of bytes in the result.
maxResultRows Number
Limits the number of rows in the result.
maxRowsInDistinct Number
Limits the maximum number of different rows when using DISTINCT.
maxRowsInJoin Number
Limit on maximum size of the hash table for JOIN, in rows.
maxRowsInSet Number
Limit on the number of rows in the set resulting from the execution of the IN section.
maxRowsToGroupBy Number
Limits the maximum number of unique keys received from aggregation function.
maxRowsToRead Number
Limits the maximum number of rows that can be read from a table when running a query.
maxRowsToSort Number
Limits the maximum number of rows that can be read from a table for sorting.
maxRowsToTransfer Number
Limits the maximum number of rows that can be passed to a remote server or saved in a temporary table when using GLOBAL IN.
maxTemporaryColumns Number
Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, including constant columns.
maxTemporaryNonConstColumns Number
Limits the maximum number of temporary columns that must be kept in RAM at the same time when running a query, excluding constant columns.
maxThreads Number
The maximum number of query processing threads, excluding threads for retrieving data from remote servers.
mergeTreeMaxBytesToUseCache Number
If ClickHouse should read more than merge_tree_max_bytes_to_use_cache bytes in one query, it doesn’t use the cache of uncompressed blocks.
mergeTreeMaxRowsToUseCache Number
If ClickHouse should read more than merge_tree_max_rows_to_use_cache rows in one query, it doesn’t use the cache of uncompressed blocks.
mergeTreeMinBytesForConcurrentRead Number
If the number of bytes to read from one file of a MergeTree-engine table exceeds merge_tree_min_bytes_for_concurrent_read, then ClickHouse tries to concurrently read from this file in several threads.
mergeTreeMinRowsForConcurrentRead Number
If the number of rows to be read from a file of a MergeTree table exceeds merge_tree_min_rows_for_concurrent_read then ClickHouse tries to perform a concurrent reading from this file on several threads.
minBytesToUseDirectIo Number
The minimum data volume required for using direct I/O access to the storage disk.
minCountToCompile Number
How many times to potentially use a compiled chunk of code before running compilation.
minCountToCompileExpression Number
A query waits for the expression compilation process to complete prior to continuing execution.
minExecutionSpeed Number
Minimal execution speed in rows per second.
minExecutionSpeedBytes Number
Minimal execution speed in bytes per second.
minInsertBlockSizeBytes Number
Sets the minimum number of bytes in the block which can be inserted into a table by an INSERT query.
minInsertBlockSizeRows Number
Sets the minimum number of rows in the block which can be inserted into a table by an INSERT query.
outputFormatJsonQuote64bitIntegers Boolean
If the value is true, integers appear in quotes when using JSON* Int64 and UInt64 formats (for compatibility with most JavaScript implementations); otherwise, integers are output without the quotes.
outputFormatJsonQuoteDenormals Boolean
Enables +nan, -nan, +inf, -inf outputs in JSON output format.
priority Number
Query priority.
quotaMode String
Quota accounting mode.
readOverflowMode String
Sets behaviour on overflow while reading. Possible values:
readonly Number
Restricts permissions for read-data, write-data, and change-settings queries.
receiveTimeout Number
Receive timeout in milliseconds on the socket used for communicating with the client.
replicationAlterPartitionsSync Number
For ALTER ... ATTACH|DETACH|DROP queries, you can use the replication_alter_partitions_sync setting to set up waiting.
resultOverflowMode String
Sets behaviour on overflow in result. Possible values:
selectSequentialConsistency Boolean
Enables or disables sequential consistency for SELECT queries.
sendProgressInHttpHeaders Boolean
Enables or disables X-ClickHouse-Progress HTTP response headers in clickhouse-server responses.
sendTimeout Number
Send timeout in milliseconds on the socket used for communicating with the client.
setOverflowMode String
Sets behaviour on overflow in the resulting set. Possible values:
skipUnavailableShards Boolean
Enables or disables silently skipping of unavailable shards.
sortOverflowMode String
Sets behaviour on overflow while sorting. Possible values:
timeoutOverflowMode String
Sets behaviour on overflow. Possible values:
transferOverflowMode String
Sets behaviour on overflow. Possible values:
transformNullIn Boolean
Enables equality of NULL values for IN operator.
useUncompressedCache Boolean
Whether to use a cache of uncompressed blocks.

MdbClickhouseClusterZookeeper
, MdbClickhouseClusterZookeeperArgs

Resources MdbClickhouseClusterZookeeperResources
Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
Resources MdbClickhouseClusterZookeeperResources
Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
resources MdbClickhouseClusterZookeeperResources
Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
resources MdbClickhouseClusterZookeeperResources
Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
resources MdbClickhouseClusterZookeeperResources
Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.
resources Property Map
Resources allocated to hosts of the ZooKeeper subcluster. The structure is documented below.

MdbClickhouseClusterZookeeperResources
, MdbClickhouseClusterZookeeperResourcesArgs

DiskSize int
Volume of the storage available to a ZooKeeper host, in gigabytes.
DiskTypeId string
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
ResourcePresetId string
DiskSize int
Volume of the storage available to a ZooKeeper host, in gigabytes.
DiskTypeId string
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
ResourcePresetId string
diskSize Integer
Volume of the storage available to a ZooKeeper host, in gigabytes.
diskTypeId String
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
resourcePresetId String
diskSize number
Volume of the storage available to a ZooKeeper host, in gigabytes.
diskTypeId string
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
resourcePresetId string
disk_size int
Volume of the storage available to a ZooKeeper host, in gigabytes.
disk_type_id str
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
resource_preset_id str
diskSize Number
Volume of the storage available to a ZooKeeper host, in gigabytes.
diskTypeId String
Type of the storage of ZooKeeper hosts. For more information see the official documentation.
resourcePresetId String

Import

A cluster can be imported using the id of the resource, e.g.

 $ pulumi import yandex:index/mdbClickhouseCluster:MdbClickhouseCluster foo cluster_id
Copy

To learn more about importing existing cloud resources, see Importing resources.

Package Details

Repository
Yandex pulumi/pulumi-yandex
License
Apache-2.0
Notes
This Pulumi package is based on the yandex Terraform Provider.