1. Packages
  2. Databricks Provider
  3. API Docs
  4. SqlEndpoint
Databricks v1.67.0 published on Thursday, Apr 17, 2025 by Pulumi

databricks.SqlEndpoint

Explore with Pulumi AI

This resource is used to manage Databricks SQL warehouses. To create SQL warehouses you must have databricks_sql_access on your databricks.Group or databricks_user.

Example Usage

import * as pulumi from "@pulumi/pulumi";
import * as databricks from "@pulumi/databricks";

const me = databricks.getCurrentUser({});
const _this = new databricks.SqlEndpoint("this", {
    name: me.then(me => `Endpoint of ${me.alphanumeric}`),
    clusterSize: "Small",
    maxNumClusters: 1,
    tags: {
        customTags: [{
            key: "City",
            value: "Amsterdam",
        }],
    },
});
Copy
import pulumi
import pulumi_databricks as databricks

me = databricks.get_current_user()
this = databricks.SqlEndpoint("this",
    name=f"Endpoint of {me.alphanumeric}",
    cluster_size="Small",
    max_num_clusters=1,
    tags={
        "custom_tags": [{
            "key": "City",
            "value": "Amsterdam",
        }],
    })
Copy
package main

import (
	"fmt"

	"github.com/pulumi/pulumi-databricks/sdk/go/databricks"
	"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)

func main() {
	pulumi.Run(func(ctx *pulumi.Context) error {
		me, err := databricks.GetCurrentUser(ctx, map[string]interface{}{}, nil)
		if err != nil {
			return err
		}
		_, err = databricks.NewSqlEndpoint(ctx, "this", &databricks.SqlEndpointArgs{
			Name:           pulumi.Sprintf("Endpoint of %v", me.Alphanumeric),
			ClusterSize:    pulumi.String("Small"),
			MaxNumClusters: pulumi.Int(1),
			Tags: &databricks.SqlEndpointTagsArgs{
				CustomTags: databricks.SqlEndpointTagsCustomTagArray{
					&databricks.SqlEndpointTagsCustomTagArgs{
						Key:   pulumi.String("City"),
						Value: pulumi.String("Amsterdam"),
					},
				},
			},
		})
		if err != nil {
			return err
		}
		return nil
	})
}
Copy
using System.Collections.Generic;
using System.Linq;
using Pulumi;
using Databricks = Pulumi.Databricks;

return await Deployment.RunAsync(() => 
{
    var me = Databricks.GetCurrentUser.Invoke();

    var @this = new Databricks.SqlEndpoint("this", new()
    {
        Name = $"Endpoint of {me.Apply(getCurrentUserResult => getCurrentUserResult.Alphanumeric)}",
        ClusterSize = "Small",
        MaxNumClusters = 1,
        Tags = new Databricks.Inputs.SqlEndpointTagsArgs
        {
            CustomTags = new[]
            {
                new Databricks.Inputs.SqlEndpointTagsCustomTagArgs
                {
                    Key = "City",
                    Value = "Amsterdam",
                },
            },
        },
    });

});
Copy
package generated_program;

import com.pulumi.Context;
import com.pulumi.Pulumi;
import com.pulumi.core.Output;
import com.pulumi.databricks.DatabricksFunctions;
import com.pulumi.databricks.SqlEndpoint;
import com.pulumi.databricks.SqlEndpointArgs;
import com.pulumi.databricks.inputs.SqlEndpointTagsArgs;
import com.pulumi.databricks.inputs.SqlEndpointTagsCustomTagArgs;
import java.util.List;
import java.util.ArrayList;
import java.util.Map;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;

public class App {
    public static void main(String[] args) {
        Pulumi.run(App::stack);
    }

    public static void stack(Context ctx) {
        final var me = DatabricksFunctions.getCurrentUser();

        var this_ = new SqlEndpoint("this", SqlEndpointArgs.builder()
            .name(me.applyValue(getCurrentUserResult -> String.format("Endpoint of %s", getCurrentUserResult.alphanumeric())))
            .clusterSize("Small")
            .maxNumClusters(1)
            .tags(SqlEndpointTagsArgs.builder()
                .customTags(SqlEndpointTagsCustomTagArgs.builder()
                    .key("City")
                    .value("Amsterdam")
                    .build())
                .build())
            .build());

    }
}
Copy
resources:
  this:
    type: databricks:SqlEndpoint
    properties:
      name: Endpoint of ${me.alphanumeric}
      clusterSize: Small
      maxNumClusters: 1
      tags:
        customTags:
          - key: City
            value: Amsterdam
variables:
  me:
    fn::invoke:
      function: databricks:getCurrentUser
      arguments: {}
Copy

Access control

  • databricks.Permissions can control which groups or individual users can Can Use or Can Manage SQL warehouses.
  • databricks_sql_access on databricks.Group or databricks_user.

The following resources are often used in the same context:

  • End to end workspace management guide.
  • databricks.InstanceProfile to manage AWS EC2 instance profiles that users can launch databricks.Cluster and access data, like databricks_mount.
  • databricks.SqlDashboard to manage Databricks SQL Dashboards.
  • databricks.SqlGlobalConfig to configure the security policy, databricks_instance_profile, and data access properties for all databricks.SqlEndpoint of workspace.
  • databricks.SqlPermissions to manage data object access control lists in Databricks workspaces for things like tables, views, databases, and more.

Create SqlEndpoint Resource

Resources are created with functions called constructors. To learn more about declaring and configuring resources, see Resources.

Constructor syntax

new SqlEndpoint(name: string, args: SqlEndpointArgs, opts?: CustomResourceOptions);
@overload
def SqlEndpoint(resource_name: str,
                args: SqlEndpointArgs,
                opts: Optional[ResourceOptions] = None)

@overload
def SqlEndpoint(resource_name: str,
                opts: Optional[ResourceOptions] = None,
                cluster_size: Optional[str] = None,
                instance_profile_arn: Optional[str] = None,
                channel: Optional[SqlEndpointChannelArgs] = None,
                data_source_id: Optional[str] = None,
                enable_photon: Optional[bool] = None,
                enable_serverless_compute: Optional[bool] = None,
                auto_stop_mins: Optional[int] = None,
                max_num_clusters: Optional[int] = None,
                min_num_clusters: Optional[int] = None,
                name: Optional[str] = None,
                spot_instance_policy: Optional[str] = None,
                tags: Optional[SqlEndpointTagsArgs] = None,
                warehouse_type: Optional[str] = None)
func NewSqlEndpoint(ctx *Context, name string, args SqlEndpointArgs, opts ...ResourceOption) (*SqlEndpoint, error)
public SqlEndpoint(string name, SqlEndpointArgs args, CustomResourceOptions? opts = null)
public SqlEndpoint(String name, SqlEndpointArgs args)
public SqlEndpoint(String name, SqlEndpointArgs args, CustomResourceOptions options)
type: databricks:SqlEndpoint
properties: # The arguments to resource properties.
options: # Bag of options to control resource's behavior.

Parameters

name This property is required. string
The unique name of the resource.
args This property is required. SqlEndpointArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
resource_name This property is required. str
The unique name of the resource.
args This property is required. SqlEndpointArgs
The arguments to resource properties.
opts ResourceOptions
Bag of options to control resource's behavior.
ctx Context
Context object for the current deployment.
name This property is required. string
The unique name of the resource.
args This property is required. SqlEndpointArgs
The arguments to resource properties.
opts ResourceOption
Bag of options to control resource's behavior.
name This property is required. string
The unique name of the resource.
args This property is required. SqlEndpointArgs
The arguments to resource properties.
opts CustomResourceOptions
Bag of options to control resource's behavior.
name This property is required. String
The unique name of the resource.
args This property is required. SqlEndpointArgs
The arguments to resource properties.
options CustomResourceOptions
Bag of options to control resource's behavior.

Constructor example

The following reference example uses placeholder values for all input properties.

var sqlEndpointResource = new Databricks.SqlEndpoint("sqlEndpointResource", new()
{
    ClusterSize = "string",
    InstanceProfileArn = "string",
    Channel = new Databricks.Inputs.SqlEndpointChannelArgs
    {
        DbsqlVersion = "string",
        Name = "string",
    },
    DataSourceId = "string",
    EnablePhoton = false,
    EnableServerlessCompute = false,
    AutoStopMins = 0,
    MaxNumClusters = 0,
    MinNumClusters = 0,
    Name = "string",
    SpotInstancePolicy = "string",
    Tags = new Databricks.Inputs.SqlEndpointTagsArgs
    {
        CustomTags = new[]
        {
            new Databricks.Inputs.SqlEndpointTagsCustomTagArgs
            {
                Key = "string",
                Value = "string",
            },
        },
    },
    WarehouseType = "string",
});
Copy
example, err := databricks.NewSqlEndpoint(ctx, "sqlEndpointResource", &databricks.SqlEndpointArgs{
	ClusterSize:        pulumi.String("string"),
	InstanceProfileArn: pulumi.String("string"),
	Channel: &databricks.SqlEndpointChannelArgs{
		DbsqlVersion: pulumi.String("string"),
		Name:         pulumi.String("string"),
	},
	DataSourceId:            pulumi.String("string"),
	EnablePhoton:            pulumi.Bool(false),
	EnableServerlessCompute: pulumi.Bool(false),
	AutoStopMins:            pulumi.Int(0),
	MaxNumClusters:          pulumi.Int(0),
	MinNumClusters:          pulumi.Int(0),
	Name:                    pulumi.String("string"),
	SpotInstancePolicy:      pulumi.String("string"),
	Tags: &databricks.SqlEndpointTagsArgs{
		CustomTags: databricks.SqlEndpointTagsCustomTagArray{
			&databricks.SqlEndpointTagsCustomTagArgs{
				Key:   pulumi.String("string"),
				Value: pulumi.String("string"),
			},
		},
	},
	WarehouseType: pulumi.String("string"),
})
Copy
var sqlEndpointResource = new SqlEndpoint("sqlEndpointResource", SqlEndpointArgs.builder()
    .clusterSize("string")
    .instanceProfileArn("string")
    .channel(SqlEndpointChannelArgs.builder()
        .dbsqlVersion("string")
        .name("string")
        .build())
    .dataSourceId("string")
    .enablePhoton(false)
    .enableServerlessCompute(false)
    .autoStopMins(0)
    .maxNumClusters(0)
    .minNumClusters(0)
    .name("string")
    .spotInstancePolicy("string")
    .tags(SqlEndpointTagsArgs.builder()
        .customTags(SqlEndpointTagsCustomTagArgs.builder()
            .key("string")
            .value("string")
            .build())
        .build())
    .warehouseType("string")
    .build());
Copy
sql_endpoint_resource = databricks.SqlEndpoint("sqlEndpointResource",
    cluster_size="string",
    instance_profile_arn="string",
    channel={
        "dbsql_version": "string",
        "name": "string",
    },
    data_source_id="string",
    enable_photon=False,
    enable_serverless_compute=False,
    auto_stop_mins=0,
    max_num_clusters=0,
    min_num_clusters=0,
    name="string",
    spot_instance_policy="string",
    tags={
        "custom_tags": [{
            "key": "string",
            "value": "string",
        }],
    },
    warehouse_type="string")
Copy
const sqlEndpointResource = new databricks.SqlEndpoint("sqlEndpointResource", {
    clusterSize: "string",
    instanceProfileArn: "string",
    channel: {
        dbsqlVersion: "string",
        name: "string",
    },
    dataSourceId: "string",
    enablePhoton: false,
    enableServerlessCompute: false,
    autoStopMins: 0,
    maxNumClusters: 0,
    minNumClusters: 0,
    name: "string",
    spotInstancePolicy: "string",
    tags: {
        customTags: [{
            key: "string",
            value: "string",
        }],
    },
    warehouseType: "string",
});
Copy
type: databricks:SqlEndpoint
properties:
    autoStopMins: 0
    channel:
        dbsqlVersion: string
        name: string
    clusterSize: string
    dataSourceId: string
    enablePhoton: false
    enableServerlessCompute: false
    instanceProfileArn: string
    maxNumClusters: 0
    minNumClusters: 0
    name: string
    spotInstancePolicy: string
    tags:
        customTags:
            - key: string
              value: string
    warehouseType: string
Copy

SqlEndpoint Resource Properties

To learn more about resource properties and how to use them, see Inputs and Outputs in the Architecture and Concepts docs.

Inputs

In Python, inputs that are objects can be passed either as argument classes or as dictionary literals.

The SqlEndpoint resource accepts the following input properties:

ClusterSize This property is required. string
The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
AutoStopMins int
Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
Channel SqlEndpointChannel
block, consisting of following fields:
DataSourceId string
ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
EnablePhoton bool
Whether to enable Photon. This field is optional and is enabled by default.
EnableServerlessCompute bool

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

InstanceProfileArn string
MaxNumClusters int
Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
MinNumClusters int
Minimum number of clusters available when a SQL warehouse is running. The default is 1.
Name string
Name of the SQL warehouse. Must be unique.
SpotInstancePolicy string
The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
Tags SqlEndpointTags
Databricks tags all endpoint resources with these tags.
WarehouseType string
SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
ClusterSize This property is required. string
The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
AutoStopMins int
Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
Channel SqlEndpointChannelArgs
block, consisting of following fields:
DataSourceId string
ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
EnablePhoton bool
Whether to enable Photon. This field is optional and is enabled by default.
EnableServerlessCompute bool

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

InstanceProfileArn string
MaxNumClusters int
Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
MinNumClusters int
Minimum number of clusters available when a SQL warehouse is running. The default is 1.
Name string
Name of the SQL warehouse. Must be unique.
SpotInstancePolicy string
The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
Tags SqlEndpointTagsArgs
Databricks tags all endpoint resources with these tags.
WarehouseType string
SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
clusterSize This property is required. String
The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
autoStopMins Integer
Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
channel SqlEndpointChannel
block, consisting of following fields:
dataSourceId String
ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
enablePhoton Boolean
Whether to enable Photon. This field is optional and is enabled by default.
enableServerlessCompute Boolean

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

instanceProfileArn String
maxNumClusters Integer
Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
minNumClusters Integer
Minimum number of clusters available when a SQL warehouse is running. The default is 1.
name String
Name of the SQL warehouse. Must be unique.
spotInstancePolicy String
The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
tags SqlEndpointTags
Databricks tags all endpoint resources with these tags.
warehouseType String
SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
clusterSize This property is required. string
The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
autoStopMins number
Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
channel SqlEndpointChannel
block, consisting of following fields:
dataSourceId string
ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
enablePhoton boolean
Whether to enable Photon. This field is optional and is enabled by default.
enableServerlessCompute boolean

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

instanceProfileArn string
maxNumClusters number
Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
minNumClusters number
Minimum number of clusters available when a SQL warehouse is running. The default is 1.
name string
Name of the SQL warehouse. Must be unique.
spotInstancePolicy string
The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
tags SqlEndpointTags
Databricks tags all endpoint resources with these tags.
warehouseType string
SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
cluster_size This property is required. str
The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
auto_stop_mins int
Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
channel SqlEndpointChannelArgs
block, consisting of following fields:
data_source_id str
ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
enable_photon bool
Whether to enable Photon. This field is optional and is enabled by default.
enable_serverless_compute bool

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

instance_profile_arn str
max_num_clusters int
Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
min_num_clusters int
Minimum number of clusters available when a SQL warehouse is running. The default is 1.
name str
Name of the SQL warehouse. Must be unique.
spot_instance_policy str
The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
tags SqlEndpointTagsArgs
Databricks tags all endpoint resources with these tags.
warehouse_type str
SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
clusterSize This property is required. String
The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
autoStopMins Number
Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
channel Property Map
block, consisting of following fields:
dataSourceId String
ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
enablePhoton Boolean
Whether to enable Photon. This field is optional and is enabled by default.
enableServerlessCompute Boolean

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, If omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior which is default to true if the workspace is enabled for serverless and fits the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

instanceProfileArn String
maxNumClusters Number
Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
minNumClusters Number
Minimum number of clusters available when a SQL warehouse is running. The default is 1.
name String
Name of the SQL warehouse. Must be unique.
spotInstancePolicy String
The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
tags Property Map
Databricks tags all endpoint resources with these tags.
warehouseType String
SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.

Outputs

All input properties are implicitly available as output properties. Additionally, the SqlEndpoint resource produces the following output properties:

CreatorName string
The username of the user who created the endpoint.
Healths List<SqlEndpointHealth>
Health status of the endpoint.
Id string
The provider-assigned unique ID for this managed resource.
JdbcUrl string
JDBC connection string.
NumActiveSessions int
The current number of clusters used by the endpoint.
NumClusters int
The current number of clusters used by the endpoint.
OdbcParams SqlEndpointOdbcParams
ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
State string
The current state of the endpoint.
CreatorName string
The username of the user who created the endpoint.
Healths []SqlEndpointHealth
Health status of the endpoint.
Id string
The provider-assigned unique ID for this managed resource.
JdbcUrl string
JDBC connection string.
NumActiveSessions int
The current number of clusters used by the endpoint.
NumClusters int
The current number of clusters used by the endpoint.
OdbcParams SqlEndpointOdbcParams
ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
State string
The current state of the endpoint.
creatorName String
The username of the user who created the endpoint.
healths List<SqlEndpointHealth>
Health status of the endpoint.
id String
The provider-assigned unique ID for this managed resource.
jdbcUrl String
JDBC connection string.
numActiveSessions Integer
The current number of clusters used by the endpoint.
numClusters Integer
The current number of clusters used by the endpoint.
odbcParams SqlEndpointOdbcParams
ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
state String
The current state of the endpoint.
creatorName string
The username of the user who created the endpoint.
healths SqlEndpointHealth[]
Health status of the endpoint.
id string
The provider-assigned unique ID for this managed resource.
jdbcUrl string
JDBC connection string.
numActiveSessions number
The current number of clusters used by the endpoint.
numClusters number
The current number of clusters used by the endpoint.
odbcParams SqlEndpointOdbcParams
ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
state string
The current state of the endpoint.
creator_name str
The username of the user who created the endpoint.
healths Sequence[SqlEndpointHealth]
Health status of the endpoint.
id str
The provider-assigned unique ID for this managed resource.
jdbc_url str
JDBC connection string.
num_active_sessions int
The current number of clusters used by the endpoint.
num_clusters int
The current number of clusters used by the endpoint.
odbc_params SqlEndpointOdbcParams
ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
state str
The current state of the endpoint.
creatorName String
The username of the user who created the endpoint.
healths List<Property Map>
Health status of the endpoint.
id String
The provider-assigned unique ID for this managed resource.
jdbcUrl String
JDBC connection string.
numActiveSessions Number
The current number of clusters used by the endpoint.
numClusters Number
The current number of clusters used by the endpoint.
odbcParams Property Map
ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
state String
The current state of the endpoint.

Look up Existing SqlEndpoint Resource

Get an existing SqlEndpoint resource’s state with the given name, ID, and optional extra properties used to qualify the lookup.

public static get(name: string, id: Input<ID>, state?: SqlEndpointState, opts?: CustomResourceOptions): SqlEndpoint
@staticmethod
def get(resource_name: str,
        id: str,
        opts: Optional[ResourceOptions] = None,
        auto_stop_mins: Optional[int] = None,
        channel: Optional[SqlEndpointChannelArgs] = None,
        cluster_size: Optional[str] = None,
        creator_name: Optional[str] = None,
        data_source_id: Optional[str] = None,
        enable_photon: Optional[bool] = None,
        enable_serverless_compute: Optional[bool] = None,
        healths: Optional[Sequence[SqlEndpointHealthArgs]] = None,
        instance_profile_arn: Optional[str] = None,
        jdbc_url: Optional[str] = None,
        max_num_clusters: Optional[int] = None,
        min_num_clusters: Optional[int] = None,
        name: Optional[str] = None,
        num_active_sessions: Optional[int] = None,
        num_clusters: Optional[int] = None,
        odbc_params: Optional[SqlEndpointOdbcParamsArgs] = None,
        spot_instance_policy: Optional[str] = None,
        state: Optional[str] = None,
        tags: Optional[SqlEndpointTagsArgs] = None,
        warehouse_type: Optional[str] = None) -> SqlEndpoint
func GetSqlEndpoint(ctx *Context, name string, id IDInput, state *SqlEndpointState, opts ...ResourceOption) (*SqlEndpoint, error)
public static SqlEndpoint Get(string name, Input<string> id, SqlEndpointState? state, CustomResourceOptions? opts = null)
public static SqlEndpoint get(String name, Output<String> id, SqlEndpointState state, CustomResourceOptions options)
resources:
  _:
    type: databricks:SqlEndpoint
    get:
      id: ${id}
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
resource_name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
name This property is required.
The unique name of the resulting resource.
id This property is required.
The unique provider ID of the resource to lookup.
state
Any extra arguments used during the lookup.
opts
A bag of options that control this resource's behavior.
The following state arguments are supported:
AutoStopMins int
Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
Channel SqlEndpointChannel
block, consisting of following fields:
ClusterSize string
The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
CreatorName string
The username of the user who created the endpoint.
DataSourceId string
ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
EnablePhoton bool
Whether to enable Photon. This field is optional and is enabled by default.
EnableServerlessCompute bool

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

Healths List<SqlEndpointHealth>
Health status of the endpoint.
InstanceProfileArn string
JdbcUrl string
JDBC connection string.
MaxNumClusters int
Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
MinNumClusters int
Minimum number of clusters available when a SQL warehouse is running. The default is 1.
Name string
Name of the SQL warehouse. Must be unique.
NumActiveSessions int
The current number of active sessions for the endpoint.
NumClusters int
The current number of clusters used by the endpoint.
OdbcParams SqlEndpointOdbcParams
ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
SpotInstancePolicy string
The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
State string
The current state of the endpoint.
Tags SqlEndpointTags
Databricks tags all endpoint resources with these tags.
WarehouseType string
SQL warehouse type. See the Databricks documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
AutoStopMins int
Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
Channel SqlEndpointChannelArgs
block, consisting of following fields:
ClusterSize string
The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
CreatorName string
The username of the user who created the endpoint.
DataSourceId string
ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
EnablePhoton bool
Whether to enable Photon. This field is optional and is enabled by default.
EnableServerlessCompute bool

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

Healths []SqlEndpointHealthArgs
Health status of the endpoint.
InstanceProfileArn string
JdbcUrl string
JDBC connection string.
MaxNumClusters int
Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
MinNumClusters int
Minimum number of clusters available when a SQL warehouse is running. The default is 1.
Name string
Name of the SQL warehouse. Must be unique.
NumActiveSessions int
The current number of active sessions for the endpoint.
NumClusters int
The current number of clusters used by the endpoint.
OdbcParams SqlEndpointOdbcParamsArgs
ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
SpotInstancePolicy string
The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
State string
The current state of the endpoint.
Tags SqlEndpointTagsArgs
Databricks tags all endpoint resources with these tags.
WarehouseType string
SQL warehouse type. See the Databricks documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
autoStopMins Integer
Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
channel SqlEndpointChannel
block, consisting of following fields:
clusterSize String
The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
creatorName String
The username of the user who created the endpoint.
dataSourceId String
ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
enablePhoton Boolean
Whether to enable Photon. This field is optional and is enabled by default.
enableServerlessCompute Boolean

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

healths List<SqlEndpointHealth>
Health status of the endpoint.
instanceProfileArn String
jdbcUrl String
JDBC connection string.
maxNumClusters Integer
Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
minNumClusters Integer
Minimum number of clusters available when a SQL warehouse is running. The default is 1.
name String
Name of the SQL warehouse. Must be unique.
numActiveSessions Integer
The current number of active sessions for the endpoint.
numClusters Integer
The current number of clusters used by the endpoint.
odbcParams SqlEndpointOdbcParams
ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
spotInstancePolicy String
The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
state String
The current state of the endpoint.
tags SqlEndpointTags
Databricks tags all endpoint resources with these tags.
warehouseType String
SQL warehouse type. See the Databricks documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
autoStopMins number
Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
channel SqlEndpointChannel
block, consisting of following fields:
clusterSize string
The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
creatorName string
The username of the user who created the endpoint.
dataSourceId string
ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
enablePhoton boolean
Whether to enable Photon. This field is optional and is enabled by default.
enableServerlessCompute boolean

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

healths SqlEndpointHealth[]
Health status of the endpoint.
instanceProfileArn string
jdbcUrl string
JDBC connection string.
maxNumClusters number
Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
minNumClusters number
Minimum number of clusters available when a SQL warehouse is running. The default is 1.
name string
Name of the SQL warehouse. Must be unique.
numActiveSessions number
The current number of active sessions for the endpoint.
numClusters number
The current number of clusters used by the endpoint.
odbcParams SqlEndpointOdbcParams
ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
spotInstancePolicy string
The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
state string
The current state of the endpoint.
tags SqlEndpointTags
Databricks tags all endpoint resources with these tags.
warehouseType string
SQL warehouse type. See the Databricks documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
auto_stop_mins int
Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
channel SqlEndpointChannelArgs
block, consisting of following fields:
cluster_size str
The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
creator_name str
The username of the user who created the endpoint.
data_source_id str
ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
enable_photon bool
Whether to enable Photon. This field is optional and is enabled by default.
enable_serverless_compute bool

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

healths Sequence[SqlEndpointHealthArgs]
Health status of the endpoint.
instance_profile_arn str
jdbc_url str
JDBC connection string.
max_num_clusters int
Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
min_num_clusters int
Minimum number of clusters available when a SQL warehouse is running. The default is 1.
name str
Name of the SQL warehouse. Must be unique.
num_active_sessions int
The current number of active sessions for the endpoint.
num_clusters int
The current number of clusters used by the endpoint.
odbc_params SqlEndpointOdbcParamsArgs
ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
spot_instance_policy str
The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
state str
The current state of the endpoint.
tags SqlEndpointTagsArgs
Databricks tags all endpoint resources with these tags.
warehouse_type str
SQL warehouse type. See the Databricks documentation for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.
autoStopMins Number
Time in minutes until an idle SQL warehouse terminates all clusters and stops. This field is optional. The default is 120, set to 0 to disable the auto stop.
channel Property Map
block, consisting of following fields:
clusterSize String
The size of the clusters allocated to the endpoint: "2X-Small", "X-Small", "Small", "Medium", "Large", "X-Large", "2X-Large", "3X-Large", "4X-Large".
creatorName String
The username of the user who created the endpoint.
dataSourceId String
ID of the data source for this endpoint. This is used to bind a Databricks SQL query to an endpoint.
enablePhoton Boolean
Whether to enable Photon. This field is optional and is enabled by default.
enableServerlessCompute Boolean

Whether this SQL warehouse is a serverless endpoint. See below for details about the default values. To avoid ambiguity, especially for organizations with many workspaces, Databricks recommends that you always set this field explicitly.

  • For AWS, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between September 1, 2022 and April 30, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. If your account needs updated terms of use, workspace admins are prompted in the Databricks SQL UI. A workspace must meet the requirements and might require an update to its instance profile role to add a trust relationship.

  • For Azure, if omitted, the default is false for most workspaces. However, if this workspace used the SQL Warehouses API to create a warehouse between November 1, 2022 and May 19, 2023, the default remains the previous behavior, which defaults to true if the workspace is enabled for serverless and meets the requirements for serverless SQL warehouses. A workspace must meet the requirements and might require an update to its Azure storage firewall.

healths List<Property Map>
Health status of the endpoint.
instanceProfileArn String
jdbcUrl String
JDBC connection string.
maxNumClusters Number
Maximum number of clusters available when a SQL warehouse is running. This field is required. If multi-cluster load balancing is not enabled, this defaults to 1.
minNumClusters Number
Minimum number of clusters available when a SQL warehouse is running. The default is 1.
name String
Name of the SQL warehouse. Must be unique.
numActiveSessions Number
The current number of active sessions for the endpoint.
numClusters Number
The current number of clusters used by the endpoint.
odbcParams Property Map
ODBC connection params: odbc_params.hostname, odbc_params.path, odbc_params.protocol, and odbc_params.port.
spotInstancePolicy String
The spot policy to use for allocating instances to clusters: COST_OPTIMIZED or RELIABILITY_OPTIMIZED. This field is optional. Default is COST_OPTIMIZED.
state String
The current state of the endpoint.
tags Property Map
Databricks tags all endpoint resources with these tags.
warehouseType String
SQL warehouse type. See for AWS or Azure. Set to PRO or CLASSIC. If the field enable_serverless_compute has the value true either explicitly or through the default logic (see that field above for details), the default is PRO, which is required for serverless SQL warehouses. Otherwise, the default is CLASSIC.

Supporting Types

SqlEndpointChannel
, SqlEndpointChannelArgs

DbsqlVersion string
Name string
Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.
DbsqlVersion string
Name string
Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.
dbsqlVersion String
name String
Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.
dbsqlVersion string
name string
Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.
dbsql_version str
name str
Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.
dbsqlVersion String
name String
Name of the Databricks SQL release channel. Possible values are: CHANNEL_NAME_PREVIEW and CHANNEL_NAME_CURRENT. Default is CHANNEL_NAME_CURRENT.

SqlEndpointHealth
, SqlEndpointHealthArgs

SqlEndpointHealthFailureReason
, SqlEndpointHealthFailureReasonArgs

Code string
Parameters Dictionary<string, string>
Type string
Code string
Parameters map[string]string
Type string
code String
parameters Map<String,String>
type String
code string
parameters {[key: string]: string}
type string
code str
parameters Mapping[str, str]
type str
code String
parameters Map<String>
type String

SqlEndpointOdbcParams
, SqlEndpointOdbcParamsArgs

Hostname string
Path string
Port int
Protocol string
Hostname string
Path string
Port int
Protocol string
hostname String
path String
port Integer
protocol String
hostname string
path string
port number
protocol string
hostname str
path str
port int
protocol str
hostname String
path String
port Number
protocol String

SqlEndpointTags
, SqlEndpointTagsArgs

SqlEndpointTagsCustomTag
, SqlEndpointTagsCustomTagArgs

Key This property is required. string
Value This property is required. string
Key This property is required. string
Value This property is required. string
key This property is required. String
value This property is required. String
key This property is required. string
value This property is required. string
key This property is required. str
value This property is required. str
key This property is required. String
value This property is required. String

Import

You can import a databricks_sql_endpoint resource with ID like the following:

bash

$ pulumi import databricks:index/sqlEndpoint:SqlEndpoint this <endpoint-id>
Copy

To learn more about importing existing cloud resources, see Importing resources.

Package Details

Repository
databricks pulumi/pulumi-databricks
License
Apache-2.0
Notes
This Pulumi package is based on the databricks Terraform Provider.