[This documentation is auto-generated]
This package provides a simplified interface to the Databricks REST API. The interface is autogenerated on instantiation using the underlying client library used in the official `databricks-cli` Python package.
Install using

```bash
pip install databricks-api
```
The docs here describe the interface for version 0.12.0 of the `databricks-cli` package for API version 2.0. Assuming there are no new major or minor versions to the `databricks-cli` package structure, this package should continue to work without a required update.
The `databricks-api` package contains a `DatabricksAPI` class which provides instance attributes for the `databricks-cli` `ApiClient`, as well as each of the available service instances. The attributes of a `DatabricksAPI` instance are:
- `DatabricksAPI.client <databricks_cli.sdk.api_client.ApiClient>`
- `DatabricksAPI.jobs <databricks_cli.sdk.service.JobsService>`
- `DatabricksAPI.cluster <databricks_cli.sdk.service.ClusterService>`
- `DatabricksAPI.policy <databricks_cli.sdk.service.PolicyService>`
- `DatabricksAPI.managed_library <databricks_cli.sdk.service.ManagedLibraryService>`
- `DatabricksAPI.dbfs <databricks_cli.sdk.service.DbfsService>`
- `DatabricksAPI.workspace <databricks_cli.sdk.service.WorkspaceService>`
- `DatabricksAPI.secret <databricks_cli.sdk.service.SecretService>`
- `DatabricksAPI.groups <databricks_cli.sdk.service.GroupsService>`
- `DatabricksAPI.token <databricks_cli.sdk.service.TokenService>`
- `DatabricksAPI.instance_pool <databricks_cli.sdk.service.InstancePoolService>`
- `DatabricksAPI.delta_pipelines <databricks_cli.sdk.service.DeltaPipelinesService>`
To instantiate the client, provide the Databricks host and either a token or a user and password. Also shown below is the full signature of the underlying `ApiClient.__init__`:
```python
from databricks_api import DatabricksAPI

# Provide a host and token
db = DatabricksAPI(
    host="example.cloud.databricks.com",
    token="dapi123..."
)

# OR a host and user and password
db = DatabricksAPI(
    host="example.cloud.databricks.com",
    user="me@example.com",
    password="password"
)

# Full __init__ signature
db = DatabricksAPI(
    user=None,
    password=None,
    host=None,
    token=None,
    apiVersion=2.0,
    default_headers={},
    verify=True,
    command_name=''
)
```
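Once instantiated, each service method maps to a single REST call and returns the parsed JSON response as a dict. A minimal sketch, assuming the `db` client from above:

```python
# Each service method performs one REST call and returns the parsed JSON
clusters = db.cluster.list_clusters()
for cluster in clusters.get("clusters", []):
    print(cluster["cluster_id"], cluster["state"])
```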
Refer to the official Databricks REST API documentation for the functionality and required arguments of each method below. Each of the service instance attributes provides the following public methods:
```python
db.jobs.cancel_run(
    run_id,
    headers=None,
)

db.jobs.create_job(
    name=None,
    existing_cluster_id=None,
    new_cluster=None,
    libraries=None,
    email_notifications=None,
    timeout_seconds=None,
    max_retries=None,
    min_retry_interval_millis=None,
    retry_on_timeout=None,
    schedule=None,
    notebook_task=None,
    spark_jar_task=None,
    spark_python_task=None,
    spark_submit_task=None,
    max_concurrent_runs=None,
    headers=None,
)

db.jobs.delete_job(
    job_id,
    headers=None,
)

db.jobs.delete_run(
    run_id=None,
    headers=None,
)

db.jobs.export_run(
    run_id,
    views_to_export=None,
    headers=None,
)

db.jobs.get_job(
    job_id,
    headers=None,
)

db.jobs.get_run(
    run_id=None,
    headers=None,
)

db.jobs.get_run_output(
    run_id,
    headers=None,
)

db.jobs.list_jobs(headers=None)

db.jobs.list_runs(
    job_id=None,
    active_only=None,
    completed_only=None,
    offset=None,
    limit=None,
    headers=None,
)

db.jobs.reset_job(
    job_id,
    new_settings,
    headers=None,
)

db.jobs.run_now(
    job_id=None,
    jar_params=None,
    notebook_params=None,
    python_params=None,
    spark_submit_params=None,
    headers=None,
)

db.jobs.submit_run(
    run_name=None,
    existing_cluster_id=None,
    new_cluster=None,
    libraries=None,
    notebook_task=None,
    spark_jar_task=None,
    spark_python_task=None,
    spark_submit_task=None,
    timeout_seconds=None,
    headers=None,
)
```
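As an illustration of the jobs service, the following sketch creates a notebook job and triggers a run immediately. The cluster ID and notebook path are hypothetical, and `db` is the authenticated client from above:

```python
# Hypothetical cluster ID and notebook path for illustration
job = db.jobs.create_job(
    name="nightly-etl",
    existing_cluster_id="0123-456789-abc123",
    notebook_task={"notebook_path": "/Users/me@example.com/my-notebook"},
    max_retries=1,
)

# create_job returns {"job_id": ...}; run_now returns {"run_id": ...}
run = db.jobs.run_now(job_id=job["job_id"])
print(db.jobs.get_run(run_id=run["run_id"])["state"])
```

For one-off runs without a registered job, `submit_run` accepts the same task payloads.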
```python
db.cluster.create_cluster(
    num_workers=None,
    autoscale=None,
    cluster_name=None,
    spark_version=None,
    spark_conf=None,
    aws_attributes=None,
    node_type_id=None,
    driver_node_type_id=None,
    ssh_public_keys=None,
    custom_tags=None,
    cluster_log_conf=None,
    init_scripts=None,
    spark_env_vars=None,
    autotermination_minutes=None,
    enable_elastic_disk=None,
    cluster_source=None,
    instance_pool_id=None,
    headers=None,
)

db.cluster.delete_cluster(
    cluster_id,
    headers=None,
)

db.cluster.edit_cluster(
    cluster_id,
    num_workers=None,
    autoscale=None,
    cluster_name=None,
    spark_version=None,
    spark_conf=None,
    aws_attributes=None,
    node_type_id=None,
    driver_node_type_id=None,
    ssh_public_keys=None,
    custom_tags=None,
    cluster_log_conf=None,
    init_scripts=None,
    spark_env_vars=None,
    autotermination_minutes=None,
    enable_elastic_disk=None,
    cluster_source=None,
    instance_pool_id=None,
    headers=None,
)

db.cluster.get_cluster(
    cluster_id,
    headers=None,
)

db.cluster.get_events(
    cluster_id,
    start_time=None,
    end_time=None,
    order=None,
    event_types=None,
    offset=None,
    limit=None,
    headers=None,
)

db.cluster.list_available_zones(headers=None)

db.cluster.list_clusters(headers=None)

db.cluster.list_node_types(headers=None)

db.cluster.list_spark_versions(headers=None)

db.cluster.permanent_delete_cluster(
    cluster_id,
    headers=None,
)

db.cluster.pin_cluster(
    cluster_id,
    headers=None,
)

db.cluster.resize_cluster(
    cluster_id,
    num_workers=None,
    autoscale=None,
    headers=None,
)

db.cluster.restart_cluster(
    cluster_id,
    headers=None,
)

db.cluster.start_cluster(
    cluster_id,
    headers=None,
)

db.cluster.unpin_cluster(
    cluster_id,
    headers=None,
)
```
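As a sketch of the cluster service, the following creates a small autoterminating cluster. The Spark version and node type are hypothetical placeholders; real values can be discovered with `list_spark_versions()` and `list_node_types()`:

```python
cluster = db.cluster.create_cluster(
    num_workers=2,
    cluster_name="demo-cluster",
    spark_version="7.3.x-scala2.12",  # hypothetical; see list_spark_versions()
    node_type_id="i3.xlarge",         # hypothetical; see list_node_types()
    autotermination_minutes=30,
)

# create_cluster returns {"cluster_id": ...}
print(db.cluster.get_cluster(cluster_id=cluster["cluster_id"])["state"])
```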
```python
db.policy.create_policy(
    policy_name,
    definition,
    headers=None,
)

db.policy.delete_policy(
    policy_id,
    headers=None,
)

db.policy.edit_policy(
    policy_id,
    policy_name,
    definition,
    headers=None,
)

db.policy.get_policy(
    policy_id,
    headers=None,
)

db.policy.list_policies(headers=None)
```
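A cluster policy definition is a JSON document serialized to a string. A minimal sketch with hypothetical limits:

```python
import json

# Policy definitions are JSON documents serialized to a string
definition = json.dumps({
    "spark_version": {"type": "fixed", "value": "7.3.x-scala2.12"},
    "autotermination_minutes": {"type": "range", "maxValue": 60},
})

policy = db.policy.create_policy(
    policy_name="small-clusters-only",
    definition=definition,
)
```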
```python
db.managed_library.all_cluster_statuses(headers=None)

db.managed_library.cluster_status(
    cluster_id,
    headers=None,
)

db.managed_library.install_libraries(
    cluster_id,
    libraries=None,
    headers=None,
)

db.managed_library.uninstall_libraries(
    cluster_id,
    libraries=None,
    headers=None,
)
```
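Library specifications follow the REST API's library object format. A minimal sketch installing a PyPI package on a hypothetical cluster:

```python
# Hypothetical cluster ID; the library spec follows the REST API's format
db.managed_library.install_libraries(
    cluster_id="0123-456789-abc123",
    libraries=[{"pypi": {"package": "requests==2.25.1"}}],
)

status = db.managed_library.cluster_status(cluster_id="0123-456789-abc123")
```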
```python
db.dbfs.add_block(
    handle,
    data,
    headers=None,
)

db.dbfs.close(
    handle,
    headers=None,
)

db.dbfs.create(
    path,
    overwrite=None,
    headers=None,
)

db.dbfs.delete(
    path,
    recursive=None,
    headers=None,
)

db.dbfs.get_status(
    path,
    headers=None,
)

db.dbfs.list(
    path,
    headers=None,
)

db.dbfs.mkdirs(
    path,
    headers=None,
)

db.dbfs.move(
    source_path,
    destination_path,
    headers=None,
)

db.dbfs.put(
    path,
    contents=None,
    overwrite=None,
    headers=None,
)

db.dbfs.read(
    path,
    offset=None,
    length=None,
    headers=None,
)
```
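For uploads too large for a single `put` call, the handle-based streaming methods can be combined; block data is base64-encoded. A minimal sketch, assuming a local file `local_file.txt`:

```python
import base64

# Open a streaming handle, append base64-encoded blocks, then close it
handle = db.dbfs.create(path="/tmp/example.txt", overwrite=True)["handle"]

with open("local_file.txt", "rb") as f:
    while True:
        chunk = f.read(1024 * 1024)  # the API caps blocks at 1 MB
        if not chunk:
            break
        db.dbfs.add_block(handle=handle, data=base64.b64encode(chunk).decode())

db.dbfs.close(handle=handle)
```

The same pattern works in reverse with `read`, which returns base64-encoded data.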
```python
db.workspace.delete(
    path,
    recursive=None,
    headers=None,
)

db.workspace.export_workspace(
    path,
    format=None,
    direct_download=None,
    headers=None,
)

db.workspace.get_status(
    path,
    headers=None,
)

db.workspace.import_workspace(
    path,
    format=None,
    language=None,
    content=None,
    overwrite=None,
    headers=None,
)

db.workspace.list(
    path,
    headers=None,
)

db.workspace.mkdirs(
    path,
    headers=None,
)
```
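Workspace content is transferred base64-encoded. A minimal sketch exporting a hypothetical notebook as source:

```python
import base64

# Hypothetical notebook path; content comes back base64-encoded
exported = db.workspace.export_workspace(
    path="/Users/me@example.com/my-notebook",
    format="SOURCE",
)
source = base64.b64decode(exported["content"])
```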
```python
db.secret.create_scope(
    scope,
    initial_manage_principal=None,
    scope_backend_type=None,
    headers=None,
)

db.secret.delete_acl(
    scope,
    principal,
    headers=None,
)

db.secret.delete_scope(
    scope,
    headers=None,
)

db.secret.delete_secret(
    scope,
    key,
    headers=None,
)

db.secret.get_acl(
    scope,
    principal,
    headers=None,
)

db.secret.list_acls(
    scope,
    headers=None,
)

db.secret.list_scopes(headers=None)

db.secret.list_secrets(
    scope,
    headers=None,
)

db.secret.put_acl(
    scope,
    principal,
    permission,
    headers=None,
)

db.secret.put_secret(
    scope,
    key,
    string_value=None,
    bytes_value=None,
    headers=None,
)
```
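A minimal sketch of the secret service: create a scope, write a secret, and list the keys back (secret values themselves are write-only):

```python
db.secret.create_scope(
    scope="demo-scope",
    initial_manage_principal="users",  # lets all workspace users manage the scope
)
db.secret.put_secret(
    scope="demo-scope",
    key="db-password",
    string_value="s3cret",
)

# Only key metadata is listed back, never the stored values
print(db.secret.list_secrets(scope="demo-scope"))
```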
```python
db.groups.add_to_group(
    parent_name,
    user_name=None,
    group_name=None,
    headers=None,
)

db.groups.create_group(
    group_name,
    headers=None,
)

db.groups.get_group_members(
    group_name,
    headers=None,
)

db.groups.get_groups(headers=None)

db.groups.get_groups_for_principal(
    user_name=None,
    group_name=None,
    headers=None,
)

db.groups.remove_from_group(
    parent_name,
    user_name=None,
    group_name=None,
    headers=None,
)

db.groups.remove_group(
    group_name,
    headers=None,
)
```
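A minimal sketch of the groups service, using a hypothetical group name:

```python
db.groups.create_group(group_name="data-engineers")

# Pass group_name instead of user_name to nest one group inside another
db.groups.add_to_group(
    parent_name="data-engineers",
    user_name="me@example.com",
)
members = db.groups.get_group_members(group_name="data-engineers")
```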
```python
db.token.create_token(
    lifetime_seconds=None,
    comment=None,
    headers=None,
)

db.token.list_tokens(headers=None)

db.token.revoke_token(
    token_id,
    headers=None,
)
```
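A minimal sketch of the token service: create a short-lived token, then revoke it:

```python
# Create a token that expires in one hour, then revoke it
new_token = db.token.create_token(
    lifetime_seconds=3600,
    comment="temporary automation token",
)
db.token.revoke_token(token_id=new_token["token_info"]["token_id"])
```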
```python
db.instance_pool.create_instance_pool(
    instance_pool_name=None,
    min_idle_instances=None,
    max_capacity=None,
    aws_attributes=None,
    node_type_id=None,
    custom_tags=None,
    idle_instance_autotermination_minutes=None,
    enable_elastic_disk=None,
    disk_spec=None,
    preloaded_spark_versions=None,
    headers=None,
)

db.instance_pool.delete_instance_pool(
    instance_pool_id=None,
    headers=None,
)

db.instance_pool.edit_instance_pool(
    instance_pool_id,
    instance_pool_name=None,
    min_idle_instances=None,
    max_capacity=None,
    aws_attributes=None,
    node_type_id=None,
    custom_tags=None,
    idle_instance_autotermination_minutes=None,
    enable_elastic_disk=None,
    disk_spec=None,
    preloaded_spark_versions=None,
    headers=None,
)

db.instance_pool.get_instance_pool(
    instance_pool_id=None,
    headers=None,
)

db.instance_pool.list_instance_pools(headers=None)
```
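A minimal sketch of the instance pool service; as with clusters, the node type is a hypothetical placeholder:

```python
pool = db.instance_pool.create_instance_pool(
    instance_pool_name="demo-pool",
    min_idle_instances=1,
    max_capacity=10,
    node_type_id="i3.xlarge",  # hypothetical; see db.cluster.list_node_types()
    idle_instance_autotermination_minutes=15,
)

# create_instance_pool returns {"instance_pool_id": ...}
print(pool["instance_pool_id"])
```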
```python
db.delta_pipelines.create(
    id=None,
    name=None,
    storage=None,
    configuration=None,
    clusters=None,
    libraries=None,
    trigger=None,
    filters=None,
    allow_duplicate_names=None,
    headers=None,
)

db.delta_pipelines.delete(
    pipeline_id=None,
    headers=None,
)

db.delta_pipelines.deploy(
    pipeline_id=None,
    id=None,
    name=None,
    storage=None,
    configuration=None,
    clusters=None,
    libraries=None,
    trigger=None,
    filters=None,
    allow_duplicate_names=None,
    headers=None,
)

db.delta_pipelines.get(
    pipeline_id=None,
    headers=None,
)

db.delta_pipelines.reset(
    pipeline_id=None,
    headers=None,
)

db.delta_pipelines.run(
    pipeline_id=None,
    headers=None,
)

db.delta_pipelines.stop(
    pipeline_id=None,
    headers=None,
)
```
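The Delta pipelines methods follow the signatures above; a minimal sketch using a hypothetical pipeline ID:

```python
# Hypothetical pipeline ID; only the methods listed above are used
pipeline_id = "1234-abcd"
db.delta_pipelines.run(pipeline_id=pipeline_id)
print(db.delta_pipelines.get(pipeline_id=pipeline_id))
db.delta_pipelines.stop(pipeline_id=pipeline_id)
```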