diff --git a/.codegen/changelog.md.tmpl b/.codegen/changelog.md.tmpl
index c74cebc6..697782ea 100644
--- a/.codegen/changelog.md.tmpl
+++ b/.codegen/changelog.md.tmpl
@@ -22,6 +22,6 @@ Dependency updates:
 
 {{- define "what" -}}
 {{if eq .X "method" -}}
- `{{.Method.Service.CamelName}}{{.Method.PascalName}}()` function
+ [`{{.AsFlat.SnakeName}}()`](https://databrickslabs.github.io/databricks-sdk-r/reference/{{.AsFlat.SnakeName}}.html) function
 {{- end}}
 {{- end -}}
diff --git a/.codegen/service.R.tmpl b/.codegen/service.R.tmpl
index b3ee9f4d..3ad6a25f 100644
--- a/.codegen/service.R.tmpl
+++ b/.codegen/service.R.tmpl
@@ -10,15 +10,20 @@ NULL
 {{end}}{{if .Pagination}}#'
 #' @return `data.frame` with all of the response pages.
 {{end}}#'
-#' @rdname {{.Service.CamelName}}{{.PascalName}}
+#' @rdname {{.AsFlat.SnakeName}}
+#' @aliases {{.Service.CamelName}}{{.PascalName}}
 #' @export
 {{- $hasRequiredFields := and .Request (gt (len .Request.RequiredFields) 0) }}
-{{.Service.CamelName}}{{.PascalName}} <- function(client{{- if .Request}}{{range .Request.RequiredFields}}, {{.SnakeName}}{{end -}}
+{{.AsFlat.SnakeName}} <- function(client{{- if .Request}}{{range .Request.RequiredFields}}, {{.SnakeName}}{{end -}}
 {{- range .Request.NonRequiredFields}}, {{.SnakeName}}=NULL{{end}}
 {{- end}}) {
 {{- template "method-serialize" .}}
 {{template "method-call" .}}
 }
+
+#' @rdname {{.AsFlat.SnakeName}}
+#' @export
+{{.Service.CamelName}}{{.PascalName}} <- {{.AsFlat.SnakeName}}
 {{end}}
 {{- range .Methods}}{{if and .Wait (not .IsCrudRead)}}{{.Comment "#' " 80}}
 #' @param client Required. Instance of DatabricksClient()
@@ -34,10 +39,10 @@ NULL
 #'{{range .Request.Fields}}
 #' @param {{.SnakeName}} {{if .Required}}Required. {{end}}{{with .Summary}}{{.}}{{else}}This field has no description yet.{{end}}{{end}}
 {{end}}#'
-#' @rdname {{.Service.CamelName}}{{.PascalName}}AndWait
+#' @rdname {{.AsFlat.SnakeName}}_and_wait
 #' @export
 {{- $hasRequiredFields := and .Request (gt (len .Request.RequiredFields) 0) }}
-{{.Service.CamelName}}{{.PascalName}}AndWait <- function(client{{- if .Request}}{{range .Request.RequiredFields}}, {{.SnakeName}}{{end -}}
+{{.AsFlat.SnakeName}}_and_wait <- function(client{{- if .Request}}{{range .Request.RequiredFields}}, {{.SnakeName}}{{end -}}
 {{- range .Request.NonRequiredFields}}, {{.SnakeName}}=NULL{{end}}
 {{- end}}, timeout={{.Wait.Timeout}}, callback=cli_reporter) {
 {{- template "method-serialize" .}}
diff --git a/.github/workflows/push.yaml b/.github/workflows/push.yaml
index f2487985..5f538102 100644
--- a/.github/workflows/push.yaml
+++ b/.github/workflows/push.yaml
@@ -26,10 +26,11 @@ jobs:
           r-version: release
           use-public-rspm: true
 
-      - name: Install dependencies
-        run: |
-          Rscript -e "if (!require(devtools)) install.packages('devtools', repos = 'https://cran.rstudio.com')"
-          Rscript -e "devtools::install_dev_deps('.')"
+      - uses: r-lib/actions/setup-r-dependencies@v2
+        with:
+          extra-packages: devtools
+        env:
+          R_COMPILE_AND_INSTALL_PACKAGES: never
 
       - name: Run tests
         run: |
diff --git a/NAMESPACE b/NAMESPACE
index a9b177fe..a05bbbe3 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -4,11 +4,15 @@ export(DatabricksClient)
 export(accountAccessControlProxyGetAssignableRolesForResource)
 export(accountAccessControlProxyGetRuleSet)
 export(accountAccessControlProxyUpdateRuleSet)
+export(add_dbfs_block)
+export(add_instance_profile)
 export(alertsCreate)
 export(alertsDelete)
 export(alertsGet)
 export(alertsList)
 export(alertsUpdate)
+export(all_cluster_library_statuses)
+export(approve_model_transition_request)
 export(appsCreate)
 export(appsDeleteApp)
 export(appsGetApp)
@@ -17,18 +21,29 @@ export(appsGetApps)
 export(appsGetEvents)
 export(artifactAllowlistsGet)
 export(artifactAllowlistsUpdate)
+export(assign_metastore)
 export(automaticClusterUpdateGet)
 export(automaticClusterUpdateUpdate)
+export(build_serving_endpoint_logs)
+export(cancel_command_execution)
+export(cancel_command_execution_and_wait)
+export(cancel_job_all_runs)
+export(cancel_job_run)
+export(cancel_job_run_and_wait)
+export(cancel_lakehouse_monitor_refresh)
+export(cancel_statement_execution)
 export(catalogsCreate)
 export(catalogsDelete)
 export(catalogsGet)
 export(catalogsList)
 export(catalogsUpdate)
+export(change_cluster_owner)
 export(cleanRoomsCreate)
 export(cleanRoomsDelete)
 export(cleanRoomsGet)
 export(cleanRoomsList)
 export(cleanRoomsUpdate)
+export(close_dbfs)
 export(clusterPoliciesCreate)
 export(clusterPoliciesDelete)
 export(clusterPoliciesEdit)
@@ -38,13 +53,11 @@ export(clusterPoliciesGetPermissions)
 export(clusterPoliciesList)
 export(clusterPoliciesSetPermissions)
 export(clusterPoliciesUpdatePermissions)
+export(cluster_library_status)
 export(clustersChangeOwner)
 export(clustersCreate)
-export(clustersCreateAndWait)
 export(clustersDelete)
-export(clustersDeleteAndWait)
 export(clustersEdit)
-export(clustersEditAndWait)
 export(clustersEvents)
 export(clustersGet)
 export(clustersGetPermissionLevels)
@@ -55,33 +68,87 @@ export(clustersListZones)
 export(clustersPermanentDelete)
 export(clustersPin)
 export(clustersResize)
-export(clustersResizeAndWait)
 export(clustersRestart)
-export(clustersRestartAndWait)
 export(clustersSetPermissions)
 export(clustersSparkVersions)
 export(clustersStart)
-export(clustersStartAndWait)
 export(clustersUnpin)
 export(clustersUpdatePermissions)
 export(commandExecutionCancel)
-export(commandExecutionCancelAndWait)
 export(commandExecutionCommandStatus)
 export(commandExecutionContextStatus)
 export(commandExecutionCreate)
-export(commandExecutionCreateAndWait)
 export(commandExecutionDestroy)
 export(commandExecutionExecute)
-export(commandExecutionExecuteAndWait)
+export(command_execution_status)
 export(connectionsCreate)
 export(connectionsDelete)
 export(connectionsGet)
 export(connectionsList)
 export(connectionsUpdate)
+export(context_command_execution_status)
+export(create_alert)
+export(create_app)
+export(create_catalog)
+export(create_clean_room)
+export(create_cluster)
+export(create_cluster_and_wait)
+export(create_cluster_policy)
+export(create_command_execution)
+export(create_command_execution_and_wait)
+export(create_connection)
+export(create_dashboard)
+export(create_dashboard_widget)
+export(create_dbfs)
+export(create_experiment)
+export(create_experiment_run)
+export(create_external_location)
+export(create_file_directory)
+export(create_function)
+export(create_git_credential)
+export(create_global_init_script)
+export(create_group)
+export(create_instance_pool)
+export(create_ip_access_list)
+export(create_job)
+export(create_lakehouse_monitor)
+export(create_lakeview)
+export(create_metastore)
+export(create_model)
+export(create_model_comment)
+export(create_model_transition_request)
+export(create_model_version)
+export(create_model_webhook)
+export(create_obo_token)
+export(create_online_table)
+export(create_pipeline)
+export(create_provider)
+export(create_query)
+export(create_query_visualization)
+export(create_recipient)
+export(create_registered_model)
+export(create_repo)
+export(create_schema)
+export(create_secret_scope)
+export(create_service_principal)
+export(create_serving_endpoint)
+export(create_serving_endpoint_and_wait)
+export(create_share)
+export(create_storage_credential)
+export(create_table_constraint)
+export(create_token)
+export(create_user)
+export(create_vector_search_endpoint)
+export(create_vector_search_endpoint_and_wait)
+export(create_vector_search_index)
+export(create_volume)
+export(create_warehouse)
+export(create_warehouse_and_wait)
 export(credentialsManagerExchangeToken)
 export(cspEnablementGet)
 export(cspEnablementUpdate)
 export(currentUserMe)
+export(current_metastore)
 export(dashboardWidgetsCreate)
 export(dashboardWidgetsDelete)
 export(dashboardWidgetsUpdate)
@@ -108,8 +175,90 @@ export(dbsqlPermissionsTransferOwnership)
 export(defaultNamespaceDelete)
 export(defaultNamespaceGet)
 export(defaultNamespaceUpdate)
+export(delete_alert)
+export(delete_app)
+export(delete_catalog)
+export(delete_clean_room)
+export(delete_cluster)
+export(delete_cluster_and_wait)
+export(delete_cluster_policy)
+export(delete_connection)
+export(delete_dashboard)
+export(delete_dashboard_widget)
+export(delete_dbfs)
+export(delete_default_namespace)
+export(delete_experiment)
+export(delete_experiment_run)
+export(delete_experiment_runs)
+export(delete_experiment_tag)
+export(delete_external_location)
+export(delete_file)
+export(delete_file_directory)
+export(delete_function)
+export(delete_git_credential)
+export(delete_global_init_script)
+export(delete_group)
+export(delete_instance_pool)
+export(delete_ip_access_list)
+export(delete_job)
+export(delete_job_run)
+export(delete_lakehouse_monitor)
+export(delete_metastore)
+export(delete_model)
+export(delete_model_comment)
+export(delete_model_tag)
+export(delete_model_transition_request)
+export(delete_model_version)
+export(delete_model_version_tag)
+export(delete_model_webhook)
+export(delete_notebook)
+export(delete_online_table)
+export(delete_pipeline)
+export(delete_provider)
+export(delete_query)
+export(delete_query_visualization)
+export(delete_recipient)
+export(delete_registered_model)
+export(delete_registered_model_alias)
+export(delete_repo)
+export(delete_restrict_workspace_admin)
+export(delete_schema)
+export(delete_secret)
+export(delete_secret_acl)
+export(delete_secret_scope)
+export(delete_service_principal)
+export(delete_serving_endpoint)
+export(delete_share)
+export(delete_storage_credential)
+export(delete_table)
+export(delete_table_constraint)
+export(delete_token)
+export(delete_token_management)
+export(delete_user)
+export(delete_vector_search_endpoint)
+export(delete_vector_search_index)
+export(delete_vector_search_index_data)
+export(delete_volume)
+export(delete_warehouse)
+export(destroy_command_execution)
+export(disable_system_schema)
+export(download_file)
+export(edit_cluster)
+export(edit_cluster_and_wait)
+export(edit_cluster_policy)
+export(edit_instance_pool)
+export(edit_instance_profile)
+export(edit_warehouse)
+export(edit_warehouse_and_wait)
+export(enable_system_schema)
 export(esmEnablementGet)
 export(esmEnablementUpdate)
+export(events_cluster)
+export(exchange_credentials_manager_token)
+export(execute_command)
+export(execute_command_and_wait)
+export(execute_statement)
+export(exists_table)
 export(experimentsCreateExperiment)
 export(experimentsCreateRun)
 export(experimentsDeleteExperiment)
@@ -140,6 +289,9 @@ export(experimentsSetTag)
 export(experimentsUpdateExperiment)
 export(experimentsUpdatePermissions)
 export(experimentsUpdateRun)
+export(export_job_run)
+export(export_notebook)
+export(export_serving_endpoint_metrics)
 export(externalLocationsCreate)
 export(externalLocationsDelete)
 export(externalLocationsGet)
@@ -158,6 +310,114 @@ export(functionsDelete)
 export(functionsGet)
 export(functionsList)
 export(functionsUpdate)
+export(get_account_access_control_proxy_assignable_roles_for_resource)
+export(get_account_access_control_proxy_rule_set)
+export(get_alert)
+export(get_app)
+export(get_app_apps)
+export(get_app_deployment_status)
+export(get_app_events)
+export(get_artifact_allowlist)
+export(get_automatic_cluster_update)
+export(get_catalog)
+export(get_clean_room)
+export(get_cluster)
+export(get_cluster_permission_levels)
+export(get_cluster_permissions)
+export(get_cluster_policy)
+export(get_cluster_policy_family)
+export(get_cluster_policy_permission_levels)
+export(get_cluster_policy_permissions)
+export(get_connection)
+export(get_csp_enablement)
+export(get_dashboard)
+export(get_dbfs_status)
+export(get_dbsql_permission)
+export(get_default_namespace)
+export(get_esm_enablement)
+export(get_experiment)
+export(get_experiment_by_name)
+export(get_experiment_history)
+export(get_experiment_permission_levels)
+export(get_experiment_permissions)
+export(get_experiment_run)
+export(get_external_location)
+export(get_file_directory_metadata)
+export(get_file_metadata)
+export(get_function)
+export(get_git_credential)
+export(get_global_init_script)
+export(get_grant)
+export(get_grant_effective)
+export(get_group)
+export(get_instance_pool)
+export(get_instance_pool_permission_levels)
+export(get_instance_pool_permissions)
+export(get_ip_access_list)
+export(get_job)
+export(get_job_permission_levels)
+export(get_job_permissions)
+export(get_job_run)
+export(get_job_run_and_wait)
+export(get_job_run_output)
+export(get_lakehouse_monitor)
+export(get_lakehouse_monitor_refresh)
+export(get_lakeview)
+export(get_lakeview_published)
+export(get_metastore)
+export(get_model)
+export(get_model_latest_versions)
+export(get_model_permission_levels)
+export(get_model_permissions)
+export(get_model_version)
+export(get_model_version_by_alias)
+export(get_model_version_download_uri)
+export(get_notebook_permission_levels)
+export(get_notebook_permissions)
+export(get_notebook_status)
+export(get_online_table)
+export(get_permission)
+export(get_permission_levels)
+export(get_pipeline)
+export(get_pipeline_permission_levels)
+export(get_pipeline_permissions)
+export(get_pipeline_update)
+export(get_provider)
+export(get_query)
+export(get_recipient)
+export(get_recipient_activation_url_info)
+export(get_registered_model)
+export(get_repo)
+export(get_repo_permission_levels)
+export(get_repo_permissions)
+export(get_restrict_workspace_admin)
+export(get_schema)
+export(get_secret)
+export(get_secret_acl)
+export(get_service_principal)
+export(get_serving_endpoint)
+export(get_serving_endpoint_permission_levels)
+export(get_serving_endpoint_permissions)
+export(get_share)
+export(get_statement_execution)
+export(get_statement_execution_result_chunk_n)
+export(get_storage_credential)
+export(get_table)
+export(get_token_management)
+export(get_token_management_permission_levels)
+export(get_token_management_permissions)
+export(get_user)
+export(get_user_permission_levels)
+export(get_user_permissions)
+export(get_vector_search_endpoint)
+export(get_vector_search_index)
+export(get_warehouse)
+export(get_warehouse_permission_levels)
+export(get_warehouse_permissions)
+export(get_warehouse_workspace_config)
+export(get_workspace_binding)
+export(get_workspace_binding_bindings)
+export(get_workspace_conf_status)
 export(gitCredentialsCreate)
 export(gitCredentialsDelete)
 export(gitCredentialsGet)
@@ -177,6 +437,8 @@ export(groupsGet)
 export(groupsList)
 export(groupsPatch)
 export(groupsUpdate)
+export(import_notebook)
+export(install_cluster_library)
 export(instancePoolsCreate)
 export(instancePoolsDelete)
 export(instancePoolsEdit)
@@ -198,7 +460,6 @@ export(ipAccessListsReplace)
 export(ipAccessListsUpdate)
 export(jobsCancelAllRuns)
 export(jobsCancelRun)
-export(jobsCancelRunAndWait)
 export(jobsCreate)
 export(jobsDelete)
 export(jobsDeleteRun)
@@ -207,18 +468,14 @@ export(jobsGet)
 export(jobsGetPermissionLevels)
 export(jobsGetPermissions)
 export(jobsGetRun)
-export(jobsGetRunAndWait)
 export(jobsGetRunOutput)
 export(jobsList)
 export(jobsListRuns)
 export(jobsRepairRun)
-export(jobsRepairRunAndWait)
 export(jobsReset)
 export(jobsRunNow)
-export(jobsRunNowAndWait)
 export(jobsSetPermissions)
 export(jobsSubmit)
-export(jobsSubmitAndWait)
 export(jobsUpdate)
 export(jobsUpdatePermissions)
 export(lakehouseMonitorsCancelRefresh)
@@ -239,6 +496,73 @@ export(librariesAllClusterStatuses)
 export(librariesClusterStatus)
 export(librariesInstall)
 export(librariesUninstall)
+export(list_alerts)
+export(list_catalogs)
+export(list_clean_rooms)
+export(list_cluster_node_types)
+export(list_cluster_policies)
+export(list_cluster_policy_families)
+export(list_cluster_zones)
+export(list_clusters)
+export(list_connections)
+export(list_dashboards)
+export(list_data_sources)
+export(list_dbfs)
+export(list_experiment_artifacts)
+export(list_experiment_experiments)
+export(list_external_locations)
+export(list_file_directory_contents)
+export(list_functions)
+export(list_git_credentials)
+export(list_global_init_scripts)
+export(list_groups)
+export(list_instance_pools)
+export(list_instance_profiles)
+export(list_ip_access_lists)
+export(list_job_runs)
+export(list_jobs)
+export(list_lakehouse_monitor_refreshes)
+export(list_metastores)
+export(list_model_models)
+export(list_model_transition_requests)
+export(list_model_versions)
+export(list_model_webhooks)
+export(list_notebooks)
+export(list_pipeline_events)
+export(list_pipeline_pipelines)
+export(list_pipeline_updates)
+export(list_provider_shares)
+export(list_providers)
+export(list_queries)
+export(list_query_history)
+export(list_recipients)
+export(list_registered_models)
+export(list_repos)
+export(list_schemas)
+export(list_secret_acls)
+export(list_secret_scopes)
+export(list_secret_secrets)
+export(list_service_principals)
+export(list_serving_endpoints)
+export(list_shares)
+export(list_storage_credentials)
+export(list_system_schemas)
+export(list_table_summaries)
+export(list_tables)
+export(list_token_management)
+export(list_tokens)
+export(list_users)
+export(list_vector_search_endpoint_endpoints)
+export(list_vector_search_index_indexes)
+export(list_volumes)
+export(list_warehouses)
+export(log_experiment_batch)
+export(log_experiment_inputs)
+export(log_experiment_metric)
+export(log_experiment_model)
+export(log_experiment_param)
+export(logs_serving_endpoint)
+export(me)
 export(metastoresAssign)
 export(metastoresCreate)
 export(metastoresCurrent)
@@ -249,6 +573,9 @@ export(metastoresSummary)
 export(metastoresUnassign)
 export(metastoresUpdate)
 export(metastoresUpdateAssignment)
+export(migrate_permission_migration_permissions)
+export(mkdirs_dbfs)
+export(mkdirs_notebook)
 export(modelRegistryApproveTransitionRequest)
 export(modelRegistryCreateComment)
 export(modelRegistryCreateModel)
@@ -290,14 +617,21 @@ export(modelVersionsGet)
 export(modelVersionsGetByAlias)
 export(modelVersionsList)
 export(modelVersionsUpdate)
+export(move_dbfs)
 export(onlineTablesCreate)
 export(onlineTablesDelete)
 export(onlineTablesGet)
+export(patch_group)
+export(patch_service_principal)
+export(patch_serving_endpoint)
+export(patch_user)
+export(permanent_cluster_delete)
 export(permissionMigrationMigratePermissions)
 export(permissionsGet)
 export(permissionsGetPermissionLevels)
 export(permissionsSet)
 export(permissionsUpdate)
+export(pin_cluster)
 export(pipelinesCreate)
 export(pipelinesDelete)
 export(pipelinesGet)
@@ -310,7 +644,6 @@ export(pipelinesListUpdates)
 export(pipelinesSetPermissions)
 export(pipelinesStartUpdate)
 export(pipelinesStop)
-export(pipelinesStopAndWait)
 export(pipelinesUpdate)
 export(pipelinesUpdatePermissions)
 export(policyFamiliesGet)
@@ -321,6 +654,11 @@ export(providersGet)
 export(providersList)
 export(providersListShares)
 export(providersUpdate)
+export(publish_lakeview)
+export(put_dbfs)
+export(put_secret)
+export(put_secret_acl)
+export(put_serving_endpoint)
 export(queriesCreate)
 export(queriesDelete)
 export(queriesGet)
@@ -331,6 +669,10 @@ export(queryHistoryList)
 export(queryVisualizationsCreate)
 export(queryVisualizationsDelete)
 export(queryVisualizationsUpdate)
+export(query_serving_endpoint)
+export(query_vector_search_index)
+export(read_dbfs)
+export(read_volume)
 export(recipientActivationGetActivationUrlInfo)
 export(recipientActivationRetrieveToken)
 export(recipientsCreate)
@@ -347,6 +689,12 @@ export(registeredModelsGet)
 export(registeredModelsList)
 export(registeredModelsSetAlias)
 export(registeredModelsUpdate)
+export(reject_model_transition_request)
+export(remove_instance_profile)
+export(rename_model)
+export(repair_job_run)
+export(repair_job_run_and_wait)
+export(replace_ip_access_list)
 export(reposCreate)
 export(reposDelete)
 export(reposGet)
@@ -356,14 +704,33 @@ export(reposList)
 export(reposSetPermissions)
 export(reposUpdate)
 export(reposUpdatePermissions)
+export(reset_job)
+export(resize_cluster)
+export(resize_cluster_and_wait)
+export(restart_cluster)
+export(restart_cluster_and_wait)
+export(restore_dashboard)
+export(restore_experiment)
+export(restore_experiment_run)
+export(restore_experiment_runs)
+export(restore_query)
 export(restrictWorkspaceAdminsDelete)
 export(restrictWorkspaceAdminsGet)
 export(restrictWorkspaceAdminsUpdate)
+export(retrieve_recipient_activation_token)
+export(rotate_recipient_token)
+export(run_job_now)
+export(run_job_now_and_wait)
+export(run_lakehouse_monitor_refresh)
 export(schemasCreate)
 export(schemasDelete)
 export(schemasGet)
 export(schemasList)
 export(schemasUpdate)
+export(search_experiment_experiments)
+export(search_experiment_runs)
+export(search_model_models)
+export(search_model_versions)
 export(secretsCreateScope)
 export(secretsDeleteAcl)
 export(secretsDeleteScope)
@@ -383,7 +750,6 @@ export(servicePrincipalsPatch)
 export(servicePrincipalsUpdate)
 export(servingEndpointsBuildLogs)
 export(servingEndpointsCreate)
-export(servingEndpointsCreateAndWait)
 export(servingEndpointsDelete)
 export(servingEndpointsExportMetrics)
 export(servingEndpointsGet)
@@ -396,8 +762,30 @@ export(servingEndpointsPut)
 export(servingEndpointsQuery)
 export(servingEndpointsSetPermissions)
 export(servingEndpointsUpdateConfig)
-export(servingEndpointsUpdateConfigAndWait)
 export(servingEndpointsUpdatePermissions)
+export(set_cluster_permissions)
+export(set_cluster_policy_permissions)
+export(set_dbsql_permission)
+export(set_experiment_permissions)
+export(set_experiment_tag)
+export(set_instance_pool_permissions)
+export(set_job_permissions)
+export(set_model_permissions)
+export(set_model_tag)
+export(set_model_version_tag)
+export(set_notebook_permissions)
+export(set_permission)
+export(set_pipeline_permissions)
+export(set_registered_model_alias)
+export(set_repo_permissions)
+export(set_serving_endpoint_permissions)
+export(set_token_management_permissions)
+export(set_user_permissions)
+export(set_warehouse_permissions)
+export(set_warehouse_workspace_config)
+export(set_workspace_conf_status)
+export(share_permissions)
+export(share_recipient_permissions)
 export(sharesCreate)
 export(sharesDelete)
 export(sharesGet)
@@ -405,16 +793,30 @@ export(sharesList)
 export(sharesSharePermissions)
 export(sharesUpdate)
 export(sharesUpdatePermissions)
+export(spark_cluster_versions)
+export(start_cluster)
+export(start_cluster_and_wait)
+export(start_pipeline_update)
+export(start_warehouse)
+export(start_warehouse_and_wait)
 export(statementExecutionCancelExecution)
 export(statementExecutionExecuteStatement)
 export(statementExecutionGetStatement)
 export(statementExecutionGetStatementResultChunkN)
+export(stop_pipeline)
+export(stop_pipeline_and_wait)
+export(stop_warehouse)
+export(stop_warehouse_and_wait)
 export(storageCredentialsCreate)
 export(storageCredentialsDelete)
 export(storageCredentialsGet)
 export(storageCredentialsList)
 export(storageCredentialsUpdate)
 export(storageCredentialsValidate)
+export(submit_job)
+export(submit_job_and_wait)
+export(summary_metastore)
+export(sync_vector_search_index)
 export(systemSchemasDisable)
 export(systemSchemasEnable)
 export(systemSchemasList)
@@ -426,6 +828,7 @@ export(tablesGet)
 export(tablesList)
 export(tablesListSummaries)
 export(tablesUpdate)
+export(test_model_registry_webhook)
 export(tokenManagementCreateOboToken)
 export(tokenManagementDelete)
 export(tokenManagementGet)
@@ -437,6 +840,78 @@ export(tokenManagementUpdatePermissions)
 export(tokensCreate)
 export(tokensDelete)
 export(tokensList)
+export(transfer_dbsql_permission_ownership)
+export(transition_model_stage)
+export(trash_lakeview)
+export(unassign_metastore)
+export(uninstall_cluster_library)
+export(unpin_cluster)
+export(update_account_access_control_proxy_rule_set)
+export(update_alert)
+export(update_artifact_allowlist)
+export(update_automatic_cluster)
+export(update_catalog)
+export(update_clean_room)
+export(update_cluster_permissions)
+export(update_cluster_policy_permissions)
+export(update_connection)
+export(update_csp_enablement)
+export(update_dashboard)
+export(update_dashboard_widget)
+export(update_default_namespace)
+export(update_esm_enablement)
+export(update_experiment)
+export(update_experiment_permissions)
+export(update_experiment_run)
+export(update_external_location)
+export(update_function)
+export(update_git_credential)
+export(update_global_init_script)
+export(update_grant)
+export(update_group)
+export(update_instance_pool_permissions)
+export(update_ip_access_list)
+export(update_job)
+export(update_job_permissions)
+export(update_lakehouse_monitor)
+export(update_lakeview)
+export(update_metastore)
+export(update_metastore_assignment)
+export(update_model)
+export(update_model_comment)
+export(update_model_permissions)
+export(update_model_version)
+export(update_model_webhook)
+export(update_notebook_permissions)
+export(update_permission)
+export(update_pipeline)
+export(update_pipeline_permissions)
+export(update_provider)
+export(update_query)
+export(update_query_visualization)
+export(update_recipient)
+export(update_registered_model)
+export(update_repo)
+export(update_repo_permissions)
+export(update_restrict_workspace_admin)
+export(update_schema)
+export(update_service_principal)
+export(update_serving_endpoint_config)
+export(update_serving_endpoint_config_and_wait)
+export(update_serving_endpoint_permissions)
+export(update_share)
+export(update_share_permissions)
+export(update_storage_credential)
+export(update_table)
+export(update_token_management_permissions)
+export(update_user)
+export(update_user_permissions)
+export(update_volume)
+export(update_warehouse_permissions)
+export(update_workspace_binding)
+export(update_workspace_binding_bindings)
+export(upload_file)
+export(upsert_vector_search_index_data)
 export(usersCreate)
 export(usersDelete)
 export(usersGet)
@@ -447,8 +922,8 @@ export(usersPatch)
 export(usersSetPermissions)
 export(usersUpdate)
 export(usersUpdatePermissions)
+export(validate_storage_credential)
 export(vectorSearchEndpointsCreateEndpoint)
-export(vectorSearchEndpointsCreateEndpointAndWait)
 export(vectorSearchEndpointsDeleteEndpoint)
 export(vectorSearchEndpointsGetEndpoint)
 export(vectorSearchEndpointsListEndpoints)
@@ -466,10 +941,8 @@ export(volumesList)
 export(volumesRead)
 export(volumesUpdate)
 export(warehousesCreate)
-export(warehousesCreateAndWait)
 export(warehousesDelete)
 export(warehousesEdit)
-export(warehousesEditAndWait)
 export(warehousesGet)
 export(warehousesGetPermissionLevels)
 export(warehousesGetPermissions)
@@ -478,9 +951,7 @@ export(warehousesList)
 export(warehousesSetPermissions)
 export(warehousesSetWorkspaceWarehouseConfig)
 export(warehousesStart)
-export(warehousesStartAndWait)
 export(warehousesStop)
-export(warehousesStopAndWait)
 export(warehousesUpdatePermissions)
 export(workspaceBindingsGet)
 export(workspaceBindingsGetBindings)
diff --git a/R/account_access_control_proxy.R b/R/account_access_control_proxy.R
index 8bff196c..1c370bd8 100755
--- a/R/account_access_control_proxy.R
+++ b/R/account_access_control_proxy.R
@@ -12,13 +12,19 @@ NULL
 #'
 #' @param resource Required. The resource name for which assignable roles will be listed.
 #'
-#' @rdname accountAccessControlProxyGetAssignableRolesForResource
+#' @rdname get_account_access_control_proxy_assignable_roles_for_resource
+#' @aliases accountAccessControlProxyGetAssignableRolesForResource
 #' @export
-accountAccessControlProxyGetAssignableRolesForResource <- function(client, resource) {
+get_account_access_control_proxy_assignable_roles_for_resource <- function(client,
+    resource) {
     query <- list(resource = resource)
     client$do("GET", "/api/2.0/preview/accounts/access-control/assignable-roles",
         query = query)
 }
+
+#' @rdname get_account_access_control_proxy_assignable_roles_for_resource
+#' @export
+accountAccessControlProxyGetAssignableRolesForResource <- get_account_access_control_proxy_assignable_roles_for_resource
 #' Get a rule set.
 #'
 #' Get a rule set by its name. A rule set is always attached to a resource and
@@ -29,12 +35,17 @@ accountAccessControlProxyGetAssignableRolesForResource <- function(client, resou
 #' @param etag Required. Etag used for versioning.
 #' @param name Required. The ruleset name associated with the request.
 #'
-#' @rdname accountAccessControlProxyGetRuleSet
+#' @rdname get_account_access_control_proxy_rule_set
+#' @aliases accountAccessControlProxyGetRuleSet
 #' @export
-accountAccessControlProxyGetRuleSet <- function(client, name, etag) {
+get_account_access_control_proxy_rule_set <- function(client, name, etag) {
     query <- list(etag = etag, name = name)
     client$do("GET", "/api/2.0/preview/accounts/access-control/rule-sets", query = query)
 }
+
+#' @rdname get_account_access_control_proxy_rule_set
+#' @export
+accountAccessControlProxyGetRuleSet <- get_account_access_control_proxy_rule_set
 #' Update a rule set.
 #'
 #' Replace the rules of a rule set. First, use a GET rule set request to read
@@ -45,12 +56,17 @@ accountAccessControlProxyGetRuleSet <- function(client, name, etag) {
 #' @param name Required. Name of the rule set.
 #' @param rule_set Required. This field has no description yet.
 #'
-#' @rdname accountAccessControlProxyUpdateRuleSet
+#' @rdname update_account_access_control_proxy_rule_set
+#' @aliases accountAccessControlProxyUpdateRuleSet
 #' @export
-accountAccessControlProxyUpdateRuleSet <- function(client, name, rule_set) {
+update_account_access_control_proxy_rule_set <- function(client, name, rule_set) {
     body <- list(name = name, rule_set = rule_set)
     client$do("PUT", "/api/2.0/preview/accounts/access-control/rule-sets", body = body)
 }
+#' @rdname update_account_access_control_proxy_rule_set
+#' @export
+accountAccessControlProxyUpdateRuleSet <- update_account_access_control_proxy_rule_set
+
diff --git a/R/alerts.R b/R/alerts.R
index ce5723ab..e5757f3e 100755
--- a/R/alerts.R
+++ b/R/alerts.R
@@ -16,13 +16,18 @@ NULL
 #' @param query_id Required. Query ID.
 #' @param rearm Number of seconds after being triggered before the alert rearms itself and can be triggered again.
 #'
-#' @rdname alertsCreate
+#' @rdname create_alert
+#' @aliases alertsCreate
 #' @export
-alertsCreate <- function(client, name, options, query_id, parent = NULL, rearm = NULL) {
+create_alert <- function(client, name, options, query_id, parent = NULL, rearm = NULL) {
     body <- list(name = name, options = options, parent = parent, query_id = query_id,
         rearm = rearm)
     client$do("POST", "/api/2.0/preview/sql/alerts", body = body)
 }
+
+#' @rdname create_alert
+#' @export
+alertsCreate <- create_alert
 #' Delete an alert.
 #'
 #' Deletes an alert. Deleted alerts are no longer accessible and cannot be
@@ -32,12 +37,17 @@
 #'
 #' @param alert_id Required. This field has no description yet.
 #'
-#' @rdname alertsDelete
+#' @rdname delete_alert
+#' @aliases alertsDelete
 #' @export
-alertsDelete <- function(client, alert_id) {
+delete_alert <- function(client, alert_id) {
     client$do("DELETE", paste("/api/2.0/preview/sql/alerts/", alert_id, sep = ""))
 }
+
+#' @rdname delete_alert
+#' @export
+alertsDelete <- delete_alert
 #' Get an alert.
 #'
 #' Gets an alert.
@@ -45,22 +55,32 @@
 #'
 #' @param alert_id Required. This field has no description yet.
 #'
-#' @rdname alertsGet
+#' @rdname get_alert
+#' @aliases alertsGet
 #' @export
-alertsGet <- function(client, alert_id) {
+get_alert <- function(client, alert_id) {
     client$do("GET", paste("/api/2.0/preview/sql/alerts/", alert_id, sep = ""))
 }
+
+#' @rdname get_alert
+#' @export
+alertsGet <- get_alert
 #' Get alerts.
 #'
 #' Gets a list of alerts.
 #' @param client Required. Instance of DatabricksClient()
 #'
-#' @rdname alertsList
+#' @rdname list_alerts
+#' @aliases alertsList
 #' @export
-alertsList <- function(client) {
+list_alerts <- function(client) {
     client$do("GET", "/api/2.0/preview/sql/alerts")
 }
+
+#' @rdname list_alerts
+#' @export
+alertsList <- list_alerts
 #' Update an alert.
 #'
 #' Updates an alert.
@@ -72,13 +92,18 @@
 #' @param query_id Required. Query ID.
 #' @param rearm Number of seconds after being triggered before the alert rearms itself and can be triggered again.
 #'
-#' @rdname alertsUpdate
+#' @rdname update_alert
+#' @aliases alertsUpdate
 #' @export
-alertsUpdate <- function(client, alert_id, name, options, query_id, rearm = NULL) {
+update_alert <- function(client, alert_id, name, options, query_id, rearm = NULL) {
     body <- list(name = name, options = options, query_id = query_id, rearm = rearm)
     client$do("PUT", paste("/api/2.0/preview/sql/alerts/", alert_id, sep = ""),
         body = body)
 }
+#' @rdname update_alert
+#' @export
+alertsUpdate <- update_alert
+
diff --git a/R/apps.R b/R/apps.R
index cfa8671c..966e72aa 100755
--- a/R/apps.R
+++ b/R/apps.R
@@ -11,12 +11,17 @@ NULL
 #' @param manifest Required. Manifest that specifies the application requirements.
 #' @param resources Information passed at app deployment time to fulfill app dependencies.
 #'
-#' @rdname appsCreate
+#' @rdname create_app
+#' @aliases appsCreate
 #' @export
-appsCreate <- function(client, manifest, resources = NULL) {
+create_app <- function(client, manifest, resources = NULL) {
     body <- list(manifest = manifest, resources = resources)
     client$do("POST", "/api/2.0/preview/apps/deployments", body = body)
 }
+
+#' @rdname create_app
+#' @export
+appsCreate <- create_app
 #' Delete an application.
 #'
 #' Delete an application definition
@@ -24,12 +29,17 @@
 #'
 #' @param name Required. The name of an application.
 #'
-#' @rdname appsDeleteApp
+#' @rdname delete_app
+#' @aliases appsDeleteApp
 #' @export
-appsDeleteApp <- function(client, name) {
+delete_app <- function(client, name) {
     client$do("DELETE", paste("/api/2.0/preview/apps/instances/", name, sep = ""))
 }
+
+#' @rdname delete_app
+#' @export
+appsDeleteApp <- delete_app
 #' Get definition for an application.
 #'
 #' Get an application definition
@@ -37,12 +47,17 @@
 #'
 #' @param name Required. The name of an application.
 #'
-#' @rdname appsGetApp
+#' @rdname get_app
+#' @aliases appsGetApp
 #' @export
-appsGetApp <- function(client, name) {
+get_app <- function(client, name) {
     client$do("GET", paste("/api/2.0/preview/apps/instances/", name, sep = ""))
 }
+
+#' @rdname get_app
+#' @export
+appsGetApp <- get_app
 #' Get deployment status for an application.
 #'
 #' Get deployment status for an application
@@ -51,23 +66,33 @@
 #' @param deployment_id Required. The deployment id for an application.
 #' @param include_app_log Boolean flag to include application logs.
 #'
-#' @rdname appsGetAppDeploymentStatus
+#' @rdname get_app_deployment_status
+#' @aliases appsGetAppDeploymentStatus
 #' @export
-appsGetAppDeploymentStatus <- function(client, deployment_id, include_app_log = NULL) {
+get_app_deployment_status <- function(client, deployment_id, include_app_log = NULL) {
     query <- list(include_app_log = include_app_log)
     client$do("GET", paste("/api/2.0/preview/apps/deployments/", deployment_id,
         sep = ""), query = query)
 }
+
+#' @rdname get_app_deployment_status
+#' @export
+appsGetAppDeploymentStatus <- get_app_deployment_status
 #' List all applications.
 #'
 #' List all available applications
 #' @param client Required. Instance of DatabricksClient()
 #'
-#' @rdname appsGetApps
+#' @rdname get_app_apps
+#' @aliases appsGetApps
 #' @export
-appsGetApps <- function(client) {
+get_app_apps <- function(client) {
     client$do("GET", "/api/2.0/preview/apps/instances")
 }
+
+#' @rdname get_app_apps
+#' @export
+appsGetApps <- get_app_apps
 #' Get deployment events for an application.
 #'
 #' Get deployment events for an application
@@ -75,13 +100,18 @@
 #'
 #' @param name Required. The name of an application.
 #'
-#' @rdname appsGetEvents
+#' @rdname get_app_events
+#' @aliases appsGetEvents
 #' @export
-appsGetEvents <- function(client, name) {
+get_app_events <- function(client, name) {
     client$do("GET", paste("/api/2.0/preview/apps/", name, "/events", , sep = ""))
 }
+#' @rdname get_app_events
+#' @export
+appsGetEvents <- get_app_events
+
diff --git a/R/artifact_allowlists.R b/R/artifact_allowlists.R
index 401ffde1..5d38dd5c 100755
--- a/R/artifact_allowlists.R
+++ b/R/artifact_allowlists.R
@@ -11,13 +11,18 @@ NULL
 #'
 #' @param artifact_type Required. The artifact type of the allowlist.
 #'
-#' @rdname artifactAllowlistsGet
+#' @rdname get_artifact_allowlist
+#' @aliases artifactAllowlistsGet
 #' @export
-artifactAllowlistsGet <- function(client, artifact_type) {
+get_artifact_allowlist <- function(client, artifact_type) {
     client$do("GET", paste("/api/2.1/unity-catalog/artifact-allowlists/", artifact_type,
         sep = ""))
 }
+
+#' @rdname get_artifact_allowlist
+#' @export
+artifactAllowlistsGet <- get_artifact_allowlist
 #' Set an artifact allowlist.
 #'
 #' Set the artifact allowlist of a certain artifact type. The whole artifact
@@ -28,12 +33,17 @@
 #' @param artifact_matchers Required. A list of allowed artifact match patterns.
 #' @param artifact_type Required. The artifact type of the allowlist.
 #'
-#' @rdname artifactAllowlistsUpdate
+#' @rdname update_artifact_allowlist
+#' @aliases artifactAllowlistsUpdate
 #' @export
-artifactAllowlistsUpdate <- function(client, artifact_type, artifact_matchers) {
+update_artifact_allowlist <- function(client, artifact_type, artifact_matchers) {
     body <- list(artifact_matchers = artifact_matchers)
     client$do("PUT", paste("/api/2.1/unity-catalog/artifact-allowlists/", artifact_type,
         sep = ""), body = body)
 }
+#' @rdname update_artifact_allowlist
+#' @export
+artifactAllowlistsUpdate <- update_artifact_allowlist
+
diff --git a/R/automatic_cluster_update.R b/R/automatic_cluster_update.R
index d0df1ae2..26f59028 100755
--- a/R/automatic_cluster_update.R
+++ b/R/automatic_cluster_update.R
@@ -10,13 +10,18 @@ NULL
 #'
 #' @param etag etag used for versioning.
 #'
-#' @rdname automaticClusterUpdateGet
+#' @rdname get_automatic_cluster_update
+#' @aliases automaticClusterUpdateGet
 #' @export
-automaticClusterUpdateGet <- function(client, etag = NULL) {
+get_automatic_cluster_update <- function(client, etag = NULL) {
     query <- list(etag = etag)
     client$do("GET", "/api/2.0/settings/types/automatic_cluster_update/names/default",
         query = query)
 }
+
+#' @rdname get_automatic_cluster_update
+#' @export
+automaticClusterUpdateGet <- get_automatic_cluster_update
 #' Update the automatic cluster update setting.
 #'
 #' Updates the automatic cluster update setting for the workspace. A fresh etag
@@ -30,12 +35,17 @@
 #' @param field_mask Required. Field mask is required to be passed into the PATCH request.
 #' @param setting Required. This field has no description yet.
 #'
-#' @rdname automaticClusterUpdateUpdate
+#' @rdname update_automatic_cluster
+#' @aliases automaticClusterUpdateUpdate
 #' @export
-automaticClusterUpdateUpdate <- function(client, allow_missing, setting, field_mask) {
+update_automatic_cluster <- function(client, allow_missing, setting, field_mask) {
     body <- list(allow_missing = allow_missing, field_mask = field_mask, setting = setting)
     client$do("PATCH", "/api/2.0/settings/types/automatic_cluster_update/names/default",
         body = body)
 }
+#' @rdname update_automatic_cluster
+#' @export
+automaticClusterUpdateUpdate <- update_automatic_cluster
+
diff --git a/R/catalogs.R b/R/catalogs.R
index c0fe0f3b..4ea3573a 100755
--- a/R/catalogs.R
+++ b/R/catalogs.R
@@ -18,15 +18,20 @@ NULL
 #' @param share_name The name of the share under the share provider.
 #' @param storage_root Storage root URL for managed tables within catalog.
 #'
-#' @rdname catalogsCreate
+#' @rdname create_catalog
+#' @aliases catalogsCreate
 #' @export
-catalogsCreate <- function(client, name, comment = NULL, connection_name = NULL,
+create_catalog <- function(client, name, comment = NULL, connection_name = NULL,
     options = NULL, properties = NULL, provider_name = NULL, share_name = NULL,
     storage_root = NULL) {
     body <- list(comment = comment, connection_name = connection_name, name = name,
         options = options, properties = properties, provider_name = provider_name,
         share_name = share_name, storage_root = storage_root)
     client$do("POST", "/api/2.1/unity-catalog/catalogs", body = body)
 }
+
+#' @rdname create_catalog
+#' @export
+catalogsCreate <- create_catalog
 #' Delete a catalog.
 #'
 #' Deletes the catalog that matches the supplied name. The caller must be a
@@ -36,13 +41,18 @@
 #' @param force Force deletion even if the catalog is not empty.
 #' @param name Required. The name of the catalog.
 #'
-#' @rdname catalogsDelete
+#' @rdname delete_catalog
+#' @aliases catalogsDelete
 #' @export
-catalogsDelete <- function(client, name, force = NULL) {
+delete_catalog <- function(client, name, force = NULL) {
     query <- list(force = force)
     client$do("DELETE", paste("/api/2.1/unity-catalog/catalogs/", name, sep = ""),
         query = query)
 }
+
+#' @rdname delete_catalog
+#' @export
+catalogsDelete <- delete_catalog
 #' Get a catalog.
 #'
 #' Gets the specified catalog in a metastore. The caller must be a metastore
@@ -53,12 +63,17 @@
 #' @param include_browse Whether to include catalogs in the response for which the principal can only access selective metadata for.
 #' @param name Required. The name of the catalog.
 #'
-#' @rdname catalogsGet
+#' @rdname get_catalog
+#' @aliases catalogsGet
 #' @export
-catalogsGet <- function(client, name, include_browse = NULL) {
+get_catalog <- function(client, name, include_browse = NULL) {
     query <- list(include_browse = include_browse)
     client$do("GET", paste("/api/2.1/unity-catalog/catalogs/", name, sep = ""), query = query)
 }
+
+#' @rdname get_catalog
+#' @export
+catalogsGet <- get_catalog
 #' List catalogs.
 #'
 #' Gets an array of catalogs in the metastore. If the caller is the metastore
@@ -72,15 +87,20 @@
 #'
 #' @return `data.frame` with all of the response pages.
 #'
-#' @rdname catalogsList
+#' @rdname list_catalogs
+#' @aliases catalogsList
 #' @export
-catalogsList <- function(client, include_browse = NULL) {
+list_catalogs <- function(client, include_browse = NULL) {
     query <- list(include_browse = include_browse)
 
     json <- client$do("GET", "/api/2.1/unity-catalog/catalogs", query = query)
     return(json$catalogs)
 
 }
+
+#' @rdname list_catalogs
+#' @export
+catalogsList <- list_catalogs
 #' Update a catalog.
 #'
 #' Updates the catalog that matches the supplied name. The caller must be either
@@ -96,9 +116,10 @@
 #' @param owner Username of current owner of catalog.
 #' @param properties A map of key-value properties attached to the securable.
 #'
-#' @rdname catalogsUpdate
+#' @rdname update_catalog
+#' @aliases catalogsUpdate
 #' @export
-catalogsUpdate <- function(client, name, comment = NULL, enable_predictive_optimization = NULL,
+update_catalog <- function(client, name, comment = NULL, enable_predictive_optimization = NULL,
     isolation_mode = NULL, new_name = NULL, owner = NULL, properties = NULL) {
     body <- list(comment = comment, enable_predictive_optimization = enable_predictive_optimization,
         isolation_mode = isolation_mode, new_name = new_name, owner = owner, properties = properties)
     client$do("PATCH", paste("/api/2.1/unity-catalog/catalogs/", name, sep = ""),
@@ -106,6 +127,10 @@ catalogsUpdate <- function(client, name, comment = NULL, enable_predictive_optim
         body = body)
 }
+#' @rdname update_catalog
+#' @export
+catalogsUpdate <- update_catalog
+
diff --git a/R/clean_rooms.R b/R/clean_rooms.R
index 976343f3..f7e6d4ec 100755
--- a/R/clean_rooms.R
+++ b/R/clean_rooms.R
@@ -13,12 +13,17 @@ NULL
 #' @param name Required. Name of the clean room.
 #' @param remote_detailed_info Required. Central clean room details.
 #'
-#' @rdname cleanRoomsCreate
+#' @rdname create_clean_room
+#' @aliases cleanRoomsCreate
 #' @export
-cleanRoomsCreate <- function(client, name, remote_detailed_info, comment = NULL) {
+create_clean_room <- function(client, name, remote_detailed_info, comment = NULL) {
     body <- list(comment = comment, name = name, remote_detailed_info = remote_detailed_info)
     client$do("POST", "/api/2.1/unity-catalog/clean-rooms", body = body)
 }
+
+#' @rdname create_clean_room
+#' @export
+cleanRoomsCreate <- create_clean_room
 #' Delete a clean room.
 #'
 #' Deletes a data object clean room from the metastore. The caller must be an
@@ -27,12 +32,17 @@
 #'
 #' @param name Required. The name of the clean room.
 #'
-#' @rdname cleanRoomsDelete
+#' @rdname delete_clean_room
+#' @aliases cleanRoomsDelete
 #' @export
-cleanRoomsDelete <- function(client, name) {
+delete_clean_room <- function(client, name) {
     client$do("DELETE", paste("/api/2.1/unity-catalog/clean-rooms/", name, sep = ""))
 }
+
+#' @rdname delete_clean_room
+#' @export
+cleanRoomsDelete <- delete_clean_room
 #' Get a clean room.
 #'
 #' Gets a data object clean room from the metastore. The caller must be a
@@ -42,13 +52,18 @@
 #' @param include_remote_details Whether to include remote details (central) on the clean room.
 #' @param name Required. The name of the clean room.
 #'
-#' @rdname cleanRoomsGet
+#' @rdname get_clean_room
+#' @aliases cleanRoomsGet
 #' @export
-cleanRoomsGet <- function(client, name, include_remote_details = NULL) {
+get_clean_room <- function(client, name, include_remote_details = NULL) {
     query <- list(include_remote_details = include_remote_details)
     client$do("GET", paste("/api/2.1/unity-catalog/clean-rooms/", name, sep = ""),
         query = query)
 }
+
+#' @rdname get_clean_room
+#' @export
+cleanRoomsGet <- get_clean_room
 #' List clean rooms.
 #'
 #' Gets an array of data object clean rooms from the metastore. The caller must
@@ -61,9 +76,10 @@
 #'
 #' @return `data.frame` with all of the response pages.
 #'
-#' @rdname cleanRoomsList
+#' @rdname list_clean_rooms
+#' @aliases cleanRoomsList
 #' @export
-cleanRoomsList <- function(client, max_results = NULL, page_token = NULL) {
+list_clean_rooms <- function(client, max_results = NULL, page_token = NULL) {
     query <- list(max_results = max_results, page_token = page_token)
 
     results <- data.frame()
@@ -82,6 +98,10 @@ cleanRoomsList <- function(client, max_results = NULL, page_token = NULL) {
     return(results)
 }
+
+#' @rdname list_clean_rooms
+#' @export
+cleanRoomsList <- list_clean_rooms
 #' Update a clean room.
 #'
 #' Updates the clean room with the changes and data objects in the request. The
@@ -106,15 +126,20 @@
 #' @param name Required. The name of the clean room.
 #' @param owner Username of current owner of clean room.
 #'
-#' @rdname cleanRoomsUpdate
+#' @rdname update_clean_room
+#' @aliases cleanRoomsUpdate
 #' @export
-cleanRoomsUpdate <- function(client, name, catalog_updates = NULL, comment = NULL,
+update_clean_room <- function(client, name, catalog_updates = NULL, comment = NULL,
     owner = NULL) {
     body <- list(catalog_updates = catalog_updates, comment = comment, owner = owner)
     client$do("PATCH", paste("/api/2.1/unity-catalog/clean-rooms/", name, sep = ""),
         body = body)
 }
+#' @rdname update_clean_room
+#' @export
+cleanRoomsUpdate <- update_clean_room
+
diff --git a/R/cluster_policies.R b/R/cluster_policies.R
index 9f115daa..04d3f6c3 100755
--- a/R/cluster_policies.R
+++ b/R/cluster_policies.R
@@ -16,9 +16,10 @@ NULL
 #' @param policy_family_definition_overrides Policy definition JSON document expressed in [Databricks Policy Definition Language](https://docs.databricks.com/administration-guide/clusters/policy-definition.html).
 #' @param policy_family_id ID of the policy family.
 #'
-#' @rdname clusterPoliciesCreate
+#' @rdname create_cluster_policy
+#' @aliases clusterPoliciesCreate
 #' @export
-clusterPoliciesCreate <- function(client, name, definition = NULL, description = NULL,
+create_cluster_policy <- function(client, name, definition = NULL, description = NULL,
     libraries = NULL, max_clusters_per_user = NULL, policy_family_definition_overrides = NULL,
     policy_family_id = NULL) {
     body <- list(definition = definition, description = description, libraries = libraries,
@@ -26,6 +27,10 @@ clusterPoliciesCreate <- function(client, name, definition = NULL, description =
         policy_family_id = policy_family_id)
     client$do("POST", "/api/2.0/policies/clusters/create", body = body)
 }
+
+#' @rdname create_cluster_policy
+#' @export
+clusterPoliciesCreate <- create_cluster_policy
 #' Delete a cluster policy.
 #'
 #' Delete a policy for a cluster. Clusters governed by this policy can still
@@ -34,12 +39,17 @@
 #'
 #' @param policy_id Required. The ID of the policy to delete.
 #'
-#' @rdname clusterPoliciesDelete
+#' @rdname delete_cluster_policy
+#' @aliases clusterPoliciesDelete
 #' @export
-clusterPoliciesDelete <- function(client, policy_id) {
+delete_cluster_policy <- function(client, policy_id) {
     body <- list(policy_id = policy_id)
     client$do("POST", "/api/2.0/policies/clusters/delete", body = body)
 }
+
+#' @rdname delete_cluster_policy
+#' @export
+clusterPoliciesDelete <- delete_cluster_policy
 #' Update a cluster policy.
 #'
 #' Update an existing policy for cluster. This operation may make some clusters
@@ -55,9 +65,10 @@
 #' @param policy_family_id ID of the policy family.
 #' @param policy_id Required. The ID of the policy to update.
 #'
-#' @rdname clusterPoliciesEdit
+#' @rdname edit_cluster_policy
+#' @aliases clusterPoliciesEdit
 #' @export
-clusterPoliciesEdit <- function(client, policy_id, name, definition = NULL, description = NULL,
+edit_cluster_policy <- function(client, policy_id, name, definition = NULL, description = NULL,
     libraries = NULL, max_clusters_per_user = NULL, policy_family_definition_overrides = NULL,
     policy_family_id = NULL) {
     body <- list(definition = definition, description = description, libraries = libraries,
         max_clusters_per_user = max_clusters_per_user, policy_family_definition_overrides = policy_family_definition_overrides,
@@ -65,6 +76,10 @@ clusterPoliciesEdit <- function(client, policy_id, name, definition = NULL, desc
         policy_family_id = policy_family_id, policy_id = policy_id)
     client$do("POST", "/api/2.0/policies/clusters/edit", body = body)
 }
+
+#' @rdname edit_cluster_policy
+#' @export
+clusterPoliciesEdit <- edit_cluster_policy
 #' Get a cluster policy.
 #'
 #' Get a cluster policy entity. Creation and editing is available to admins
@@ -73,12 +88,17 @@
 #'
 #' @param policy_id Required. Canonical unique identifier for the cluster policy.
 #'
-#' @rdname clusterPoliciesGet
+#' @rdname get_cluster_policy
+#' @aliases clusterPoliciesGet
 #' @export
-clusterPoliciesGet <- function(client, policy_id) {
+get_cluster_policy <- function(client, policy_id) {
     query <- list(policy_id = policy_id)
     client$do("GET", "/api/2.0/policies/clusters/get", query = query)
 }
+
+#' @rdname get_cluster_policy
+#' @export
+clusterPoliciesGet <- get_cluster_policy
 #' Get cluster policy permission levels.
 #'
 #' Gets the permission levels that a user can have on an object.
@@ -86,13 +106,18 @@
 #'
 #' @param cluster_policy_id Required. The cluster policy for which to get or manage permissions.
 #'
-#' @rdname clusterPoliciesGetPermissionLevels
+#' @rdname get_cluster_policy_permission_levels
+#' @aliases clusterPoliciesGetPermissionLevels
 #' @export
-clusterPoliciesGetPermissionLevels <- function(client, cluster_policy_id) {
+get_cluster_policy_permission_levels <- function(client, cluster_policy_id) {
     client$do("GET", paste("/api/2.0/permissions/cluster-policies/", cluster_policy_id,
         "/permissionLevels", , sep = ""))
 }
+
+#' @rdname get_cluster_policy_permission_levels
+#' @export
+clusterPoliciesGetPermissionLevels <- get_cluster_policy_permission_levels
 #' Get cluster policy permissions.
 #'
 #' Gets the permissions of a cluster policy. Cluster policies can inherit
@@ -101,13 +126,18 @@
 #'
 #' @param cluster_policy_id Required. The cluster policy for which to get or manage permissions.
 #'
-#' @rdname clusterPoliciesGetPermissions
+#' @rdname get_cluster_policy_permissions
+#' @aliases clusterPoliciesGetPermissions
 #' @export
-clusterPoliciesGetPermissions <- function(client, cluster_policy_id) {
+get_cluster_policy_permissions <- function(client, cluster_policy_id) {
     client$do("GET", paste("/api/2.0/permissions/cluster-policies/", cluster_policy_id,
         sep = ""))
 }
+
+#' @rdname get_cluster_policy_permissions
+#' @export
+clusterPoliciesGetPermissions <- get_cluster_policy_permissions
 #' List cluster policies.
 #'
 #' Returns a list of policies accessible by the requesting user.
@@ -118,15 +148,20 @@
 #'
 #' @return `data.frame` with all of the response pages.
 #'
-#' @rdname clusterPoliciesList
+#' @rdname list_cluster_policies
+#' @aliases clusterPoliciesList
 #' @export
-clusterPoliciesList <- function(client, sort_column = NULL, sort_order = NULL) {
+list_cluster_policies <- function(client, sort_column = NULL, sort_order = NULL) {
     query <- list(sort_column = sort_column, sort_order = sort_order)
 
     json <- client$do("GET", "/api/2.0/policies/clusters/list", query = query)
     return(json$policies)
 
 }
+
+#' @rdname list_cluster_policies
+#' @export
+clusterPoliciesList <- list_cluster_policies
 #' Set cluster policy permissions.
 #'
 #' Sets permissions on a cluster policy. Cluster policies can inherit
@@ -136,13 +171,18 @@
 #' @param access_control_list This field has no description yet.
 #' @param cluster_policy_id Required. The cluster policy for which to get or manage permissions.
 #'
-#' @rdname clusterPoliciesSetPermissions
+#' @rdname set_cluster_policy_permissions
+#' @aliases clusterPoliciesSetPermissions
 #' @export
-clusterPoliciesSetPermissions <- function(client, cluster_policy_id, access_control_list = NULL) {
+set_cluster_policy_permissions <- function(client, cluster_policy_id, access_control_list = NULL) {
     body <- list(access_control_list = access_control_list)
     client$do("PUT", paste("/api/2.0/permissions/cluster-policies/", cluster_policy_id,
         sep = ""), body = body)
 }
+
+#' @rdname set_cluster_policy_permissions
+#' @export
+clusterPoliciesSetPermissions <- set_cluster_policy_permissions
 #' Update cluster policy permissions.
 #'
 #' Updates the permissions on a cluster policy. Cluster policies can inherit
@@ -152,14 +192,19 @@
 #' @param access_control_list This field has no description yet.
 #' @param cluster_policy_id Required. The cluster policy for which to get or manage permissions.
The cluster policy for which to get or manage permissions. #' -#' @rdname clusterPoliciesUpdatePermissions +#' @rdname update_cluster_policy_permissions +#' @alias clusterPoliciesUpdatePermissions #' @export -clusterPoliciesUpdatePermissions <- function(client, cluster_policy_id, access_control_list = NULL) { +update_cluster_policy_permissions <- function(client, cluster_policy_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PATCH", paste("/api/2.0/permissions/cluster-policies/", cluster_policy_id, sep = ""), body = body) } +#' @rdname update_cluster_policy_permissions +#' @export +clusterPoliciesUpdatePermissions <- update_cluster_policy_permissions + diff --git a/R/clusters.R b/R/clusters.R index c4bee265..efff85bd 100755 --- a/R/clusters.R +++ b/R/clusters.R @@ -13,12 +13,17 @@ NULL #' @param cluster_id Required. . #' @param owner_username Required. New owner of the cluster_id after this RPC. #' -#' @rdname clustersChangeOwner +#' @rdname change_cluster_owner +#' @alias clustersChangeOwner #' @export -clustersChangeOwner <- function(client, cluster_id, owner_username) { +change_cluster_owner <- function(client, cluster_id, owner_username) { body <- list(cluster_id = cluster_id, owner_username = owner_username) client$do("POST", "/api/2.0/clusters/change-owner", body = body) } + +#' @rdname change_cluster_owner +#' @export +clustersChangeOwner <- change_cluster_owner #' Create new cluster. #' #' Creates a new Spark cluster. This method will acquire new instances from the @@ -60,9 +65,10 @@ clustersChangeOwner <- function(client, cluster_id, owner_username) { #' @param ssh_public_keys SSH public key contents that will be added to each Spark node in this cluster. #' @param workload_type This field has no description yet. #' -#' @rdname clustersCreate +#' @rdname create_cluster +#' @alias clustersCreate #' @export -clustersCreate <- function(client, spark_version, apply_policy_default_values = NULL, +create_cluster <- function(client, spark_version, apply_policy_default_values = NULL, autoscale = NULL, autotermination_minutes = NULL, aws_attributes = NULL, azure_attributes = NULL, cluster_log_conf = NULL, cluster_name = NULL, cluster_source = NULL, custom_tags = NULL, data_security_mode = NULL, docker_image = NULL, driver_instance_pool_id = NULL, @@ -83,6 +89,10 @@ clustersCreate <- function(client, spark_version, apply_policy_default_values = spark_version = spark_version, ssh_public_keys = ssh_public_keys, workload_type = workload_type) client$do("POST", "/api/2.0/clusters/create", body = body) } + +#' @rdname create_cluster +#' @export +clustersCreate <- create_cluster #' Terminate cluster. #' #' Terminates the Spark cluster with the specified ID. The cluster is removed @@ -93,12 +103,17 @@ clustersCreate <- function(client, spark_version, apply_policy_default_values = #' #' @param cluster_id Required. The cluster to be terminated. #' -#' @rdname clustersDelete +#' @rdname delete_cluster +#' @alias clustersDelete #' @export -clustersDelete <- function(client, cluster_id) { +delete_cluster <- function(client, cluster_id) { body <- list(cluster_id = cluster_id) client$do("POST", "/api/2.0/clusters/delete", body = body) } + +#' @rdname delete_cluster +#' @export +clustersDelete <- delete_cluster #' Update cluster configuration. 
#' #' Updates the configuration of a cluster to match the provided attributes and @@ -145,9 +160,10 @@ clustersDelete <- function(client, cluster_id) { #' @param ssh_public_keys SSH public key contents that will be added to each Spark node in this cluster. #' @param workload_type This field has no description yet. #' -#' @rdname clustersEdit +#' @rdname edit_cluster +#' @alias clustersEdit #' @export -clustersEdit <- function(client, cluster_id, spark_version, apply_policy_default_values = NULL, +edit_cluster <- function(client, cluster_id, spark_version, apply_policy_default_values = NULL, autoscale = NULL, autotermination_minutes = NULL, aws_attributes = NULL, azure_attributes = NULL, cluster_log_conf = NULL, cluster_name = NULL, cluster_source = NULL, custom_tags = NULL, data_security_mode = NULL, docker_image = NULL, driver_instance_pool_id = NULL, @@ -168,6 +184,10 @@ clustersEdit <- function(client, cluster_id, spark_version, apply_policy_default spark_version = spark_version, ssh_public_keys = ssh_public_keys, workload_type = workload_type) client$do("POST", "/api/2.0/clusters/edit", body = body) } + +#' @rdname edit_cluster +#' @export +clustersEdit <- edit_cluster #' List cluster activity events. #' #' Retrieves a list of events about the activity of a cluster. This API is @@ -185,9 +205,10 @@ clustersEdit <- function(client, cluster_id, spark_version, apply_policy_default #' #' @return `data.frame` with all of the response pages. #' -#' @rdname clustersEvents +#' @rdname events_cluster +#' @alias clustersEvents #' @export -clustersEvents <- function(client, cluster_id, end_time = NULL, event_types = NULL, +events_cluster <- function(client, cluster_id, end_time = NULL, event_types = NULL, limit = NULL, offset = NULL, order = NULL, start_time = NULL) { body <- list(cluster_id = cluster_id, end_time = end_time, event_types = event_types, limit = limit, offset = offset, order = order, start_time = start_time) @@ -208,6 +229,10 @@ clustersEvents <- function(client, cluster_id, end_time = NULL, event_types = NU return(results) } + +#' @rdname events_cluster +#' @export +clustersEvents <- events_cluster #' Get cluster info. #' #' Retrieves the information for a cluster given its identifier. Clusters can be @@ -216,12 +241,17 @@ clustersEvents <- function(client, cluster_id, end_time = NULL, event_types = NU #' #' @param cluster_id Required. The cluster about which to retrieve information. #' -#' @rdname clustersGet +#' @rdname get_cluster +#' @alias clustersGet #' @export -clustersGet <- function(client, cluster_id) { +get_cluster <- function(client, cluster_id) { query <- list(cluster_id = cluster_id) client$do("GET", "/api/2.0/clusters/get", query = query) } + +#' @rdname get_cluster +#' @export +clustersGet <- get_cluster #' Get cluster permission levels. #' #' Gets the permission levels that a user can have on an object. @@ -229,13 +259,18 @@ clustersGet <- function(client, cluster_id) { #' #' @param cluster_id Required. The cluster for which to get or manage permissions. #' -#' @rdname clustersGetPermissionLevels +#' @rdname get_cluster_permission_levels +#' @alias clustersGetPermissionLevels #' @export -clustersGetPermissionLevels <- function(client, cluster_id) { +get_cluster_permission_levels <- function(client, cluster_id) { client$do("GET", paste("/api/2.0/permissions/clusters/", cluster_id, "/permissionLevels", , sep = "")) } + +#' @rdname get_cluster_permission_levels +#' @export +clustersGetPermissionLevels <- get_cluster_permission_levels #' Get cluster permissions. 
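As its `@return` tag says, events_cluster() keeps paging until the events endpoint is exhausted and returns one `data.frame`, so no page-token loop is needed at the call site. Sketch with a placeholder cluster ID:

    evts <- events_cluster(client, cluster_id = "0123-456789-abcdefgh",
      event_types = c("RUNNING", "TERMINATED"), limit = 25)
    nrow(evts)  # rows from every page, not just the first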
#' #' Gets the permissions of a cluster. Clusters can inherit permissions from @@ -244,12 +279,17 @@ clustersGetPermissionLevels <- function(client, cluster_id) { #' #' @param cluster_id Required. The cluster for which to get or manage permissions. #' -#' @rdname clustersGetPermissions +#' @rdname get_cluster_permissions +#' @alias clustersGetPermissions #' @export -clustersGetPermissions <- function(client, cluster_id) { +get_cluster_permissions <- function(client, cluster_id) { client$do("GET", paste("/api/2.0/permissions/clusters/", cluster_id, sep = "")) } + +#' @rdname get_cluster_permissions +#' @export +clustersGetPermissions <- get_cluster_permissions #' List all clusters. #' #' Return information about all pinned clusters, active clusters, up to 200 of @@ -267,37 +307,52 @@ clustersGetPermissions <- function(client, cluster_id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname clustersList +#' @rdname list_clusters +#' @alias clustersList #' @export -clustersList <- function(client, can_use_client = NULL) { +list_clusters <- function(client, can_use_client = NULL) { query <- list(can_use_client = can_use_client) json <- client$do("GET", "/api/2.0/clusters/list", query = query) return(json$clusters) } + +#' @rdname list_clusters +#' @export +clustersList <- list_clusters #' List node types. #' #' Returns a list of supported Spark node types. These node types can be used to #' launch a cluster. #' @param client Required. Instance of DatabricksClient() #' -#' @rdname clustersListNodeTypes +#' @rdname list_cluster_node_types +#' @alias clustersListNodeTypes #' @export -clustersListNodeTypes <- function(client) { +list_cluster_node_types <- function(client) { client$do("GET", "/api/2.0/clusters/list-node-types") } + +#' @rdname list_cluster_node_types +#' @export +clustersListNodeTypes <- list_cluster_node_types #' List availability zones. #' #' Returns a list of availability zones where clusters can be created in (For #' example, us-west-2a). These zones can be used to launch a cluster. #' @param client Required. Instance of DatabricksClient() #' -#' @rdname clustersListZones +#' @rdname list_cluster_zones +#' @alias clustersListZones #' @export -clustersListZones <- function(client) { +list_cluster_zones <- function(client) { client$do("GET", "/api/2.0/clusters/list-zones") } + +#' @rdname list_cluster_zones +#' @export +clustersListZones <- list_cluster_zones #' Permanently delete cluster. #' #' Permanently deletes a Spark cluster. This cluster is terminated and resources @@ -310,12 +365,17 @@ clustersListZones <- function(client) { #' #' @param cluster_id Required. The cluster to be deleted. #' -#' @rdname clustersPermanentDelete +#' @rdname permanent_cluster_delete +#' @alias clustersPermanentDelete #' @export -clustersPermanentDelete <- function(client, cluster_id) { +permanent_cluster_delete <- function(client, cluster_id) { body <- list(cluster_id = cluster_id) client$do("POST", "/api/2.0/clusters/permanent-delete", body = body) } + +#' @rdname permanent_cluster_delete +#' @export +clustersPermanentDelete <- permanent_cluster_delete #' Pin cluster. #' #' Pinning a cluster ensures that the cluster will always be returned by the @@ -325,12 +385,17 @@ clustersPermanentDelete <- function(client, cluster_id) { #' #' @param cluster_id Required. . 
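list_clusters() likewise unwraps the response envelope (the json$clusters above) before returning, so the result filters like any other `data.frame`. Column names here are assumed from the clusters API schema:

    cls <- list_clusters(client)
    cls[cls$state == "RUNNING", "cluster_id"]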
#' -#' @rdname clustersPin +#' @rdname pin_cluster +#' @alias clustersPin #' @export -clustersPin <- function(client, cluster_id) { +pin_cluster <- function(client, cluster_id) { body <- list(cluster_id = cluster_id) client$do("POST", "/api/2.0/clusters/pin", body = body) } + +#' @rdname pin_cluster +#' @export +clustersPin <- pin_cluster #' Resize cluster. #' #' Resizes a cluster to have a desired number of workers. This will fail unless @@ -341,12 +406,17 @@ clustersPin <- function(client, cluster_id) { #' @param cluster_id Required. The cluster to be resized. #' @param num_workers Number of worker nodes that this cluster should have. #' -#' @rdname clustersResize +#' @rdname resize_cluster +#' @alias clustersResize #' @export -clustersResize <- function(client, cluster_id, autoscale = NULL, num_workers = NULL) { +resize_cluster <- function(client, cluster_id, autoscale = NULL, num_workers = NULL) { body <- list(autoscale = autoscale, cluster_id = cluster_id, num_workers = num_workers) client$do("POST", "/api/2.0/clusters/resize", body = body) } + +#' @rdname resize_cluster +#' @export +clustersResize <- resize_cluster #' Restart cluster. #' #' Restarts a Spark cluster with the supplied ID. If the cluster is not @@ -356,12 +426,17 @@ clustersResize <- function(client, cluster_id, autoscale = NULL, num_workers = N #' @param cluster_id Required. The cluster to be started. #' @param restart_user . #' -#' @rdname clustersRestart +#' @rdname restart_cluster +#' @alias clustersRestart #' @export -clustersRestart <- function(client, cluster_id, restart_user = NULL) { +restart_cluster <- function(client, cluster_id, restart_user = NULL) { body <- list(cluster_id = cluster_id, restart_user = restart_user) client$do("POST", "/api/2.0/clusters/restart", body = body) } + +#' @rdname restart_cluster +#' @export +clustersRestart <- restart_cluster #' Set cluster permissions. #' #' Sets permissions on a cluster. Clusters can inherit permissions from their @@ -371,24 +446,34 @@ clustersRestart <- function(client, cluster_id, restart_user = NULL) { #' @param access_control_list This field has no description yet. #' @param cluster_id Required. The cluster for which to get or manage permissions. #' -#' @rdname clustersSetPermissions +#' @rdname set_cluster_permissions +#' @alias clustersSetPermissions #' @export -clustersSetPermissions <- function(client, cluster_id, access_control_list = NULL) { +set_cluster_permissions <- function(client, cluster_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", paste("/api/2.0/permissions/clusters/", cluster_id, sep = ""), body = body) } + +#' @rdname set_cluster_permissions +#' @export +clustersSetPermissions <- set_cluster_permissions #' List available Spark versions. #' #' Returns the list of available Spark versions. These versions can be used to #' launch a cluster. #' @param client Required. Instance of DatabricksClient() #' -#' @rdname clustersSparkVersions +#' @rdname spark_cluster_versions +#' @alias clustersSparkVersions #' @export -clustersSparkVersions <- function(client) { +spark_cluster_versions <- function(client) { client$do("GET", "/api/2.0/clusters/spark-versions") } + +#' @rdname spark_cluster_versions +#' @export +clustersSparkVersions <- spark_cluster_versions #' Start terminated cluster. #' #' Starts a terminated Spark cluster with the supplied ID. This works similar to @@ -403,12 +488,17 @@ clustersSparkVersions <- function(client) { #' #' @param cluster_id Required. 
The cluster to be started. #' -#' @rdname clustersStart +#' @rdname start_cluster +#' @alias clustersStart #' @export -clustersStart <- function(client, cluster_id) { +start_cluster <- function(client, cluster_id) { body <- list(cluster_id = cluster_id) client$do("POST", "/api/2.0/clusters/start", body = body) } + +#' @rdname start_cluster +#' @export +clustersStart <- start_cluster #' Unpin cluster. #' #' Unpinning a cluster will allow the cluster to eventually be removed from the @@ -418,12 +508,17 @@ clustersStart <- function(client, cluster_id) { #' #' @param cluster_id Required. . #' -#' @rdname clustersUnpin +#' @rdname unpin_cluster +#' @alias clustersUnpin #' @export -clustersUnpin <- function(client, cluster_id) { +unpin_cluster <- function(client, cluster_id) { body <- list(cluster_id = cluster_id) client$do("POST", "/api/2.0/clusters/unpin", body = body) } + +#' @rdname unpin_cluster +#' @export +clustersUnpin <- unpin_cluster #' Update cluster permissions. #' #' Updates the permissions on a cluster. Clusters can inherit permissions from @@ -433,14 +528,19 @@ clustersUnpin <- function(client, cluster_id) { #' @param access_control_list This field has no description yet. #' @param cluster_id Required. The cluster for which to get or manage permissions. #' -#' @rdname clustersUpdatePermissions +#' @rdname update_cluster_permissions +#' @alias clustersUpdatePermissions #' @export -clustersUpdatePermissions <- function(client, cluster_id, access_control_list = NULL) { +update_cluster_permissions <- function(client, cluster_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PATCH", paste("/api/2.0/permissions/clusters/", cluster_id, sep = ""), body = body) } +#' @rdname update_cluster_permissions +#' @export +clustersUpdatePermissions <- update_cluster_permissions + #' Create new cluster. #' #' Creates a new Spark cluster. This method will acquire new instances from the @@ -491,9 +591,9 @@ clustersUpdatePermissions <- function(client, cluster_id, access_control_list = #' @param ssh_public_keys SSH public key contents that will be added to each Spark node in this cluster. #' @param workload_type This field has no description yet. #' -#' @rdname clustersCreateAndWait +#' @rdname create_cluster_and_wait #' @export -clustersCreateAndWait <- function(client, spark_version, apply_policy_default_values = NULL, +create_cluster_and_wait <- function(client, spark_version, apply_policy_default_values = NULL, autoscale = NULL, autotermination_minutes = NULL, aws_attributes = NULL, azure_attributes = NULL, cluster_log_conf = NULL, cluster_name = NULL, cluster_source = NULL, custom_tags = NULL, data_security_mode = NULL, docker_image = NULL, driver_instance_pool_id = NULL, @@ -570,9 +670,9 @@ clustersCreateAndWait <- function(client, spark_version, apply_policy_default_va #' #' @param cluster_id Required. The cluster to be terminated. 
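The blocking variant keeps the `timeout` and `callback` arguments, with cli_reporter printing progress while the SDK polls. A sketch only: the Spark version and node type are placeholders (valid values come from spark_cluster_versions() and list_cluster_node_types()), and node_type_id/num_workers are assumed to be among the optional fields elided above:

    cl <- create_cluster_and_wait(client,
      cluster_name  = "r-sdk-demo",
      spark_version = "13.3.x-scala2.12",  # placeholder
      node_type_id  = "i3.xlarge",         # placeholder
      num_workers   = 1)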
#' -#' @rdname clustersDeleteAndWait +#' @rdname delete_cluster_and_wait #' @export -clustersDeleteAndWait <- function(client, cluster_id, timeout = 20, callback = cli_reporter) { +delete_cluster_and_wait <- function(client, cluster_id, timeout = 20, callback = cli_reporter) { body <- list(cluster_id = cluster_id) op_response <- client$do("POST", "/api/2.0/clusters/delete", body = body) started <- as.numeric(Sys.time()) @@ -666,9 +766,9 @@ clustersDeleteAndWait <- function(client, cluster_id, timeout = 20, callback = c #' @param ssh_public_keys SSH public key contents that will be added to each Spark node in this cluster. #' @param workload_type This field has no description yet. #' -#' @rdname clustersEditAndWait +#' @rdname edit_cluster_and_wait #' @export -clustersEditAndWait <- function(client, cluster_id, spark_version, apply_policy_default_values = NULL, +edit_cluster_and_wait <- function(client, cluster_id, spark_version, apply_policy_default_values = NULL, autoscale = NULL, autotermination_minutes = NULL, aws_attributes = NULL, azure_attributes = NULL, cluster_log_conf = NULL, cluster_name = NULL, cluster_source = NULL, custom_tags = NULL, data_security_mode = NULL, docker_image = NULL, driver_instance_pool_id = NULL, @@ -753,9 +853,9 @@ clustersEditAndWait <- function(client, cluster_id, spark_version, apply_policy_ #' @param cluster_id Required. The cluster to be resized. #' @param num_workers Number of worker nodes that this cluster should have. #' -#' @rdname clustersResizeAndWait +#' @rdname resize_cluster_and_wait #' @export -clustersResizeAndWait <- function(client, cluster_id, autoscale = NULL, num_workers = NULL, +resize_cluster_and_wait <- function(client, cluster_id, autoscale = NULL, num_workers = NULL, timeout = 20, callback = cli_reporter) { body <- list(autoscale = autoscale, cluster_id = cluster_id, num_workers = num_workers) op_response <- client$do("POST", "/api/2.0/clusters/resize", body = body) @@ -813,9 +913,9 @@ clustersResizeAndWait <- function(client, cluster_id, autoscale = NULL, num_work #' @param cluster_id Required. The cluster to be started. #' @param restart_user . #' -#' @rdname clustersRestartAndWait +#' @rdname restart_cluster_and_wait #' @export -clustersRestartAndWait <- function(client, cluster_id, restart_user = NULL, timeout = 20, +restart_cluster_and_wait <- function(client, cluster_id, restart_user = NULL, timeout = 20, callback = cli_reporter) { body <- list(cluster_id = cluster_id, restart_user = restart_user) op_response <- client$do("POST", "/api/2.0/clusters/restart", body = body) @@ -880,9 +980,9 @@ clustersRestartAndWait <- function(client, cluster_id, restart_user = NULL, time #' #' @param cluster_id Required. The cluster to be started. #' -#' @rdname clustersStartAndWait +#' @rdname start_cluster_and_wait #' @export -clustersStartAndWait <- function(client, cluster_id, timeout = 20, callback = cli_reporter) { +start_cluster_and_wait <- function(client, cluster_id, timeout = 20, callback = cli_reporter) { body <- list(cluster_id = cluster_id) op_response <- client$do("POST", "/api/2.0/clusters/start", body = body) started <- as.numeric(Sys.time()) diff --git a/R/command_execution.R b/R/command_execution.R index 371050d5..bf54e76c 100755 --- a/R/command_execution.R +++ b/R/command_execution.R @@ -14,13 +14,18 @@ NULL #' @param command_id This field has no description yet. #' @param context_id This field has no description yet. 
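Note that, unlike the plain methods, the *AndWait helpers are renamed outright with no camelCase alias, so call sites must adopt the new names. Reusing `cl` from the sketch above:

    # previously clustersDeleteAndWait(client, cluster_id = ...)
    delete_cluster_and_wait(client, cluster_id = cl$cluster_id)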
#' -#' @rdname commandExecutionCancel +#' @rdname cancel_command_execution +#' @alias commandExecutionCancel #' @export -commandExecutionCancel <- function(client, cluster_id = NULL, command_id = NULL, +cancel_command_execution <- function(client, cluster_id = NULL, command_id = NULL, context_id = NULL) { body <- list(clusterId = cluster_id, commandId = command_id, contextId = context_id) client$do("POST", "/api/1.2/commands/cancel", body = body) } + +#' @rdname cancel_command_execution +#' @export +commandExecutionCancel <- cancel_command_execution #' Get command info. #' #' Gets the status of and, if available, the results from a currently executing @@ -33,12 +38,17 @@ commandExecutionCancel <- function(client, cluster_id = NULL, command_id = NULL, #' @param command_id Required. This field has no description yet. #' @param context_id Required. This field has no description yet. #' -#' @rdname commandExecutionCommandStatus +#' @rdname command_execution_status +#' @alias commandExecutionCommandStatus #' @export -commandExecutionCommandStatus <- function(client, cluster_id, context_id, command_id) { +command_execution_status <- function(client, cluster_id, context_id, command_id) { query <- list(clusterId = cluster_id, commandId = command_id, contextId = context_id) client$do("GET", "/api/1.2/commands/status", query = query) } + +#' @rdname command_execution_status +#' @export +commandExecutionCommandStatus <- command_execution_status #' Get status. #' #' Gets the status for an execution context. @@ -47,12 +57,17 @@ commandExecutionCommandStatus <- function(client, cluster_id, context_id, comman #' @param cluster_id Required. This field has no description yet. #' @param context_id Required. This field has no description yet. #' -#' @rdname commandExecutionContextStatus +#' @rdname context_command_execution_status +#' @alias commandExecutionContextStatus #' @export -commandExecutionContextStatus <- function(client, cluster_id, context_id) { +context_command_execution_status <- function(client, cluster_id, context_id) { query <- list(clusterId = cluster_id, contextId = context_id) client$do("GET", "/api/1.2/contexts/status", query = query) } + +#' @rdname context_command_execution_status +#' @export +commandExecutionContextStatus <- context_command_execution_status #' Create an execution context. #' #' Creates an execution context for running cluster commands. @@ -63,12 +78,17 @@ commandExecutionContextStatus <- function(client, cluster_id, context_id) { #' @param cluster_id Running cluster id. #' @param language This field has no description yet. #' -#' @rdname commandExecutionCreate +#' @rdname create_command_execution +#' @alias commandExecutionCreate #' @export -commandExecutionCreate <- function(client, cluster_id = NULL, language = NULL) { +create_command_execution <- function(client, cluster_id = NULL, language = NULL) { body <- list(clusterId = cluster_id, language = language) client$do("POST", "/api/1.2/contexts/create", body = body) } + +#' @rdname create_command_execution +#' @export +commandExecutionCreate <- create_command_execution #' Delete an execution context. #' #' Deletes an execution context. @@ -77,12 +97,17 @@ commandExecutionCreate <- function(client, cluster_id = NULL, language = NULL) { #' @param cluster_id Required. This field has no description yet. #' @param context_id Required. This field has no description yet. 
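The Commands 1.2 wrappers read verb-first as well; the rename is purely on the R side, since the request bodies still use the endpoint's camelCase keys (clusterId, contextId). For example, with placeholder IDs:

    st <- context_command_execution_status(client,
      cluster_id = "0123-456789-abcdefgh", context_id = "ctx-1")
    st$status  # e.g. "Running"; field name assumed from the 1.2 API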
#' -#' @rdname commandExecutionDestroy +#' @rdname destroy_command_execution +#' @alias commandExecutionDestroy #' @export -commandExecutionDestroy <- function(client, cluster_id, context_id) { +destroy_command_execution <- function(client, cluster_id, context_id) { body <- list(clusterId = cluster_id, contextId = context_id) client$do("POST", "/api/1.2/contexts/destroy", body = body) } + +#' @rdname destroy_command_execution +#' @export +commandExecutionDestroy <- destroy_command_execution #' Run a command. #' #' Runs a cluster command in the given execution context, using the provided @@ -97,14 +122,19 @@ commandExecutionDestroy <- function(client, cluster_id, context_id) { #' @param context_id Running context id. #' @param language This field has no description yet. #' -#' @rdname commandExecutionExecute +#' @rdname execute_command +#' @alias commandExecutionExecute #' @export -commandExecutionExecute <- function(client, cluster_id = NULL, command = NULL, context_id = NULL, +execute_command <- function(client, cluster_id = NULL, command = NULL, context_id = NULL, language = NULL) { body <- list(clusterId = cluster_id, command = command, contextId = context_id, language = language) client$do("POST", "/api/1.2/commands/execute", body = body) } + +#' @rdname execute_command +#' @export +commandExecutionExecute <- execute_command #' Cancel a command. #' #' Cancels a currently running command within an execution context. @@ -125,9 +155,9 @@ commandExecutionExecute <- function(client, cluster_id = NULL, command = NULL, c #' @param command_id This field has no description yet. #' @param context_id This field has no description yet. #' -#' @rdname commandExecutionCancelAndWait +#' @rdname cancel_command_execution_and_wait #' @export -commandExecutionCancelAndWait <- function(client, cluster_id = NULL, command_id = NULL, +cancel_command_execution_and_wait <- function(client, cluster_id = NULL, command_id = NULL, context_id = NULL, timeout = 20, callback = cli_reporter) { body <- list(clusterId = cluster_id, commandId = command_id, contextId = context_id) op_response <- client$do("POST", "/api/1.2/commands/cancel", body = body) @@ -193,9 +223,9 @@ commandExecutionCancelAndWait <- function(client, cluster_id = NULL, command_id #' @param cluster_id Running cluster id. #' @param language This field has no description yet. #' -#' @rdname commandExecutionCreateAndWait +#' @rdname create_command_execution_and_wait #' @export -commandExecutionCreateAndWait <- function(client, cluster_id = NULL, language = NULL, +create_command_execution_and_wait <- function(client, cluster_id = NULL, language = NULL, timeout = 20, callback = cli_reporter) { body <- list(clusterId = cluster_id, language = language) op_response <- client$do("POST", "/api/1.2/contexts/create", body = body) @@ -260,10 +290,10 @@ commandExecutionCreateAndWait <- function(client, cluster_id = NULL, language = #' @param context_id Running context id. #' @param language This field has no description yet. 
#' -#' @rdname commandExecutionExecuteAndWait +#' @rdname execute_command_and_wait #' @export -commandExecutionExecuteAndWait <- function(client, cluster_id = NULL, command = NULL, - context_id = NULL, language = NULL, timeout = 20, callback = cli_reporter) { +execute_command_and_wait <- function(client, cluster_id = NULL, command = NULL, context_id = NULL, + language = NULL, timeout = 20, callback = cli_reporter) { body <- list(clusterId = cluster_id, command = command, contextId = context_id, language = language) op_response <- client$do("POST", "/api/1.2/commands/execute", body = body) diff --git a/R/connections.R b/R/connections.R index 18fa790c..777c8edc 100755 --- a/R/connections.R +++ b/R/connections.R @@ -19,14 +19,19 @@ NULL #' @param properties An object containing map of key-value properties attached to the connection. #' @param read_only If the connection is read only. #' -#' @rdname connectionsCreate +#' @rdname create_connection +#' @alias connectionsCreate #' @export -connectionsCreate <- function(client, name, connection_type, options, comment = NULL, +create_connection <- function(client, name, connection_type, options, comment = NULL, properties = NULL, read_only = NULL) { body <- list(comment = comment, connection_type = connection_type, name = name, options = options, properties = properties, read_only = read_only) client$do("POST", "/api/2.1/unity-catalog/connections", body = body) } + +#' @rdname create_connection +#' @export +connectionsCreate <- create_connection #' Delete a connection. #' #' Deletes the connection that matches the supplied name. @@ -34,12 +39,17 @@ connectionsCreate <- function(client, name, connection_type, options, comment = #' #' @param name Required. The name of the connection to be deleted. #' -#' @rdname connectionsDelete +#' @rdname delete_connection +#' @alias connectionsDelete #' @export -connectionsDelete <- function(client, name) { +delete_connection <- function(client, name) { client$do("DELETE", paste("/api/2.1/unity-catalog/connections/", name, sep = "")) } + +#' @rdname delete_connection +#' @export +connectionsDelete <- delete_connection #' Get a connection. #' #' Gets a connection from it's name. @@ -47,12 +57,17 @@ connectionsDelete <- function(client, name) { #' #' @param name Required. Name of the connection. #' -#' @rdname connectionsGet +#' @rdname get_connection +#' @alias connectionsGet #' @export -connectionsGet <- function(client, name) { +get_connection <- function(client, name) { client$do("GET", paste("/api/2.1/unity-catalog/connections/", name, sep = "")) } + +#' @rdname get_connection +#' @export +connectionsGet <- get_connection #' List connections. #' #' List all connections. @@ -60,14 +75,19 @@ connectionsGet <- function(client, name) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname connectionsList +#' @rdname list_connections +#' @alias connectionsList #' @export -connectionsList <- function(client) { +list_connections <- function(client) { json <- client$do("GET", "/api/2.1/unity-catalog/connections") return(json$connections) } + +#' @rdname list_connections +#' @export +connectionsList <- list_connections #' Update a connection. #' #' Updates the connection that matches the supplied name. @@ -78,14 +98,19 @@ connectionsList <- function(client) { #' @param options Required. A map of key-value properties attached to the securable. #' @param owner Username of current owner of the connection. 
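Putting the renamed pieces together, one round trip through the execution API might look as follows (the `id` field on the created context is assumed from the 1.2 API; the cluster ID is a placeholder):

    ctx <- create_command_execution_and_wait(client,
      cluster_id = "0123-456789-abcdefgh", language = "python")
    out <- execute_command_and_wait(client,
      cluster_id = "0123-456789-abcdefgh",
      context_id = ctx$id,
      language   = "python",
      command    = "print(40 + 2)")
    destroy_command_execution(client,
      cluster_id = "0123-456789-abcdefgh", context_id = ctx$id)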
#' -#' @rdname connectionsUpdate +#' @rdname update_connection +#' @alias connectionsUpdate #' @export -connectionsUpdate <- function(client, name, options, new_name = NULL, owner = NULL) { +update_connection <- function(client, name, options, new_name = NULL, owner = NULL) { body <- list(new_name = new_name, options = options, owner = owner) client$do("PATCH", paste("/api/2.1/unity-catalog/connections/", name, sep = ""), body = body) } +#' @rdname update_connection +#' @export +connectionsUpdate <- update_connection + diff --git a/R/credentials_manager.R b/R/credentials_manager.R index ca2c8cac..de75ee3c 100755 --- a/R/credentials_manager.R +++ b/R/credentials_manager.R @@ -13,10 +13,16 @@ NULL #' @param scopes Required. Array of scopes for the token request. #' @param token_type Required. A list of token types being requested. #' -#' @rdname credentialsManagerExchangeToken +#' @rdname exchange_credentials_manager_token +#' @alias credentialsManagerExchangeToken #' @export -credentialsManagerExchangeToken <- function(client, partition_id, token_type, scopes) { +exchange_credentials_manager_token <- function(client, partition_id, token_type, + scopes) { body <- list(partitionId = partition_id, scopes = scopes, tokenType = token_type) client$do("POST", "/api/2.0/credentials-manager/exchange-tokens/token", body = body) } +#' @rdname exchange_credentials_manager_token +#' @export +credentialsManagerExchangeToken <- exchange_credentials_manager_token + diff --git a/R/csp_enablement.R b/R/csp_enablement.R index 5a62fad1..8700f6ad 100755 --- a/R/csp_enablement.R +++ b/R/csp_enablement.R @@ -10,13 +10,18 @@ NULL #' #' @param etag etag used for versioning. #' -#' @rdname cspEnablementGet +#' @rdname get_csp_enablement +#' @alias cspEnablementGet #' @export -cspEnablementGet <- function(client, etag = NULL) { +get_csp_enablement <- function(client, etag = NULL) { query <- list(etag = etag) client$do("GET", "/api/2.0/settings/types/shield_csp_enablement_ws_db/names/default", query = query) } + +#' @rdname get_csp_enablement +#' @export +cspEnablementGet <- get_csp_enablement #' Update the compliance security profile setting. #' #' Updates the compliance security profile setting for the workspace. A fresh @@ -30,12 +35,17 @@ cspEnablementGet <- function(client, etag = NULL) { #' @param field_mask Required. Field mask is required to be passed into the PATCH request. #' @param setting Required. This field has no description yet. #' -#' @rdname cspEnablementUpdate +#' @rdname update_csp_enablement +#' @alias cspEnablementUpdate #' @export -cspEnablementUpdate <- function(client, allow_missing, setting, field_mask) { +update_csp_enablement <- function(client, allow_missing, setting, field_mask) { body <- list(allow_missing = allow_missing, field_mask = field_mask, setting = setting) client$do("PATCH", "/api/2.0/settings/types/shield_csp_enablement_ws_db/names/default", body = body) } +#' @rdname update_csp_enablement +#' @export +cspEnablementUpdate <- update_csp_enablement + diff --git a/R/current_user.R b/R/current_user.R index 1773b00f..4e39b50c 100755 --- a/R/current_user.R +++ b/R/current_user.R @@ -8,9 +8,14 @@ NULL #' Get details about the current method caller's identity. #' @param client Required. 
Instance of DatabricksClient() #' -#' @rdname currentUserMe +#' @rdname me +#' @alias currentUserMe #' @export -currentUserMe <- function(client) { +me <- function(client) { client$do("GET", "/api/2.0/preview/scim/v2/Me") } +#' @rdname me +#' @export +currentUserMe <- me + diff --git a/R/dashboard_widgets.R b/R/dashboard_widgets.R index be049891..66f6db77 100755 --- a/R/dashboard_widgets.R +++ b/R/dashboard_widgets.R @@ -12,25 +12,35 @@ NULL #' @param visualization_id Query Vizualization ID returned by :method:queryvisualizations/create. #' @param width Required. Width of a widget. #' -#' @rdname dashboardWidgetsCreate +#' @rdname create_dashboard_widget +#' @alias dashboardWidgetsCreate #' @export -dashboardWidgetsCreate <- function(client, dashboard_id, options, width, text = NULL, +create_dashboard_widget <- function(client, dashboard_id, options, width, text = NULL, visualization_id = NULL) { body <- list(dashboard_id = dashboard_id, options = options, text = text, visualization_id = visualization_id, width = width) client$do("POST", "/api/2.0/preview/sql/widgets", body = body) } + +#' @rdname create_dashboard_widget +#' @export +dashboardWidgetsCreate <- create_dashboard_widget #' Remove widget. #' @param client Required. Instance of DatabricksClient() #' #' @param id Required. Widget ID returned by :method:dashboardwidgets/create. #' -#' @rdname dashboardWidgetsDelete +#' @rdname delete_dashboard_widget +#' @alias dashboardWidgetsDelete #' @export -dashboardWidgetsDelete <- function(client, id) { +delete_dashboard_widget <- function(client, id) { client$do("DELETE", paste("/api/2.0/preview/sql/widgets/", id, sep = "")) } + +#' @rdname delete_dashboard_widget +#' @export +dashboardWidgetsDelete <- delete_dashboard_widget #' Update existing widget. #' @param client Required. Instance of DatabricksClient() #' @@ -41,14 +51,19 @@ dashboardWidgetsDelete <- function(client, id) { #' @param visualization_id Query Vizualization ID returned by :method:queryvisualizations/create. #' @param width Required. Width of a widget. #' -#' @rdname dashboardWidgetsUpdate +#' @rdname update_dashboard_widget +#' @alias dashboardWidgetsUpdate #' @export -dashboardWidgetsUpdate <- function(client, id, dashboard_id, options, width, text = NULL, +update_dashboard_widget <- function(client, id, dashboard_id, options, width, text = NULL, visualization_id = NULL) { body <- list(dashboard_id = dashboard_id, options = options, text = text, visualization_id = visualization_id, width = width) client$do("POST", paste("/api/2.0/preview/sql/widgets/", id, sep = ""), body = body) } +#' @rdname update_dashboard_widget +#' @export +dashboardWidgetsUpdate <- update_dashboard_widget + diff --git a/R/dashboards.R b/R/dashboards.R index 943c64f4..4f754508 100755 --- a/R/dashboards.R +++ b/R/dashboards.R @@ -13,14 +13,19 @@ NULL #' @param run_as_role Sets the **Run as** role for the object. #' @param tags This field has no description yet. 
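currentUserMe() collapsing to plain me() is the most aggressive rename in the set. Both return the caller's SCIM record:

    who <- me(client)
    who$userName  # field name assumed from the SCIM Me response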
#' -#' @rdname dashboardsCreate +#' @rdname create_dashboard +#' @alias dashboardsCreate #' @export -dashboardsCreate <- function(client, name, dashboard_filters_enabled = NULL, is_favorite = NULL, +create_dashboard <- function(client, name, dashboard_filters_enabled = NULL, is_favorite = NULL, parent = NULL, run_as_role = NULL, tags = NULL) { body <- list(dashboard_filters_enabled = dashboard_filters_enabled, is_favorite = is_favorite, name = name, parent = parent, run_as_role = run_as_role, tags = tags) client$do("POST", "/api/2.0/preview/sql/dashboards", body = body) } + +#' @rdname create_dashboard +#' @export +dashboardsCreate <- create_dashboard #' Remove a dashboard. #' #' Moves a dashboard to the trash. Trashed dashboards do not appear in list @@ -29,12 +34,17 @@ dashboardsCreate <- function(client, name, dashboard_filters_enabled = NULL, is_ #' #' @param dashboard_id Required. This field has no description yet. #' -#' @rdname dashboardsDelete +#' @rdname delete_dashboard +#' @alias dashboardsDelete #' @export -dashboardsDelete <- function(client, dashboard_id) { +delete_dashboard <- function(client, dashboard_id) { client$do("DELETE", paste("/api/2.0/preview/sql/dashboards/", dashboard_id, sep = "")) } + +#' @rdname delete_dashboard +#' @export +dashboardsDelete <- delete_dashboard #' Retrieve a definition. #' #' Returns a JSON representation of a dashboard object, including its @@ -43,12 +53,17 @@ dashboardsDelete <- function(client, dashboard_id) { #' #' @param dashboard_id Required. This field has no description yet. #' -#' @rdname dashboardsGet +#' @rdname get_dashboard +#' @alias dashboardsGet #' @export -dashboardsGet <- function(client, dashboard_id) { +get_dashboard <- function(client, dashboard_id) { client$do("GET", paste("/api/2.0/preview/sql/dashboards/", dashboard_id, sep = "")) } + +#' @rdname get_dashboard +#' @export +dashboardsGet <- get_dashboard #' Get dashboard objects. #' #' Fetch a paginated list of dashboard objects. @@ -64,9 +79,11 @@ dashboardsGet <- function(client, dashboard_id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname dashboardsList +#' @rdname list_dashboards +#' @alias dashboardsList #' @export -dashboardsList <- function(client, order = NULL, page = NULL, page_size = NULL, q = NULL) { +list_dashboards <- function(client, order = NULL, page = NULL, page_size = NULL, + q = NULL) { query <- list(order = order, page = page, page_size = page_size, q = q) query$page = 1 @@ -85,6 +102,10 @@ dashboardsList <- function(client, order = NULL, page = NULL, page_size = NULL, return(results) } + +#' @rdname list_dashboards +#' @export +dashboardsList <- list_dashboards #' Restore a dashboard. #' #' A restored dashboard appears in list views and searches and can be shared. @@ -92,13 +113,18 @@ dashboardsList <- function(client, order = NULL, page = NULL, page_size = NULL, #' #' @param dashboard_id Required. This field has no description yet. #' -#' @rdname dashboardsRestore +#' @rdname restore_dashboard +#' @alias dashboardsRestore #' @export -dashboardsRestore <- function(client, dashboard_id) { +restore_dashboard <- function(client, dashboard_id) { client$do("POST", paste("/api/2.0/preview/sql/dashboards/trash/", dashboard_id, sep = "")) } + +#' @rdname restore_dashboard +#' @export +dashboardsRestore <- restore_dashboard #' Change a dashboard definition. #' #' Modify this dashboard definition. 
This operation only affects attributes of @@ -111,14 +137,19 @@ dashboardsRestore <- function(client, dashboard_id) { #' @param name The title of this dashboard that appears in list views and at the top of the dashboard page. #' @param run_as_role Sets the **Run as** role for the object. #' -#' @rdname dashboardsUpdate +#' @rdname update_dashboard +#' @alias dashboardsUpdate #' @export -dashboardsUpdate <- function(client, dashboard_id, name = NULL, run_as_role = NULL) { +update_dashboard <- function(client, dashboard_id, name = NULL, run_as_role = NULL) { body <- list(name = name, run_as_role = run_as_role) client$do("POST", paste("/api/2.0/preview/sql/dashboards/", dashboard_id, sep = ""), body = body) } +#' @rdname update_dashboard +#' @export +dashboardsUpdate <- update_dashboard + diff --git a/R/data_sources.R b/R/data_sources.R index 4bc66a5b..365e92be 100755 --- a/R/data_sources.R +++ b/R/data_sources.R @@ -10,9 +10,14 @@ NULL #' you need only a SQL warehouse's `id` to create new queries against it. #' @param client Required. Instance of DatabricksClient() #' -#' @rdname dataSourcesList +#' @rdname list_data_sources +#' @alias dataSourcesList #' @export -dataSourcesList <- function(client) { +list_data_sources <- function(client) { client$do("GET", "/api/2.0/preview/sql/data_sources") } +#' @rdname list_data_sources +#' @export +dataSourcesList <- list_data_sources + diff --git a/R/dbfs.R b/R/dbfs.R index 81ef807d..89ae2a4a 100755 --- a/R/dbfs.R +++ b/R/dbfs.R @@ -16,12 +16,17 @@ NULL #' @param data Required. The base64-encoded data to append to the stream. #' @param handle Required. The handle on an open stream. #' -#' @rdname dbfsAddBlock +#' @rdname add_dbfs_block +#' @alias dbfsAddBlock #' @export -dbfsAddBlock <- function(client, handle, data) { +add_dbfs_block <- function(client, handle, data) { body <- list(data = data, handle = handle) client$do("POST", "/api/2.0/dbfs/add-block", body = body) } + +#' @rdname add_dbfs_block +#' @export +dbfsAddBlock <- add_dbfs_block #' Close the stream. #' #' Closes the stream specified by the input handle. If the handle does not @@ -30,12 +35,17 @@ dbfsAddBlock <- function(client, handle, data) { #' #' @param handle Required. The handle on an open stream. #' -#' @rdname dbfsClose +#' @rdname close_dbfs +#' @alias dbfsClose #' @export -dbfsClose <- function(client, handle) { +close_dbfs <- function(client, handle) { body <- list(handle = handle) client$do("POST", "/api/2.0/dbfs/close", body = body) } + +#' @rdname close_dbfs +#' @export +dbfsClose <- close_dbfs #' Open a stream. #' #' Opens a stream to write to a file and returns a handle to this stream. There @@ -53,12 +63,17 @@ dbfsClose <- function(client, handle) { #' @param overwrite The flag that specifies whether to overwrite existing file/files. #' @param path Required. The path of the new file. #' -#' @rdname dbfsCreate +#' @rdname create_dbfs +#' @alias dbfsCreate #' @export -dbfsCreate <- function(client, path, overwrite = NULL) { +create_dbfs <- function(client, path, overwrite = NULL) { body <- list(overwrite = overwrite, path = path) client$do("POST", "/api/2.0/dbfs/create", body = body) } + +#' @rdname create_dbfs +#' @export +dbfsCreate <- create_dbfs #' Delete a file/directory. #' #' Delete the file or directory (optionally recursively delete all files in the @@ -84,12 +99,17 @@ dbfsCreate <- function(client, path, overwrite = NULL) { #' @param path Required. The path of the file or directory to delete. 
#' @param recursive Whether or not to recursively delete the directory's contents. #' -#' @rdname dbfsDelete +#' @rdname delete_dbfs +#' @alias dbfsDelete #' @export -dbfsDelete <- function(client, path, recursive = NULL) { +delete_dbfs <- function(client, path, recursive = NULL) { body <- list(path = path, recursive = recursive) client$do("POST", "/api/2.0/dbfs/delete", body = body) } + +#' @rdname delete_dbfs +#' @export +dbfsDelete <- delete_dbfs #' Get the information of a file or directory. #' #' Gets the file information for a file or directory. If the file or directory @@ -98,12 +118,17 @@ dbfsDelete <- function(client, path, recursive = NULL) { #' #' @param path Required. The path of the file or directory. #' -#' @rdname dbfsGetStatus +#' @rdname get_dbfs_status +#' @alias dbfsGetStatus #' @export -dbfsGetStatus <- function(client, path) { +get_dbfs_status <- function(client, path) { query <- list(path = path) client$do("GET", "/api/2.0/dbfs/get-status", query = query) } + +#' @rdname get_dbfs_status +#' @export +dbfsGetStatus <- get_dbfs_status #' List directory contents or file details. #' #' List the contents of a directory, or details of the file. If the file or @@ -123,15 +148,20 @@ dbfsGetStatus <- function(client, path) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname dbfsList +#' @rdname list_dbfs +#' @alias dbfsList #' @export -dbfsList <- function(client, path) { +list_dbfs <- function(client, path) { query <- list(path = path) json <- client$do("GET", "/api/2.0/dbfs/list", query = query) return(json$files) } + +#' @rdname list_dbfs +#' @export +dbfsList <- list_dbfs #' Create a directory. #' #' Creates the given directory and necessary parent directories if they do not @@ -143,12 +173,17 @@ dbfsList <- function(client, path) { #' #' @param path Required. The path of the new directory. #' -#' @rdname dbfsMkdirs +#' @rdname mkdirs_dbfs +#' @alias dbfsMkdirs #' @export -dbfsMkdirs <- function(client, path) { +mkdirs_dbfs <- function(client, path) { body <- list(path = path) client$do("POST", "/api/2.0/dbfs/mkdirs", body = body) } + +#' @rdname mkdirs_dbfs +#' @export +dbfsMkdirs <- mkdirs_dbfs #' Move a file. #' #' Moves a file from one location to another location within DBFS. If the source @@ -161,12 +196,17 @@ dbfsMkdirs <- function(client, path) { #' @param destination_path Required. The destination path of the file or directory. #' @param source_path Required. The source path of the file or directory. #' -#' @rdname dbfsMove +#' @rdname move_dbfs +#' @alias dbfsMove #' @export -dbfsMove <- function(client, source_path, destination_path) { +move_dbfs <- function(client, source_path, destination_path) { body <- list(destination_path = destination_path, source_path = source_path) client$do("POST", "/api/2.0/dbfs/move", body = body) } + +#' @rdname move_dbfs +#' @export +dbfsMove <- move_dbfs #' Upload a file. #' #' Uploads a file through the use of multipart form post. It is mainly used for @@ -187,12 +227,17 @@ dbfsMove <- function(client, source_path, destination_path) { #' @param overwrite The flag that specifies whether to overwrite existing file/files. #' @param path Required. The path of the new file. 
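The streaming verbs above (create_dbfs, add_dbfs_block, close_dbfs) map onto the DBFS handle protocol, and each block must be base64-encoded by the caller. A sketch using base64enc, an assumed helper package that is not part of this SDK:

    h <- create_dbfs(client, path = "/tmp/demo.txt", overwrite = TRUE)
    add_dbfs_block(client, handle = h$handle,
      data = base64enc::base64encode(charToRaw("hello from R\n")))
    close_dbfs(client, handle = h$handle)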
#' -#' @rdname dbfsPut +#' @rdname put_dbfs +#' @alias dbfsPut #' @export -dbfsPut <- function(client, path, contents = NULL, overwrite = NULL) { +put_dbfs <- function(client, path, contents = NULL, overwrite = NULL) { body <- list(contents = contents, overwrite = overwrite, path = path) client$do("POST", "/api/2.0/dbfs/put", body = body) } + +#' @rdname put_dbfs +#' @export +dbfsPut <- put_dbfs #' Get the contents of a file. #' #' Returns the contents of a file. If the file does not exist, this call throws @@ -209,13 +254,18 @@ dbfsPut <- function(client, path, contents = NULL, overwrite = NULL) { #' @param offset The offset to read from in bytes. #' @param path Required. The path of the file to read. #' -#' @rdname dbfsRead +#' @rdname read_dbfs +#' @alias dbfsRead #' @export -dbfsRead <- function(client, path, length = NULL, offset = NULL) { +read_dbfs <- function(client, path, length = NULL, offset = NULL) { query <- list(length = length, offset = offset, path = path) client$do("GET", "/api/2.0/dbfs/read", query = query) } +#' @rdname read_dbfs +#' @export +dbfsRead <- read_dbfs + diff --git a/R/dbsql_permissions.R b/R/dbsql_permissions.R index 10ac74dd..d1f40c4f 100755 --- a/R/dbsql_permissions.R +++ b/R/dbsql_permissions.R @@ -12,13 +12,18 @@ NULL #' @param object_id Required. Object ID. #' @param object_type Required. The type of object permissions to check. #' -#' @rdname dbsqlPermissionsGet +#' @rdname get_dbsql_permission +#' @alias dbsqlPermissionsGet #' @export -dbsqlPermissionsGet <- function(client, object_type, object_id) { +get_dbsql_permission <- function(client, object_type, object_id) { client$do("GET", paste("/api/2.0/preview/sql/permissions/", object_type, "/", object_id, sep = "")) } + +#' @rdname get_dbsql_permission +#' @export +dbsqlPermissionsGet <- get_dbsql_permission #' Set object ACL. #' #' Sets the access control list (ACL) for a specified object. This operation @@ -29,13 +34,18 @@ dbsqlPermissionsGet <- function(client, object_type, object_id) { #' @param object_id Required. Object ID. #' @param object_type Required. The type of object permission to set. #' -#' @rdname dbsqlPermissionsSet +#' @rdname set_dbsql_permission +#' @alias dbsqlPermissionsSet #' @export -dbsqlPermissionsSet <- function(client, object_type, object_id, access_control_list = NULL) { +set_dbsql_permission <- function(client, object_type, object_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("POST", paste("/api/2.0/preview/sql/permissions/", object_type, "/", object_id, sep = ""), body = body) } + +#' @rdname set_dbsql_permission +#' @export +dbsqlPermissionsSet <- set_dbsql_permission #' Transfer object ownership. #' #' Transfers ownership of a dashboard, query, or alert to an active user. @@ -46,13 +56,18 @@ dbsqlPermissionsSet <- function(client, object_type, object_id, access_control_l #' @param object_id Required. The ID of the object on which to change ownership. #' @param object_type Required. The type of object on which to change ownership. 
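Reading back mirrors the API too: read_dbfs() exposes the offset/length byte window and returns base64 data for the caller to decode, while list_dbfs() hands back the files element as one `data.frame`:

    entries <- list_dbfs(client, path = "/tmp")  # columns assumed from the FileInfo schema
    chunk <- read_dbfs(client, path = "/tmp/demo.txt", offset = 0, length = 1024)
    rawToChar(base64enc::base64decode(chunk$data))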
#' -#' @rdname dbsqlPermissionsTransferOwnership +#' @rdname transfer_dbsql_permission_ownership +#' @alias dbsqlPermissionsTransferOwnership #' @export -dbsqlPermissionsTransferOwnership <- function(client, object_type, object_id, new_owner = NULL) { +transfer_dbsql_permission_ownership <- function(client, object_type, object_id, new_owner = NULL) { body <- list(new_owner = new_owner) client$do("POST", paste("/api/2.0/preview/sql/permissions/", object_type, "/", object_id, "/transfer", , sep = ""), body = body) } +#' @rdname transfer_dbsql_permission_ownership +#' @export +dbsqlPermissionsTransferOwnership <- transfer_dbsql_permission_ownership + diff --git a/R/default_namespace.R b/R/default_namespace.R index 494d3e48..7a0c95c8 100755 --- a/R/default_namespace.R +++ b/R/default_namespace.R @@ -14,13 +14,18 @@ NULL #' #' @param etag etag used for versioning. #' -#' @rdname defaultNamespaceDelete +#' @rdname delete_default_namespace +#' @alias defaultNamespaceDelete #' @export -defaultNamespaceDelete <- function(client, etag = NULL) { +delete_default_namespace <- function(client, etag = NULL) { query <- list(etag = etag) client$do("DELETE", "/api/2.0/settings/types/default_namespace_ws/names/default", query = query) } + +#' @rdname delete_default_namespace +#' @export +defaultNamespaceDelete <- delete_default_namespace #' Get the default namespace setting. #' #' Gets the default namespace setting. @@ -28,13 +33,18 @@ defaultNamespaceDelete <- function(client, etag = NULL) { #' #' @param etag etag used for versioning. #' -#' @rdname defaultNamespaceGet +#' @rdname get_default_namespace +#' @alias defaultNamespaceGet #' @export -defaultNamespaceGet <- function(client, etag = NULL) { +get_default_namespace <- function(client, etag = NULL) { query <- list(etag = etag) client$do("GET", "/api/2.0/settings/types/default_namespace_ws/names/default", query = query) } + +#' @rdname get_default_namespace +#' @export +defaultNamespaceGet <- get_default_namespace #' Update the default namespace setting. #' #' Updates the default namespace setting for the workspace. A fresh etag needs @@ -50,13 +60,18 @@ defaultNamespaceGet <- function(client, etag = NULL) { #' @param field_mask Required. Field mask is required to be passed into the PATCH request. #' @param setting Required. This represents the setting configuration for the default namespace in the Databricks workspace. #' -#' @rdname defaultNamespaceUpdate +#' @rdname update_default_namespace +#' @alias defaultNamespaceUpdate #' @export -defaultNamespaceUpdate <- function(client, allow_missing, setting, field_mask) { +update_default_namespace <- function(client, allow_missing, setting, field_mask) { body <- list(allow_missing = allow_missing, field_mask = field_mask, setting = setting) client$do("PATCH", "/api/2.0/settings/types/default_namespace_ws/names/default", body = body) } +#' @rdname update_default_namespace +#' @export +defaultNamespaceUpdate <- update_default_namespace + diff --git a/R/esm_enablement.R b/R/esm_enablement.R index 7827429f..62f95c58 100755 --- a/R/esm_enablement.R +++ b/R/esm_enablement.R @@ -10,13 +10,18 @@ NULL #' #' @param etag etag used for versioning. 
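The workspace-settings wrappers share the etag dance spelled out in their comments: GET the current setting, then send its etag back with the PATCH. A sketch for the default namespace setting, with the payload shape assumed from the setting schema:

    cur <- get_default_namespace(client)
    update_default_namespace(client,
      allow_missing = TRUE,
      field_mask    = "namespace.value",
      setting       = list(etag = cur$etag,
        namespace = list(value = "main")))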
#' -#' @rdname esmEnablementGet +#' @rdname get_esm_enablement +#' @alias esmEnablementGet #' @export -esmEnablementGet <- function(client, etag = NULL) { +get_esm_enablement <- function(client, etag = NULL) { query <- list(etag = etag) client$do("GET", "/api/2.0/settings/types/shield_esm_enablement_ws_db/names/default", query = query) } + +#' @rdname get_esm_enablement +#' @export +esmEnablementGet <- get_esm_enablement #' Update the enhanced security monitoring setting. #' #' Updates the enhanced security monitoring setting for the workspace. A fresh @@ -30,12 +35,17 @@ esmEnablementGet <- function(client, etag = NULL) { #' @param field_mask Required. Field mask is required to be passed into the PATCH request. #' @param setting Required. This field has no description yet. #' -#' @rdname esmEnablementUpdate +#' @rdname update_esm_enablement +#' @alias esmEnablementUpdate #' @export -esmEnablementUpdate <- function(client, allow_missing, setting, field_mask) { +update_esm_enablement <- function(client, allow_missing, setting, field_mask) { body <- list(allow_missing = allow_missing, field_mask = field_mask, setting = setting) client$do("PATCH", "/api/2.0/settings/types/shield_esm_enablement_ws_db/names/default", body = body) } +#' @rdname update_esm_enablement +#' @export +esmEnablementUpdate <- update_esm_enablement + diff --git a/R/experiments.R b/R/experiments.R index 83200af1..fbfe7acf 100755 --- a/R/experiments.R +++ b/R/experiments.R @@ -17,12 +17,17 @@ NULL #' @param name Required. Experiment name. #' @param tags A collection of tags to set on the experiment. #' -#' @rdname experimentsCreateExperiment +#' @rdname create_experiment +#' @alias experimentsCreateExperiment #' @export -experimentsCreateExperiment <- function(client, name, artifact_location = NULL, tags = NULL) { +create_experiment <- function(client, name, artifact_location = NULL, tags = NULL) { body <- list(artifact_location = artifact_location, name = name, tags = tags) client$do("POST", "/api/2.0/mlflow/experiments/create", body = body) } + +#' @rdname create_experiment +#' @export +experimentsCreateExperiment <- create_experiment #' Create a run. #' #' Creates a new run within an experiment. A run is usually a single execution @@ -36,14 +41,19 @@ experimentsCreateExperiment <- function(client, name, artifact_location = NULL, #' @param tags Additional metadata for run. #' @param user_id ID of the user executing the run. #' -#' @rdname experimentsCreateRun +#' @rdname create_experiment_run +#' @alias experimentsCreateRun #' @export -experimentsCreateRun <- function(client, experiment_id = NULL, start_time = NULL, +create_experiment_run <- function(client, experiment_id = NULL, start_time = NULL, tags = NULL, user_id = NULL) { body <- list(experiment_id = experiment_id, start_time = start_time, tags = tags, user_id = user_id) client$do("POST", "/api/2.0/mlflow/runs/create", body = body) } + +#' @rdname create_experiment_run +#' @export +experimentsCreateRun <- create_experiment_run #' Delete an experiment. #' #' Marks an experiment and associated metadata, runs, metrics, params, and tags @@ -53,12 +63,17 @@ experimentsCreateRun <- function(client, experiment_id = NULL, start_time = NULL #' #' @param experiment_id Required. ID of the associated experiment. 
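On the MLflow side the renames read verb-first and noun-last (experimentsCreateRun becomes create_experiment_run). For example, with a placeholder workspace path:

    ex <- create_experiment(client, name = "/Users/someone@example.com/r-sdk-demo")
    run <- create_experiment_run(client,
      experiment_id = ex$experiment_id,
      start_time    = round(as.numeric(Sys.time()) * 1000))  # millis since the epoch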
#' -#' @rdname experimentsDeleteExperiment +#' @rdname delete_experiment +#' @alias experimentsDeleteExperiment #' @export -experimentsDeleteExperiment <- function(client, experiment_id) { +delete_experiment <- function(client, experiment_id) { body <- list(experiment_id = experiment_id) client$do("POST", "/api/2.0/mlflow/experiments/delete", body = body) } + +#' @rdname delete_experiment +#' @export +experimentsDeleteExperiment <- delete_experiment #' Delete a run. #' #' Marks a run for deletion. @@ -66,12 +81,17 @@ experimentsDeleteExperiment <- function(client, experiment_id) { #' #' @param run_id Required. ID of the run to delete. #' -#' @rdname experimentsDeleteRun +#' @rdname delete_experiment_run +#' @alias experimentsDeleteRun #' @export -experimentsDeleteRun <- function(client, run_id) { +delete_experiment_run <- function(client, run_id) { body <- list(run_id = run_id) client$do("POST", "/api/2.0/mlflow/runs/delete", body = body) } + +#' @rdname delete_experiment_run +#' @export +experimentsDeleteRun <- delete_experiment_run #' Delete runs by creation time. #' #' Bulk delete runs in an experiment that were created prior to or at the @@ -84,12 +104,17 @@ experimentsDeleteRun <- function(client, run_id) { #' @param max_runs An optional positive integer indicating the maximum number of runs to delete. #' @param max_timestamp_millis Required. The maximum creation timestamp in milliseconds since the UNIX epoch for deleting runs. #' -#' @rdname experimentsDeleteRuns +#' @rdname delete_experiment_runs +#' @alias experimentsDeleteRuns #' @export -experimentsDeleteRuns <- function(client, experiment_id, max_timestamp_millis, max_runs = NULL) { +delete_experiment_runs <- function(client, experiment_id, max_timestamp_millis, max_runs = NULL) { body <- list(experiment_id = experiment_id, max_runs = max_runs, max_timestamp_millis = max_timestamp_millis) client$do("POST", "/api/2.0/mlflow/databricks/runs/delete-runs", body = body) } + +#' @rdname delete_experiment_runs +#' @export +experimentsDeleteRuns <- delete_experiment_runs #' Delete a tag. #' #' Deletes a tag on a run. Tags are run metadata that can be updated during a @@ -99,12 +124,17 @@ experimentsDeleteRuns <- function(client, experiment_id, max_timestamp_millis, m #' @param key Required. Name of the tag. #' @param run_id Required. ID of the run that the tag was logged under. #' -#' @rdname experimentsDeleteTag +#' @rdname delete_experiment_tag +#' @alias experimentsDeleteTag #' @export -experimentsDeleteTag <- function(client, run_id, key) { +delete_experiment_tag <- function(client, run_id, key) { body <- list(key = key, run_id = run_id) client$do("POST", "/api/2.0/mlflow/runs/delete-tag", body = body) } + +#' @rdname delete_experiment_tag +#' @export +experimentsDeleteTag <- delete_experiment_tag #' Get metadata. #' #' Gets metadata for an experiment. @@ -120,12 +150,17 @@ experimentsDeleteTag <- function(client, run_id, key) { #' #' @param experiment_name Required. Name of the associated experiment. #' -#' @rdname experimentsGetByName +#' @rdname get_experiment_by_name +#' @alias experimentsGetByName #' @export -experimentsGetByName <- function(client, experiment_name) { +get_experiment_by_name <- function(client, experiment_name) { query <- list(experiment_name = experiment_name) client$do("GET", "/api/2.0/mlflow/experiments/get-by-name", query = query) } + +#' @rdname get_experiment_by_name +#' @export +experimentsGetByName <- get_experiment_by_name #' Get an experiment. #' #' Gets metadata for an experiment. 
This method works on deleted experiments. @@ -133,12 +168,17 @@ experimentsGetByName <- function(client, experiment_name) { #' #' @param experiment_id Required. ID of the associated experiment. #' -#' @rdname experimentsGetExperiment +#' @rdname get_experiment +#' @alias experimentsGetExperiment #' @export -experimentsGetExperiment <- function(client, experiment_id) { +get_experiment <- function(client, experiment_id) { query <- list(experiment_id = experiment_id) client$do("GET", "/api/2.0/mlflow/experiments/get", query = query) } + +#' @rdname get_experiment +#' @export +experimentsGetExperiment <- get_experiment #' Get history of a given metric within a run. #' #' Gets a list of all values for the specified metric for a given run. @@ -152,9 +192,10 @@ experimentsGetExperiment <- function(client, experiment_id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname experimentsGetHistory +#' @rdname get_experiment_history +#' @alias experimentsGetHistory #' @export -experimentsGetHistory <- function(client, metric_key, max_results = NULL, page_token = NULL, +get_experiment_history <- function(client, metric_key, max_results = NULL, page_token = NULL, run_id = NULL, run_uuid = NULL) { query <- list(max_results = max_results, metric_key = metric_key, page_token = page_token, run_id = run_id, run_uuid = run_uuid) @@ -175,6 +216,10 @@ experimentsGetHistory <- function(client, metric_key, max_results = NULL, page_t return(results) } + +#' @rdname get_experiment_history +#' @export +experimentsGetHistory <- get_experiment_history #' Get experiment permission levels. #' #' Gets the permission levels that a user can have on an object. @@ -182,13 +227,18 @@ experimentsGetHistory <- function(client, metric_key, max_results = NULL, page_t #' #' @param experiment_id Required. The experiment for which to get or manage permissions. #' -#' @rdname experimentsGetPermissionLevels +#' @rdname get_experiment_permission_levels +#' @alias experimentsGetPermissionLevels #' @export -experimentsGetPermissionLevels <- function(client, experiment_id) { +get_experiment_permission_levels <- function(client, experiment_id) { client$do("GET", paste("/api/2.0/permissions/experiments/", experiment_id, "/permissionLevels", , sep = "")) } + +#' @rdname get_experiment_permission_levels +#' @export +experimentsGetPermissionLevels <- get_experiment_permission_levels #' Get experiment permissions. #' #' Gets the permissions of an experiment. Experiments can inherit permissions @@ -197,12 +247,17 @@ experimentsGetPermissionLevels <- function(client, experiment_id) { #' #' @param experiment_id Required. The experiment for which to get or manage permissions. #' -#' @rdname experimentsGetPermissions +#' @rdname get_experiment_permissions +#' @alias experimentsGetPermissions #' @export -experimentsGetPermissions <- function(client, experiment_id) { +get_experiment_permissions <- function(client, experiment_id) { client$do("GET", paste("/api/2.0/permissions/experiments/", experiment_id, sep = "")) } + +#' @rdname get_experiment_permissions +#' @export +experimentsGetPermissions <- get_experiment_permissions #' Get a run. #' #' Gets the metadata, metrics, params, and tags for a run. In the case where @@ -216,12 +271,17 @@ experimentsGetPermissions <- function(client, experiment_id) { #' @param run_id Required. ID of the run to fetch. #' @param run_uuid Deprecated, use run_id instead. ID of the run to fetch. 
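get_experiment_history() is another paginated wrapper: it follows page_token until the pages run out and binds all metric values into one `data.frame`. Reusing `run` from the sketch above, with the response nesting assumed from MLflow's CreateRun schema:

    rmse_hist <- get_experiment_history(client, metric_key = "rmse",
      run_id = run$run$info$run_id)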
#' -#' @rdname experimentsGetRun +#' @rdname get_experiment_run +#' @alias experimentsGetRun #' @export -experimentsGetRun <- function(client, run_id, run_uuid = NULL) { +get_experiment_run <- function(client, run_id, run_uuid = NULL) { query <- list(run_id = run_id, run_uuid = run_uuid) client$do("GET", "/api/2.0/mlflow/runs/get", query = query) } + +#' @rdname get_experiment_run +#' @export +experimentsGetRun <- get_experiment_run #' Get all artifacts. #' #' List artifacts for a run. Takes an optional `artifact_path` prefix. If it is @@ -235,9 +295,10 @@ experimentsGetRun <- function(client, run_id, run_uuid = NULL) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname experimentsListArtifacts +#' @rdname list_experiment_artifacts +#' @alias experimentsListArtifacts #' @export -experimentsListArtifacts <- function(client, page_token = NULL, path = NULL, run_id = NULL, +list_experiment_artifacts <- function(client, page_token = NULL, path = NULL, run_id = NULL, run_uuid = NULL) { query <- list(page_token = page_token, path = path, run_id = run_id, run_uuid = run_uuid) @@ -257,6 +318,10 @@ experimentsListArtifacts <- function(client, page_token = NULL, path = NULL, run return(results) } + +#' @rdname list_experiment_artifacts +#' @export +experimentsListArtifacts <- list_experiment_artifacts #' List experiments. #' #' Gets a list of all experiments. @@ -268,9 +333,10 @@ experimentsListArtifacts <- function(client, page_token = NULL, path = NULL, run #' #' @return `data.frame` with all of the response pages. #' -#' @rdname experimentsListExperiments +#' @rdname list_experiment_experiments +#' @alias experimentsListExperiments #' @export -experimentsListExperiments <- function(client, max_results = NULL, page_token = NULL, +list_experiment_experiments <- function(client, max_results = NULL, page_token = NULL, view_type = NULL) { query <- list(max_results = max_results, page_token = page_token, view_type = view_type) @@ -290,6 +356,10 @@ experimentsListExperiments <- function(client, max_results = NULL, page_token = return(results) } + +#' @rdname list_experiment_experiments +#' @export +experimentsListExperiments <- list_experiment_experiments #' Log a batch. #' #' Logs a batch of metrics, params, and tags for a run. If any data failed to be @@ -336,13 +406,18 @@ experimentsListExperiments <- function(client, max_results = NULL, page_token = #' @param run_id ID of the run to log under. #' @param tags Tags to log. #' -#' @rdname experimentsLogBatch +#' @rdname log_experiment_batch +#' @alias experimentsLogBatch #' @export -experimentsLogBatch <- function(client, metrics = NULL, params = NULL, run_id = NULL, +log_experiment_batch <- function(client, metrics = NULL, params = NULL, run_id = NULL, tags = NULL) { body <- list(metrics = metrics, params = params, run_id = run_id, tags = tags) client$do("POST", "/api/2.0/mlflow/runs/log-batch", body = body) } + +#' @rdname log_experiment_batch +#' @export +experimentsLogBatch <- log_experiment_batch #' Log inputs to a run. #' #' **NOTE:** Experimental: This API may change or be removed in a future release @@ -352,12 +427,17 @@ experimentsLogBatch <- function(client, metrics = NULL, params = NULL, run_id = #' @param datasets Dataset inputs. #' @param run_id ID of the run to log under. 
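log_experiment_batch passes its arguments straight through as the request body, so nested lists shaped like the MLflow LogBatch payload should serialize to the expected JSON objects (an assumption about the serializer; run ID illustrative, `client` as above):

    now_ms <- round(as.numeric(Sys.time()) * 1000)
    log_experiment_batch(client, run_id = "0123456789abcdef0123456789abcdef",
        metrics = list(list(key = "loss", value = 0.42, timestamp = now_ms, step = 1)),
        params = list(list(key = "lr", value = "0.01")),
        tags = list(list(key = "stage", value = "dev")))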
#' -#' @rdname experimentsLogInputs +#' @rdname log_experiment_inputs +#' @alias experimentsLogInputs #' @export -experimentsLogInputs <- function(client, datasets = NULL, run_id = NULL) { +log_experiment_inputs <- function(client, datasets = NULL, run_id = NULL) { body <- list(datasets = datasets, run_id = run_id) client$do("POST", "/api/2.0/mlflow/runs/log-inputs", body = body) } + +#' @rdname log_experiment_inputs +#' @export +experimentsLogInputs <- log_experiment_inputs #' Log a metric. #' #' Logs a metric for a run. A metric is a key-value pair (string key, float @@ -372,14 +452,19 @@ experimentsLogInputs <- function(client, datasets = NULL, run_id = NULL) { #' @param timestamp Required. Unix timestamp in milliseconds at the time metric was logged. #' @param value Required. Double value of the metric being logged. #' -#' @rdname experimentsLogMetric +#' @rdname log_experiment_metric +#' @alias experimentsLogMetric #' @export -experimentsLogMetric <- function(client, key, value, timestamp, run_id = NULL, run_uuid = NULL, +log_experiment_metric <- function(client, key, value, timestamp, run_id = NULL, run_uuid = NULL, step = NULL) { body <- list(key = key, run_id = run_id, run_uuid = run_uuid, step = step, timestamp = timestamp, value = value) client$do("POST", "/api/2.0/mlflow/runs/log-metric", body = body) } + +#' @rdname log_experiment_metric +#' @export +experimentsLogMetric <- log_experiment_metric #' Log a model. #' #' **NOTE:** Experimental: This API may change or be removed in a future release @@ -389,12 +474,17 @@ experimentsLogMetric <- function(client, key, value, timestamp, run_id = NULL, r #' @param model_json MLmodel file in json format. #' @param run_id ID of the run to log under. #' -#' @rdname experimentsLogModel +#' @rdname log_experiment_model +#' @alias experimentsLogModel #' @export -experimentsLogModel <- function(client, model_json = NULL, run_id = NULL) { +log_experiment_model <- function(client, model_json = NULL, run_id = NULL) { body <- list(model_json = model_json, run_id = run_id) client$do("POST", "/api/2.0/mlflow/runs/log-model", body = body) } + +#' @rdname log_experiment_model +#' @export +experimentsLogModel <- log_experiment_model #' Log a param. #' #' Logs a param used for a run. A param is a key-value pair (string key, string @@ -408,12 +498,17 @@ experimentsLogModel <- function(client, model_json = NULL, run_id = NULL) { #' @param run_uuid Deprecated, use run_id instead. ID of the run under which to log the param. #' @param value Required. String value of the param being logged. #' -#' @rdname experimentsLogParam +#' @rdname log_experiment_param +#' @alias experimentsLogParam #' @export -experimentsLogParam <- function(client, key, value, run_id = NULL, run_uuid = NULL) { +log_experiment_param <- function(client, key, value, run_id = NULL, run_uuid = NULL) { body <- list(key = key, run_id = run_id, run_uuid = run_uuid, value = value) client$do("POST", "/api/2.0/mlflow/runs/log-parameter", body = body) } + +#' @rdname log_experiment_param +#' @export +experimentsLogParam <- log_experiment_param #' Restores an experiment. #' #' Restore an experiment marked for deletion. This also restores associated @@ -426,12 +521,17 @@ experimentsLogParam <- function(client, key, value, run_id = NULL, run_uuid = NU #' #' @param experiment_id Required. ID of the associated experiment. 
#' -#' @rdname experimentsRestoreExperiment +#' @rdname restore_experiment +#' @alias experimentsRestoreExperiment #' @export -experimentsRestoreExperiment <- function(client, experiment_id) { +restore_experiment <- function(client, experiment_id) { body <- list(experiment_id = experiment_id) client$do("POST", "/api/2.0/mlflow/experiments/restore", body = body) } + +#' @rdname restore_experiment +#' @export +experimentsRestoreExperiment <- restore_experiment #' Restore a run. #' #' Restores a deleted run. @@ -439,12 +539,17 @@ experimentsRestoreExperiment <- function(client, experiment_id) { #' #' @param run_id Required. ID of the run to restore. #' -#' @rdname experimentsRestoreRun +#' @rdname restore_experiment_run +#' @alias experimentsRestoreRun #' @export -experimentsRestoreRun <- function(client, run_id) { +restore_experiment_run <- function(client, run_id) { body <- list(run_id = run_id) client$do("POST", "/api/2.0/mlflow/runs/restore", body = body) } + +#' @rdname restore_experiment_run +#' @export +experimentsRestoreRun <- restore_experiment_run #' Restore runs by deletion time. #' #' Bulk restore runs in an experiment that were deleted no earlier than the @@ -457,12 +562,18 @@ experimentsRestoreRun <- function(client, run_id) { #' @param max_runs An optional positive integer indicating the maximum number of runs to restore. #' @param min_timestamp_millis Required. The minimum deletion timestamp in milliseconds since the UNIX epoch for restoring runs. #' -#' @rdname experimentsRestoreRuns +#' @rdname restore_experiment_runs +#' @alias experimentsRestoreRuns #' @export -experimentsRestoreRuns <- function(client, experiment_id, min_timestamp_millis, max_runs = NULL) { +restore_experiment_runs <- function(client, experiment_id, min_timestamp_millis, + max_runs = NULL) { body <- list(experiment_id = experiment_id, max_runs = max_runs, min_timestamp_millis = min_timestamp_millis) client$do("POST", "/api/2.0/mlflow/databricks/runs/restore-runs", body = body) } + +#' @rdname restore_experiment_runs +#' @export +experimentsRestoreRuns <- restore_experiment_runs #' Search experiments. #' #' Searches for experiments that satisfy specified search criteria. @@ -476,9 +587,10 @@ experimentsRestoreRuns <- function(client, experiment_id, min_timestamp_millis, #' #' @return `data.frame` with all of the response pages. #' -#' @rdname experimentsSearchExperiments +#' @rdname search_experiment_experiments +#' @alias experimentsSearchExperiments #' @export -experimentsSearchExperiments <- function(client, filter = NULL, max_results = NULL, +search_experiment_experiments <- function(client, filter = NULL, max_results = NULL, order_by = NULL, page_token = NULL, view_type = NULL) { body <- list(filter = filter, max_results = max_results, order_by = order_by, page_token = page_token, view_type = view_type) @@ -499,6 +611,10 @@ experimentsSearchExperiments <- function(client, filter = NULL, max_results = NU return(results) } + +#' @rdname search_experiment_experiments +#' @export +experimentsSearchExperiments <- search_experiment_experiments #' Search for runs. #' #' Searches for runs that satisfy expressions. @@ -515,10 +631,11 @@ experimentsSearchExperiments <- function(client, filter = NULL, max_results = NU #' #' @return `data.frame` with all of the response pages. 
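A paginated search sketch; the filter string follows MLflow search syntax and is illustrative (`client` as above):

    exps <- search_experiment_experiments(client, filter = "name LIKE 'churn-%'",
        max_results = 100)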
#' -#' @rdname experimentsSearchRuns +#' @rdname search_experiment_runs +#' @alias experimentsSearchRuns #' @export -experimentsSearchRuns <- function(client, experiment_ids = NULL, filter = NULL, max_results = NULL, - order_by = NULL, page_token = NULL, run_view_type = NULL) { +search_experiment_runs <- function(client, experiment_ids = NULL, filter = NULL, + max_results = NULL, order_by = NULL, page_token = NULL, run_view_type = NULL) { body <- list(experiment_ids = experiment_ids, filter = filter, max_results = max_results, order_by = order_by, page_token = page_token, run_view_type = run_view_type) @@ -538,6 +655,10 @@ experimentsSearchRuns <- function(client, experiment_ids = NULL, filter = NULL, return(results) } + +#' @rdname search_experiment_runs +#' @export +experimentsSearchRuns <- search_experiment_runs #' Set a tag. #' #' Sets a tag on an experiment. Experiment tags are metadata that can be @@ -548,12 +669,17 @@ experimentsSearchRuns <- function(client, experiment_ids = NULL, filter = NULL, #' @param key Required. Name of the tag. #' @param value Required. String value of the tag being logged. #' -#' @rdname experimentsSetExperimentTag +#' @rdname set_experiment_tag +#' @alias experimentsSetExperimentTag #' @export -experimentsSetExperimentTag <- function(client, experiment_id, key, value) { +set_experiment_tag <- function(client, experiment_id, key, value) { body <- list(experiment_id = experiment_id, key = key, value = value) client$do("POST", "/api/2.0/mlflow/experiments/set-experiment-tag", body = body) } + +#' @rdname set_experiment_tag +#' @export +experimentsSetExperimentTag <- set_experiment_tag #' Set experiment permissions. #' #' Sets permissions on an experiment. Experiments can inherit permissions from @@ -563,13 +689,18 @@ experimentsSetExperimentTag <- function(client, experiment_id, key, value) { #' @param access_control_list This field has no description yet. #' @param experiment_id Required. The experiment for which to get or manage permissions. #' -#' @rdname experimentsSetPermissions +#' @rdname set_experiment_permissions +#' @alias experimentsSetPermissions #' @export -experimentsSetPermissions <- function(client, experiment_id, access_control_list = NULL) { +set_experiment_permissions <- function(client, experiment_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", paste("/api/2.0/permissions/experiments/", experiment_id, sep = ""), body = body) } + +#' @rdname set_experiment_permissions +#' @export +experimentsSetPermissions <- set_experiment_permissions #' Set a tag. #' #' Sets a tag on a run. Tags are run metadata that can be updated during a run @@ -581,12 +712,17 @@ experimentsSetPermissions <- function(client, experiment_id, access_control_list #' @param run_uuid Deprecated, use run_id instead. ID of the run under which to log the tag. #' @param value Required. String value of the tag being logged. #' -#' @rdname experimentsSetTag +#' @rdname set_experiment_tag +#' @alias experimentsSetTag #' @export -experimentsSetTag <- function(client, key, value, run_id = NULL, run_uuid = NULL) { +set_experiment_tag <- function(client, key, value, run_id = NULL, run_uuid = NULL) { body <- list(key = key, run_id = run_id, run_uuid = run_uuid, value = value) client$do("POST", "/api/2.0/mlflow/runs/set-tag", body = body) } + +#' @rdname set_experiment_tag +#' @export +experimentsSetTag <- set_experiment_tag #' Update an experiment. #' #' Updates experiment metadata. 
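Note that experimentsSetExperimentTag and experimentsSetTag above both flatten to `set_experiment_tag`, so when the file is sourced the run-level definition, evaluated second, is the one left bound to the flat name (the camelCase aliases are unaffected, since each captures the value current at assignment). A sketch of the surviving signature, with an illustrative run ID:

    set_experiment_tag(client, key = "stage", value = "prod",
        run_id = "0123456789abcdef0123456789abcdef")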
@@ -595,12 +731,17 @@ experimentsSetTag <- function(client, key, value, run_id = NULL, run_uuid = NULL #' @param experiment_id Required. ID of the associated experiment. #' @param new_name If provided, the experiment's name is changed to the new name. #' -#' @rdname experimentsUpdateExperiment +#' @rdname update_experiment +#' @alias experimentsUpdateExperiment #' @export -experimentsUpdateExperiment <- function(client, experiment_id, new_name = NULL) { +update_experiment <- function(client, experiment_id, new_name = NULL) { body <- list(experiment_id = experiment_id, new_name = new_name) client$do("POST", "/api/2.0/mlflow/experiments/update", body = body) } + +#' @rdname update_experiment +#' @export +experimentsUpdateExperiment <- update_experiment #' Update experiment permissions. #' #' Updates the permissions on an experiment. Experiments can inherit permissions @@ -610,13 +751,18 @@ experimentsUpdateExperiment <- function(client, experiment_id, new_name = NULL) #' @param access_control_list This field has no description yet. #' @param experiment_id Required. The experiment for which to get or manage permissions. #' -#' @rdname experimentsUpdatePermissions +#' @rdname update_experiment_permissions +#' @alias experimentsUpdatePermissions #' @export -experimentsUpdatePermissions <- function(client, experiment_id, access_control_list = NULL) { +update_experiment_permissions <- function(client, experiment_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PATCH", paste("/api/2.0/permissions/experiments/", experiment_id, sep = ""), body = body) } + +#' @rdname update_experiment_permissions +#' @export +experimentsUpdatePermissions <- update_experiment_permissions #' Update a run. #' #' Updates run metadata. @@ -627,14 +773,19 @@ experimentsUpdatePermissions <- function(client, experiment_id, access_control_l #' @param run_uuid Deprecated, use run_id instead. ID of the run to update. #' @param status Updated status of the run. #' -#' @rdname experimentsUpdateRun +#' @rdname update_experiment_run +#' @alias experimentsUpdateRun #' @export -experimentsUpdateRun <- function(client, end_time = NULL, run_id = NULL, run_uuid = NULL, +update_experiment_run <- function(client, end_time = NULL, run_id = NULL, run_uuid = NULL, status = NULL) { body <- list(end_time = end_time, run_id = run_id, run_uuid = run_uuid, status = status) client$do("POST", "/api/2.0/mlflow/runs/update", body = body) } +#' @rdname update_experiment_run +#' @export +experimentsUpdateRun <- update_experiment_run + diff --git a/R/external_locations.R b/R/external_locations.R index 7838d4c9..64382813 100755 --- a/R/external_locations.R +++ b/R/external_locations.R @@ -19,15 +19,20 @@ NULL #' @param skip_validation Skips validation of the storage credential associated with the external location. #' @param url Required. Path URL of the external location. 
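Closing out a run with update_experiment_run; the status string follows the MLflow RunStatus enum (sketch, illustrative run ID, `client` as above):

    update_experiment_run(client, run_id = "0123456789abcdef0123456789abcdef",
        status = "FINISHED", end_time = round(as.numeric(Sys.time()) * 1000))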
#' -#' @rdname externalLocationsCreate +#' @rdname create_external_location +#' @alias externalLocationsCreate #' @export -externalLocationsCreate <- function(client, name, url, credential_name, access_point = NULL, +create_external_location <- function(client, name, url, credential_name, access_point = NULL, comment = NULL, encryption_details = NULL, read_only = NULL, skip_validation = NULL) { body <- list(access_point = access_point, comment = comment, credential_name = credential_name, encryption_details = encryption_details, name = name, read_only = read_only, skip_validation = skip_validation, url = url) client$do("POST", "/api/2.1/unity-catalog/external-locations", body = body) } + +#' @rdname create_external_location +#' @export +externalLocationsCreate <- create_external_location #' Delete an external location. #' #' Deletes the specified external location from the metastore. The caller must @@ -37,13 +42,18 @@ externalLocationsCreate <- function(client, name, url, credential_name, access_p #' @param force Force deletion even if there are dependent external tables or mounts. #' @param name Required. Name of the external location. #' -#' @rdname externalLocationsDelete +#' @rdname delete_external_location +#' @alias externalLocationsDelete #' @export -externalLocationsDelete <- function(client, name, force = NULL) { +delete_external_location <- function(client, name, force = NULL) { query <- list(force = force) client$do("DELETE", paste("/api/2.1/unity-catalog/external-locations/", name, sep = ""), query = query) } + +#' @rdname delete_external_location +#' @export +externalLocationsDelete <- delete_external_location #' Get an external location. #' #' Gets an external location from the metastore. The caller must be either a @@ -54,13 +64,18 @@ externalLocationsDelete <- function(client, name, force = NULL) { #' @param include_browse Whether to include external locations in the response for which the principal can only access selective metadata for. #' @param name Required. Name of the external location. #' -#' @rdname externalLocationsGet +#' @rdname get_external_location +#' @alias externalLocationsGet #' @export -externalLocationsGet <- function(client, name, include_browse = NULL) { +get_external_location <- function(client, name, include_browse = NULL) { query <- list(include_browse = include_browse) client$do("GET", paste("/api/2.1/unity-catalog/external-locations/", name, sep = ""), query = query) } + +#' @rdname get_external_location +#' @export +externalLocationsGet <- get_external_location #' List external locations. #' #' Gets an array of external locations (__ExternalLocationInfo__ objects) from @@ -76,9 +91,10 @@ externalLocationsGet <- function(client, name, include_browse = NULL) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname externalLocationsList +#' @rdname list_external_locations +#' @alias externalLocationsList #' @export -externalLocationsList <- function(client, include_browse = NULL, max_results = NULL, +list_external_locations <- function(client, include_browse = NULL, max_results = NULL, page_token = NULL) { query <- list(include_browse = include_browse, max_results = max_results, page_token = page_token) @@ -98,6 +114,10 @@ externalLocationsList <- function(client, include_browse = NULL, max_results = N return(results) } + +#' @rdname list_external_locations +#' @export +externalLocationsList <- list_external_locations #' Update an external location. #' #' Updates an external location in the metastore. 
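A sketch of the external-location calls above; the location name, URL, and credential name are illustrative, and the storage credential must already exist:

    create_external_location(client, name = "landing_zone",
        url = "s3://my-bucket/landing", credential_name = "my_storage_cred",
        comment = "raw ingest area", read_only = TRUE)
    locs <- list_external_locations(client)   # one data.frame across all pages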
The caller must be the owner @@ -117,9 +137,10 @@ externalLocationsList <- function(client, include_browse = NULL, max_results = N #' @param skip_validation Skips validation of the storage credential associated with the external location. #' @param url Path URL of the external location. #' -#' @rdname externalLocationsUpdate +#' @rdname update_external_location +#' @alias externalLocationsUpdate #' @export -externalLocationsUpdate <- function(client, name, access_point = NULL, comment = NULL, +update_external_location <- function(client, name, access_point = NULL, comment = NULL, credential_name = NULL, encryption_details = NULL, force = NULL, new_name = NULL, owner = NULL, read_only = NULL, skip_validation = NULL, url = NULL) { body <- list(access_point = access_point, comment = comment, credential_name = credential_name, @@ -130,6 +151,10 @@ externalLocationsUpdate <- function(client, name, access_point = NULL, comment = sep = ""), body = body) } +#' @rdname update_external_location +#' @export +externalLocationsUpdate <- update_external_location + diff --git a/R/files.R b/R/files.R index 021b1b22..10fbb009 100755 --- a/R/files.R +++ b/R/files.R @@ -13,12 +13,17 @@ NULL #' #' @param directory_path Required. The absolute path of a directory. #' -#' @rdname filesCreateDirectory +#' @rdname create_file_directory +#' @alias filesCreateDirectory #' @export -filesCreateDirectory <- function(client, directory_path) { +create_file_directory <- function(client, directory_path) { client$do("PUT", paste("/api/2.0/fs/directories", directory_path, sep = "")) } + +#' @rdname create_file_directory +#' @export +filesCreateDirectory <- create_file_directory #' Delete a file. #' #' Deletes a file. If the request is successful, there is no response body. @@ -26,12 +31,17 @@ filesCreateDirectory <- function(client, directory_path) { #' #' @param file_path Required. The absolute path of the file. #' -#' @rdname filesDelete +#' @rdname delete_file +#' @alias filesDelete #' @export -filesDelete <- function(client, file_path) { +delete_file <- function(client, file_path) { client$do("DELETE", paste("/api/2.0/fs/files", file_path, sep = "")) } + +#' @rdname delete_file +#' @export +filesDelete <- delete_file #' Delete a directory. #' #' Deletes an empty directory. @@ -43,12 +53,17 @@ filesDelete <- function(client, file_path) { #' #' @param directory_path Required. The absolute path of a directory. #' -#' @rdname filesDeleteDirectory +#' @rdname delete_file_directory +#' @alias filesDeleteDirectory #' @export -filesDeleteDirectory <- function(client, directory_path) { +delete_file_directory <- function(client, directory_path) { client$do("DELETE", paste("/api/2.0/fs/directories", directory_path, sep = "")) } + +#' @rdname delete_file_directory +#' @export +filesDeleteDirectory <- delete_file_directory #' Download a file. #' #' Downloads a file of up to 5 GiB. The file contents are the response body. @@ -57,12 +72,17 @@ filesDeleteDirectory <- function(client, directory_path) { #' #' @param file_path Required. The absolute path of the file. #' -#' @rdname filesDownload +#' @rdname download_file +#' @alias filesDownload #' @export -filesDownload <- function(client, file_path) { +download_file <- function(client, file_path) { client$do("GET", paste("/api/2.0/fs/files", file_path, sep = "")) } + +#' @rdname download_file +#' @export +filesDownload <- download_file #' Get directory metadata. #' #' Get the metadata of a directory. 
The response HTTP headers contain the @@ -78,12 +98,17 @@ filesDownload <- function(client, file_path) { #' #' @param directory_path Required. The absolute path of a directory. #' -#' @rdname filesGetDirectoryMetadata +#' @rdname get_file_directory_metadata +#' @alias filesGetDirectoryMetadata #' @export -filesGetDirectoryMetadata <- function(client, directory_path) { +get_file_directory_metadata <- function(client, directory_path) { client$do("HEAD", paste("/api/2.0/fs/directories", directory_path, sep = "")) } + +#' @rdname get_file_directory_metadata +#' @export +filesGetDirectoryMetadata <- get_file_directory_metadata #' Get file metadata. #' #' Get the metadata of a file. The response HTTP headers contain the metadata. @@ -92,12 +117,17 @@ filesGetDirectoryMetadata <- function(client, directory_path) { #' #' @param file_path Required. The absolute path of the file. #' -#' @rdname filesGetMetadata +#' @rdname get_file_metadata +#' @alias filesGetMetadata #' @export -filesGetMetadata <- function(client, file_path) { +get_file_metadata <- function(client, file_path) { client$do("HEAD", paste("/api/2.0/fs/files", file_path, sep = "")) } + +#' @rdname get_file_metadata +#' @export +filesGetMetadata <- get_file_metadata #' List directory contents. #' #' Returns the contents of a directory. If there is no directory at the @@ -110,9 +140,10 @@ filesGetMetadata <- function(client, file_path) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname filesListDirectoryContents +#' @rdname list_file_directory_contents +#' @alias filesListDirectoryContents #' @export -filesListDirectoryContents <- function(client, directory_path, page_size = NULL, +list_file_directory_contents <- function(client, directory_path, page_size = NULL, page_token = NULL) { query <- list(page_size = page_size, page_token = page_token) @@ -133,6 +164,10 @@ filesListDirectoryContents <- function(client, directory_path, page_size = NULL, return(results) } + +#' @rdname list_file_directory_contents +#' @export +filesListDirectoryContents <- list_file_directory_contents #' Upload a file. #' #' Uploads a file of up to 5 GiB. The file contents should be sent as the @@ -146,13 +181,18 @@ filesListDirectoryContents <- function(client, directory_path, page_size = NULL, #' @param file_path Required. The absolute path of the file. #' @param overwrite If true, an existing file will be overwritten. #' -#' @rdname filesUpload +#' @rdname upload_file +#' @alias filesUpload #' @export -filesUpload <- function(client, file_path, contents, overwrite = NULL) { +upload_file <- function(client, file_path, contents, overwrite = NULL) { query <- list(overwrite = overwrite) client$do("PUT", paste("/api/2.0/fs/files", file_path, sep = ""), query = query) } +#' @rdname upload_file +#' @export +filesUpload <- upload_file + diff --git a/R/functions.R b/R/functions.R index 7f738059..fcbc93ad 100755 --- a/R/functions.R +++ b/R/functions.R @@ -14,12 +14,17 @@ NULL #' #' @param function_info Required. Partial __FunctionInfo__ specifying the function to be created. #' -#' @rdname functionsCreate +#' @rdname create_function +#' @alias functionsCreate #' @export -functionsCreate <- function(client, function_info) { +create_function <- function(client, function_info) { body <- list(function_info = function_info) client$do("POST", "/api/2.1/unity-catalog/functions", body = body) } + +#' @rdname create_function +#' @export +functionsCreate <- create_function #' Delete a function. #' #' Deletes the function that matches the supplied name. 
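A Files API sketch; the Unity Catalog volume paths are illustrative, and list_file_directory_contents folds all pages into one `data.frame` (`client` as above):

    create_file_directory(client, directory_path = "/Volumes/main/default/vol1/raw")
    entries <- list_file_directory_contents(client,
        directory_path = "/Volumes/main/default/vol1/raw")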
For the deletion to @@ -34,13 +39,18 @@ functionsCreate <- function(client, function_info) { #' @param force Force deletion even if the function is not empty. #' @param name Required. The fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function_name__). #' -#' @rdname functionsDelete +#' @rdname delete_function +#' @alias functionsDelete #' @export -functionsDelete <- function(client, name, force = NULL) { +delete_function <- function(client, name, force = NULL) { query <- list(force = force) client$do("DELETE", paste("/api/2.1/unity-catalog/functions/", name, sep = ""), query = query) } + +#' @rdname delete_function +#' @export +functionsDelete <- delete_function #' Get a function. #' #' Gets a function from within a parent catalog and schema. For the fetch to @@ -55,13 +65,18 @@ functionsDelete <- function(client, name, force = NULL) { #' @param include_browse Whether to include functions in the response for which the principal can only access selective metadata for. #' @param name Required. The fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function_name__). #' -#' @rdname functionsGet +#' @rdname get_function +#' @alias functionsGet #' @export -functionsGet <- function(client, name, include_browse = NULL) { +get_function <- function(client, name, include_browse = NULL) { query <- list(include_browse = include_browse) client$do("GET", paste("/api/2.1/unity-catalog/functions/", name, sep = ""), query = query) } + +#' @rdname get_function +#' @export +functionsGet <- get_function #' List functions. #' #' List functions within the specified parent catalog and schema. If the user is @@ -81,9 +96,10 @@ functionsGet <- function(client, name, include_browse = NULL) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname functionsList +#' @rdname list_functions +#' @alias functionsList #' @export -functionsList <- function(client, catalog_name, schema_name, include_browse = NULL, +list_functions <- function(client, catalog_name, schema_name, include_browse = NULL, max_results = NULL, page_token = NULL) { query <- list(catalog_name = catalog_name, include_browse = include_browse, max_results = max_results, page_token = page_token, schema_name = schema_name) @@ -104,6 +120,10 @@ functionsList <- function(client, catalog_name, schema_name, include_browse = NU return(results) } + +#' @rdname list_functions +#' @export +functionsList <- list_functions #' Update a function. #' #' Updates the function that matches the supplied name. Only the owner of the @@ -119,14 +139,19 @@ functionsList <- function(client, catalog_name, schema_name, include_browse = NU #' @param name Required. The fully-qualified name of the function (of the form __catalog_name__.__schema_name__.__function_name__). #' @param owner Username of current owner of function. #' -#' @rdname functionsUpdate +#' @rdname update_function +#' @alias functionsUpdate #' @export -functionsUpdate <- function(client, name, owner = NULL) { +update_function <- function(client, name, owner = NULL) { body <- list(owner = owner) client$do("PATCH", paste("/api/2.1/unity-catalog/functions/", name, sep = ""), body = body) } +#' @rdname update_function +#' @export +functionsUpdate <- update_function + diff --git a/R/git_credentials.R b/R/git_credentials.R index d15f9436..b101ed13 100755 --- a/R/git_credentials.R +++ b/R/git_credentials.R @@ -15,12 +15,17 @@ NULL #' @param git_username Git username.
#' @param personal_access_token The personal access token used to authenticate to the corresponding Git provider. #' -#' @rdname gitCredentialsCreate +#' @rdname create_git_credential +#' @alias gitCredentialsCreate #' @export -gitCredentialsCreate <- function(client, git_provider, git_username = NULL, personal_access_token = NULL) { +create_git_credential <- function(client, git_provider, git_username = NULL, personal_access_token = NULL) { body <- list(git_provider = git_provider, git_username = git_username, personal_access_token = personal_access_token) client$do("POST", "/api/2.0/git-credentials", body = body) } + +#' @rdname create_git_credential +#' @export +gitCredentialsCreate <- create_git_credential #' Delete a credential. #' #' Deletes the specified Git credential. @@ -28,12 +33,17 @@ gitCredentialsCreate <- function(client, git_provider, git_username = NULL, pers #' #' @param credential_id Required. The ID for the corresponding credential to access. #' -#' @rdname gitCredentialsDelete +#' @rdname delete_git_credential +#' @alias gitCredentialsDelete #' @export -gitCredentialsDelete <- function(client, credential_id) { +delete_git_credential <- function(client, credential_id) { client$do("DELETE", paste("/api/2.0/git-credentials/", credential_id, sep = "")) } + +#' @rdname delete_git_credential +#' @export +gitCredentialsDelete <- delete_git_credential #' Get a credential entry. #' #' Gets the Git credential with the specified credential ID. @@ -41,12 +51,17 @@ gitCredentialsDelete <- function(client, credential_id) { #' #' @param credential_id Required. The ID for the corresponding credential to access. #' -#' @rdname gitCredentialsGet +#' @rdname get_git_credential +#' @alias gitCredentialsGet #' @export -gitCredentialsGet <- function(client, credential_id) { +get_git_credential <- function(client, credential_id) { client$do("GET", paste("/api/2.0/git-credentials/", credential_id, sep = "")) } + +#' @rdname get_git_credential +#' @export +gitCredentialsGet <- get_git_credential #' Get Git credentials. #' #' Lists the calling user's Git credentials. One credential per user is @@ -55,14 +70,19 @@ gitCredentialsGet <- function(client, credential_id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname gitCredentialsList +#' @rdname list_git_credentials +#' @alias gitCredentialsList #' @export -gitCredentialsList <- function(client) { +list_git_credentials <- function(client) { json <- client$do("GET", "/api/2.0/git-credentials") return(json$credentials) } + +#' @rdname list_git_credentials +#' @export +gitCredentialsList <- list_git_credentials #' Update a credential. #' #' Updates the specified Git credential. @@ -73,15 +93,20 @@ gitCredentialsList <- function(client) { #' @param git_username Git username. #' @param personal_access_token The personal access token used to authenticate to the corresponding Git provider. 
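A sketch for registering a Git credential; the provider string is assumed to follow the Git credentials API enum (e.g. "gitHub"), and the token is read from the environment rather than hard-coded:

    create_git_credential(client, git_provider = "gitHub",
        git_username = "someone", personal_access_token = Sys.getenv("GITHUB_PAT"))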
#' -#' @rdname gitCredentialsUpdate +#' @rdname update_git_credential +#' @alias gitCredentialsUpdate #' @export -gitCredentialsUpdate <- function(client, credential_id, git_provider = NULL, git_username = NULL, +update_git_credential <- function(client, credential_id, git_provider = NULL, git_username = NULL, personal_access_token = NULL) { body <- list(git_provider = git_provider, git_username = git_username, personal_access_token = personal_access_token) client$do("PATCH", paste("/api/2.0/git-credentials/", credential_id, sep = ""), body = body) } +#' @rdname update_git_credential +#' @export +gitCredentialsUpdate <- update_git_credential + diff --git a/R/global_init_scripts.R b/R/global_init_scripts.R index f2489ebb..ad6aed2f 100755 --- a/R/global_init_scripts.R +++ b/R/global_init_scripts.R @@ -13,12 +13,17 @@ NULL #' @param position The position of a global init script, where 0 represents the first script to run, 1 is the second script to run, in ascending order. #' @param script Required. The Base64-encoded content of the script. #' -#' @rdname globalInitScriptsCreate +#' @rdname create_global_init_script +#' @alias globalInitScriptsCreate #' @export -globalInitScriptsCreate <- function(client, name, script, enabled = NULL, position = NULL) { +create_global_init_script <- function(client, name, script, enabled = NULL, position = NULL) { body <- list(enabled = enabled, name = name, position = position, script = script) client$do("POST", "/api/2.0/global-init-scripts", body = body) } + +#' @rdname create_global_init_script +#' @export +globalInitScriptsCreate <- create_global_init_script #' Delete init script. #' #' Deletes a global init script. @@ -26,12 +31,17 @@ globalInitScriptsCreate <- function(client, name, script, enabled = NULL, positi #' #' @param script_id Required. The ID of the global init script. #' -#' @rdname globalInitScriptsDelete +#' @rdname delete_global_init_script +#' @alias globalInitScriptsDelete #' @export -globalInitScriptsDelete <- function(client, script_id) { +delete_global_init_script <- function(client, script_id) { client$do("DELETE", paste("/api/2.0/global-init-scripts/", script_id, sep = "")) } + +#' @rdname delete_global_init_script +#' @export +globalInitScriptsDelete <- delete_global_init_script #' Get an init script. #' #' Gets all the details of a script, including its Base64-encoded contents. @@ -39,12 +49,17 @@ globalInitScriptsDelete <- function(client, script_id) { #' #' @param script_id Required. The ID of the global init script. #' -#' @rdname globalInitScriptsGet +#' @rdname get_global_init_script +#' @alias globalInitScriptsGet #' @export -globalInitScriptsGet <- function(client, script_id) { +get_global_init_script <- function(client, script_id) { client$do("GET", paste("/api/2.0/global-init-scripts/", script_id, sep = "")) } + +#' @rdname get_global_init_script +#' @export +globalInitScriptsGet <- get_global_init_script #' Get init scripts. #' #' Get a list of all global init scripts for this workspace. This returns all @@ -55,14 +70,19 @@ globalInitScriptsGet <- function(client, script_id) { #' #' @return `data.frame` with all of the response pages. 
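create_global_init_script expects the script body Base64-encoded; the encoder below is an assumption (any Base64 encoder works, base64enc is shown), and `client` is as in the earlier sketch:

    script_b64 <- base64enc::base64encode(charToRaw("#!/bin/bash\necho hello"))
    create_global_init_script(client, name = "hello", script = script_b64,
        enabled = TRUE, position = 0)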
#' -#' @rdname globalInitScriptsList +#' @rdname list_global_init_scripts +#' @alias globalInitScriptsList #' @export -globalInitScriptsList <- function(client) { +list_global_init_scripts <- function(client) { json <- client$do("GET", "/api/2.0/global-init-scripts") return(json$scripts) } + +#' @rdname list_global_init_scripts +#' @export +globalInitScriptsList <- list_global_init_scripts #' Update init script. #' #' Updates a global init script, specifying only the fields to change. All @@ -75,15 +95,20 @@ globalInitScriptsList <- function(client) { #' @param script Required. The Base64-encoded content of the script. #' @param script_id Required. The ID of the global init script. #' -#' @rdname globalInitScriptsUpdate +#' @rdname update_global_init_script +#' @alias globalInitScriptsUpdate #' @export -globalInitScriptsUpdate <- function(client, script_id, name, script, enabled = NULL, +update_global_init_script <- function(client, script_id, name, script, enabled = NULL, position = NULL) { body <- list(enabled = enabled, name = name, position = position, script = script) client$do("PATCH", paste("/api/2.0/global-init-scripts/", script_id, sep = ""), body = body) } +#' @rdname update_global_init_script +#' @export +globalInitScriptsUpdate <- update_global_init_script + diff --git a/R/grants.R b/R/grants.R index c2a9f4d4..29a1b13c 100755 --- a/R/grants.R +++ b/R/grants.R @@ -12,13 +12,18 @@ NULL #' @param principal If provided, only the permissions for the specified principal (user or group) are returned. #' @param securable_type Required. Type of securable. #' -#' @rdname grantsGet +#' @rdname get_grant +#' @alias grantsGet #' @export -grantsGet <- function(client, securable_type, full_name, principal = NULL) { +get_grant <- function(client, securable_type, full_name, principal = NULL) { query <- list(principal = principal) client$do("GET", paste("/api/2.1/unity-catalog/permissions/", securable_type, "/", full_name, sep = ""), query = query) } + +#' @rdname get_grant +#' @export +grantsGet <- get_grant #' Get effective permissions. #' #' Gets the effective permissions for a securable. @@ -28,13 +33,18 @@ grantsGet <- function(client, securable_type, full_name, principal = NULL) { #' @param principal If provided, only the effective permissions for the specified principal (user or group) are returned. #' @param securable_type Required. Type of securable. #' -#' @rdname grantsGetEffective +#' @rdname get_grant_effective +#' @alias grantsGetEffective #' @export -grantsGetEffective <- function(client, securable_type, full_name, principal = NULL) { +get_grant_effective <- function(client, securable_type, full_name, principal = NULL) { query <- list(principal = principal) client$do("GET", paste("/api/2.1/unity-catalog/effective-permissions/", securable_type, "/", full_name, sep = ""), query = query) } + +#' @rdname get_grant_effective +#' @export +grantsGetEffective <- get_grant_effective #' Update permissions. #' #' Updates the permissions for a securable. @@ -44,13 +54,18 @@ grantsGetEffective <- function(client, securable_type, full_name, principal = NU #' @param full_name Required. Full name of securable. #' @param securable_type Required. Type of securable. 
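A grants sketch; "catalog" is one of the Unity Catalog securable types, and the catalog name and principal are illustrative:

    eff <- get_grant_effective(client, securable_type = "catalog",
        full_name = "main", principal = "someone@example.com")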
#' -#' @rdname grantsUpdate +#' @rdname update_grant +#' @alias grantsUpdate #' @export -grantsUpdate <- function(client, securable_type, full_name, changes = NULL) { +update_grant <- function(client, securable_type, full_name, changes = NULL) { body <- list(changes = changes) client$do("PATCH", paste("/api/2.1/unity-catalog/permissions/", securable_type, "/", full_name, sep = ""), body = body) } +#' @rdname update_grant +#' @export +grantsUpdate <- update_grant + diff --git a/R/groups.R b/R/groups.R index aa7153fd..c79ca7c4 100755 --- a/R/groups.R +++ b/R/groups.R @@ -19,15 +19,20 @@ NULL #' @param roles Corresponds to AWS instance profile/arn role. #' @param schemas The schema of the group. #' -#' @rdname groupsCreate +#' @rdname create_group +#' @alias groupsCreate #' @export -groupsCreate <- function(client, display_name = NULL, entitlements = NULL, external_id = NULL, +create_group <- function(client, display_name = NULL, entitlements = NULL, external_id = NULL, groups = NULL, id = NULL, members = NULL, meta = NULL, roles = NULL, schemas = NULL) { body <- list(displayName = display_name, entitlements = entitlements, externalId = external_id, groups = groups, id = id, members = members, meta = meta, roles = roles, schemas = schemas) client$do("POST", "/api/2.0/preview/scim/v2/Groups", body = body) } + +#' @rdname create_group +#' @export +groupsCreate <- create_group #' Delete a group. #' #' Deletes a group from the Databricks workspace. @@ -35,12 +40,17 @@ groupsCreate <- function(client, display_name = NULL, entitlements = NULL, exter #' #' @param id Required. Unique ID for a group in the Databricks workspace. #' -#' @rdname groupsDelete +#' @rdname delete_group +#' @alias groupsDelete #' @export -groupsDelete <- function(client, id) { +delete_group <- function(client, id) { client$do("DELETE", paste("/api/2.0/preview/scim/v2/Groups/", id, sep = "")) } + +#' @rdname delete_group +#' @export +groupsDelete <- delete_group #' Get group details. #' #' Gets the information for a specific group in the Databricks workspace. @@ -48,12 +58,17 @@ groupsDelete <- function(client, id) { #' #' @param id Required. Unique ID for a group in the Databricks workspace. #' -#' @rdname groupsGet +#' @rdname get_group +#' @alias groupsGet #' @export -groupsGet <- function(client, id) { +get_group <- function(client, id) { client$do("GET", paste("/api/2.0/preview/scim/v2/Groups/", id, sep = "")) } + +#' @rdname get_group +#' @export +groupsGet <- get_group #' List group details. #' #' Gets all details of the groups associated with the Databricks workspace. @@ -69,9 +84,10 @@ groupsGet <- function(client, id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname groupsList +#' @rdname list_groups +#' @alias groupsList #' @export -groupsList <- function(client, attributes = NULL, count = NULL, excluded_attributes = NULL, +list_groups <- function(client, attributes = NULL, count = NULL, excluded_attributes = NULL, filter = NULL, sort_by = NULL, sort_order = NULL, start_index = NULL) { query <- list(attributes = attributes, count = count, excludedAttributes = excluded_attributes, filter = filter, sortBy = sort_by, sortOrder = sort_order, startIndex = start_index) @@ -92,6 +108,10 @@ groupsList <- function(client, attributes = NULL, count = NULL, excluded_attribu return(results) } + +#' @rdname list_groups +#' @export +groupsList <- list_groups #' Update group details. #' #' Partially updates the details of a group. 
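list_groups takes SCIM 2.0 query parameters; a filter sketch with an illustrative group name (`client` as above):

    eng <- list_groups(client, filter = 'displayName eq "data-engineers"')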
@@ -101,12 +121,17 @@ groupsList <- function(client, attributes = NULL, count = NULL, excluded_attribu #' @param operations This field has no description yet. #' @param schemas The schema of the patch request. #' -#' @rdname groupsPatch +#' @rdname patch_group +#' @alias groupsPatch #' @export -groupsPatch <- function(client, id, operations = NULL, schemas = NULL) { +patch_group <- function(client, id, operations = NULL, schemas = NULL) { body <- list(Operations = operations, schemas = schemas) client$do("PATCH", paste("/api/2.0/preview/scim/v2/Groups/", id, sep = ""), body = body) } + +#' @rdname patch_group +#' @export +groupsPatch <- patch_group #' Replace a group. #' #' Updates the details of a group by replacing the entire group entity. @@ -122,9 +147,10 @@ groupsPatch <- function(client, id, operations = NULL, schemas = NULL) { #' @param roles Corresponds to AWS instance profile/arn role. #' @param schemas The schema of the group. #' -#' @rdname groupsUpdate +#' @rdname update_group +#' @alias groupsUpdate #' @export -groupsUpdate <- function(client, id, display_name = NULL, entitlements = NULL, external_id = NULL, +update_group <- function(client, id, display_name = NULL, entitlements = NULL, external_id = NULL, groups = NULL, members = NULL, meta = NULL, roles = NULL, schemas = NULL) { body <- list(displayName = display_name, entitlements = entitlements, externalId = external_id, groups = groups, id = id, members = members, meta = meta, roles = roles, @@ -132,6 +158,10 @@ groupsUpdate <- function(client, id, display_name = NULL, entitlements = NULL, e client$do("PUT", paste("/api/2.0/preview/scim/v2/Groups/", id, sep = ""), body = body) } +#' @rdname update_group +#' @export +groupsUpdate <- update_group + diff --git a/R/instance_pools.R b/R/instance_pools.R index 3faa0af1..30cc802d 100755 --- a/R/instance_pools.R +++ b/R/instance_pools.R @@ -22,9 +22,10 @@ NULL #' @param preloaded_docker_images Custom Docker Image BYOC. #' @param preloaded_spark_versions A list containing at most one preloaded Spark image version for the pool. #' -#' @rdname instancePoolsCreate +#' @rdname create_instance_pool +#' @alias instancePoolsCreate #' @export -instancePoolsCreate <- function(client, instance_pool_name, node_type_id, aws_attributes = NULL, +create_instance_pool <- function(client, instance_pool_name, node_type_id, aws_attributes = NULL, azure_attributes = NULL, custom_tags = NULL, disk_spec = NULL, enable_elastic_disk = NULL, gcp_attributes = NULL, idle_instance_autotermination_minutes = NULL, max_capacity = NULL, min_idle_instances = NULL, preloaded_docker_images = NULL, preloaded_spark_versions = NULL) { @@ -36,6 +37,10 @@ instancePoolsCreate <- function(client, instance_pool_name, node_type_id, aws_at preloaded_spark_versions = preloaded_spark_versions) client$do("POST", "/api/2.0/instance-pools/create", body = body) } + +#' @rdname create_instance_pool +#' @export +instancePoolsCreate <- create_instance_pool #' Delete an instance pool. #' #' Deletes the instance pool permanently. The idle instances in the pool are @@ -44,12 +49,17 @@ instancePoolsCreate <- function(client, instance_pool_name, node_type_id, aws_at #' #' @param instance_pool_id Required. The instance pool to be terminated. 
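patch_group issues a SCIM PatchOp request; the payload shape below assumes the standard SCIM 2.0 structure, and the group and member IDs are illustrative:

    patch_group(client, id = "123456",
        operations = list(list(op = "add", path = "members",
            value = list(list(value = "7890")))),
        schemas = list("urn:ietf:params:scim:api:messages:2.0:PatchOp"))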
#' -#' @rdname instancePoolsDelete +#' @rdname delete_instance_pool +#' @alias instancePoolsDelete #' @export -instancePoolsDelete <- function(client, instance_pool_id) { +delete_instance_pool <- function(client, instance_pool_id) { body <- list(instance_pool_id = instance_pool_id) client$do("POST", "/api/2.0/instance-pools/delete", body = body) } + +#' @rdname delete_instance_pool +#' @export +instancePoolsDelete <- delete_instance_pool #' Edit an existing instance pool. #' #' Modifies the configuration of an existing instance pool. @@ -63,9 +73,10 @@ instancePoolsDelete <- function(client, instance_pool_id) { #' @param min_idle_instances Minimum number of idle instances to keep in the instance pool. #' @param node_type_id Required. This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster. #' -#' @rdname instancePoolsEdit +#' @rdname edit_instance_pool +#' @alias instancePoolsEdit #' @export -instancePoolsEdit <- function(client, instance_pool_id, instance_pool_name, node_type_id, +edit_instance_pool <- function(client, instance_pool_id, instance_pool_name, node_type_id, custom_tags = NULL, idle_instance_autotermination_minutes = NULL, max_capacity = NULL, min_idle_instances = NULL) { body <- list(custom_tags = custom_tags, idle_instance_autotermination_minutes = idle_instance_autotermination_minutes, @@ -73,6 +84,10 @@ instancePoolsEdit <- function(client, instance_pool_id, instance_pool_name, node max_capacity = max_capacity, min_idle_instances = min_idle_instances, node_type_id = node_type_id) client$do("POST", "/api/2.0/instance-pools/edit", body = body) } + +#' @rdname edit_instance_pool +#' @export +instancePoolsEdit <- edit_instance_pool #' Get instance pool information. #' #' Retrieve the information for an instance pool based on its identifier. @@ -80,12 +95,17 @@ instancePoolsEdit <- function(client, instance_pool_id, instance_pool_name, node #' #' @param instance_pool_id Required. The canonical unique identifier for the instance pool. #' -#' @rdname instancePoolsGet +#' @rdname get_instance_pool +#' @alias instancePoolsGet #' @export -instancePoolsGet <- function(client, instance_pool_id) { +get_instance_pool <- function(client, instance_pool_id) { query <- list(instance_pool_id = instance_pool_id) client$do("GET", "/api/2.0/instance-pools/get", query = query) } + +#' @rdname get_instance_pool +#' @export +instancePoolsGet <- get_instance_pool #' Get instance pool permission levels. #' #' Gets the permission levels that a user can have on an object. @@ -93,13 +113,18 @@ instancePoolsGet <- function(client, instance_pool_id) { #' #' @param instance_pool_id Required. The instance pool for which to get or manage permissions. #' -#' @rdname instancePoolsGetPermissionLevels +#' @rdname get_instance_pool_permission_levels +#' @alias instancePoolsGetPermissionLevels #' @export -instancePoolsGetPermissionLevels <- function(client, instance_pool_id) { +get_instance_pool_permission_levels <- function(client, instance_pool_id) { client$do("GET", paste("/api/2.0/permissions/instance-pools/", instance_pool_id, "/permissionLevels", , sep = "")) } + +#' @rdname get_instance_pool_permission_levels +#' @export +instancePoolsGetPermissionLevels <- get_instance_pool_permission_levels #' Get instance pool permissions. #' #' Gets the permissions of an instance pool. Instance pools can inherit @@ -108,13 +133,18 @@ instancePoolsGetPermissionLevels <- function(client, instance_pool_id) { #' #' @param instance_pool_id Required. 
The instance pool for which to get or manage permissions. #' -#' @rdname instancePoolsGetPermissions +#' @rdname get_instance_pool_permissions +#' @alias instancePoolsGetPermissions #' @export -instancePoolsGetPermissions <- function(client, instance_pool_id) { +get_instance_pool_permissions <- function(client, instance_pool_id) { client$do("GET", paste("/api/2.0/permissions/instance-pools/", instance_pool_id, sep = "")) } + +#' @rdname get_instance_pool_permissions +#' @export +instancePoolsGetPermissions <- get_instance_pool_permissions #' List instance pool info. #' #' Gets a list of instance pools with their statistics. @@ -122,14 +152,19 @@ instancePoolsGetPermissions <- function(client, instance_pool_id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname instancePoolsList +#' @rdname list_instance_pools +#' @alias instancePoolsList #' @export -instancePoolsList <- function(client) { +list_instance_pools <- function(client) { json <- client$do("GET", "/api/2.0/instance-pools/list") return(json$instance_pools) } + +#' @rdname list_instance_pools +#' @export +instancePoolsList <- list_instance_pools #' Set instance pool permissions. #' #' Sets permissions on an instance pool. Instance pools can inherit permissions @@ -139,13 +174,18 @@ instancePoolsList <- function(client) { #' @param access_control_list This field has no description yet. #' @param instance_pool_id Required. The instance pool for which to get or manage permissions. #' -#' @rdname instancePoolsSetPermissions +#' @rdname set_instance_pool_permissions +#' @alias instancePoolsSetPermissions #' @export -instancePoolsSetPermissions <- function(client, instance_pool_id, access_control_list = NULL) { +set_instance_pool_permissions <- function(client, instance_pool_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", paste("/api/2.0/permissions/instance-pools/", instance_pool_id, sep = ""), body = body) } + +#' @rdname set_instance_pool_permissions +#' @export +instancePoolsSetPermissions <- set_instance_pool_permissions #' Update instance pool permissions. #' #' Updates the permissions on an instance pool. Instance pools can inherit @@ -155,14 +195,19 @@ instancePoolsSetPermissions <- function(client, instance_pool_id, access_control #' @param access_control_list This field has no description yet. #' @param instance_pool_id Required. The instance pool for which to get or manage permissions. #' -#' @rdname instancePoolsUpdatePermissions +#' @rdname update_instance_pool_permissions +#' @alias instancePoolsUpdatePermissions #' @export -instancePoolsUpdatePermissions <- function(client, instance_pool_id, access_control_list = NULL) { +update_instance_pool_permissions <- function(client, instance_pool_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PATCH", paste("/api/2.0/permissions/instance-pools/", instance_pool_id, sep = ""), body = body) } +#' @rdname update_instance_pool_permissions +#' @export +instancePoolsUpdatePermissions <- update_instance_pool_permissions + diff --git a/R/instance_profiles.R b/R/instance_profiles.R index b4b2c744..3874d210 100755 --- a/R/instance_profiles.R +++ b/R/instance_profiles.R @@ -14,14 +14,19 @@ NULL #' @param is_meta_instance_profile Boolean flag indicating whether the instance profile should only be used in credential passthrough scenarios. 
#' @param skip_validation By default, Databricks validates that it has sufficient permissions to launch instances with the instance profile. #' -#' @rdname instanceProfilesAdd +#' @rdname add_instance_profile +#' @alias instanceProfilesAdd #' @export -instanceProfilesAdd <- function(client, instance_profile_arn, iam_role_arn = NULL, +add_instance_profile <- function(client, instance_profile_arn, iam_role_arn = NULL, is_meta_instance_profile = NULL, skip_validation = NULL) { body <- list(iam_role_arn = iam_role_arn, instance_profile_arn = instance_profile_arn, is_meta_instance_profile = is_meta_instance_profile, skip_validation = skip_validation) client$do("POST", "/api/2.0/instance-profiles/add", body = body) } + +#' @rdname add_instance_profile +#' @export +instanceProfilesAdd <- add_instance_profile #' Edit an instance profile. #' #' The only supported field to change is the optional IAM role ARN associated @@ -45,14 +50,19 @@ instanceProfilesAdd <- function(client, instance_profile_arn, iam_role_arn = NUL #' @param instance_profile_arn Required. The AWS ARN of the instance profile to register with Databricks. #' @param is_meta_instance_profile Boolean flag indicating whether the instance profile should only be used in credential passthrough scenarios. #' -#' @rdname instanceProfilesEdit +#' @rdname edit_instance_profile +#' @alias instanceProfilesEdit #' @export -instanceProfilesEdit <- function(client, instance_profile_arn, iam_role_arn = NULL, +edit_instance_profile <- function(client, instance_profile_arn, iam_role_arn = NULL, is_meta_instance_profile = NULL) { body <- list(iam_role_arn = iam_role_arn, instance_profile_arn = instance_profile_arn, is_meta_instance_profile = is_meta_instance_profile) client$do("POST", "/api/2.0/instance-profiles/edit", body = body) } + +#' @rdname edit_instance_profile +#' @export +instanceProfilesEdit <- edit_instance_profile #' List available instance profiles. #' #' List the instance profiles that the calling user can use to launch a cluster. @@ -62,14 +72,19 @@ instanceProfilesEdit <- function(client, instance_profile_arn, iam_role_arn = NU #' #' @return `data.frame` with all of the response pages. #' -#' @rdname instanceProfilesList +#' @rdname list_instance_profiles +#' @alias instanceProfilesList #' @export -instanceProfilesList <- function(client) { +list_instance_profiles <- function(client) { json <- client$do("GET", "/api/2.0/instance-profiles/list") return(json$instance_profiles) } + +#' @rdname list_instance_profiles +#' @export +instanceProfilesList <- list_instance_profiles #' Remove the instance profile. #' #' Remove the instance profile with the provided ARN. Existing clusters with @@ -80,13 +95,18 @@ instanceProfilesList <- function(client) { #' #' @param instance_profile_arn Required. The ARN of the instance profile to remove. #' -#' @rdname instanceProfilesRemove +#' @rdname remove_instance_profile +#' @alias instanceProfilesRemove #' @export -instanceProfilesRemove <- function(client, instance_profile_arn) { +remove_instance_profile <- function(client, instance_profile_arn) { body <- list(instance_profile_arn = instance_profile_arn) client$do("POST", "/api/2.0/instance-profiles/remove", body = body) } +#' @rdname remove_instance_profile +#' @export +instanceProfilesRemove <- remove_instance_profile + diff --git a/R/ip_access_lists.R b/R/ip_access_lists.R index 1b40a4f2..a6604a3b 100755 --- a/R/ip_access_lists.R +++ b/R/ip_access_lists.R @@ -27,12 +27,17 @@ NULL #' @param label Required. Label for the IP access list. 
#' @param list_type Required. Type of IP access list. #' -#' @rdname ipAccessListsCreate +#' @rdname create_ip_access_list +#' @alias ipAccessListsCreate #' @export -ipAccessListsCreate <- function(client, label, list_type, ip_addresses = NULL) { +create_ip_access_list <- function(client, label, list_type, ip_addresses = NULL) { body <- list(ip_addresses = ip_addresses, label = label, list_type = list_type) client$do("POST", "/api/2.0/ip-access-lists", body = body) } + +#' @rdname create_ip_access_list +#' @export +ipAccessListsCreate <- create_ip_access_list #' Delete access list. #' #' Deletes an IP access list, specified by its list ID. @@ -40,12 +45,17 @@ ipAccessListsCreate <- function(client, label, list_type, ip_addresses = NULL) { #' #' @param ip_access_list_id Required. The ID for the corresponding IP access list. #' -#' @rdname ipAccessListsDelete +#' @rdname delete_ip_access_list +#' @alias ipAccessListsDelete #' @export -ipAccessListsDelete <- function(client, ip_access_list_id) { +delete_ip_access_list <- function(client, ip_access_list_id) { client$do("DELETE", paste("/api/2.0/ip-access-lists/", ip_access_list_id, sep = "")) } + +#' @rdname delete_ip_access_list +#' @export +ipAccessListsDelete <- delete_ip_access_list #' Get access list. #' #' Gets an IP access list, specified by its list ID. @@ -53,12 +63,17 @@ ipAccessListsDelete <- function(client, ip_access_list_id) { #' #' @param ip_access_list_id Required. The ID for the corresponding IP access list. #' -#' @rdname ipAccessListsGet +#' @rdname get_ip_access_list +#' @alias ipAccessListsGet #' @export -ipAccessListsGet <- function(client, ip_access_list_id) { +get_ip_access_list <- function(client, ip_access_list_id) { client$do("GET", paste("/api/2.0/ip-access-lists/", ip_access_list_id, sep = "")) } + +#' @rdname get_ip_access_list +#' @export +ipAccessListsGet <- get_ip_access_list #' Get access lists. #' #' Gets all IP access lists for the specified workspace. @@ -66,14 +81,19 @@ ipAccessListsGet <- function(client, ip_access_list_id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname ipAccessListsList +#' @rdname list_ip_access_lists +#' @alias ipAccessListsList #' @export -ipAccessListsList <- function(client) { +list_ip_access_lists <- function(client) { json <- client$do("GET", "/api/2.0/ip-access-lists") return(json$ip_access_lists) } + +#' @rdname list_ip_access_lists +#' @export +ipAccessListsList <- list_ip_access_lists #' Replace access list. #' #' Replaces an IP access list, specified by its ID. @@ -96,14 +116,19 @@ ipAccessListsList <- function(client) { #' @param label Required. Label for the IP access list. #' @param list_type Required. Type of IP access list. #' -#' @rdname ipAccessListsReplace +#' @rdname replace_ip_access_list +#' @alias ipAccessListsReplace #' @export -ipAccessListsReplace <- function(client, ip_access_list_id, label, list_type, enabled, +replace_ip_access_list <- function(client, ip_access_list_id, label, list_type, enabled, ip_addresses = NULL) { body <- list(enabled = enabled, ip_addresses = ip_addresses, label = label, list_type = list_type) client$do("PUT", paste("/api/2.0/ip-access-lists/", ip_access_list_id, sep = ""), body = body) } + +#' @rdname replace_ip_access_list +#' @export +ipAccessListsReplace <- replace_ip_access_list #' Update access list. #' #' Updates an existing IP access list, specified by its ID. 
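For the IP access list functions above, a short sketch; the CIDR and the list ID are placeholders, and `list_type = "ALLOW"` is one of the API's documented list types.

create_ip_access_list(client,
  label = "office",
  list_type = "ALLOW",
  ip_addresses = list("192.168.100.0/22"))
all_lists <- list_ip_access_lists(client)
# Disable an existing list by its ID (placeholder):
update_ip_access_list(client, ip_access_list_id = "abcd-1234", enabled = FALSE)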
@@ -131,15 +156,20 @@ ipAccessListsReplace <- function(client, ip_access_list_id, label, list_type, en #' @param label Label for the IP access list. #' @param list_type Type of IP access list. #' -#' @rdname ipAccessListsUpdate +#' @rdname update_ip_access_list +#' @alias ipAccessListsUpdate #' @export -ipAccessListsUpdate <- function(client, ip_access_list_id, enabled = NULL, ip_addresses = NULL, +update_ip_access_list <- function(client, ip_access_list_id, enabled = NULL, ip_addresses = NULL, label = NULL, list_type = NULL) { body <- list(enabled = enabled, ip_addresses = ip_addresses, label = label, list_type = list_type) client$do("PATCH", paste("/api/2.0/ip-access-lists/", ip_access_list_id, sep = ""), body = body) } +#' @rdname update_ip_access_list +#' @export +ipAccessListsUpdate <- update_ip_access_list + diff --git a/R/jobs.R b/R/jobs.R index 1bac8a6d..a50a3f8f 100755 --- a/R/jobs.R +++ b/R/jobs.R @@ -12,12 +12,17 @@ NULL #' @param all_queued_runs Optional boolean parameter to cancel all queued runs. #' @param job_id The canonical identifier of the job to cancel all runs of. #' -#' @rdname jobsCancelAllRuns +#' @rdname cancel_job_all_runs +#' @alias jobsCancelAllRuns #' @export -jobsCancelAllRuns <- function(client, all_queued_runs = NULL, job_id = NULL) { +cancel_job_all_runs <- function(client, all_queued_runs = NULL, job_id = NULL) { body <- list(all_queued_runs = all_queued_runs, job_id = job_id) client$do("POST", "/api/2.1/jobs/runs/cancel-all", body = body) } + +#' @rdname cancel_job_all_runs +#' @export +jobsCancelAllRuns <- cancel_job_all_runs #' Cancel a run. #' #' Cancels a job run or a task run. The run is canceled asynchronously, so it @@ -26,12 +31,17 @@ jobsCancelAllRuns <- function(client, all_queued_runs = NULL, job_id = NULL) { #' #' @param run_id Required. This field is required. #' -#' @rdname jobsCancelRun +#' @rdname cancel_job_run +#' @alias jobsCancelRun #' @export -jobsCancelRun <- function(client, run_id) { +cancel_job_run <- function(client, run_id) { body <- list(run_id = run_id) client$do("POST", "/api/2.1/jobs/runs/cancel", body = body) } + +#' @rdname cancel_job_run +#' @export +jobsCancelRun <- cancel_job_run #' Create a new job. #' #' Create a new job. @@ -61,9 +71,10 @@ jobsCancelRun <- function(client, run_id) { #' @param trigger A configuration to trigger a run when certain conditions are met. #' @param webhook_notifications A collection of system notification IDs to notify when runs of this job begin or complete. #' -#' @rdname jobsCreate +#' @rdname create_job +#' @alias jobsCreate #' @export -jobsCreate <- function(client, access_control_list = NULL, compute = NULL, continuous = NULL, +create_job <- function(client, access_control_list = NULL, compute = NULL, continuous = NULL, deployment = NULL, description = NULL, edit_mode = NULL, email_notifications = NULL, format = NULL, git_source = NULL, health = NULL, job_clusters = NULL, max_concurrent_runs = NULL, name = NULL, notification_settings = NULL, parameters = NULL, queue = NULL, run_as = NULL, @@ -78,6 +89,10 @@ jobsCreate <- function(client, access_control_list = NULL, compute = NULL, conti timeout_seconds = timeout_seconds, trigger = trigger, webhook_notifications = webhook_notifications) client$do("POST", "/api/2.1/jobs/create", body = body) } + +#' @rdname create_job +#' @export +jobsCreate <- create_job #' Delete a job. #' #' Deletes a job. @@ -85,12 +100,17 @@ jobsCreate <- function(client, access_control_list = NULL, compute = NULL, conti #' #' @param job_id Required. 
The canonical identifier of the job to delete. #' -#' @rdname jobsDelete +#' @rdname delete_job +#' @alias jobsDelete #' @export -jobsDelete <- function(client, job_id) { +delete_job <- function(client, job_id) { body <- list(job_id = job_id) client$do("POST", "/api/2.1/jobs/delete", body = body) } + +#' @rdname delete_job +#' @export +jobsDelete <- delete_job #' Delete a job run. #' #' Deletes a non-active run. Returns an error if the run is active. @@ -98,12 +118,17 @@ jobsDelete <- function(client, job_id) { #' #' @param run_id Required. The canonical identifier of the run for which to retrieve the metadata. #' -#' @rdname jobsDeleteRun +#' @rdname delete_job_run +#' @alias jobsDeleteRun #' @export -jobsDeleteRun <- function(client, run_id) { +delete_job_run <- function(client, run_id) { body <- list(run_id = run_id) client$do("POST", "/api/2.1/jobs/runs/delete", body = body) } + +#' @rdname delete_job_run +#' @export +jobsDeleteRun <- delete_job_run #' Export and retrieve a job run. #' #' Export and retrieve the job run task. @@ -112,12 +137,17 @@ jobsDeleteRun <- function(client, run_id) { #' @param run_id Required. The canonical identifier for the run. #' @param views_to_export Which views to export (CODE, DASHBOARDS, or ALL). #' -#' @rdname jobsExportRun +#' @rdname export_job_run +#' @alias jobsExportRun #' @export -jobsExportRun <- function(client, run_id, views_to_export = NULL) { +export_job_run <- function(client, run_id, views_to_export = NULL) { query <- list(run_id = run_id, views_to_export = views_to_export) client$do("GET", "/api/2.1/jobs/runs/export", query = query) } + +#' @rdname export_job_run +#' @export +jobsExportRun <- export_job_run #' Get a single job. #' #' Retrieves the details for a single job. @@ -125,12 +155,17 @@ jobsExportRun <- function(client, run_id, views_to_export = NULL) { #' #' @param job_id Required. The canonical identifier of the job to retrieve information about. #' -#' @rdname jobsGet +#' @rdname get_job +#' @alias jobsGet #' @export -jobsGet <- function(client, job_id) { +get_job <- function(client, job_id) { query <- list(job_id = job_id) client$do("GET", "/api/2.1/jobs/get", query = query) } + +#' @rdname get_job +#' @export +jobsGet <- get_job #' Get job permission levels. #' #' Gets the permission levels that a user can have on an object. @@ -138,13 +173,18 @@ jobsGet <- function(client, job_id) { #' #' @param job_id Required. The job for which to get or manage permissions. #' -#' @rdname jobsGetPermissionLevels +#' @rdname get_job_permission_levels +#' @alias jobsGetPermissionLevels #' @export -jobsGetPermissionLevels <- function(client, job_id) { +get_job_permission_levels <- function(client, job_id) { client$do("GET", paste("/api/2.0/permissions/jobs/", job_id, "/permissionLevels", , sep = "")) } + +#' @rdname get_job_permission_levels +#' @export +jobsGetPermissionLevels <- get_job_permission_levels #' Get job permissions. #' #' Gets the permissions of a job. Jobs can inherit permissions from their root @@ -153,12 +193,17 @@ jobsGetPermissionLevels <- function(client, job_id) { #' #' @param job_id Required. The job for which to get or manage permissions. 
#' -#' @rdname jobsGetPermissions +#' @rdname get_job_permissions +#' @alias jobsGetPermissions #' @export -jobsGetPermissions <- function(client, job_id) { +get_job_permissions <- function(client, job_id) { client$do("GET", paste("/api/2.0/permissions/jobs/", job_id, sep = "")) } + +#' @rdname get_job_permissions +#' @export +jobsGetPermissions <- get_job_permissions #' Get a single job run. #' #' Retrieve the metadata of a run. @@ -168,13 +213,18 @@ jobsGetPermissions <- function(client, job_id) { #' @param include_resolved_values Whether to include resolved parameter values in the response. #' @param run_id Required. The canonical identifier of the run for which to retrieve the metadata. #' -#' @rdname jobsGetRun +#' @rdname get_job_run +#' @alias jobsGetRun #' @export -jobsGetRun <- function(client, run_id, include_history = NULL, include_resolved_values = NULL) { +get_job_run <- function(client, run_id, include_history = NULL, include_resolved_values = NULL) { query <- list(include_history = include_history, include_resolved_values = include_resolved_values, run_id = run_id) client$do("GET", "/api/2.1/jobs/runs/get", query = query) } + +#' @rdname get_job_run +#' @export +jobsGetRun <- get_job_run #' Get the output for a single run. #' #' Retrieve the output and metadata of a single task run. When a notebook task @@ -191,12 +241,17 @@ jobsGetRun <- function(client, run_id, include_history = NULL, include_resolved_ #' #' @param run_id Required. The canonical identifier for the run. #' -#' @rdname jobsGetRunOutput +#' @rdname get_job_run_output +#' @alias jobsGetRunOutput #' @export -jobsGetRunOutput <- function(client, run_id) { +get_job_run_output <- function(client, run_id) { query <- list(run_id = run_id) client$do("GET", "/api/2.1/jobs/runs/get-output", query = query) } + +#' @rdname get_job_run_output +#' @export +jobsGetRunOutput <- get_job_run_output #' List jobs. #' #' Retrieves a list of jobs. @@ -210,9 +265,10 @@ jobsGetRunOutput <- function(client, run_id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname jobsList +#' @rdname list_jobs +#' @alias jobsList #' @export -jobsList <- function(client, expand_tasks = NULL, limit = NULL, name = NULL, offset = NULL, +list_jobs <- function(client, expand_tasks = NULL, limit = NULL, name = NULL, offset = NULL, page_token = NULL) { query <- list(expand_tasks = expand_tasks, limit = limit, name = name, offset = offset, page_token = page_token) @@ -233,6 +289,10 @@ jobsList <- function(client, expand_tasks = NULL, limit = NULL, name = NULL, off return(results) } + +#' @rdname list_jobs +#' @export +jobsList <- list_jobs #' List job runs. #' #' List runs in descending order by start time. @@ -251,9 +311,10 @@ jobsList <- function(client, expand_tasks = NULL, limit = NULL, name = NULL, off #' #' @return `data.frame` with all of the response pages. 
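A quick sketch of the renamed job lookups above; the job ID is a placeholder, and the columns of the returned data.frame simply mirror the Jobs API response.

jobs  <- list_jobs(client, limit = 25)  # pages are folded into one data.frame
job   <- get_job(client, job_id = 123)
perms <- get_job_permissions(client, job_id = 123)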
#' -#' @rdname jobsListRuns +#' @rdname list_job_runs +#' @alias jobsListRuns #' @export -jobsListRuns <- function(client, active_only = NULL, completed_only = NULL, expand_tasks = NULL, +list_job_runs <- function(client, active_only = NULL, completed_only = NULL, expand_tasks = NULL, job_id = NULL, limit = NULL, offset = NULL, page_token = NULL, run_type = NULL, start_time_from = NULL, start_time_to = NULL) { query <- list(active_only = active_only, completed_only = completed_only, expand_tasks = expand_tasks, @@ -276,6 +337,10 @@ jobsListRuns <- function(client, active_only = NULL, completed_only = NULL, expa return(results) } + +#' @rdname list_job_runs +#' @export +jobsListRuns <- list_job_runs #' Repair a job run. #' #' Re-run one or more tasks. Tasks are re-run as part of the original job run. @@ -298,9 +363,10 @@ jobsListRuns <- function(client, active_only = NULL, completed_only = NULL, expa #' @param spark_submit_params A list of parameters for jobs with spark submit task, for example `'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']`. #' @param sql_params A map from keys to values for jobs with SQL task, for example `'sql_params': {'name': 'john doe', 'age': '35'}`. #' -#' @rdname jobsRepairRun +#' @rdname repair_job_run +#' @alias jobsRepairRun #' @export -jobsRepairRun <- function(client, run_id, dbt_commands = NULL, jar_params = NULL, +repair_job_run <- function(client, run_id, dbt_commands = NULL, jar_params = NULL, job_parameters = NULL, latest_repair_id = NULL, notebook_params = NULL, pipeline_params = NULL, python_named_params = NULL, python_params = NULL, rerun_all_failed_tasks = NULL, rerun_dependent_tasks = NULL, rerun_tasks = NULL, spark_submit_params = NULL, @@ -313,6 +379,10 @@ jobsRepairRun <- function(client, run_id, dbt_commands = NULL, jar_params = NULL sql_params = sql_params) client$do("POST", "/api/2.1/jobs/runs/repair", body = body) } + +#' @rdname repair_job_run +#' @export +jobsRepairRun <- repair_job_run #' Update all job settings (reset). #' #' Overwrite all settings for the given job. Use the [_Update_ @@ -322,12 +392,17 @@ jobsRepairRun <- function(client, run_id, dbt_commands = NULL, jar_params = NULL #' @param job_id Required. The canonical identifier of the job to reset. #' @param new_settings Required. The new settings of the job. #' -#' @rdname jobsReset +#' @rdname reset_job +#' @alias jobsReset #' @export -jobsReset <- function(client, job_id, new_settings) { +reset_job <- function(client, job_id, new_settings) { body <- list(job_id = job_id, new_settings = new_settings) client$do("POST", "/api/2.1/jobs/reset", body = body) } + +#' @rdname reset_job +#' @export +jobsReset <- reset_job #' Trigger a new job run. #' #' Run a job and return the `run_id` of the triggered run. @@ -346,9 +421,10 @@ jobsReset <- function(client, job_id, new_settings) { #' @param spark_submit_params A list of parameters for jobs with spark submit task, for example `'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']`. #' @param sql_params A map from keys to values for jobs with SQL task, for example `'sql_params': {'name': 'john doe', 'age': '35'}`. 
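Triggering and inspecting a run with the renamed helpers; `run$run_id` assumes the Jobs API's documented response field.

run    <- run_job_now(client, job_id = 123,
                      notebook_params = list(date = "2024-01-01"))
active <- list_job_runs(client, job_id = 123, active_only = TRUE)
status <- get_job_run(client, run_id = run$run_id)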
#' -#' @rdname jobsRunNow +#' @rdname run_job_now +#' @alias jobsRunNow #' @export -jobsRunNow <- function(client, job_id, dbt_commands = NULL, idempotency_token = NULL, +run_job_now <- function(client, job_id, dbt_commands = NULL, idempotency_token = NULL, jar_params = NULL, job_parameters = NULL, notebook_params = NULL, pipeline_params = NULL, python_named_params = NULL, python_params = NULL, queue = NULL, spark_submit_params = NULL, sql_params = NULL) { @@ -359,6 +435,10 @@ jobsRunNow <- function(client, job_id, dbt_commands = NULL, idempotency_token = sql_params = sql_params) client$do("POST", "/api/2.1/jobs/run-now", body = body) } + +#' @rdname run_job_now +#' @export +jobsRunNow <- run_job_now #' Set job permissions. #' #' Sets permissions on a job. Jobs can inherit permissions from their root @@ -368,12 +448,17 @@ jobsRunNow <- function(client, job_id, dbt_commands = NULL, idempotency_token = #' @param access_control_list This field has no description yet. #' @param job_id Required. The job for which to get or manage permissions. #' -#' @rdname jobsSetPermissions +#' @rdname set_job_permissions +#' @alias jobsSetPermissions #' @export -jobsSetPermissions <- function(client, job_id, access_control_list = NULL) { +set_job_permissions <- function(client, job_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", paste("/api/2.0/permissions/jobs/", job_id, sep = ""), body = body) } + +#' @rdname set_job_permissions +#' @export +jobsSetPermissions <- set_job_permissions #' Create and trigger a one-time run. #' #' Submit a one-time run. This endpoint allows you to submit a workload directly @@ -394,9 +479,10 @@ jobsSetPermissions <- function(client, job_id, access_control_list = NULL) { #' @param timeout_seconds An optional timeout applied to each run of this job. #' @param webhook_notifications A collection of system notification IDs to notify when the run begins or completes. #' -#' @rdname jobsSubmit +#' @rdname submit_job +#' @alias jobsSubmit #' @export -jobsSubmit <- function(client, access_control_list = NULL, email_notifications = NULL, +submit_job <- function(client, access_control_list = NULL, email_notifications = NULL, git_source = NULL, health = NULL, idempotency_token = NULL, notification_settings = NULL, queue = NULL, run_name = NULL, tasks = NULL, timeout_seconds = NULL, webhook_notifications = NULL) { body <- list(access_control_list = access_control_list, email_notifications = email_notifications, @@ -405,6 +491,10 @@ jobsSubmit <- function(client, access_control_list = NULL, email_notifications = tasks = tasks, timeout_seconds = timeout_seconds, webhook_notifications = webhook_notifications) client$do("POST", "/api/2.1/jobs/runs/submit", body = body) } + +#' @rdname submit_job +#' @export +jobsSubmit <- submit_job #' Update job settings partially. #' #' Add, update, or remove specific settings of an existing job. Use the [_Reset_ @@ -415,12 +505,17 @@ jobsSubmit <- function(client, access_control_list = NULL, email_notifications = #' @param job_id Required. The canonical identifier of the job to update. #' @param new_settings The new settings for the job. 
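The one-time `submit_job()` above takes the same nested-list payloads as the REST API; the task and cluster specs below are illustrative assumptions about that shape, not values from this diff.

submitted <- submit_job(client, run_name = "ad hoc scoring",
  tasks = list(list(
    task_key      = "main",
    notebook_task = list(notebook_path = "/Workspace/Shared/score"),
    new_cluster   = list(spark_version = "14.3.x-scala2.12",
                         node_type_id = "i3.xlarge", num_workers = 1))))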
#' -#' @rdname jobsUpdate +#' @rdname update_job +#' @alias jobsUpdate #' @export -jobsUpdate <- function(client, job_id, fields_to_remove = NULL, new_settings = NULL) { +update_job <- function(client, job_id, fields_to_remove = NULL, new_settings = NULL) { body <- list(fields_to_remove = fields_to_remove, job_id = job_id, new_settings = new_settings) client$do("POST", "/api/2.1/jobs/update", body = body) } + +#' @rdname update_job +#' @export +jobsUpdate <- update_job #' Update job permissions. #' #' Updates the permissions on a job. Jobs can inherit permissions from their @@ -430,13 +525,18 @@ jobsUpdate <- function(client, job_id, fields_to_remove = NULL, new_settings = N #' @param access_control_list This field has no description yet. #' @param job_id Required. The job for which to get or manage permissions. #' -#' @rdname jobsUpdatePermissions +#' @rdname update_job_permissions +#' @alias jobsUpdatePermissions #' @export -jobsUpdatePermissions <- function(client, job_id, access_control_list = NULL) { +update_job_permissions <- function(client, job_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PATCH", paste("/api/2.0/permissions/jobs/", job_id, sep = ""), body = body) } +#' @rdname update_job_permissions +#' @export +jobsUpdatePermissions <- update_job_permissions + #' Cancel a run. #' #' Cancels a job run or a task run. The run is canceled asynchronously, so it @@ -454,9 +554,9 @@ jobsUpdatePermissions <- function(client, job_id, access_control_list = NULL) { #' #' @param run_id Required. This field is required. #' -#' @rdname jobsCancelRunAndWait +#' @rdname cancel_job_run_and_wait #' @export -jobsCancelRunAndWait <- function(client, run_id, timeout = 20, callback = cli_reporter) { +cancel_job_run_and_wait <- function(client, run_id, timeout = 20, callback = cli_reporter) { body <- list(run_id = run_id) op_response <- client$do("POST", "/api/2.1/jobs/runs/cancel", body = body) started <- as.numeric(Sys.time()) @@ -524,9 +624,9 @@ jobsCancelRunAndWait <- function(client, run_id, timeout = 20, callback = cli_re #' @param include_resolved_values Whether to include resolved parameter values in the response. #' @param run_id Required. The canonical identifier of the run for which to retrieve the metadata. #' -#' @rdname jobsGetRunAndWait +#' @rdname get_job_run_and_wait #' @export -jobsGetRunAndWait <- function(client, run_id, include_history = NULL, include_resolved_values = NULL, +get_job_run_and_wait <- function(client, run_id, include_history = NULL, include_resolved_values = NULL, timeout = 20, callback = cli_reporter) { query <- list(include_history = include_history, include_resolved_values = include_resolved_values, run_id = run_id) @@ -605,9 +705,9 @@ jobsGetRunAndWait <- function(client, run_id, include_history = NULL, include_re #' @param spark_submit_params A list of parameters for jobs with spark submit task, for example `'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']`. #' @param sql_params A map from keys to values for jobs with SQL task, for example `'sql_params': {'name': 'john doe', 'age': '35'}`. 
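As the `_and_wait` hunks here show, each long-running operation also gets a polling variant that adds a `timeout` (default 20) and a progress `callback` (default `cli_reporter`) and returns once the run settles. A sketch with a placeholder run ID:

# Fire-and-forget cancellation:
cancel_job_run(client, run_id = 456)
# Or block until the cancellation has settled:
res <- cancel_job_run_and_wait(client, run_id = 456)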
#' -#' @rdname jobsRepairRunAndWait +#' @rdname repair_job_run_and_wait #' @export -jobsRepairRunAndWait <- function(client, run_id, dbt_commands = NULL, jar_params = NULL, +repair_job_run_and_wait <- function(client, run_id, dbt_commands = NULL, jar_params = NULL, job_parameters = NULL, latest_repair_id = NULL, notebook_params = NULL, pipeline_params = NULL, python_named_params = NULL, python_params = NULL, rerun_all_failed_tasks = NULL, rerun_dependent_tasks = NULL, rerun_tasks = NULL, spark_submit_params = NULL, @@ -687,9 +787,9 @@ jobsRepairRunAndWait <- function(client, run_id, dbt_commands = NULL, jar_params #' @param spark_submit_params A list of parameters for jobs with spark submit task, for example `'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']`. #' @param sql_params A map from keys to values for jobs with SQL task, for example `'sql_params': {'name': 'john doe', 'age': '35'}`. #' -#' @rdname jobsRunNowAndWait +#' @rdname run_job_now_and_wait #' @export -jobsRunNowAndWait <- function(client, job_id, dbt_commands = NULL, idempotency_token = NULL, +run_job_now_and_wait <- function(client, job_id, dbt_commands = NULL, idempotency_token = NULL, jar_params = NULL, job_parameters = NULL, notebook_params = NULL, pipeline_params = NULL, python_named_params = NULL, python_params = NULL, queue = NULL, spark_submit_params = NULL, sql_params = NULL, timeout = 20, callback = cli_reporter) { @@ -769,9 +869,9 @@ jobsRunNowAndWait <- function(client, job_id, dbt_commands = NULL, idempotency_t #' @param timeout_seconds An optional timeout applied to each run of this job. #' @param webhook_notifications A collection of system notification IDs to notify when the run begins or completes. #' -#' @rdname jobsSubmitAndWait +#' @rdname submit_job_and_wait #' @export -jobsSubmitAndWait <- function(client, access_control_list = NULL, email_notifications = NULL, +submit_job_and_wait <- function(client, access_control_list = NULL, email_notifications = NULL, git_source = NULL, health = NULL, idempotency_token = NULL, notification_settings = NULL, queue = NULL, run_name = NULL, tasks = NULL, timeout_seconds = NULL, webhook_notifications = NULL, timeout = 20, callback = cli_reporter) { diff --git a/R/lakehouse_monitors.R b/R/lakehouse_monitors.R index 6ddeb4c8..d099d5eb 100755 --- a/R/lakehouse_monitors.R +++ b/R/lakehouse_monitors.R @@ -20,13 +20,18 @@ NULL #' @param full_name Required. Full name of the table. #' @param refresh_id Required. ID of the refresh. #' -#' @rdname lakehouseMonitorsCancelRefresh +#' @rdname cancel_lakehouse_monitor_refresh +#' @alias lakehouseMonitorsCancelRefresh #' @export -lakehouseMonitorsCancelRefresh <- function(client, full_name, refresh_id) { +cancel_lakehouse_monitor_refresh <- function(client, full_name, refresh_id) { client$do("POST", paste("/api/2.1/unity-catalog/tables/", full_name, "/monitor/refreshes/", refresh_id, "/cancel", , sep = "")) } + +#' @rdname cancel_lakehouse_monitor_refresh +#' @export +lakehouseMonitorsCancelRefresh <- cancel_lakehouse_monitor_refresh #' Create a table monitor. #' #' Creates a new monitor for the specified table. @@ -58,9 +63,10 @@ lakehouseMonitorsCancelRefresh <- function(client, full_name, refresh_id) { #' @param time_series Configuration for monitoring time series tables. #' @param warehouse_id Optional argument to specify the warehouse for dashboard creation. 
#' -#' @rdname lakehouseMonitorsCreate +#' @rdname create_lakehouse_monitor +#' @alias lakehouseMonitorsCreate #' @export -lakehouseMonitorsCreate <- function(client, full_name, assets_dir, output_schema_name, +create_lakehouse_monitor <- function(client, full_name, assets_dir, output_schema_name, baseline_table_name = NULL, custom_metrics = NULL, data_classification_config = NULL, inference_log = NULL, notifications = NULL, schedule = NULL, skip_builtin_dashboard = NULL, slicing_exprs = NULL, snapshot = NULL, time_series = NULL, warehouse_id = NULL) { @@ -72,6 +78,10 @@ lakehouseMonitorsCreate <- function(client, full_name, assets_dir, output_schema client$do("POST", paste("/api/2.1/unity-catalog/tables/", full_name, "/monitor", , sep = ""), body = body) } + +#' @rdname create_lakehouse_monitor +#' @export +lakehouseMonitorsCreate <- create_lakehouse_monitor #' Delete a table monitor. #' #' Deletes a monitor for the specified table. @@ -91,13 +101,18 @@ lakehouseMonitorsCreate <- function(client, full_name, assets_dir, output_schema #' #' @param full_name Required. Full name of the table. #' -#' @rdname lakehouseMonitorsDelete +#' @rdname delete_lakehouse_monitor +#' @alias lakehouseMonitorsDelete #' @export -lakehouseMonitorsDelete <- function(client, full_name) { +delete_lakehouse_monitor <- function(client, full_name) { client$do("DELETE", paste("/api/2.1/unity-catalog/tables/", full_name, "/monitor", , sep = "")) } + +#' @rdname delete_lakehouse_monitor +#' @export +lakehouseMonitorsDelete <- delete_lakehouse_monitor #' Get a table monitor. #' #' Gets a monitor for the specified table. @@ -116,13 +131,18 @@ lakehouseMonitorsDelete <- function(client, full_name) { #' #' @param full_name Required. Full name of the table. #' -#' @rdname lakehouseMonitorsGet +#' @rdname get_lakehouse_monitor +#' @alias lakehouseMonitorsGet #' @export -lakehouseMonitorsGet <- function(client, full_name) { +get_lakehouse_monitor <- function(client, full_name) { client$do("GET", paste("/api/2.1/unity-catalog/tables/", full_name, "/monitor", , sep = "")) } + +#' @rdname get_lakehouse_monitor +#' @export +lakehouseMonitorsGet <- get_lakehouse_monitor #' Get refresh. #' #' Gets info about a specific monitor refresh using the given refresh ID. @@ -140,13 +160,18 @@ lakehouseMonitorsGet <- function(client, full_name) { #' @param full_name Required. Full name of the table. #' @param refresh_id Required. ID of the refresh. #' -#' @rdname lakehouseMonitorsGetRefresh +#' @rdname get_lakehouse_monitor_refresh +#' @alias lakehouseMonitorsGetRefresh #' @export -lakehouseMonitorsGetRefresh <- function(client, full_name, refresh_id) { +get_lakehouse_monitor_refresh <- function(client, full_name, refresh_id) { client$do("GET", paste("/api/2.1/unity-catalog/tables/", full_name, "/monitor/refreshes/", refresh_id, sep = "")) } + +#' @rdname get_lakehouse_monitor_refresh +#' @export +lakehouseMonitorsGetRefresh <- get_lakehouse_monitor_refresh #' List refreshes. #' #' Gets an array containing the history of the most recent refreshes (up to 25) @@ -164,13 +189,18 @@ lakehouseMonitorsGetRefresh <- function(client, full_name, refresh_id) { #' #' @param full_name Required. Full name of the table. 
#' -#' @rdname lakehouseMonitorsListRefreshes +#' @rdname list_lakehouse_monitor_refreshes +#' @alias lakehouseMonitorsListRefreshes #' @export -lakehouseMonitorsListRefreshes <- function(client, full_name) { +list_lakehouse_monitor_refreshes <- function(client, full_name) { client$do("GET", paste("/api/2.1/unity-catalog/tables/", full_name, "/monitor/refreshes", , sep = "")) } + +#' @rdname list_lakehouse_monitor_refreshes +#' @export +lakehouseMonitorsListRefreshes <- list_lakehouse_monitor_refreshes #' Queue a metric refresh for a monitor. #' #' Queues a metric refresh on the monitor for the specified table. The refresh @@ -188,13 +218,18 @@ lakehouseMonitorsListRefreshes <- function(client, full_name) { #' #' @param full_name Required. Full name of the table. #' -#' @rdname lakehouseMonitorsRunRefresh +#' @rdname run_lakehouse_monitor_refresh +#' @alias lakehouseMonitorsRunRefresh #' @export -lakehouseMonitorsRunRefresh <- function(client, full_name) { +run_lakehouse_monitor_refresh <- function(client, full_name) { client$do("POST", paste("/api/2.1/unity-catalog/tables/", full_name, "/monitor/refreshes", , sep = "")) } + +#' @rdname run_lakehouse_monitor_refresh +#' @export +lakehouseMonitorsRunRefresh <- run_lakehouse_monitor_refresh #' Update a table monitor. #' #' Updates a monitor for the specified table. @@ -224,9 +259,10 @@ lakehouseMonitorsRunRefresh <- function(client, full_name) { #' @param snapshot Configuration for monitoring snapshot tables. #' @param time_series Configuration for monitoring time series tables. #' -#' @rdname lakehouseMonitorsUpdate +#' @rdname update_lakehouse_monitor +#' @alias lakehouseMonitorsUpdate #' @export -lakehouseMonitorsUpdate <- function(client, full_name, output_schema_name, baseline_table_name = NULL, +update_lakehouse_monitor <- function(client, full_name, output_schema_name, baseline_table_name = NULL, custom_metrics = NULL, data_classification_config = NULL, inference_log = NULL, notifications = NULL, schedule = NULL, slicing_exprs = NULL, snapshot = NULL, time_series = NULL) { @@ -238,6 +274,10 @@ lakehouseMonitorsUpdate <- function(client, full_name, output_schema_name, basel , sep = ""), body = body) } +#' @rdname update_lakehouse_monitor +#' @export +lakehouseMonitorsUpdate <- update_lakehouse_monitor + diff --git a/R/lakeview.R b/R/lakeview.R index abc7d544..af6a929f 100755 --- a/R/lakeview.R +++ b/R/lakeview.R @@ -13,14 +13,19 @@ NULL #' @param serialized_dashboard The contents of the dashboard in serialized string form. #' @param warehouse_id The warehouse ID used to run the dashboard. #' -#' @rdname lakeviewCreate +#' @rdname create_lakeview +#' @alias lakeviewCreate #' @export -lakeviewCreate <- function(client, display_name, parent_path = NULL, serialized_dashboard = NULL, +create_lakeview <- function(client, display_name, parent_path = NULL, serialized_dashboard = NULL, warehouse_id = NULL) { body <- list(display_name = display_name, parent_path = parent_path, serialized_dashboard = serialized_dashboard, warehouse_id = warehouse_id) client$do("POST", "/api/2.0/lakeview/dashboards", body = body) } + +#' @rdname create_lakeview +#' @export +lakeviewCreate <- create_lakeview #' Get dashboard. #' #' Get a draft dashboard. @@ -28,12 +33,17 @@ lakeviewCreate <- function(client, display_name, parent_path = NULL, serialized_ #' #' @param dashboard_id Required. UUID identifying the dashboard. 
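Tying the lakehouse-monitor helpers above together; the table name and assets directory are placeholders, `snapshot = list()` assumes an empty object is a valid snapshot-profile config, and `refresh$refresh_id` assumes the documented response field.

create_lakehouse_monitor(client,
  full_name          = "main.sales.orders",
  assets_dir         = "/Workspace/Shared/monitors/orders",
  output_schema_name = "main.monitoring",
  snapshot           = list())
refresh <- run_lakehouse_monitor_refresh(client, full_name = "main.sales.orders")
get_lakehouse_monitor_refresh(client, full_name = "main.sales.orders",
  refresh_id = refresh$refresh_id)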
#' -#' @rdname lakeviewGet +#' @rdname get_lakeview +#' @alias lakeviewGet #' @export -lakeviewGet <- function(client, dashboard_id) { +get_lakeview <- function(client, dashboard_id) { client$do("GET", paste("/api/2.0/lakeview/dashboards/", dashboard_id, sep = "")) } + +#' @rdname get_lakeview +#' @export +lakeviewGet <- get_lakeview #' Get published dashboard. #' #' Get the current published dashboard. @@ -41,13 +51,18 @@ lakeviewGet <- function(client, dashboard_id) { #' #' @param dashboard_id Required. UUID identifying the dashboard to be published. #' -#' @rdname lakeviewGetPublished +#' @rdname get_lakeview_published +#' @alias lakeviewGetPublished #' @export -lakeviewGetPublished <- function(client, dashboard_id) { +get_lakeview_published <- function(client, dashboard_id) { client$do("GET", paste("/api/2.0/lakeview/dashboards/", dashboard_id, "/published", , sep = "")) } + +#' @rdname get_lakeview_published +#' @export +lakeviewGetPublished <- get_lakeview_published #' Publish dashboard. #' #' Publish the current draft dashboard. @@ -57,13 +72,18 @@ lakeviewGetPublished <- function(client, dashboard_id) { #' @param embed_credentials Flag to indicate if the publisher's credentials should be embedded in the published dashboard. #' @param warehouse_id The ID of the warehouse that can be used to override the warehouse which was set in the draft. #' -#' @rdname lakeviewPublish +#' @rdname publish_lakeview +#' @alias lakeviewPublish #' @export -lakeviewPublish <- function(client, dashboard_id, embed_credentials = NULL, warehouse_id = NULL) { +publish_lakeview <- function(client, dashboard_id, embed_credentials = NULL, warehouse_id = NULL) { body <- list(embed_credentials = embed_credentials, warehouse_id = warehouse_id) client$do("POST", paste("/api/2.0/lakeview/dashboards/", dashboard_id, "/published", , sep = ""), body = body) } + +#' @rdname publish_lakeview +#' @export +lakeviewPublish <- publish_lakeview #' Trash dashboard. #' #' Trash a dashboard. @@ -71,12 +91,17 @@ lakeviewPublish <- function(client, dashboard_id, embed_credentials = NULL, ware #' #' @param dashboard_id Required. UUID identifying the dashboard. #' -#' @rdname lakeviewTrash +#' @rdname trash_lakeview +#' @alias lakeviewTrash #' @export -lakeviewTrash <- function(client, dashboard_id) { +trash_lakeview <- function(client, dashboard_id) { client$do("DELETE", paste("/api/2.0/lakeview/dashboards/", dashboard_id, sep = "")) } + +#' @rdname trash_lakeview +#' @export +lakeviewTrash <- trash_lakeview #' Update dashboard. #' #' Update a draft dashboard. @@ -88,9 +113,10 @@ lakeviewTrash <- function(client, dashboard_id) { #' @param serialized_dashboard The contents of the dashboard in serialized string form. #' @param warehouse_id The warehouse ID used to run the dashboard. 
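The Lakeview functions above follow a draft-then-publish flow; `dash$dashboard_id` is an assumption about the create response's shape.

dash <- create_lakeview(client, display_name = "Sales overview",
  parent_path = "/Workspace/Shared")
publish_lakeview(client, dashboard_id = dash$dashboard_id,
  embed_credentials = TRUE)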
#' -#' @rdname lakeviewUpdate +#' @rdname update_lakeview +#' @alias lakeviewUpdate #' @export -lakeviewUpdate <- function(client, dashboard_id, display_name = NULL, etag = NULL, +update_lakeview <- function(client, dashboard_id, display_name = NULL, etag = NULL, serialized_dashboard = NULL, warehouse_id = NULL) { body <- list(display_name = display_name, etag = etag, serialized_dashboard = serialized_dashboard, warehouse_id = warehouse_id) @@ -98,6 +124,10 @@ lakeviewUpdate <- function(client, dashboard_id, display_name = NULL, etag = NUL body = body) } +#' @rdname update_lakeview +#' @export +lakeviewUpdate <- update_lakeview + diff --git a/R/libraries.R b/R/libraries.R index 2f2b50f6..92b66228 100755 --- a/R/libraries.R +++ b/R/libraries.R @@ -11,11 +11,16 @@ NULL #' UI. #' @param client Required. Instance of DatabricksClient() #' -#' @rdname librariesAllClusterStatuses +#' @rdname all_cluster_library_statuses +#' @alias librariesAllClusterStatuses #' @export -librariesAllClusterStatuses <- function(client) { +all_cluster_library_statuses <- function(client) { client$do("GET", "/api/2.0/libraries/all-cluster-statuses") } + +#' @rdname all_cluster_library_statuses +#' @export +librariesAllClusterStatuses <- all_cluster_library_statuses #' Get status. #' #' Get the status of libraries on a cluster. A status will be available for all @@ -39,15 +44,20 @@ librariesAllClusterStatuses <- function(client) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname librariesClusterStatus +#' @rdname cluster_library_status +#' @alias librariesClusterStatus #' @export -librariesClusterStatus <- function(client, cluster_id) { +cluster_library_status <- function(client, cluster_id) { query <- list(cluster_id = cluster_id) json <- client$do("GET", "/api/2.0/libraries/cluster-status", query = query) return(json$library_statuses) } + +#' @rdname cluster_library_status +#' @export +librariesClusterStatus <- cluster_library_status #' Add a library. #' #' Add libraries to be installed on a cluster. The installation is asynchronous; @@ -61,12 +71,17 @@ librariesClusterStatus <- function(client, cluster_id) { #' @param cluster_id Required. Unique identifier for the cluster on which to install these libraries. #' @param libraries Required. The libraries to install. #' -#' @rdname librariesInstall +#' @rdname install_cluster_library +#' @alias librariesInstall #' @export -librariesInstall <- function(client, cluster_id, libraries) { +install_cluster_library <- function(client, cluster_id, libraries) { body <- list(cluster_id = cluster_id, libraries = libraries) client$do("POST", "/api/2.0/libraries/install", body = body) } + +#' @rdname install_cluster_library +#' @export +librariesInstall <- install_cluster_library #' Uninstall libraries. #' #' Set libraries to be uninstalled on a cluster. The libraries won't be @@ -77,13 +92,18 @@ librariesInstall <- function(client, cluster_id, libraries) { #' @param cluster_id Required. Unique identifier for the cluster on which to uninstall these libraries. #' @param libraries Required. The libraries to uninstall. 
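For the library helpers above, a sketch with a placeholder cluster ID; the `libraries` payload mirrors the Libraries API's library spec objects (a PyPI package here), which is an assumption in this context.

install_cluster_library(client,
  cluster_id = "0123-456789-abcdefgh",
  libraries  = list(list(pypi = list(package = "simplejson"))))
# Documented to return a data.frame with one row per library status:
status <- cluster_library_status(client, cluster_id = "0123-456789-abcdefgh")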
#' -#' @rdname librariesUninstall +#' @rdname uninstall_cluster_library +#' @alias librariesUninstall #' @export -librariesUninstall <- function(client, cluster_id, libraries) { +uninstall_cluster_library <- function(client, cluster_id, libraries) { body <- list(cluster_id = cluster_id, libraries = libraries) client$do("POST", "/api/2.0/libraries/uninstall", body = body) } +#' @rdname uninstall_cluster_library +#' @export +librariesUninstall <- uninstall_cluster_library + diff --git a/R/metastores.R b/R/metastores.R index a21f1225..caa3053f 100755 --- a/R/metastores.R +++ b/R/metastores.R @@ -14,13 +14,18 @@ NULL #' @param metastore_id Required. The unique ID of the metastore. #' @param workspace_id Required. A workspace ID. #' -#' @rdname metastoresAssign +#' @rdname assign_metastore +#' @alias metastoresAssign #' @export -metastoresAssign <- function(client, workspace_id, metastore_id, default_catalog_name) { +assign_metastore <- function(client, workspace_id, metastore_id, default_catalog_name) { body <- list(default_catalog_name = default_catalog_name, metastore_id = metastore_id) client$do("PUT", paste("/api/2.1/unity-catalog/workspaces/", workspace_id, "/metastore", , sep = ""), body = body) } + +#' @rdname assign_metastore +#' @export +metastoresAssign <- assign_metastore #' Create a metastore. #' #' Creates a new metastore based on a provided name and optional storage root @@ -34,22 +39,32 @@ metastoresAssign <- function(client, workspace_id, metastore_id, default_catalog #' @param region Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). #' @param storage_root The storage root URL for metastore. #' -#' @rdname metastoresCreate +#' @rdname create_metastore +#' @alias metastoresCreate #' @export -metastoresCreate <- function(client, name, region = NULL, storage_root = NULL) { +create_metastore <- function(client, name, region = NULL, storage_root = NULL) { body <- list(name = name, region = region, storage_root = storage_root) client$do("POST", "/api/2.1/unity-catalog/metastores", body = body) } + +#' @rdname create_metastore +#' @export +metastoresCreate <- create_metastore #' Get metastore assignment for workspace. #' #' Gets the metastore assignment for the workspace being accessed. #' @param client Required. Instance of DatabricksClient() #' -#' @rdname metastoresCurrent +#' @rdname current_metastore +#' @alias metastoresCurrent #' @export -metastoresCurrent <- function(client) { +current_metastore <- function(client) { client$do("GET", "/api/2.1/unity-catalog/current-metastore-assignment") } + +#' @rdname current_metastore +#' @export +metastoresCurrent <- current_metastore #' Delete a metastore. #' #' Deletes a metastore. The caller must be a metastore admin. @@ -58,13 +73,18 @@ metastoresCurrent <- function(client) { #' @param force Force deletion even if the metastore is not empty. #' @param id Required. Unique ID of the metastore. #' -#' @rdname metastoresDelete +#' @rdname delete_metastore +#' @alias metastoresDelete #' @export -metastoresDelete <- function(client, id, force = NULL) { +delete_metastore <- function(client, id, force = NULL) { query <- list(force = force) client$do("DELETE", paste("/api/2.1/unity-catalog/metastores/", id, sep = ""), query = query) } + +#' @rdname delete_metastore +#' @export +metastoresDelete <- delete_metastore #' Get a metastore. #' #' Gets a metastore that matches the supplied ID. The caller must be a metastore @@ -73,12 +93,17 @@ metastoresDelete <- function(client, id, force = NULL) { #' #' @param id Required. 
Unique ID of the metastore. #' -#' @rdname metastoresGet +#' @rdname get_metastore +#' @alias metastoresGet #' @export -metastoresGet <- function(client, id) { +get_metastore <- function(client, id) { client$do("GET", paste("/api/2.1/unity-catalog/metastores/", id, sep = "")) } + +#' @rdname get_metastore +#' @export +metastoresGet <- get_metastore #' List metastores. #' #' Gets an array of the available metastores (as __MetastoreInfo__ objects). The @@ -88,25 +113,35 @@ metastoresGet <- function(client, id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname metastoresList +#' @rdname list_metastores +#' @alias metastoresList #' @export -metastoresList <- function(client) { +list_metastores <- function(client) { json <- client$do("GET", "/api/2.1/unity-catalog/metastores") return(json$metastores) } + +#' @rdname list_metastores +#' @export +metastoresList <- list_metastores #' Get a metastore summary. #' #' Gets information about a metastore. This summary includes the storage #' credential, the cloud vendor, the cloud region, and the global metastore ID. #' @param client Required. Instance of DatabricksClient() #' -#' @rdname metastoresSummary +#' @rdname summary_metastore +#' @alias metastoresSummary #' @export -metastoresSummary <- function(client) { +summary_metastore <- function(client) { client$do("GET", "/api/2.1/unity-catalog/metastore_summary") } + +#' @rdname summary_metastore +#' @export +metastoresSummary <- summary_metastore #' Delete an assignment. #' #' Deletes a metastore assignment. The caller must be an account administrator. @@ -115,13 +150,18 @@ metastoresSummary <- function(client) { #' @param metastore_id Required. Query for the ID of the metastore to delete. #' @param workspace_id Required. A workspace ID. #' -#' @rdname metastoresUnassign +#' @rdname unassign_metastore +#' @alias metastoresUnassign #' @export -metastoresUnassign <- function(client, workspace_id, metastore_id) { +unassign_metastore <- function(client, workspace_id, metastore_id) { query <- list(metastore_id = metastore_id) client$do("DELETE", paste("/api/2.1/unity-catalog/workspaces/", workspace_id, "/metastore", , sep = ""), query = query) } + +#' @rdname unassign_metastore +#' @export +metastoresUnassign <- unassign_metastore #' Update a metastore. #' #' Updates information for a specific metastore. The caller must be a metastore @@ -138,9 +178,10 @@ metastoresUnassign <- function(client, workspace_id, metastore_id) { #' @param privilege_model_version Privilege model version of the metastore, of the form `major.minor` (e.g., `1.0`). #' @param storage_root_credential_id UUID of storage credential to access the metastore storage_root. #' -#' @rdname metastoresUpdate +#' @rdname update_metastore +#' @alias metastoresUpdate #' @export -metastoresUpdate <- function(client, id, delta_sharing_organization_name = NULL, +update_metastore <- function(client, id, delta_sharing_organization_name = NULL, delta_sharing_recipient_token_lifetime_in_seconds = NULL, delta_sharing_scope = NULL, new_name = NULL, owner = NULL, privilege_model_version = NULL, storage_root_credential_id = NULL) { body <- list(delta_sharing_organization_name = delta_sharing_organization_name, @@ -150,6 +191,10 @@ metastoresUpdate <- function(client, id, delta_sharing_organization_name = NULL, client$do("PATCH", paste("/api/2.1/unity-catalog/metastores/", id, sep = ""), body = body) } + +#' @rdname update_metastore +#' @export +metastoresUpdate <- update_metastore #' Update an assignment. 
#' #' Updates a metastore assignment. This operation can be used to update @@ -163,15 +208,20 @@ metastoresUpdate <- function(client, id, delta_sharing_organization_name = NULL, #' @param metastore_id The unique ID of the metastore. #' @param workspace_id Required. A workspace ID. #' -#' @rdname metastoresUpdateAssignment +#' @rdname update_metastore_assignment +#' @alias metastoresUpdateAssignment #' @export -metastoresUpdateAssignment <- function(client, workspace_id, default_catalog_name = NULL, +update_metastore_assignment <- function(client, workspace_id, default_catalog_name = NULL, metastore_id = NULL) { body <- list(default_catalog_name = default_catalog_name, metastore_id = metastore_id) client$do("PATCH", paste("/api/2.1/unity-catalog/workspaces/", workspace_id, "/metastore", , sep = ""), body = body) } +#' @rdname update_metastore_assignment +#' @export +metastoresUpdateAssignment <- update_metastore_assignment + diff --git a/R/model_registry.R b/R/model_registry.R index 9f5f9659..4c3e3ca2 100755 --- a/R/model_registry.R +++ b/R/model_registry.R @@ -14,14 +14,19 @@ NULL #' @param stage Required. Target stage of the transition. #' @param version Required. Version of the model. #' -#' @rdname modelRegistryApproveTransitionRequest +#' @rdname approve_model_transition_request +#' @alias modelRegistryApproveTransitionRequest #' @export -modelRegistryApproveTransitionRequest <- function(client, name, version, stage, archive_existing_versions, +approve_model_transition_request <- function(client, name, version, stage, archive_existing_versions, comment = NULL) { body <- list(archive_existing_versions = archive_existing_versions, comment = comment, name = name, stage = stage, version = version) client$do("POST", "/api/2.0/mlflow/transition-requests/approve", body = body) } + +#' @rdname approve_model_transition_request +#' @export +modelRegistryApproveTransitionRequest <- approve_model_transition_request #' Post a comment. #' #' Posts a comment on a model version. A comment can be submitted either by a @@ -33,12 +38,17 @@ modelRegistryApproveTransitionRequest <- function(client, name, version, stage, #' @param name Required. Name of the model. #' @param version Required. Version of the model. #' -#' @rdname modelRegistryCreateComment +#' @rdname create_model_comment +#' @alias modelRegistryCreateComment #' @export -modelRegistryCreateComment <- function(client, name, version, comment) { +create_model_comment <- function(client, name, version, comment) { body <- list(comment = comment, name = name, version = version) client$do("POST", "/api/2.0/mlflow/comments/create", body = body) } + +#' @rdname create_model_comment +#' @export +modelRegistryCreateComment <- create_model_comment #' Create a model. #' #' Creates a new registered model with the name specified in the request body. @@ -51,12 +61,17 @@ modelRegistryCreateComment <- function(client, name, version, comment) { #' @param name Required. Register models under this name. #' @param tags Additional metadata for registered model. 
#' -#' @rdname modelRegistryCreateModel +#' @rdname create_model +#' @alias modelRegistryCreateModel #' @export -modelRegistryCreateModel <- function(client, name, description = NULL, tags = NULL) { +create_model <- function(client, name, description = NULL, tags = NULL) { body <- list(description = description, name = name, tags = tags) client$do("POST", "/api/2.0/mlflow/registered-models/create", body = body) } + +#' @rdname create_model +#' @export +modelRegistryCreateModel <- create_model #' Create a model version. #' #' Creates a model version. @@ -69,14 +84,19 @@ modelRegistryCreateModel <- function(client, name, description = NULL, tags = NU #' @param source Required. URI indicating the location of the model artifacts. #' @param tags Additional metadata for model version. #' -#' @rdname modelRegistryCreateModelVersion +#' @rdname create_model_version +#' @alias modelRegistryCreateModelVersion #' @export -modelRegistryCreateModelVersion <- function(client, name, source, description = NULL, - run_id = NULL, run_link = NULL, tags = NULL) { +create_model_version <- function(client, name, source, description = NULL, run_id = NULL, + run_link = NULL, tags = NULL) { body <- list(description = description, name = name, run_id = run_id, run_link = run_link, source = source, tags = tags) client$do("POST", "/api/2.0/mlflow/model-versions/create", body = body) } + +#' @rdname create_model_version +#' @export +modelRegistryCreateModelVersion <- create_model_version #' Make a transition request. #' #' Creates a model version stage transition request. @@ -87,12 +107,17 @@ modelRegistryCreateModelVersion <- function(client, name, source, description = #' @param stage Required. Target stage of the transition. #' @param version Required. Version of the model. #' -#' @rdname modelRegistryCreateTransitionRequest +#' @rdname create_model_transition_request +#' @alias modelRegistryCreateTransitionRequest #' @export -modelRegistryCreateTransitionRequest <- function(client, name, version, stage, comment = NULL) { +create_model_transition_request <- function(client, name, version, stage, comment = NULL) { body <- list(comment = comment, name = name, stage = stage, version = version) client$do("POST", "/api/2.0/mlflow/transition-requests/create", body = body) } + +#' @rdname create_model_transition_request +#' @export +modelRegistryCreateTransitionRequest <- create_model_transition_request #' Create a webhook. #' #' **NOTE**: This endpoint is in Public Preview. @@ -107,14 +132,19 @@ modelRegistryCreateTransitionRequest <- function(client, name, version, stage, c #' @param model_name Name of the model whose events would trigger this webhook. #' @param status Enable or disable triggering the webhook, or put the webhook into test mode. #' -#' @rdname modelRegistryCreateWebhook +#' @rdname create_model_webhook +#' @alias modelRegistryCreateWebhook #' @export -modelRegistryCreateWebhook <- function(client, events, description = NULL, http_url_spec = NULL, +create_model_webhook <- function(client, events, description = NULL, http_url_spec = NULL, job_spec = NULL, model_name = NULL, status = NULL) { body <- list(description = description, events = events, http_url_spec = http_url_spec, job_spec = job_spec, model_name = model_name, status = status) client$do("POST", "/api/2.0/mlflow/registry-webhooks/create", body = body) } + +#' @rdname create_model_webhook +#' @export +modelRegistryCreateWebhook <- create_model_webhook #' Delete a comment. #' #' Deletes a comment on a model version. 
@@ -122,12 +152,17 @@ modelRegistryCreateWebhook <- function(client, events, description = NULL, http_ #' #' @param id Required. This field has no description yet. #' -#' @rdname modelRegistryDeleteComment +#' @rdname delete_model_comment +#' @alias modelRegistryDeleteComment #' @export -modelRegistryDeleteComment <- function(client, id) { +delete_model_comment <- function(client, id) { query <- list(id = id) client$do("DELETE", "/api/2.0/mlflow/comments/delete", query = query) } + +#' @rdname delete_model_comment +#' @export +modelRegistryDeleteComment <- delete_model_comment #' Delete a model. #' #' Deletes a registered model. @@ -135,12 +170,17 @@ modelRegistryDeleteComment <- function(client, id) { #' #' @param name Required. Registered model unique name identifier. #' -#' @rdname modelRegistryDeleteModel +#' @rdname delete_model +#' @alias modelRegistryDeleteModel #' @export -modelRegistryDeleteModel <- function(client, name) { +delete_model <- function(client, name) { query <- list(name = name) client$do("DELETE", "/api/2.0/mlflow/registered-models/delete", query = query) } + +#' @rdname delete_model +#' @export +modelRegistryDeleteModel <- delete_model #' Delete a model tag. #' #' Deletes the tag for a registered model. @@ -149,12 +189,17 @@ modelRegistryDeleteModel <- function(client, name) { #' @param key Required. Name of the tag. #' @param name Required. Name of the registered model that the tag was logged under. #' -#' @rdname modelRegistryDeleteModelTag +#' @rdname delete_model_tag +#' @alias modelRegistryDeleteModelTag #' @export -modelRegistryDeleteModelTag <- function(client, name, key) { +delete_model_tag <- function(client, name, key) { query <- list(key = key, name = name) client$do("DELETE", "/api/2.0/mlflow/registered-models/delete-tag", query = query) } + +#' @rdname delete_model_tag +#' @export +modelRegistryDeleteModelTag <- delete_model_tag #' Delete a model version. #' #' Deletes a model version. @@ -163,12 +208,17 @@ modelRegistryDeleteModelTag <- function(client, name, key) { #' @param name Required. Name of the registered model. #' @param version Required. Model version number. #' -#' @rdname modelRegistryDeleteModelVersion +#' @rdname delete_model_version +#' @alias modelRegistryDeleteModelVersion #' @export -modelRegistryDeleteModelVersion <- function(client, name, version) { +delete_model_version <- function(client, name, version) { query <- list(name = name, version = version) client$do("DELETE", "/api/2.0/mlflow/model-versions/delete", query = query) } + +#' @rdname delete_model_version +#' @export +modelRegistryDeleteModelVersion <- delete_model_version #' Delete a model version tag. #' #' Deletes a model version tag. @@ -178,12 +228,17 @@ modelRegistryDeleteModelVersion <- function(client, name, version) { #' @param name Required. Name of the registered model that the tag was logged under. #' @param version Required. Model version number that the tag was logged under. #' -#' @rdname modelRegistryDeleteModelVersionTag +#' @rdname delete_model_version_tag +#' @alias modelRegistryDeleteModelVersionTag #' @export -modelRegistryDeleteModelVersionTag <- function(client, name, version, key) { +delete_model_version_tag <- function(client, name, version, key) { query <- list(key = key, name = name, version = version) client$do("DELETE", "/api/2.0/mlflow/model-versions/delete-tag", query = query) } + +#' @rdname delete_model_version_tag +#' @export +modelRegistryDeleteModelVersionTag <- delete_model_version_tag #' Delete a transition request. 
#' #' Cancels a model version stage transition request. @@ -195,14 +250,19 @@ modelRegistryDeleteModelVersionTag <- function(client, name, version, key) { #' @param stage Required. Target stage of the transition request. #' @param version Required. Version of the model. #' -#' @rdname modelRegistryDeleteTransitionRequest +#' @rdname delete_model_transition_request +#' @alias modelRegistryDeleteTransitionRequest #' @export -modelRegistryDeleteTransitionRequest <- function(client, name, version, stage, creator, +delete_model_transition_request <- function(client, name, version, stage, creator, comment = NULL) { query <- list(comment = comment, creator = creator, name = name, stage = stage, version = version) client$do("DELETE", "/api/2.0/mlflow/transition-requests/delete", query = query) } + +#' @rdname delete_model_transition_request +#' @export +modelRegistryDeleteTransitionRequest <- delete_model_transition_request #' Delete a webhook. #' #' **NOTE:** This endpoint is in Public Preview. @@ -212,12 +272,17 @@ modelRegistryDeleteTransitionRequest <- function(client, name, version, stage, c #' #' @param id Webhook ID required to delete a registry webhook. #' -#' @rdname modelRegistryDeleteWebhook +#' @rdname delete_model_webhook +#' @alias modelRegistryDeleteWebhook #' @export -modelRegistryDeleteWebhook <- function(client, id = NULL) { +delete_model_webhook <- function(client, id = NULL) { query <- list(id = id) client$do("DELETE", "/api/2.0/mlflow/registry-webhooks/delete", query = query) } + +#' @rdname delete_model_webhook +#' @export +modelRegistryDeleteWebhook <- delete_model_webhook #' Get the latest version. #' #' Gets the latest version of a registered model. @@ -228,9 +293,10 @@ modelRegistryDeleteWebhook <- function(client, id = NULL) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname modelRegistryGetLatestVersions +#' @rdname get_model_latest_versions +#' @alias modelRegistryGetLatestVersions #' @export -modelRegistryGetLatestVersions <- function(client, name, stages = NULL) { +get_model_latest_versions <- function(client, name, stages = NULL) { body <- list(name = name, stages = stages) json <- client$do("POST", "/api/2.0/mlflow/registered-models/get-latest-versions", @@ -238,6 +304,10 @@ modelRegistryGetLatestVersions <- function(client, name, stages = NULL) { return(json$model_versions) } + +#' @rdname get_model_latest_versions +#' @export +modelRegistryGetLatestVersions <- get_model_latest_versions #' Get model. #' #' Get the details of a model. This is a Databricks workspace version of the @@ -249,12 +319,17 @@ modelRegistryGetLatestVersions <- function(client, name, stages = NULL) { #' #' @param name Required. Registered model unique name identifier. #' -#' @rdname modelRegistryGetModel +#' @rdname get_model +#' @alias modelRegistryGetModel #' @export -modelRegistryGetModel <- function(client, name) { +get_model <- function(client, name) { query <- list(name = name) client$do("GET", "/api/2.0/mlflow/databricks/registered-models/get", query = query) } + +#' @rdname get_model +#' @export +modelRegistryGetModel <- get_model #' Get a model version. #' #' Get a model version. @@ -263,12 +338,17 @@ modelRegistryGetModel <- function(client, name) { #' @param name Required. Name of the registered model. #' @param version Required. Model version number. 
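The transition-request helpers above compose into a review workflow; `stage` takes MLflow registry stage names (e.g. "Staging", "Production"), and the model name and version here are placeholders.

create_model_transition_request(client,
  name = "demo-model", version = "2", stage = "Staging",
  comment = "please review")
# Approve, archiving any versions already in the target stage:
approve_model_transition_request(client,
  name = "demo-model", version = "2", stage = "Staging",
  archive_existing_versions = TRUE)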
#' -#' @rdname modelRegistryGetModelVersion +#' @rdname get_model_version +#' @alias modelRegistryGetModelVersion #' @export -modelRegistryGetModelVersion <- function(client, name, version) { +get_model_version <- function(client, name, version) { query <- list(name = name, version = version) client$do("GET", "/api/2.0/mlflow/model-versions/get", query = query) } + +#' @rdname get_model_version +#' @export +modelRegistryGetModelVersion <- get_model_version #' Get a model version URI. #' #' Gets a URI to download the model version. @@ -277,12 +357,17 @@ modelRegistryGetModelVersion <- function(client, name, version) { #' @param name Required. Name of the registered model. #' @param version Required. Model version number. #' -#' @rdname modelRegistryGetModelVersionDownloadUri +#' @rdname get_model_version_download_uri +#' @alias modelRegistryGetModelVersionDownloadUri #' @export -modelRegistryGetModelVersionDownloadUri <- function(client, name, version) { +get_model_version_download_uri <- function(client, name, version) { query <- list(name = name, version = version) client$do("GET", "/api/2.0/mlflow/model-versions/get-download-uri", query = query) } + +#' @rdname get_model_version_download_uri +#' @export +modelRegistryGetModelVersionDownloadUri <- get_model_version_download_uri #' Get registered model permission levels. #' #' Gets the permission levels that a user can have on an object. @@ -290,13 +375,18 @@ modelRegistryGetModelVersionDownloadUri <- function(client, name, version) { #' #' @param registered_model_id Required. The registered model for which to get or manage permissions. #' -#' @rdname modelRegistryGetPermissionLevels +#' @rdname get_model_permission_levels +#' @alias modelRegistryGetPermissionLevels #' @export -modelRegistryGetPermissionLevels <- function(client, registered_model_id) { +get_model_permission_levels <- function(client, registered_model_id) { client$do("GET", paste("/api/2.0/permissions/registered-models/", registered_model_id, "/permissionLevels", , sep = "")) } + +#' @rdname get_model_permission_levels +#' @export +modelRegistryGetPermissionLevels <- get_model_permission_levels #' Get registered model permissions. #' #' Gets the permissions of a registered model. Registered models can inherit @@ -305,13 +395,18 @@ modelRegistryGetPermissionLevels <- function(client, registered_model_id) { #' #' @param registered_model_id Required. The registered model for which to get or manage permissions. #' -#' @rdname modelRegistryGetPermissions +#' @rdname get_model_permissions +#' @alias modelRegistryGetPermissions #' @export -modelRegistryGetPermissions <- function(client, registered_model_id) { +get_model_permissions <- function(client, registered_model_id) { client$do("GET", paste("/api/2.0/permissions/registered-models/", registered_model_id, sep = "")) } + +#' @rdname get_model_permissions +#' @export +modelRegistryGetPermissions <- get_model_permissions #' List models. #' #' Lists all available registered models, up to the limit specified in @@ -323,9 +418,10 @@ modelRegistryGetPermissions <- function(client, registered_model_id) { #' #' @return `data.frame` with all of the response pages. 
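`get_model_version_download_uri()` resolves a model version to its artifact location. One generator artifact worth noting in the permission-levels hunks above: the `paste(..., "/permissionLevels", , sep = "")` calls carry a stray empty argument, and base R normally rejects an empty argument once it reaches `...` (`paste("a", , "b")` fails with "argument 2 is empty"), so dropping the extra comma appears to be the safe form. A sketch of the URI call (name and version are placeholders; the response field follows the MLflow get-download-uri schema):

```r
resp <- get_model_version_download_uri(client, name = "my_model", version = "1")
resp$artifact_uri   # response field assumed; an illustrative value would be a dbfs:/ path
```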
#' -#' @rdname modelRegistryListModels +#' @rdname list_model_models +#' @alias modelRegistryListModels #' @export -modelRegistryListModels <- function(client, max_results = NULL, page_token = NULL) { +list_model_models <- function(client, max_results = NULL, page_token = NULL) { query <- list(max_results = max_results, page_token = page_token) results <- data.frame() @@ -344,6 +440,10 @@ modelRegistryListModels <- function(client, max_results = NULL, page_token = NUL return(results) } + +#' @rdname list_model_models +#' @export +modelRegistryListModels <- list_model_models #' List transition requests. #' #' Gets a list of all open stage transition requests for the model version. @@ -354,15 +454,20 @@ modelRegistryListModels <- function(client, max_results = NULL, page_token = NUL #' #' @return `data.frame` with all of the response pages. #' -#' @rdname modelRegistryListTransitionRequests +#' @rdname list_model_transition_requests +#' @alias modelRegistryListTransitionRequests #' @export -modelRegistryListTransitionRequests <- function(client, name, version) { +list_model_transition_requests <- function(client, name, version) { query <- list(name = name, version = version) json <- client$do("GET", "/api/2.0/mlflow/transition-requests/list", query = query) return(json$requests) } + +#' @rdname list_model_transition_requests +#' @export +modelRegistryListTransitionRequests <- list_model_transition_requests #' List registry webhooks. #' #' **NOTE:** This endpoint is in Public Preview. @@ -376,9 +481,10 @@ modelRegistryListTransitionRequests <- function(client, name, version) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname modelRegistryListWebhooks +#' @rdname list_model_webhooks +#' @alias modelRegistryListWebhooks #' @export -modelRegistryListWebhooks <- function(client, events = NULL, model_name = NULL, page_token = NULL) { +list_model_webhooks <- function(client, events = NULL, model_name = NULL, page_token = NULL) { query <- list(events = events, model_name = model_name, page_token = page_token) results <- data.frame() @@ -397,6 +503,10 @@ modelRegistryListWebhooks <- function(client, events = NULL, model_name = NULL, return(results) } + +#' @rdname list_model_webhooks +#' @export +modelRegistryListWebhooks <- list_model_webhooks #' Reject a transition request. #' #' Rejects a model version stage transition request. @@ -407,12 +517,17 @@ modelRegistryListWebhooks <- function(client, events = NULL, model_name = NULL, #' @param stage Required. Target stage of the transition. #' @param version Required. Version of the model. #' -#' @rdname modelRegistryRejectTransitionRequest +#' @rdname reject_model_transition_request +#' @alias modelRegistryRejectTransitionRequest #' @export -modelRegistryRejectTransitionRequest <- function(client, name, version, stage, comment = NULL) { +reject_model_transition_request <- function(client, name, version, stage, comment = NULL) { body <- list(comment = comment, name = name, stage = stage, version = version) client$do("POST", "/api/2.0/mlflow/transition-requests/reject", body = body) } + +#' @rdname reject_model_transition_request +#' @export +modelRegistryRejectTransitionRequest <- reject_model_transition_request #' Rename a model. #' #' Renames a registered model. @@ -421,12 +536,17 @@ modelRegistryRejectTransitionRequest <- function(client, name, version, stage, c #' @param name Required. Registered model unique name identifier. #' @param new_name If provided, updates the name for this `registered_model`. 
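The paginated `list_*` wrappers hide the paging loop: as the hunk shows, they seed an empty `results <- data.frame()`, append each page, and follow the server's continuation token until the listing is exhausted (the loop body is elided in the hunk), so callers see a single flat `data.frame`:

```r
# One call, all pages:
models <- list_model_models(client, max_results = 50)
nrow(models)        # total rows across every fetched page
head(models$name)   # registered-model names (column name assumed from the API schema)
```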
#' -#' @rdname modelRegistryRenameModel +#' @rdname rename_model +#' @alias modelRegistryRenameModel #' @export -modelRegistryRenameModel <- function(client, name, new_name = NULL) { +rename_model <- function(client, name, new_name = NULL) { body <- list(name = name, new_name = new_name) client$do("POST", "/api/2.0/mlflow/registered-models/rename", body = body) } + +#' @rdname rename_model +#' @export +modelRegistryRenameModel <- rename_model #' Searches model versions. #' #' Searches for specific model versions based on the supplied __filter__. @@ -439,10 +559,11 @@ modelRegistryRenameModel <- function(client, name, new_name = NULL) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname modelRegistrySearchModelVersions +#' @rdname search_model_versions +#' @alias modelRegistrySearchModelVersions #' @export -modelRegistrySearchModelVersions <- function(client, filter = NULL, max_results = NULL, - order_by = NULL, page_token = NULL) { +search_model_versions <- function(client, filter = NULL, max_results = NULL, order_by = NULL, + page_token = NULL) { query <- list(filter = filter, max_results = max_results, order_by = order_by, page_token = page_token) @@ -462,6 +583,10 @@ modelRegistrySearchModelVersions <- function(client, filter = NULL, max_results return(results) } + +#' @rdname search_model_versions +#' @export +modelRegistrySearchModelVersions <- search_model_versions #' Search models. #' #' Search for registered models based on the specified __filter__. @@ -474,10 +599,11 @@ modelRegistrySearchModelVersions <- function(client, filter = NULL, max_results #' #' @return `data.frame` with all of the response pages. #' -#' @rdname modelRegistrySearchModels +#' @rdname search_model_models +#' @alias modelRegistrySearchModels #' @export -modelRegistrySearchModels <- function(client, filter = NULL, max_results = NULL, - order_by = NULL, page_token = NULL) { +search_model_models <- function(client, filter = NULL, max_results = NULL, order_by = NULL, + page_token = NULL) { query <- list(filter = filter, max_results = max_results, order_by = order_by, page_token = page_token) @@ -497,6 +623,10 @@ modelRegistrySearchModels <- function(client, filter = NULL, max_results = NULL, return(results) } + +#' @rdname search_model_models +#' @export +modelRegistrySearchModels <- search_model_models #' Set a tag. #' #' Sets a tag on a registered model. @@ -506,12 +636,17 @@ modelRegistrySearchModels <- function(client, filter = NULL, max_results = NULL, #' @param name Required. Unique name of the model. #' @param value Required. String value of the tag being logged. #' -#' @rdname modelRegistrySetModelTag +#' @rdname set_model_tag +#' @alias modelRegistrySetModelTag #' @export -modelRegistrySetModelTag <- function(client, name, key, value) { +set_model_tag <- function(client, name, key, value) { body <- list(key = key, name = name, value = value) client$do("POST", "/api/2.0/mlflow/registered-models/set-tag", body = body) } + +#' @rdname set_model_tag +#' @export +modelRegistrySetModelTag <- set_model_tag #' Set a version tag. #' #' Sets a model version tag. @@ -522,12 +657,17 @@ modelRegistrySetModelTag <- function(client, name, key, value) { #' @param value Required. String value of the tag being logged. #' @param version Required. Model version number. 
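`search_model_versions()` passes the `filter` string through to the API unchanged; a sketch using the usual single-quoted MLflow filter syntax (the model name is a placeholder):

```r
mv <- search_model_versions(client,
                            filter      = "name = 'my_model'",  # MLflow filter grammar
                            max_results = 20)
mv[, c("version", "current_stage")]   # columns assumed from the response schema
```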
#' -#' @rdname modelRegistrySetModelVersionTag +#' @rdname set_model_version_tag +#' @alias modelRegistrySetModelVersionTag #' @export -modelRegistrySetModelVersionTag <- function(client, name, version, key, value) { +set_model_version_tag <- function(client, name, version, key, value) { body <- list(key = key, name = name, value = value, version = version) client$do("POST", "/api/2.0/mlflow/model-versions/set-tag", body = body) } + +#' @rdname set_model_version_tag +#' @export +modelRegistrySetModelVersionTag <- set_model_version_tag #' Set registered model permissions. #' #' Sets permissions on a registered model. Registered models can inherit @@ -537,13 +677,18 @@ modelRegistrySetModelVersionTag <- function(client, name, version, key, value) { #' @param access_control_list This field has no description yet. #' @param registered_model_id Required. The registered model for which to get or manage permissions. #' -#' @rdname modelRegistrySetPermissions +#' @rdname set_model_permissions +#' @alias modelRegistrySetPermissions #' @export -modelRegistrySetPermissions <- function(client, registered_model_id, access_control_list = NULL) { +set_model_permissions <- function(client, registered_model_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", paste("/api/2.0/permissions/registered-models/", registered_model_id, sep = ""), body = body) } + +#' @rdname set_model_permissions +#' @export +modelRegistrySetPermissions <- set_model_permissions #' Test a webhook. #' #' **NOTE:** This endpoint is in Public Preview. @@ -554,12 +699,17 @@ modelRegistrySetPermissions <- function(client, registered_model_id, access_cont #' @param event If `event` is specified, the test trigger uses the specified event. #' @param id Required. Webhook ID. #' -#' @rdname modelRegistryTestRegistryWebhook +#' @rdname test_model_registry_webhook +#' @alias modelRegistryTestRegistryWebhook #' @export -modelRegistryTestRegistryWebhook <- function(client, id, event = NULL) { +test_model_registry_webhook <- function(client, id, event = NULL) { body <- list(event = event, id = id) client$do("POST", "/api/2.0/mlflow/registry-webhooks/test", body = body) } + +#' @rdname test_model_registry_webhook +#' @export +modelRegistryTestRegistryWebhook <- test_model_registry_webhook #' Transition a stage. #' #' Transition a model version's stage. This is a Databricks workspace version of @@ -575,15 +725,20 @@ modelRegistryTestRegistryWebhook <- function(client, id, event = NULL) { #' @param stage Required. Target stage of the transition. #' @param version Required. Version of the model. #' -#' @rdname modelRegistryTransitionStage +#' @rdname transition_model_stage +#' @alias modelRegistryTransitionStage #' @export -modelRegistryTransitionStage <- function(client, name, version, stage, archive_existing_versions, +transition_model_stage <- function(client, name, version, stage, archive_existing_versions, comment = NULL) { body <- list(archive_existing_versions = archive_existing_versions, comment = comment, name = name, stage = stage, version = version) client$do("POST", "/api/2.0/mlflow/databricks/model-versions/transition-stage", body = body) } + +#' @rdname transition_model_stage +#' @export +modelRegistryTransitionStage <- transition_model_stage #' Update a comment. #' #' Post an edit to a comment on a model version. @@ -592,12 +747,17 @@ modelRegistryTransitionStage <- function(client, name, version, stage, archive_e #' @param comment Required. User-provided comment on the action. 
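`transition_model_stage()` is the Databricks-workspace variant of the MLflow stage transition, and `archive_existing_versions` is required (no default in the generated signature). A sketch with placeholder values; the stage names follow the MLflow convention:

```r
transition_model_stage(client,
  name    = "my_model",                # placeholder
  version = "2",
  stage   = "Production",              # MLflow stages: None/Staging/Production/Archived
  archive_existing_versions = TRUE,    # archive whatever currently holds the stage
  comment = "Promoting after offline evaluation")
```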
#' @param id Required. Unique identifier of an activity. #' -#' @rdname modelRegistryUpdateComment +#' @rdname update_model_comment +#' @alias modelRegistryUpdateComment #' @export -modelRegistryUpdateComment <- function(client, id, comment) { +update_model_comment <- function(client, id, comment) { body <- list(comment = comment, id = id) client$do("PATCH", "/api/2.0/mlflow/comments/update", body = body) } + +#' @rdname update_model_comment +#' @export +modelRegistryUpdateComment <- update_model_comment #' Update model. #' #' Updates a registered model. @@ -606,12 +766,17 @@ modelRegistryUpdateComment <- function(client, id, comment) { #' @param description If provided, updates the description for this `registered_model`. #' @param name Required. Registered model unique name identifier. #' -#' @rdname modelRegistryUpdateModel +#' @rdname update_model +#' @alias modelRegistryUpdateModel #' @export -modelRegistryUpdateModel <- function(client, name, description = NULL) { +update_model <- function(client, name, description = NULL) { body <- list(description = description, name = name) client$do("PATCH", "/api/2.0/mlflow/registered-models/update", body = body) } + +#' @rdname update_model +#' @export +modelRegistryUpdateModel <- update_model #' Update model version. #' #' Updates the model version. @@ -621,12 +786,17 @@ modelRegistryUpdateModel <- function(client, name, description = NULL) { #' @param name Required. Name of the registered model. #' @param version Required. Model version number. #' -#' @rdname modelRegistryUpdateModelVersion +#' @rdname update_model_version +#' @alias modelRegistryUpdateModelVersion #' @export -modelRegistryUpdateModelVersion <- function(client, name, version, description = NULL) { +update_model_version <- function(client, name, version, description = NULL) { body <- list(description = description, name = name, version = version) client$do("PATCH", "/api/2.0/mlflow/model-versions/update", body = body) } + +#' @rdname update_model_version +#' @export +modelRegistryUpdateModelVersion <- update_model_version #' Update registered model permissions. #' #' Updates the permissions on a registered model. Registered models can inherit @@ -636,13 +806,18 @@ modelRegistryUpdateModelVersion <- function(client, name, version, description = #' @param access_control_list This field has no description yet. #' @param registered_model_id Required. The registered model for which to get or manage permissions. #' -#' @rdname modelRegistryUpdatePermissions +#' @rdname update_model_permissions +#' @alias modelRegistryUpdatePermissions #' @export -modelRegistryUpdatePermissions <- function(client, registered_model_id, access_control_list = NULL) { +update_model_permissions <- function(client, registered_model_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PATCH", paste("/api/2.0/permissions/registered-models/", registered_model_id, sep = ""), body = body) } + +#' @rdname update_model_permissions +#' @export +modelRegistryUpdatePermissions <- update_model_permissions #' Update a webhook. #' #' **NOTE:** This endpoint is in Public Preview. @@ -657,15 +832,20 @@ modelRegistryUpdatePermissions <- function(client, registered_model_id, access_c #' @param job_spec This field has no description yet. #' @param status Enable or disable triggering the webhook, or put the webhook into test mode. 
#' -#' @rdname modelRegistryUpdateWebhook +#' @rdname update_model_webhook +#' @alias modelRegistryUpdateWebhook #' @export -modelRegistryUpdateWebhook <- function(client, id, description = NULL, events = NULL, - http_url_spec = NULL, job_spec = NULL, status = NULL) { +update_model_webhook <- function(client, id, description = NULL, events = NULL, http_url_spec = NULL, + job_spec = NULL, status = NULL) { body <- list(description = description, events = events, http_url_spec = http_url_spec, id = id, job_spec = job_spec, status = status) client$do("PATCH", "/api/2.0/mlflow/registry-webhooks/update", body = body) } +#' @rdname update_model_webhook +#' @export +modelRegistryUpdateWebhook <- update_model_webhook + diff --git a/R/model_versions.R b/R/model_versions.R index 7fb5e875..54115a8f 100755 --- a/R/model_versions.R +++ b/R/model_versions.R @@ -17,13 +17,18 @@ NULL #' @param full_name Required. The three-level (fully qualified) name of the model version. #' @param version Required. The integer version number of the model version. #' -#' @rdname modelVersionsDelete +#' @rdname delete_model_version +#' @alias modelVersionsDelete #' @export -modelVersionsDelete <- function(client, full_name, version) { +delete_model_version <- function(client, full_name, version) { client$do("DELETE", paste("/api/2.1/unity-catalog/models/", full_name, "/versions/", version, sep = "")) } + +#' @rdname delete_model_version +#' @export +modelVersionsDelete <- delete_model_version #' Get a Model Version. #' #' Get a model version. @@ -38,13 +43,18 @@ modelVersionsDelete <- function(client, full_name, version) { #' @param include_browse Whether to include model versions in the response for which the principal can only access selective metadata for. #' @param version Required. The integer version number of the model version. #' -#' @rdname modelVersionsGet +#' @rdname get_model_version +#' @alias modelVersionsGet #' @export -modelVersionsGet <- function(client, full_name, version, include_browse = NULL) { +get_model_version <- function(client, full_name, version, include_browse = NULL) { query <- list(include_browse = include_browse) client$do("GET", paste("/api/2.1/unity-catalog/models/", full_name, "/versions/", version, sep = ""), query = query) } + +#' @rdname get_model_version +#' @export +modelVersionsGet <- get_model_version #' Get Model Version By Alias. #' #' Get a model version by alias. @@ -58,13 +68,18 @@ modelVersionsGet <- function(client, full_name, version, include_browse = NULL) #' @param alias Required. The name of the alias. #' @param full_name Required. The three-level (fully qualified) name of the registered model. #' -#' @rdname modelVersionsGetByAlias +#' @rdname get_model_version_by_alias +#' @alias modelVersionsGetByAlias #' @export -modelVersionsGetByAlias <- function(client, full_name, alias) { +get_model_version_by_alias <- function(client, full_name, alias) { client$do("GET", paste("/api/2.1/unity-catalog/models/", full_name, "/aliases/", alias, sep = "")) } + +#' @rdname get_model_version_by_alias +#' @export +modelVersionsGetByAlias <- get_model_version_by_alias #' List Model Versions. #' #' List model versions. You can list model versions under a particular schema, @@ -88,9 +103,10 @@ modelVersionsGetByAlias <- function(client, full_name, alias) { #' #' @return `data.frame` with all of the response pages. 
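Note that `R/model_versions.R` is the Unity Catalog counterpart of the workspace registry above, and the flattening re-uses three of the same names with different signatures: `delete_model_version` and `get_model_version` here take `full_name` where the registry versions take `name` (an `update_model_version` pair follows below). Since both files `@export` them, the definition loaded last masks the other, which looks like a naming collision the generator would need to disambiguate. Usage against the Unity Catalog variant (the three-level name and alias are placeholders):

```r
champion <- get_model_version_by_alias(client,
  full_name = "main.default.my_model",   # catalog.schema.model
  alias     = "champion")
champion$version
```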
#' -#' @rdname modelVersionsList +#' @rdname list_model_versions +#' @alias modelVersionsList #' @export -modelVersionsList <- function(client, full_name, include_browse = NULL, max_results = NULL, +list_model_versions <- function(client, full_name, include_browse = NULL, max_results = NULL, page_token = NULL) { query <- list(include_browse = include_browse, max_results = max_results, page_token = page_token) @@ -111,6 +127,10 @@ modelVersionsList <- function(client, full_name, include_browse = NULL, max_resu return(results) } + +#' @rdname list_model_versions +#' @export +modelVersionsList <- list_model_versions #' Update a Model Version. #' #' Updates the specified model version. @@ -127,14 +147,19 @@ modelVersionsList <- function(client, full_name, include_browse = NULL, max_resu #' @param full_name Required. The three-level (fully qualified) name of the model version. #' @param version Required. The integer version number of the model version. #' -#' @rdname modelVersionsUpdate +#' @rdname update_model_version +#' @alias modelVersionsUpdate #' @export -modelVersionsUpdate <- function(client, full_name, version, comment = NULL) { +update_model_version <- function(client, full_name, version, comment = NULL) { body <- list(comment = comment) client$do("PATCH", paste("/api/2.1/unity-catalog/models/", full_name, "/versions/", version, sep = ""), body = body) } +#' @rdname update_model_version +#' @export +modelVersionsUpdate <- update_model_version + diff --git a/R/online_tables.R b/R/online_tables.R index b0eb8847..9e8101a5 100755 --- a/R/online_tables.R +++ b/R/online_tables.R @@ -11,12 +11,17 @@ NULL #' @param name Full three-part (catalog, schema, table) name of the table. #' @param spec Specification of the online table. #' -#' @rdname onlineTablesCreate +#' @rdname create_online_table +#' @alias onlineTablesCreate #' @export -onlineTablesCreate <- function(client, name = NULL, spec = NULL) { +create_online_table <- function(client, name = NULL, spec = NULL) { body <- list(name = name, spec = spec) client$do("POST", "/api/2.0/online-tables", body = body) } + +#' @rdname create_online_table +#' @export +onlineTablesCreate <- create_online_table #' Delete an Online Table. #' #' Delete an online table. Warning: This will delete all the data in the online @@ -26,12 +31,17 @@ onlineTablesCreate <- function(client, name = NULL, spec = NULL) { #' #' @param name Required. Full three-part (catalog, schema, table) name of the table. #' -#' @rdname onlineTablesDelete +#' @rdname delete_online_table +#' @alias onlineTablesDelete #' @export -onlineTablesDelete <- function(client, name) { +delete_online_table <- function(client, name) { client$do("DELETE", paste("/api/2.0/online-tables/", name, sep = "")) } + +#' @rdname delete_online_table +#' @export +onlineTablesDelete <- delete_online_table #' Get an Online Table. #' #' Get information about an existing online table and its status. @@ -39,12 +49,17 @@ onlineTablesDelete <- function(client, name) { #' #' @param name Required. Full three-part (catalog, schema, table) name of the table. 
#' -#' @rdname onlineTablesGet +#' @rdname get_online_table +#' @alias onlineTablesGet #' @export -onlineTablesGet <- function(client, name) { +get_online_table <- function(client, name) { client$do("GET", paste("/api/2.0/online-tables/", name, sep = "")) } +#' @rdname get_online_table +#' @export +onlineTablesGet <- get_online_table + diff --git a/R/permission_migration.R b/R/permission_migration.R index 423c4d6a..681f0366 100755 --- a/R/permission_migration.R +++ b/R/permission_migration.R @@ -14,12 +14,17 @@ NULL #' @param to_account_group_name Required. The name of the account group that permissions will be migrated to. #' @param workspace_id Required. WorkspaceId of the associated workspace where the permission migration will occur. #' -#' @rdname permissionMigrationMigratePermissions +#' @rdname migrate_permission_migration_permissions +#' @alias permissionMigrationMigratePermissions #' @export -permissionMigrationMigratePermissions <- function(client, workspace_id, from_workspace_group_name, +migrate_permission_migration_permissions <- function(client, workspace_id, from_workspace_group_name, to_account_group_name, size = NULL) { body <- list(from_workspace_group_name = from_workspace_group_name, size = size, to_account_group_name = to_account_group_name, workspace_id = workspace_id) client$do("POST", "/api/2.0/permissionmigration", body = body) } +#' @rdname migrate_permission_migration_permissions +#' @export +permissionMigrationMigratePermissions <- migrate_permission_migration_permissions + diff --git a/R/permissions.R b/R/permissions.R index 8d1292dd..30cf7d1c 100755 --- a/R/permissions.R +++ b/R/permissions.R @@ -12,13 +12,18 @@ NULL #' @param request_object_id Required. The id of the request object. #' @param request_object_type Required. The type of the request object. #' -#' @rdname permissionsGet +#' @rdname get_permission +#' @alias permissionsGet #' @export -permissionsGet <- function(client, request_object_type, request_object_id) { +get_permission <- function(client, request_object_type, request_object_id) { client$do("GET", paste("/api/2.0/permissions/", request_object_type, "/", request_object_id, sep = "")) } + +#' @rdname get_permission +#' @export +permissionsGet <- get_permission #' Get object permission levels. #' #' Gets the permission levels that a user can have on an object. @@ -27,13 +32,18 @@ permissionsGet <- function(client, request_object_type, request_object_id) { #' @param request_object_id Required. . #' @param request_object_type Required. . #' -#' @rdname permissionsGetPermissionLevels +#' @rdname get_permission_levels +#' @alias permissionsGetPermissionLevels #' @export -permissionsGetPermissionLevels <- function(client, request_object_type, request_object_id) { +get_permission_levels <- function(client, request_object_type, request_object_id) { client$do("GET", paste("/api/2.0/permissions/", request_object_type, "/", request_object_id, "/permissionLevels", , sep = "")) } + +#' @rdname get_permission_levels +#' @export +permissionsGetPermissionLevels <- get_permission_levels #' Set object permissions. #' #' Sets permissions on an object. Objects can inherit permissions from their @@ -44,13 +54,18 @@ permissionsGetPermissionLevels <- function(client, request_object_type, request_ #' @param request_object_id Required. The id of the request object. #' @param request_object_type Required. The type of the request object. 
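`R/permissions.R` wraps the generic permissions endpoints, keyed by an object type and id. A sketch of reading an ACL and then replacing it via the set call that follows (the object type, id, ACL shape, and permission-level name are assumptions based on the REST permissions API):

```r
perms <- get_permission(client,
  request_object_type = "clusters",        # REST object type, e.g. clusters/jobs/notebooks
  request_object_id   = "<cluster-id>")    # placeholder

set_permission(client,
  request_object_type = "clusters",
  request_object_id   = "<cluster-id>",
  access_control_list = list(
    list(user_name         = "someone@example.com",
         permission_level  = "CAN_ATTACH_TO")))   # level name assumed from the API
```

Per the HTTP verbs in these hunks, `set_permission()` (PUT) replaces the object's ACL wholesale, while `update_permission()` (PATCH) amends it.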
#' -#' @rdname permissionsSet +#' @rdname set_permission +#' @alias permissionsSet #' @export -permissionsSet <- function(client, request_object_type, request_object_id, access_control_list = NULL) { +set_permission <- function(client, request_object_type, request_object_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", paste("/api/2.0/permissions/", request_object_type, "/", request_object_id, sep = ""), body = body) } + +#' @rdname set_permission +#' @export +permissionsSet <- set_permission #' Update object permissions. #' #' Updates the permissions on an object. Objects can inherit permissions from @@ -61,14 +76,19 @@ permissionsSet <- function(client, request_object_type, request_object_id, acces #' @param request_object_id Required. The id of the request object. #' @param request_object_type Required. The type of the request object. #' -#' @rdname permissionsUpdate +#' @rdname update_permission +#' @alias permissionsUpdate #' @export -permissionsUpdate <- function(client, request_object_type, request_object_id, access_control_list = NULL) { +update_permission <- function(client, request_object_type, request_object_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PATCH", paste("/api/2.0/permissions/", request_object_type, "/", request_object_id, sep = ""), body = body) } +#' @rdname update_permission +#' @export +permissionsUpdate <- update_permission + diff --git a/R/pipelines.R b/R/pipelines.R index 1c191795..ae1430ce 100755 --- a/R/pipelines.R +++ b/R/pipelines.R @@ -29,9 +29,10 @@ NULL #' @param target Target schema (database) to add tables in this pipeline to. #' @param trigger Which pipeline trigger to use. #' -#' @rdname pipelinesCreate +#' @rdname create_pipeline +#' @alias pipelinesCreate #' @export -pipelinesCreate <- function(client, allow_duplicate_names = NULL, catalog = NULL, +create_pipeline <- function(client, allow_duplicate_names = NULL, catalog = NULL, channel = NULL, clusters = NULL, configuration = NULL, continuous = NULL, development = NULL, dry_run = NULL, edition = NULL, filters = NULL, id = NULL, libraries = NULL, name = NULL, notifications = NULL, photon = NULL, serverless = NULL, storage = NULL, @@ -44,6 +45,10 @@ pipelinesCreate <- function(client, allow_duplicate_names = NULL, catalog = NULL trigger = trigger) client$do("POST", "/api/2.0/pipelines", body = body) } + +#' @rdname create_pipeline +#' @export +pipelinesCreate <- create_pipeline #' Delete a pipeline. #' #' Deletes a pipeline. @@ -51,23 +56,33 @@ pipelinesCreate <- function(client, allow_duplicate_names = NULL, catalog = NULL #' #' @param pipeline_id Required. This field has no description yet. #' -#' @rdname pipelinesDelete +#' @rdname delete_pipeline +#' @alias pipelinesDelete #' @export -pipelinesDelete <- function(client, pipeline_id) { +delete_pipeline <- function(client, pipeline_id) { client$do("DELETE", paste("/api/2.0/pipelines/", pipeline_id, sep = "")) } + +#' @rdname delete_pipeline +#' @export +pipelinesDelete <- delete_pipeline #' Get a pipeline. #' @param client Required. Instance of DatabricksClient() #' #' @param pipeline_id Required. This field has no description yet. 
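`create_pipeline()` takes the Delta Live Tables settings as plain R lists that serialize into the JSON body the API expects; a sketch (the notebook path and the `libraries` shape are assumptions based on the Pipelines API):

```r
p <- create_pipeline(client,
  name       = "sdk-demo-pipeline",       # illustrative
  continuous = FALSE,
  libraries  = list(list(notebook = list(path = "/Users/me@example.com/dlt_demo"))))
p$pipeline_id   # response field assumed from the Pipelines API
```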
#' -#' @rdname pipelinesGet +#' @rdname get_pipeline +#' @alias pipelinesGet #' @export -pipelinesGet <- function(client, pipeline_id) { +get_pipeline <- function(client, pipeline_id) { client$do("GET", paste("/api/2.0/pipelines/", pipeline_id, sep = "")) } + +#' @rdname get_pipeline +#' @export +pipelinesGet <- get_pipeline #' Get pipeline permission levels. #' #' Gets the permission levels that a user can have on an object. @@ -75,13 +90,18 @@ pipelinesGet <- function(client, pipeline_id) { #' #' @param pipeline_id Required. The pipeline for which to get or manage permissions. #' -#' @rdname pipelinesGetPermissionLevels +#' @rdname get_pipeline_permission_levels +#' @alias pipelinesGetPermissionLevels #' @export -pipelinesGetPermissionLevels <- function(client, pipeline_id) { +get_pipeline_permission_levels <- function(client, pipeline_id) { client$do("GET", paste("/api/2.0/permissions/pipelines/", pipeline_id, "/permissionLevels", , sep = "")) } + +#' @rdname get_pipeline_permission_levels +#' @export +pipelinesGetPermissionLevels <- get_pipeline_permission_levels #' Get pipeline permissions. #' #' Gets the permissions of a pipeline. Pipelines can inherit permissions from @@ -90,12 +110,17 @@ pipelinesGetPermissionLevels <- function(client, pipeline_id) { #' #' @param pipeline_id Required. The pipeline for which to get or manage permissions. #' -#' @rdname pipelinesGetPermissions +#' @rdname get_pipeline_permissions +#' @alias pipelinesGetPermissions #' @export -pipelinesGetPermissions <- function(client, pipeline_id) { +get_pipeline_permissions <- function(client, pipeline_id) { client$do("GET", paste("/api/2.0/permissions/pipelines/", pipeline_id, sep = "")) } + +#' @rdname get_pipeline_permissions +#' @export +pipelinesGetPermissions <- get_pipeline_permissions #' Get a pipeline update. #' #' Gets an update from an active pipeline. @@ -104,13 +129,18 @@ pipelinesGetPermissions <- function(client, pipeline_id) { #' @param pipeline_id Required. The ID of the pipeline. #' @param update_id Required. The ID of the update. #' -#' @rdname pipelinesGetUpdate +#' @rdname get_pipeline_update +#' @alias pipelinesGetUpdate #' @export -pipelinesGetUpdate <- function(client, pipeline_id, update_id) { +get_pipeline_update <- function(client, pipeline_id, update_id) { client$do("GET", paste("/api/2.0/pipelines/", pipeline_id, "/updates/", update_id, sep = "")) } + +#' @rdname get_pipeline_update +#' @export +pipelinesGetUpdate <- get_pipeline_update #' List pipeline events. #' #' Retrieves events for a pipeline. @@ -124,9 +154,10 @@ pipelinesGetUpdate <- function(client, pipeline_id, update_id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname pipelinesListPipelineEvents +#' @rdname list_pipeline_events +#' @alias pipelinesListPipelineEvents #' @export -pipelinesListPipelineEvents <- function(client, pipeline_id, filter = NULL, max_results = NULL, +list_pipeline_events <- function(client, pipeline_id, filter = NULL, max_results = NULL, order_by = NULL, page_token = NULL) { query <- list(filter = filter, max_results = max_results, order_by = order_by, page_token = page_token) @@ -148,6 +179,10 @@ pipelinesListPipelineEvents <- function(client, pipeline_id, filter = NULL, max_ return(results) } + +#' @rdname list_pipeline_events +#' @export +pipelinesListPipelineEvents <- list_pipeline_events #' List pipelines. #' #' Lists pipelines defined in the Delta Live Tables system. 
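`list_pipeline_events()` is another paginated wrapper whose `filter` string goes straight to the API. A sketch (the pipeline id is a placeholder; the filter grammar is assumed from the pipeline-events documentation):

```r
errs <- list_pipeline_events(client,
  pipeline_id = "<pipeline-id>",     # placeholder
  filter      = "level = 'ERROR'",   # filter grammar assumed
  max_results = 100)
nrow(errs)
```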
@@ -160,9 +195,10 @@ pipelinesListPipelineEvents <- function(client, pipeline_id, filter = NULL, max_ #' #' @return `data.frame` with all of the response pages. #' -#' @rdname pipelinesListPipelines +#' @rdname list_pipeline_pipelines +#' @alias pipelinesListPipelines #' @export -pipelinesListPipelines <- function(client, filter = NULL, max_results = NULL, order_by = NULL, +list_pipeline_pipelines <- function(client, filter = NULL, max_results = NULL, order_by = NULL, page_token = NULL) { query <- list(filter = filter, max_results = max_results, order_by = order_by, page_token = page_token) @@ -183,6 +219,10 @@ pipelinesListPipelines <- function(client, filter = NULL, max_results = NULL, or return(results) } + +#' @rdname list_pipeline_pipelines +#' @export +pipelinesListPipelines <- list_pipeline_pipelines #' List pipeline updates. #' #' List updates for an active pipeline. @@ -193,14 +233,19 @@ pipelinesListPipelines <- function(client, filter = NULL, max_results = NULL, or #' @param pipeline_id Required. The pipeline to return updates for. #' @param until_update_id If present, returns updates until and including this update_id. #' -#' @rdname pipelinesListUpdates +#' @rdname list_pipeline_updates +#' @alias pipelinesListUpdates #' @export -pipelinesListUpdates <- function(client, pipeline_id, max_results = NULL, page_token = NULL, +list_pipeline_updates <- function(client, pipeline_id, max_results = NULL, page_token = NULL, until_update_id = NULL) { query <- list(max_results = max_results, page_token = page_token, until_update_id = until_update_id) client$do("GET", paste("/api/2.0/pipelines/", pipeline_id, "/updates", , sep = ""), query = query) } + +#' @rdname list_pipeline_updates +#' @export +pipelinesListUpdates <- list_pipeline_updates #' Set pipeline permissions. #' #' Sets permissions on a pipeline. Pipelines can inherit permissions from their @@ -210,13 +255,18 @@ pipelinesListUpdates <- function(client, pipeline_id, max_results = NULL, page_t #' @param access_control_list This field has no description yet. #' @param pipeline_id Required. The pipeline for which to get or manage permissions. #' -#' @rdname pipelinesSetPermissions +#' @rdname set_pipeline_permissions +#' @alias pipelinesSetPermissions #' @export -pipelinesSetPermissions <- function(client, pipeline_id, access_control_list = NULL) { +set_pipeline_permissions <- function(client, pipeline_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", paste("/api/2.0/permissions/pipelines/", pipeline_id, sep = ""), body = body) } + +#' @rdname set_pipeline_permissions +#' @export +pipelinesSetPermissions <- set_pipeline_permissions #' Start a pipeline. #' #' Starts a new update for the pipeline. If there is already an active update @@ -231,15 +281,20 @@ pipelinesSetPermissions <- function(client, pipeline_id, access_control_list = N #' @param refresh_selection A list of tables to update without fullRefresh. #' @param validate_only If true, this update only validates the correctness of pipeline source code but does not materialize or publish any datasets. 
#' -#' @rdname pipelinesStartUpdate +#' @rdname start_pipeline_update +#' @alias pipelinesStartUpdate #' @export -pipelinesStartUpdate <- function(client, pipeline_id, cause = NULL, full_refresh = NULL, +start_pipeline_update <- function(client, pipeline_id, cause = NULL, full_refresh = NULL, full_refresh_selection = NULL, refresh_selection = NULL, validate_only = NULL) { body <- list(cause = cause, full_refresh = full_refresh, full_refresh_selection = full_refresh_selection, refresh_selection = refresh_selection, validate_only = validate_only) client$do("POST", paste("/api/2.0/pipelines/", pipeline_id, "/updates", , sep = ""), body = body) } + +#' @rdname start_pipeline_update +#' @export +pipelinesStartUpdate <- start_pipeline_update #' Stop a pipeline. #' #' Stops the pipeline by canceling the active update. If there is no active @@ -248,12 +303,17 @@ pipelinesStartUpdate <- function(client, pipeline_id, cause = NULL, full_refresh #' #' @param pipeline_id Required. This field has no description yet. #' -#' @rdname pipelinesStop +#' @rdname stop_pipeline +#' @alias pipelinesStop #' @export -pipelinesStop <- function(client, pipeline_id) { +stop_pipeline <- function(client, pipeline_id) { client$do("POST", paste("/api/2.0/pipelines/", pipeline_id, "/stop", , sep = "")) } + +#' @rdname stop_pipeline +#' @export +pipelinesStop <- stop_pipeline #' Edit a pipeline. #' #' Updates a pipeline with the supplied configuration. @@ -280,9 +340,10 @@ pipelinesStop <- function(client, pipeline_id) { #' @param target Target schema (database) to add tables in this pipeline to. #' @param trigger Which pipeline trigger to use. #' -#' @rdname pipelinesUpdate +#' @rdname update_pipeline +#' @alias pipelinesUpdate #' @export -pipelinesUpdate <- function(client, pipeline_id, allow_duplicate_names = NULL, catalog = NULL, +update_pipeline <- function(client, pipeline_id, allow_duplicate_names = NULL, catalog = NULL, channel = NULL, clusters = NULL, configuration = NULL, continuous = NULL, development = NULL, edition = NULL, expected_last_modified = NULL, filters = NULL, id = NULL, libraries = NULL, name = NULL, notifications = NULL, photon = NULL, serverless = NULL, storage = NULL, @@ -295,6 +356,10 @@ pipelinesUpdate <- function(client, pipeline_id, allow_duplicate_names = NULL, c target = target, trigger = trigger) client$do("PUT", paste("/api/2.0/pipelines/", pipeline_id, sep = ""), body = body) } + +#' @rdname update_pipeline +#' @export +pipelinesUpdate <- update_pipeline #' Update pipeline permissions. #' #' Updates the permissions on a pipeline. Pipelines can inherit permissions from @@ -304,14 +369,19 @@ pipelinesUpdate <- function(client, pipeline_id, allow_duplicate_names = NULL, c #' @param access_control_list This field has no description yet. #' @param pipeline_id Required. The pipeline for which to get or manage permissions. 
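`start_pipeline_update()` kicks off a refresh and returns immediately, while `stop_pipeline()` cancels the active update. A sketch (ids are placeholders; the `update_id` response field is an assumption based on the Pipelines API):

```r
upd <- start_pipeline_update(client,
  pipeline_id  = "<pipeline-id>",
  full_refresh = TRUE)       # recompute all tables rather than update incrementally
upd$update_id                # response field assumed

stop_pipeline(client, pipeline_id = "<pipeline-id>")
```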
#' -#' @rdname pipelinesUpdatePermissions +#' @rdname update_pipeline_permissions +#' @alias pipelinesUpdatePermissions #' @export -pipelinesUpdatePermissions <- function(client, pipeline_id, access_control_list = NULL) { +update_pipeline_permissions <- function(client, pipeline_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PATCH", paste("/api/2.0/permissions/pipelines/", pipeline_id, sep = ""), body = body) } +#' @rdname update_pipeline_permissions +#' @export +pipelinesUpdatePermissions <- update_pipeline_permissions + @@ -339,9 +409,9 @@ pipelinesUpdatePermissions <- function(client, pipeline_id, access_control_list #' #' @param pipeline_id Required. This field has no description yet. #' -#' @rdname pipelinesStopAndWait +#' @rdname stop_pipeline_and_wait #' @export -pipelinesStopAndWait <- function(client, pipeline_id, timeout = 20, callback = cli_reporter) { +stop_pipeline_and_wait <- function(client, pipeline_id, timeout = 20, callback = cli_reporter) { op_response <- client$do("POST", paste("/api/2.0/pipelines/", pipeline_id, "/stop", , sep = "")) diff --git a/R/policy_families.R b/R/policy_families.R index b2642070..c646ad88 100755 --- a/R/policy_families.R +++ b/R/policy_families.R @@ -10,12 +10,17 @@ NULL #' #' @param policy_family_id Required. This field has no description yet. #' -#' @rdname policyFamiliesGet +#' @rdname get_cluster_policy_family +#' @alias policyFamiliesGet #' @export -policyFamiliesGet <- function(client, policy_family_id) { +get_cluster_policy_family <- function(client, policy_family_id) { client$do("GET", paste("/api/2.0/policy-families/", policy_family_id, sep = "")) } + +#' @rdname get_cluster_policy_family +#' @export +policyFamiliesGet <- get_cluster_policy_family #' List policy families. #' #' Retrieve a list of policy families. This API is paginated. @@ -26,9 +31,10 @@ policyFamiliesGet <- function(client, policy_family_id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname policyFamiliesList +#' @rdname list_cluster_policy_families +#' @alias policyFamiliesList #' @export -policyFamiliesList <- function(client, max_results = NULL, page_token = NULL) { +list_cluster_policy_families <- function(client, max_results = NULL, page_token = NULL) { query <- list(max_results = max_results, page_token = page_token) results <- data.frame() @@ -48,4 +54,8 @@ policyFamiliesList <- function(client, max_results = NULL, page_token = NULL) { } +#' @rdname list_cluster_policy_families +#' @export +policyFamiliesList <- list_cluster_policy_families + diff --git a/R/providers.R b/R/providers.R index 093d8505..f1ebddc7 100755 --- a/R/providers.R +++ b/R/providers.R @@ -14,13 +14,18 @@ NULL #' @param name Required. The name of the Provider. #' @param recipient_profile_str This field is required when the __authentication_type__ is **TOKEN** or not provided. #' -#' @rdname providersCreate +#' @rdname create_provider +#' @alias providersCreate #' @export -providersCreate <- function(client, name, authentication_type, comment = NULL, recipient_profile_str = NULL) { +create_provider <- function(client, name, authentication_type, comment = NULL, recipient_profile_str = NULL) { body <- list(authentication_type = authentication_type, comment = comment, name = name, recipient_profile_str = recipient_profile_str) client$do("POST", "/api/2.1/unity-catalog/providers", body = body) } + +#' @rdname create_provider +#' @export +providersCreate <- create_provider #' Delete a provider. 
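Long-running operations also get an `_and_wait` twin, visible here for `stop_pipeline_and_wait()`: the same arguments plus a `timeout` (defaulting to 20 per the hunk, apparently in minutes) and a `callback` that defaults to `cli_reporter` for progress output. A minimal sketch:

```r
# Blocks until the pipeline settles into an idle state or the timeout elapses,
# printing progress via the default cli_reporter callback.
stop_pipeline_and_wait(client, pipeline_id = "<pipeline-id>")   # placeholder id
```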
#' #' Deletes an authentication provider, if the caller is a metastore admin or is @@ -29,12 +34,17 @@ providersCreate <- function(client, name, authentication_type, comment = NULL, r #' #' @param name Required. Name of the provider. #' -#' @rdname providersDelete +#' @rdname delete_provider +#' @alias providersDelete #' @export -providersDelete <- function(client, name) { +delete_provider <- function(client, name) { client$do("DELETE", paste("/api/2.1/unity-catalog/providers/", name, sep = "")) } + +#' @rdname delete_provider +#' @export +providersDelete <- delete_provider #' Get a provider. #' #' Gets a specific authentication provider. The caller must supply the name of @@ -44,12 +54,17 @@ providersDelete <- function(client, name) { #' #' @param name Required. Name of the provider. #' -#' @rdname providersGet +#' @rdname get_provider +#' @alias providersGet #' @export -providersGet <- function(client, name) { +get_provider <- function(client, name) { client$do("GET", paste("/api/2.1/unity-catalog/providers/", name, sep = "")) } + +#' @rdname get_provider +#' @export +providersGet <- get_provider #' List providers. #' #' Gets an array of available authentication providers. The caller must either @@ -62,15 +77,20 @@ providersGet <- function(client, name) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname providersList +#' @rdname list_providers +#' @alias providersList #' @export -providersList <- function(client, data_provider_global_metastore_id = NULL) { +list_providers <- function(client, data_provider_global_metastore_id = NULL) { query <- list(data_provider_global_metastore_id = data_provider_global_metastore_id) json <- client$do("GET", "/api/2.1/unity-catalog/providers", query = query) return(json$providers) } + +#' @rdname list_providers +#' @export +providersList <- list_providers #' List shares by Provider. #' #' Gets an array of a specified provider's shares within the metastore where: @@ -82,9 +102,10 @@ providersList <- function(client, data_provider_global_metastore_id = NULL) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname providersListShares +#' @rdname list_provider_shares +#' @alias providersListShares #' @export -providersListShares <- function(client, name) { +list_provider_shares <- function(client, name) { json <- client$do("GET", paste("/api/2.1/unity-catalog/providers/", name, "/shares", @@ -92,6 +113,10 @@ providersListShares <- function(client, name) { return(json$shares) } + +#' @rdname list_provider_shares +#' @export +providersListShares <- list_provider_shares #' Update a provider. #' #' Updates the information for an authentication provider, if the caller is a @@ -106,15 +131,20 @@ providersListShares <- function(client, name) { #' @param owner Username of Provider owner. #' @param recipient_profile_str This field is required when the __authentication_type__ is **TOKEN** or not provided. 
#' -#' @rdname providersUpdate +#' @rdname update_provider +#' @alias providersUpdate #' @export -providersUpdate <- function(client, name, comment = NULL, new_name = NULL, owner = NULL, +update_provider <- function(client, name, comment = NULL, new_name = NULL, owner = NULL, recipient_profile_str = NULL) { body <- list(comment = comment, new_name = new_name, owner = owner, recipient_profile_str = recipient_profile_str) client$do("PATCH", paste("/api/2.1/unity-catalog/providers/", name, sep = ""), body = body) } +#' @rdname update_provider +#' @export +providersUpdate <- update_provider + diff --git a/R/queries.R b/R/queries.R index 07041f53..06d03d50 100755 --- a/R/queries.R +++ b/R/queries.R @@ -24,14 +24,19 @@ NULL #' @param query The text of the query to be run. #' @param run_as_role Sets the **Run as** role for the object. #' -#' @rdname queriesCreate +#' @rdname create_query +#' @alias queriesCreate #' @export -queriesCreate <- function(client, data_source_id = NULL, description = NULL, name = NULL, +create_query <- function(client, data_source_id = NULL, description = NULL, name = NULL, options = NULL, parent = NULL, query = NULL, run_as_role = NULL) { body <- list(data_source_id = data_source_id, description = description, name = name, options = options, parent = parent, query = query, run_as_role = run_as_role) client$do("POST", "/api/2.0/preview/sql/queries", body = body) } + +#' @rdname create_query +#' @export +queriesCreate <- create_query #' Delete a query. #' #' Moves a query to the trash. Trashed queries immediately disappear from @@ -41,12 +46,17 @@ queriesCreate <- function(client, data_source_id = NULL, description = NULL, nam #' #' @param query_id Required. This field has no description yet. #' -#' @rdname queriesDelete +#' @rdname delete_query +#' @alias queriesDelete #' @export -queriesDelete <- function(client, query_id) { +delete_query <- function(client, query_id) { client$do("DELETE", paste("/api/2.0/preview/sql/queries/", query_id, sep = "")) } + +#' @rdname delete_query +#' @export +queriesDelete <- delete_query #' Get a query definition. #' #' Retrieve a query object definition along with contextual permissions @@ -55,12 +65,17 @@ queriesDelete <- function(client, query_id) { #' #' @param query_id Required. This field has no description yet. #' -#' @rdname queriesGet +#' @rdname get_query +#' @alias queriesGet #' @export -queriesGet <- function(client, query_id) { +get_query <- function(client, query_id) { client$do("GET", paste("/api/2.0/preview/sql/queries/", query_id, sep = "")) } + +#' @rdname get_query +#' @export +queriesGet <- get_query #' Get a list of queries. #' #' Gets a list of queries. Optionally, this list can be filtered by a search @@ -77,9 +92,10 @@ queriesGet <- function(client, query_id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname queriesList +#' @rdname list_queries +#' @alias queriesList #' @export -queriesList <- function(client, order = NULL, page = NULL, page_size = NULL, q = NULL) { +list_queries <- function(client, order = NULL, page = NULL, page_size = NULL, q = NULL) { query <- list(order = order, page = page, page_size = page_size, q = q) query$page = 1 @@ -98,6 +114,10 @@ queriesList <- function(client, order = NULL, page = NULL, page_size = NULL, q = return(results) } + +#' @rdname list_queries +#' @export +queriesList <- list_queries #' Restore a query. #' #' Restore a query that has been moved to the trash. 
A restored query appears in @@ -106,12 +126,17 @@ queriesList <- function(client, order = NULL, page = NULL, page_size = NULL, q = #' #' @param query_id Required. This field has no description yet. #' -#' @rdname queriesRestore +#' @rdname restore_query +#' @alias queriesRestore #' @export -queriesRestore <- function(client, query_id) { +restore_query <- function(client, query_id) { client$do("POST", paste("/api/2.0/preview/sql/queries/trash/", query_id, sep = "")) } + +#' @rdname restore_query +#' @export +queriesRestore <- restore_query #' Change a query definition. #' #' Modify this query definition. @@ -127,9 +152,10 @@ queriesRestore <- function(client, query_id) { #' @param query_id Required. This field has no description yet. #' @param run_as_role Sets the **Run as** role for the object. #' -#' @rdname queriesUpdate +#' @rdname update_query +#' @alias queriesUpdate #' @export -queriesUpdate <- function(client, query_id, data_source_id = NULL, description = NULL, +update_query <- function(client, query_id, data_source_id = NULL, description = NULL, name = NULL, options = NULL, query = NULL, run_as_role = NULL) { body <- list(data_source_id = data_source_id, description = description, name = name, options = options, query = query, run_as_role = run_as_role) @@ -137,6 +163,10 @@ queriesUpdate <- function(client, query_id, data_source_id = NULL, description = body = body) } +#' @rdname update_query +#' @export +queriesUpdate <- update_query + diff --git a/R/query_history.R b/R/query_history.R index 6fc1a687..27b572f5 100755 --- a/R/query_history.R +++ b/R/query_history.R @@ -17,10 +17,11 @@ NULL #' #' @return `data.frame` with all of the response pages. #' -#' @rdname queryHistoryList +#' @rdname list_query_history +#' @alias queryHistoryList #' @export -queryHistoryList <- function(client, filter_by = NULL, include_metrics = NULL, max_results = NULL, - page_token = NULL) { +list_query_history <- function(client, filter_by = NULL, include_metrics = NULL, + max_results = NULL, page_token = NULL) { query <- list(filter_by = filter_by, include_metrics = include_metrics, max_results = max_results, page_token = page_token) @@ -41,3 +42,7 @@ queryHistoryList <- function(client, filter_by = NULL, include_metrics = NULL, m } +#' @rdname list_query_history +#' @export +queryHistoryList <- list_query_history + diff --git a/R/query_visualizations.R b/R/query_visualizations.R index cff13c70..19f46dd6 100755 --- a/R/query_visualizations.R +++ b/R/query_visualizations.R @@ -12,25 +12,35 @@ NULL #' @param query_id Required. The identifier returned by :method:queries/create. #' @param type Required. The type of visualization: chart, table, pivot table, and so on. #' -#' @rdname queryVisualizationsCreate +#' @rdname create_query_visualization +#' @alias queryVisualizationsCreate #' @export -queryVisualizationsCreate <- function(client, query_id, type, options, description = NULL, +create_query_visualization <- function(client, query_id, type, options, description = NULL, name = NULL) { body <- list(description = description, name = name, options = options, query_id = query_id, type = type) client$do("POST", "/api/2.0/preview/sql/visualizations", body = body) } + +#' @rdname create_query_visualization +#' @export +queryVisualizationsCreate <- create_query_visualization #' Remove visualization. #' @param client Required. Instance of DatabricksClient() #' #' @param id Required. Widget ID returned by :method:queryvisualizations/create.
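Unlike the token-paginated lists, `list_queries()` pages by number: the wrapper pins `query$page = 1` and walks forward until the listing is exhausted (the loop body is elided in the hunk above). Creating and then searching, with illustrative values (`run_as_role` accepts `viewer` or `owner` per the SQL queries API; the data source id is a placeholder):

```r
q <- create_query(client,
  name           = "Ad-hoc: trip count",                          # illustrative
  query          = "SELECT COUNT(*) FROM samples.nyctaxi.trips",  # illustrative
  data_source_id = "<data-source-id>",                            # placeholder
  run_as_role    = "viewer")

qs <- list_queries(client, page_size = 100, q = "trip")   # q is a search term
nrow(qs)
```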
#' -#' @rdname queryVisualizationsDelete +#' @rdname delete_query_visualization +#' @alias queryVisualizationsDelete #' @export -queryVisualizationsDelete <- function(client, id) { +delete_query_visualization <- function(client, id) { client$do("DELETE", paste("/api/2.0/preview/sql/visualizations/", id, sep = "")) } + +#' @rdname delete_query_visualization +#' @export +queryVisualizationsDelete <- delete_query_visualization #' Edit existing visualization. #' @param client Required. Instance of DatabricksClient() #' @@ -42,9 +52,10 @@ queryVisualizationsDelete <- function(client, id) { #' @param type The type of visualization: chart, table, pivot table, and so on. #' @param updated_at This field has no description yet. #' -#' @rdname queryVisualizationsUpdate +#' @rdname update_query_visualization +#' @alias queryVisualizationsUpdate #' @export -queryVisualizationsUpdate <- function(client, id, created_at = NULL, description = NULL, +update_query_visualization <- function(client, id, created_at = NULL, description = NULL, name = NULL, options = NULL, type = NULL, updated_at = NULL) { body <- list(created_at = created_at, description = description, id = id, name = name, options = options, type = type, updated_at = updated_at) @@ -52,5 +63,9 @@ queryVisualizationsUpdate <- function(client, id, created_at = NULL, description body = body) } +#' @rdname update_query_visualization +#' @export +queryVisualizationsUpdate <- update_query_visualization + diff --git a/R/recipient_activation.R b/R/recipient_activation.R index 9613b9da..bcf2d88d 100755 --- a/R/recipient_activation.R +++ b/R/recipient_activation.R @@ -10,13 +10,18 @@ NULL #' #' @param activation_url Required. The one time activation url. #' -#' @rdname recipientActivationGetActivationUrlInfo +#' @rdname get_recipient_activation_url_info +#' @alias recipientActivationGetActivationUrlInfo #' @export -recipientActivationGetActivationUrlInfo <- function(client, activation_url) { +get_recipient_activation_url_info <- function(client, activation_url) { client$do("GET", paste("/api/2.1/unity-catalog/public/data_sharing_activation_info/", activation_url, sep = "")) } + +#' @rdname get_recipient_activation_url_info +#' @export +recipientActivationGetActivationUrlInfo <- get_recipient_activation_url_info #' Get an access token. #' #' Retrieve access token with an activation url. This is a public API without @@ -25,12 +30,17 @@ recipientActivationGetActivationUrlInfo <- function(client, activation_url) { #' #' @param activation_url Required. The one time activation url. #' -#' @rdname recipientActivationRetrieveToken +#' @rdname retrieve_recipient_activation_token +#' @alias recipientActivationRetrieveToken #' @export -recipientActivationRetrieveToken <- function(client, activation_url) { +retrieve_recipient_activation_token <- function(client, activation_url) { client$do("GET", paste("/api/2.1/unity-catalog/public/data_sharing_activation/", activation_url, sep = "")) } +#' @rdname retrieve_recipient_activation_token +#' @export +recipientActivationRetrieveToken <- retrieve_recipient_activation_token + diff --git a/R/recipients.R b/R/recipients.R index f839c7f9..5405e69f 100755 --- a/R/recipients.R +++ b/R/recipients.R @@ -19,15 +19,20 @@ NULL #' @param properties_kvpairs Recipient properties as map of string key-value pairs. #' @param sharing_code The one-time sharing code provided by the data recipient. 
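The recipient-activation pair above is unusual in that the underlying endpoints are public (no authentication, as the roxygen text notes), though the calls still go through the client. A sketch (the activation URL is the one-time value from the sharing invitation; the response field name is an assumption based on the Delta Sharing activation format):

```r
info <- get_recipient_activation_url_info(client, activation_url = "<activation-url>")
tok  <- retrieve_recipient_activation_token(client, activation_url = "<activation-url>")
tok$bearerToken   # field name assumed from the Delta Sharing profile response
```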
#' -#' @rdname recipientsCreate +#' @rdname create_recipient +#' @alias recipientsCreate #' @export -recipientsCreate <- function(client, name, authentication_type, comment = NULL, data_recipient_global_metastore_id = NULL, +create_recipient <- function(client, name, authentication_type, comment = NULL, data_recipient_global_metastore_id = NULL, ip_access_list = NULL, owner = NULL, properties_kvpairs = NULL, sharing_code = NULL) { body <- list(authentication_type = authentication_type, comment = comment, data_recipient_global_metastore_id = data_recipient_global_metastore_id, ip_access_list = ip_access_list, name = name, owner = owner, properties_kvpairs = properties_kvpairs, sharing_code = sharing_code) client$do("POST", "/api/2.1/unity-catalog/recipients", body = body) } + +#' @rdname create_recipient +#' @export +recipientsCreate <- create_recipient #' Delete a share recipient. #' #' Deletes the specified recipient from the metastore. The caller must be the @@ -36,12 +41,17 @@ recipientsCreate <- function(client, name, authentication_type, comment = NULL, #' #' @param name Required. Name of the recipient. #' -#' @rdname recipientsDelete +#' @rdname delete_recipient +#' @alias recipientsDelete #' @export -recipientsDelete <- function(client, name) { +delete_recipient <- function(client, name) { client$do("DELETE", paste("/api/2.1/unity-catalog/recipients/", name, sep = "")) } + +#' @rdname delete_recipient +#' @export +recipientsDelete <- delete_recipient #' Get a share recipient. #' #' Gets a share recipient from the metastore if: @@ -51,12 +61,17 @@ recipientsDelete <- function(client, name) { #' #' @param name Required. Name of the recipient. #' -#' @rdname recipientsGet +#' @rdname get_recipient +#' @alias recipientsGet #' @export -recipientsGet <- function(client, name) { +get_recipient <- function(client, name) { client$do("GET", paste("/api/2.1/unity-catalog/recipients/", name, sep = "")) } + +#' @rdname get_recipient +#' @export +recipientsGet <- get_recipient #' List share recipients. #' #' Gets an array of all share recipients within the current metastore where: @@ -69,15 +84,20 @@ recipientsGet <- function(client, name) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname recipientsList +#' @rdname list_recipients +#' @alias recipientsList #' @export -recipientsList <- function(client, data_recipient_global_metastore_id = NULL) { +list_recipients <- function(client, data_recipient_global_metastore_id = NULL) { query <- list(data_recipient_global_metastore_id = data_recipient_global_metastore_id) json <- client$do("GET", "/api/2.1/unity-catalog/recipients", query = query) return(json$recipients) } + +#' @rdname list_recipients +#' @export +recipientsList <- list_recipients #' Rotate a token. #' #' Refreshes the specified recipient's delta sharing authentication token with @@ -87,13 +107,18 @@ recipientsList <- function(client, data_recipient_global_metastore_id = NULL) { #' @param existing_token_expire_in_seconds Required. The expiration time of the bearer token in ISO 8601 format. #' @param name Required. The name of the recipient. 
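The rotate-token hunk that follows refreshes a recipient's bearer token while letting the old one linger for a grace period; per the Delta Sharing documentation (an assumption here, not stated in the hunk), passing `0` expires the existing token immediately:

```r
rotate_recipient_token(client,
  name = "<recipient-name>",                 # placeholder
  existing_token_expire_in_seconds = 0)      # 0 = expire the old token right away (assumed)
```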
#' -#' @rdname recipientsRotateToken +#' @rdname rotate_recipient_token +#' @alias recipientsRotateToken #' @export -recipientsRotateToken <- function(client, name, existing_token_expire_in_seconds) { +rotate_recipient_token <- function(client, name, existing_token_expire_in_seconds) { body <- list(existing_token_expire_in_seconds = existing_token_expire_in_seconds) client$do("POST", paste("/api/2.1/unity-catalog/recipients/", name, "/rotate-token", , sep = ""), body = body) } + +#' @rdname rotate_recipient_token +#' @export +recipientsRotateToken <- rotate_recipient_token #' Get recipient share permissions. #' #' Gets the share permissions for the specified Recipient. The caller must be a @@ -102,13 +127,18 @@ recipientsRotateToken <- function(client, name, existing_token_expire_in_seconds #' #' @param name Required. The name of the Recipient. #' -#' @rdname recipientsSharePermissions +#' @rdname share_recipient_permissions +#' @alias recipientsSharePermissions #' @export -recipientsSharePermissions <- function(client, name) { +share_recipient_permissions <- function(client, name) { client$do("GET", paste("/api/2.1/unity-catalog/recipients/", name, "/share-permissions", , sep = "")) } + +#' @rdname share_recipient_permissions +#' @export +recipientsSharePermissions <- share_recipient_permissions #' Update a share recipient. #' #' Updates an existing recipient in the metastore. The caller must be a @@ -124,9 +154,10 @@ recipientsSharePermissions <- function(client, name) { #' @param owner Username of the recipient owner. #' @param properties_kvpairs Recipient properties as map of string key-value pairs. #' -#' @rdname recipientsUpdate +#' @rdname update_recipient +#' @alias recipientsUpdate #' @export -recipientsUpdate <- function(client, name, comment = NULL, ip_access_list = NULL, +update_recipient <- function(client, name, comment = NULL, ip_access_list = NULL, new_name = NULL, owner = NULL, properties_kvpairs = NULL) { body <- list(comment = comment, ip_access_list = ip_access_list, new_name = new_name, owner = owner, properties_kvpairs = properties_kvpairs) @@ -134,6 +165,10 @@ recipientsUpdate <- function(client, name, comment = NULL, ip_access_list = NULL body = body) } +#' @rdname update_recipient +#' @export +recipientsUpdate <- update_recipient + diff --git a/R/registered_models.R b/R/registered_models.R index 4d9d6f59..dab5916c 100755 --- a/R/registered_models.R +++ b/R/registered_models.R @@ -25,14 +25,19 @@ NULL #' @param schema_name Required. The name of the schema where the registered model resides. #' @param storage_location The storage location on the cloud under which model version data files are stored. #' -#' @rdname registeredModelsCreate +#' @rdname create_registered_model +#' @alias registeredModelsCreate #' @export -registeredModelsCreate <- function(client, catalog_name, schema_name, name, comment = NULL, +create_registered_model <- function(client, catalog_name, schema_name, name, comment = NULL, storage_location = NULL) { body <- list(catalog_name = catalog_name, comment = comment, name = name, schema_name = schema_name, storage_location = storage_location) client$do("POST", "/api/2.1/unity-catalog/models", body = body) } + +#' @rdname create_registered_model +#' @export +registeredModelsCreate <- create_registered_model #' Delete a Registered Model. #' #' Deletes a registered model and all its model versions from the specified @@ -46,12 +51,17 @@ registeredModelsCreate <- function(client, catalog_name, schema_name, name, comm #' #' @param full_name Required. 
The three-level (fully qualified) name of the registered model. #' -#' @rdname registeredModelsDelete +#' @rdname delete_registered_model +#' @alias registeredModelsDelete #' @export -registeredModelsDelete <- function(client, full_name) { +delete_registered_model <- function(client, full_name) { client$do("DELETE", paste("/api/2.1/unity-catalog/models/", full_name, sep = "")) } + +#' @rdname delete_registered_model +#' @export +registeredModelsDelete <- delete_registered_model #' Delete a Registered Model Alias. #' #' Deletes a registered model alias. @@ -65,13 +75,18 @@ registeredModelsDelete <- function(client, full_name) { #' @param alias Required. The name of the alias. #' @param full_name Required. The three-level (fully qualified) name of the registered model. #' -#' @rdname registeredModelsDeleteAlias +#' @rdname delete_registered_model_alias +#' @alias registeredModelsDeleteAlias #' @export -registeredModelsDeleteAlias <- function(client, full_name, alias) { +delete_registered_model_alias <- function(client, full_name, alias) { client$do("DELETE", paste("/api/2.1/unity-catalog/models/", full_name, "/aliases/", alias, sep = "")) } + +#' @rdname delete_registered_model_alias +#' @export +registeredModelsDeleteAlias <- delete_registered_model_alias #' Get a Registered Model. #' #' Get a registered model. @@ -85,13 +100,18 @@ registeredModelsDeleteAlias <- function(client, full_name, alias) { #' @param full_name Required. The three-level (fully qualified) name of the registered model. #' @param include_browse Whether to include registered models in the response for which the principal can only access selective metadata for. #' -#' @rdname registeredModelsGet +#' @rdname get_registered_model +#' @alias registeredModelsGet #' @export -registeredModelsGet <- function(client, full_name, include_browse = NULL) { +get_registered_model <- function(client, full_name, include_browse = NULL) { query <- list(include_browse = include_browse) client$do("GET", paste("/api/2.1/unity-catalog/models/", full_name, sep = ""), query = query) } + +#' @rdname get_registered_model +#' @export +registeredModelsGet <- get_registered_model #' List Registered Models. #' #' List registered models. You can list registered models under a particular @@ -116,9 +136,10 @@ registeredModelsGet <- function(client, full_name, include_browse = NULL) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname registeredModelsList +#' @rdname list_registered_models +#' @alias registeredModelsList #' @export -registeredModelsList <- function(client, catalog_name = NULL, include_browse = NULL, +list_registered_models <- function(client, catalog_name = NULL, include_browse = NULL, max_results = NULL, page_token = NULL, schema_name = NULL) { query <- list(catalog_name = catalog_name, include_browse = include_browse, max_results = max_results, page_token = page_token, schema_name = schema_name) @@ -139,6 +160,10 @@ registeredModelsList <- function(client, catalog_name = NULL, include_browse = N return(results) } + +#' @rdname list_registered_models +#' @export +registeredModelsList <- list_registered_models #' Set a Registered Model Alias. #' #' Set an alias on the specified registered model. @@ -153,13 +178,18 @@ registeredModelsList <- function(client, catalog_name = NULL, include_browse = N #' @param full_name Required. Full name of the registered model. #' @param version_num Required. The version number of the model version to which the alias points. 
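A sketch of the registered-model helpers under the new names; the catalog, schema, and model names are placeholders:

client <- DatabricksClient()
create_registered_model(client, catalog_name = "main", schema_name = "default", name = "churn")
# Point an alias at version 1 of the model, then read the model back.
set_registered_model_alias(client, full_name = "main.default.churn", alias = "champion", version_num = 1)
get_registered_model(client, full_name = "main.default.churn")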
#' -#' @rdname registeredModelsSetAlias +#' @rdname set_registered_model_alias +#' @alias registeredModelsSetAlias #' @export -registeredModelsSetAlias <- function(client, full_name, alias, version_num) { +set_registered_model_alias <- function(client, full_name, alias, version_num) { body <- list(alias = alias, full_name = full_name, version_num = version_num) client$do("PUT", paste("/api/2.1/unity-catalog/models/", full_name, "/aliases/", alias, sep = ""), body = body) } + +#' @rdname set_registered_model_alias +#' @export +registeredModelsSetAlias <- set_registered_model_alias #' Update a Registered Model. #' #' Updates the specified registered model. @@ -178,15 +208,20 @@ registeredModelsSetAlias <- function(client, full_name, alias, version_num) { #' @param new_name New name for the registered model. #' @param owner The identifier of the user who owns the registered model. #' -#' @rdname registeredModelsUpdate +#' @rdname update_registered_model +#' @alias registeredModelsUpdate #' @export -registeredModelsUpdate <- function(client, full_name, comment = NULL, new_name = NULL, +update_registered_model <- function(client, full_name, comment = NULL, new_name = NULL, owner = NULL) { body <- list(comment = comment, new_name = new_name, owner = owner) client$do("PATCH", paste("/api/2.1/unity-catalog/models/", full_name, sep = ""), body = body) } +#' @rdname update_registered_model +#' @export +registeredModelsUpdate <- update_registered_model + diff --git a/R/repos.R b/R/repos.R index 5f011ef9..5384a7a8 100755 --- a/R/repos.R +++ b/R/repos.R @@ -15,13 +15,18 @@ NULL #' @param sparse_checkout If specified, the repo will be created with sparse checkout enabled. #' @param url Required. URL of the Git repository to be linked. #' -#' @rdname reposCreate +#' @rdname create_repo +#' @alias reposCreate #' @export -reposCreate <- function(client, url, provider, path = NULL, sparse_checkout = NULL) { +create_repo <- function(client, url, provider, path = NULL, sparse_checkout = NULL) { body <- list(path = path, provider = provider, sparse_checkout = sparse_checkout, url = url) client$do("POST", "/api/2.0/repos", body = body) } + +#' @rdname create_repo +#' @export +reposCreate <- create_repo #' Delete a repo. #' #' Deletes the specified repo. @@ -29,12 +34,17 @@ reposCreate <- function(client, url, provider, path = NULL, sparse_checkout = NU #' #' @param repo_id Required. The ID for the corresponding repo to access. #' -#' @rdname reposDelete +#' @rdname delete_repo +#' @alias reposDelete #' @export -reposDelete <- function(client, repo_id) { +delete_repo <- function(client, repo_id) { client$do("DELETE", paste("/api/2.0/repos/", repo_id, sep = "")) } + +#' @rdname delete_repo +#' @export +reposDelete <- delete_repo #' Get a repo. #' #' Returns the repo with the given repo ID. @@ -42,12 +52,17 @@ reposDelete <- function(client, repo_id) { #' #' @param repo_id Required. The ID for the corresponding repo to access. #' -#' @rdname reposGet +#' @rdname get_repo +#' @alias reposGet #' @export -reposGet <- function(client, repo_id) { +get_repo <- function(client, repo_id) { client$do("GET", paste("/api/2.0/repos/", repo_id, sep = "")) } + +#' @rdname get_repo +#' @export +reposGet <- get_repo #' Get repo permission levels. #' #' Gets the permission levels that a user can have on an object. @@ -55,13 +70,18 @@ reposGet <- function(client, repo_id) { #' #' @param repo_id Required. The repo for which to get or manage permissions. 
#' -#' @rdname reposGetPermissionLevels +#' @rdname get_repo_permission_levels +#' @alias reposGetPermissionLevels #' @export -reposGetPermissionLevels <- function(client, repo_id) { +get_repo_permission_levels <- function(client, repo_id) { client$do("GET", paste("/api/2.0/permissions/repos/", repo_id, "/permissionLevels", , sep = "")) } + +#' @rdname get_repo_permission_levels +#' @export +reposGetPermissionLevels <- get_repo_permission_levels #' Get repo permissions. #' #' Gets the permissions of a repo. Repos can inherit permissions from their root @@ -70,12 +90,17 @@ reposGetPermissionLevels <- function(client, repo_id) { #' #' @param repo_id Required. The repo for which to get or manage permissions. #' -#' @rdname reposGetPermissions +#' @rdname get_repo_permissions +#' @alias reposGetPermissions #' @export -reposGetPermissions <- function(client, repo_id) { +get_repo_permissions <- function(client, repo_id) { client$do("GET", paste("/api/2.0/permissions/repos/", repo_id, sep = "")) } + +#' @rdname get_repo_permissions +#' @export +reposGetPermissions <- get_repo_permissions #' Get repos. #' #' Returns repos that the calling user has Manage permissions on. Results are @@ -87,9 +112,10 @@ reposGetPermissions <- function(client, repo_id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname reposList +#' @rdname list_repos +#' @alias reposList #' @export -reposList <- function(client, next_page_token = NULL, path_prefix = NULL) { +list_repos <- function(client, next_page_token = NULL, path_prefix = NULL) { query <- list(next_page_token = next_page_token, path_prefix = path_prefix) results <- data.frame() @@ -108,6 +134,10 @@ reposList <- function(client, next_page_token = NULL, path_prefix = NULL) { return(results) } + +#' @rdname list_repos +#' @export +reposList <- list_repos #' Set repo permissions. #' #' Sets permissions on a repo. Repos can inherit permissions from their root @@ -117,12 +147,17 @@ reposList <- function(client, next_page_token = NULL, path_prefix = NULL) { #' @param access_control_list This field has no description yet. #' @param repo_id Required. The repo for which to get or manage permissions. #' -#' @rdname reposSetPermissions +#' @rdname set_repo_permissions +#' @alias reposSetPermissions #' @export -reposSetPermissions <- function(client, repo_id, access_control_list = NULL) { +set_repo_permissions <- function(client, repo_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", paste("/api/2.0/permissions/repos/", repo_id, sep = ""), body = body) } + +#' @rdname set_repo_permissions +#' @export +reposSetPermissions <- set_repo_permissions #' Update a repo. #' #' Updates the repo to a different branch or tag, or updates the repo to the @@ -134,12 +169,17 @@ reposSetPermissions <- function(client, repo_id, access_control_list = NULL) { #' @param sparse_checkout If specified, update the sparse checkout settings. #' @param tag Tag that the local version of the repo is checked out to. 
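A sketch of the repo helpers; the URL is real, but the "gitHub" provider spelling and the id field on the create response are assumptions:

client <- DatabricksClient()
repo <- create_repo(client, url = "https://github.com/databrickslabs/databricks-sdk-r", provider = "gitHub")
# Check out a branch, then list repos the caller can manage.
update_repo(client, repo_id = repo$id, branch = "main")
list_repos(client)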
#' -#' @rdname reposUpdate +#' @rdname update_repo +#' @alias reposUpdate #' @export -reposUpdate <- function(client, repo_id, branch = NULL, sparse_checkout = NULL, tag = NULL) { +update_repo <- function(client, repo_id, branch = NULL, sparse_checkout = NULL, tag = NULL) { body <- list(branch = branch, sparse_checkout = sparse_checkout, tag = tag) client$do("PATCH", paste("/api/2.0/repos/", repo_id, sep = ""), body = body) } + +#' @rdname update_repo +#' @export +reposUpdate <- update_repo #' Update repo permissions. #' #' Updates the permissions on a repo. Repos can inherit permissions from their @@ -149,13 +189,18 @@ reposUpdate <- function(client, repo_id, branch = NULL, sparse_checkout = NULL, #' @param access_control_list This field has no description yet. #' @param repo_id Required. The repo for which to get or manage permissions. #' -#' @rdname reposUpdatePermissions +#' @rdname update_repo_permissions +#' @alias reposUpdatePermissions #' @export -reposUpdatePermissions <- function(client, repo_id, access_control_list = NULL) { +update_repo_permissions <- function(client, repo_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PATCH", paste("/api/2.0/permissions/repos/", repo_id, sep = ""), body = body) } +#' @rdname update_repo_permissions +#' @export +reposUpdatePermissions <- update_repo_permissions + diff --git a/R/restrict_workspace_admins.R b/R/restrict_workspace_admins.R index 5db17299..6cd61343 100755 --- a/R/restrict_workspace_admins.R +++ b/R/restrict_workspace_admins.R @@ -15,13 +15,18 @@ NULL #' #' @param etag etag used for versioning. #' -#' @rdname restrictWorkspaceAdminsDelete +#' @rdname delete_restrict_workspace_admin +#' @alias restrictWorkspaceAdminsDelete #' @export -restrictWorkspaceAdminsDelete <- function(client, etag = NULL) { +delete_restrict_workspace_admin <- function(client, etag = NULL) { query <- list(etag = etag) client$do("DELETE", "/api/2.0/settings/types/restrict_workspace_admins/names/default", query = query) } + +#' @rdname delete_restrict_workspace_admin +#' @export +restrictWorkspaceAdminsDelete <- delete_restrict_workspace_admin #' Get the restrict workspace admins setting. #' #' Gets the restrict workspace admins setting. @@ -29,13 +34,18 @@ restrictWorkspaceAdminsDelete <- function(client, etag = NULL) { #' #' @param etag etag used for versioning. #' -#' @rdname restrictWorkspaceAdminsGet +#' @rdname get_restrict_workspace_admin +#' @alias restrictWorkspaceAdminsGet #' @export -restrictWorkspaceAdminsGet <- function(client, etag = NULL) { +get_restrict_workspace_admin <- function(client, etag = NULL) { query <- list(etag = etag) client$do("GET", "/api/2.0/settings/types/restrict_workspace_admins/names/default", query = query) } + +#' @rdname get_restrict_workspace_admin +#' @export +restrictWorkspaceAdminsGet <- get_restrict_workspace_admin #' Update the restrict workspace admins setting. #' #' Updates the restrict workspace admins setting for the workspace. A fresh etag @@ -49,13 +59,18 @@ restrictWorkspaceAdminsGet <- function(client, etag = NULL) { #' @param field_mask Required. Field mask is required to be passed into the PATCH request. #' @param setting Required. This field has no description yet. 
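A sketch of the renamed workspace-admins setting helpers; reading the etag off the GET response is an assumption based on the documented versioning scheme:

client <- DatabricksClient()
setting <- get_restrict_workspace_admin(client)
# Reverting the setting passes the etag from the most recent read.
delete_restrict_workspace_admin(client, etag = setting$etag)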
#' -#' @rdname restrictWorkspaceAdminsUpdate +#' @rdname update_restrict_workspace_admin +#' @alias restrictWorkspaceAdminsUpdate #' @export -restrictWorkspaceAdminsUpdate <- function(client, allow_missing, setting, field_mask) { +update_restrict_workspace_admin <- function(client, allow_missing, setting, field_mask) { body <- list(allow_missing = allow_missing, field_mask = field_mask, setting = setting) client$do("PATCH", "/api/2.0/settings/types/restrict_workspace_admins/names/default", body = body) } +#' @rdname update_restrict_workspace_admin +#' @export +restrictWorkspaceAdminsUpdate <- update_restrict_workspace_admin + diff --git a/R/schemas.R b/R/schemas.R index 3d732388..a9ee08c8 100755 --- a/R/schemas.R +++ b/R/schemas.R @@ -16,14 +16,19 @@ NULL #' @param properties A map of key-value properties attached to the securable. #' @param storage_root Storage root URL for managed tables within schema. #' -#' @rdname schemasCreate +#' @rdname create_schema +#' @alias schemasCreate #' @export -schemasCreate <- function(client, name, catalog_name, comment = NULL, properties = NULL, +create_schema <- function(client, name, catalog_name, comment = NULL, properties = NULL, storage_root = NULL) { body <- list(catalog_name = catalog_name, comment = comment, name = name, properties = properties, storage_root = storage_root) client$do("POST", "/api/2.1/unity-catalog/schemas", body = body) } + +#' @rdname create_schema +#' @export +schemasCreate <- create_schema #' Delete a schema. #' #' Deletes the specified schema from the parent catalog. The caller must be the @@ -32,12 +37,17 @@ schemasCreate <- function(client, name, catalog_name, comment = NULL, properties #' #' @param full_name Required. Full name of the schema. #' -#' @rdname schemasDelete +#' @rdname delete_schema +#' @alias schemasDelete #' @export -schemasDelete <- function(client, full_name) { +delete_schema <- function(client, full_name) { client$do("DELETE", paste("/api/2.1/unity-catalog/schemas/", full_name, sep = "")) } + +#' @rdname delete_schema +#' @export +schemasDelete <- delete_schema #' Get a schema. #' #' Gets the specified schema within the metastore. The caller must be a @@ -48,13 +58,18 @@ schemasDelete <- function(client, full_name) { #' @param full_name Required. Full name of the schema. #' @param include_browse Whether to include schemas in the response for which the principal can only access selective metadata for. #' -#' @rdname schemasGet +#' @rdname get_schema +#' @alias schemasGet #' @export -schemasGet <- function(client, full_name, include_browse = NULL) { +get_schema <- function(client, full_name, include_browse = NULL) { query <- list(include_browse = include_browse) client$do("GET", paste("/api/2.1/unity-catalog/schemas/", full_name, sep = ""), query = query) } + +#' @rdname get_schema +#' @export +schemasGet <- get_schema #' List schemas. #' #' Gets an array of schemas for a catalog in the metastore. If the caller is the @@ -71,9 +86,10 @@ schemasGet <- function(client, full_name, include_browse = NULL) { #' #' @return `data.frame` with all of the response pages. 
#' -#' @rdname schemasList +#' @rdname list_schemas +#' @alias schemasList #' @export -schemasList <- function(client, catalog_name, include_browse = NULL, max_results = NULL, +list_schemas <- function(client, catalog_name, include_browse = NULL, max_results = NULL, page_token = NULL) { query <- list(catalog_name = catalog_name, include_browse = include_browse, max_results = max_results, page_token = page_token) @@ -94,6 +110,10 @@ schemasList <- function(client, catalog_name, include_browse = NULL, max_results return(results) } + +#' @rdname list_schemas +#' @export +schemasList <- list_schemas #' Update a schema. #' #' Updates a schema for a catalog. The caller must be the owner of the schema or @@ -110,9 +130,10 @@ schemasList <- function(client, catalog_name, include_browse = NULL, max_results #' @param owner Username of current owner of schema. #' @param properties A map of key-value properties attached to the securable. #' -#' @rdname schemasUpdate +#' @rdname update_schema +#' @alias schemasUpdate #' @export -schemasUpdate <- function(client, full_name, comment = NULL, enable_predictive_optimization = NULL, +update_schema <- function(client, full_name, comment = NULL, enable_predictive_optimization = NULL, new_name = NULL, owner = NULL, properties = NULL) { body <- list(comment = comment, enable_predictive_optimization = enable_predictive_optimization, new_name = new_name, owner = owner, properties = properties) @@ -120,6 +141,10 @@ schemasUpdate <- function(client, full_name, comment = NULL, enable_predictive_o body = body) } +#' @rdname update_schema +#' @export +schemasUpdate <- update_schema + diff --git a/R/secrets.R b/R/secrets.R index feb693e4..d40b9879 100755 --- a/R/secrets.R +++ b/R/secrets.R @@ -14,14 +14,19 @@ NULL #' @param scope Required. Scope name requested by the user. #' @param scope_backend_type The backend type the scope will be created with. #' -#' @rdname secretsCreateScope +#' @rdname create_secret_scope +#' @alias secretsCreateScope #' @export -secretsCreateScope <- function(client, scope, backend_azure_keyvault = NULL, initial_manage_principal = NULL, +create_secret_scope <- function(client, scope, backend_azure_keyvault = NULL, initial_manage_principal = NULL, scope_backend_type = NULL) { body <- list(backend_azure_keyvault = backend_azure_keyvault, initial_manage_principal = initial_manage_principal, scope = scope, scope_backend_type = scope_backend_type) client$do("POST", "/api/2.0/secrets/scopes/create", body = body) } + +#' @rdname create_secret_scope +#' @export +secretsCreateScope <- create_secret_scope #' Delete an ACL. #' #' Deletes the given ACL on the given scope. @@ -35,12 +40,17 @@ secretsCreateScope <- function(client, scope, backend_azure_keyvault = NULL, ini #' @param principal Required. The principal to remove an existing ACL from. #' @param scope Required. The name of the scope to remove permissions from. #' -#' @rdname secretsDeleteAcl +#' @rdname delete_secret_acl +#' @alias secretsDeleteAcl #' @export -secretsDeleteAcl <- function(client, scope, principal) { +delete_secret_acl <- function(client, scope, principal) { body <- list(principal = principal, scope = scope) client$do("POST", "/api/2.0/secrets/acls/delete", body = body) } + +#' @rdname delete_secret_acl +#' @export +secretsDeleteAcl <- delete_secret_acl #' Delete a secret scope. #' #' Deletes a secret scope. @@ -52,12 +62,17 @@ secretsDeleteAcl <- function(client, scope, principal) { #' #' @param scope Required. Name of the scope to delete. 
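A sketch of the schema helpers; catalog, schema, and owner names are placeholders:

client <- DatabricksClient()
create_schema(client, name = "bronze", catalog_name = "main", comment = "raw ingest tables")
# list_schemas() pages through results for the catalog and returns a data.frame.
schemas <- list_schemas(client, catalog_name = "main")
update_schema(client, full_name = "main.bronze", owner = "data-eng")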
#' -#' @rdname secretsDeleteScope +#' @rdname delete_secret_scope +#' @alias secretsDeleteScope #' @export -secretsDeleteScope <- function(client, scope) { +delete_secret_scope <- function(client, scope) { body <- list(scope = scope) client$do("POST", "/api/2.0/secrets/scopes/delete", body = body) } + +#' @rdname delete_secret_scope +#' @export +secretsDeleteScope <- delete_secret_scope #' Delete a secret. #' #' Deletes the secret stored in this secret scope. You must have `WRITE` or @@ -71,12 +86,17 @@ secretsDeleteScope <- function(client, scope) { #' @param key Required. Name of the secret to delete. #' @param scope Required. The name of the scope that contains the secret to delete. #' -#' @rdname secretsDeleteSecret +#' @rdname delete_secret +#' @alias secretsDeleteSecret #' @export -secretsDeleteSecret <- function(client, scope, key) { +delete_secret <- function(client, scope, key) { body <- list(key = key, scope = scope) client$do("POST", "/api/2.0/secrets/delete", body = body) } + +#' @rdname delete_secret +#' @export +secretsDeleteSecret <- delete_secret #' Get secret ACL details. #' #' Gets the details about the given ACL, such as the group and permission. Users @@ -90,12 +110,17 @@ secretsDeleteSecret <- function(client, scope, key) { #' @param principal Required. The principal to fetch ACL information for. #' @param scope Required. The name of the scope to fetch ACL information from. #' -#' @rdname secretsGetAcl +#' @rdname get_secret_acl +#' @alias secretsGetAcl #' @export -secretsGetAcl <- function(client, scope, principal) { +get_secret_acl <- function(client, scope, principal) { query <- list(principal = principal, scope = scope) client$do("GET", "/api/2.0/secrets/acls/get", query = query) } + +#' @rdname get_secret_acl +#' @export +secretsGetAcl <- get_secret_acl #' Get a secret. #' #' Gets the bytes representation of a secret value for the specified scope and @@ -115,12 +140,17 @@ secretsGetAcl <- function(client, scope, principal) { #' @param key Required. The key to fetch secret for. #' @param scope Required. The name of the scope to fetch secret information from. #' -#' @rdname secretsGetSecret +#' @rdname get_secret +#' @alias secretsGetSecret #' @export -secretsGetSecret <- function(client, scope, key) { +get_secret <- function(client, scope, key) { query <- list(key = key, scope = scope) client$do("GET", "/api/2.0/secrets/get", query = query) } + +#' @rdname get_secret +#' @export +secretsGetSecret <- get_secret #' Lists ACLs. #' #' List the ACLs for a given secret scope. Users must have the `MANAGE` @@ -135,15 +165,20 @@ secretsGetSecret <- function(client, scope, key) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname secretsListAcls +#' @rdname list_secret_acls +#' @alias secretsListAcls #' @export -secretsListAcls <- function(client, scope) { +list_secret_acls <- function(client, scope) { query <- list(scope = scope) json <- client$do("GET", "/api/2.0/secrets/acls/list", query = query) return(json$items) } + +#' @rdname list_secret_acls +#' @export +secretsListAcls <- list_secret_acls #' List all scopes. #' #' Lists all secret scopes available in the workspace. @@ -154,14 +189,19 @@ secretsListAcls <- function(client, scope) { #' #' @return `data.frame` with all of the response pages. 
#' -#' @rdname secretsListScopes +#' @rdname list_secret_scopes +#' @alias secretsListScopes #' @export -secretsListScopes <- function(client) { +list_secret_scopes <- function(client) { json <- client$do("GET", "/api/2.0/secrets/scopes/list") return(json$scopes) } + +#' @rdname list_secret_scopes +#' @export +secretsListScopes <- list_secret_scopes #' List secret keys. #' #' Lists the secret keys that are stored at this scope. This is a metadata-only @@ -178,15 +218,20 @@ secretsListScopes <- function(client) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname secretsListSecrets +#' @rdname list_secret_secrets +#' @alias secretsListSecrets #' @export -secretsListSecrets <- function(client, scope) { +list_secret_secrets <- function(client, scope) { query <- list(scope = scope) json <- client$do("GET", "/api/2.0/secrets/list", query = query) return(json$secrets) } + +#' @rdname list_secret_secrets +#' @export +secretsListSecrets <- list_secret_secrets #' Create/update an ACL. #' #' Creates or overwrites the Access Control List (ACL) associated with the given @@ -221,12 +266,17 @@ secretsListSecrets <- function(client, scope) { #' @param principal Required. The principal in which the permission is applied. #' @param scope Required. The name of the scope to apply permissions to. #' -#' @rdname secretsPutAcl +#' @rdname put_secret_acl +#' @alias secretsPutAcl #' @export -secretsPutAcl <- function(client, scope, principal, permission) { +put_secret_acl <- function(client, scope, principal, permission) { body <- list(permission = permission, principal = principal, scope = scope) client$do("POST", "/api/2.0/secrets/acls/put", body = body) } + +#' @rdname put_secret_acl +#' @export +secretsPutAcl <- put_secret_acl #' Add a secret. #' #' Inserts a secret under the provided scope with the given name. If a secret @@ -255,13 +305,18 @@ secretsPutAcl <- function(client, scope, principal, permission) { #' @param scope Required. The name of the scope to which the secret will be associated with. #' @param string_value If specified, note that the value will be stored in UTF-8 (MB4) form. #' -#' @rdname secretsPutSecret +#' @rdname put_secret +#' @alias secretsPutSecret #' @export -secretsPutSecret <- function(client, scope, key, bytes_value = NULL, string_value = NULL) { +put_secret <- function(client, scope, key, bytes_value = NULL, string_value = NULL) { body <- list(bytes_value = bytes_value, key = key, scope = scope, string_value = string_value) client$do("POST", "/api/2.0/secrets/put", body = body) } +#' @rdname put_secret +#' @export +secretsPutSecret <- put_secret + diff --git a/R/service_principals.R b/R/service_principals.R index 15d56754..650aec20 100755 --- a/R/service_principals.R +++ b/R/service_principals.R @@ -18,9 +18,10 @@ NULL #' @param roles Corresponds to AWS instance profile/arn role. #' @param schemas The schema of the List response. 
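A sketch of the secrets workflow under the new names; the scope, key, and value are placeholders:

client <- DatabricksClient()
create_secret_scope(client, scope = "etl")
put_secret(client, scope = "etl", key = "db-password", string_value = "s3cr3t")
# Metadata-only listing of keys, then the bytes representation of one value.
list_secret_secrets(client, scope = "etl")
get_secret(client, scope = "etl", key = "db-password")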
#' -#' @rdname servicePrincipalsCreate +#' @rdname create_service_principal +#' @alias servicePrincipalsCreate #' @export -servicePrincipalsCreate <- function(client, active = NULL, application_id = NULL, +create_service_principal <- function(client, active = NULL, application_id = NULL, display_name = NULL, entitlements = NULL, external_id = NULL, groups = NULL, id = NULL, roles = NULL, schemas = NULL) { body <- list(active = active, applicationId = application_id, displayName = display_name, @@ -28,6 +29,10 @@ servicePrincipalsCreate <- function(client, active = NULL, application_id = NULL roles = roles, schemas = schemas) client$do("POST", "/api/2.0/preview/scim/v2/ServicePrincipals", body = body) } + +#' @rdname create_service_principal +#' @export +servicePrincipalsCreate <- create_service_principal #' Delete a service principal. #' #' Delete a single service principal in the Databricks workspace. @@ -35,13 +40,18 @@ servicePrincipalsCreate <- function(client, active = NULL, application_id = NULL #' #' @param id Required. Unique ID for a service principal in the Databricks workspace. #' -#' @rdname servicePrincipalsDelete +#' @rdname delete_service_principal +#' @alias servicePrincipalsDelete #' @export -servicePrincipalsDelete <- function(client, id) { +delete_service_principal <- function(client, id) { client$do("DELETE", paste("/api/2.0/preview/scim/v2/ServicePrincipals/", id, sep = "")) } + +#' @rdname delete_service_principal +#' @export +servicePrincipalsDelete <- delete_service_principal #' Get service principal details. #' #' Gets the details for a single service principal define in the Databricks @@ -50,12 +60,17 @@ servicePrincipalsDelete <- function(client, id) { #' #' @param id Required. Unique ID for a service principal in the Databricks workspace. #' -#' @rdname servicePrincipalsGet +#' @rdname get_service_principal +#' @alias servicePrincipalsGet #' @export -servicePrincipalsGet <- function(client, id) { +get_service_principal <- function(client, id) { client$do("GET", paste("/api/2.0/preview/scim/v2/ServicePrincipals/", id, sep = "")) } + +#' @rdname get_service_principal +#' @export +servicePrincipalsGet <- get_service_principal #' List service principals. #' #' Gets the set of service principals associated with a Databricks workspace. @@ -71,9 +86,10 @@ servicePrincipalsGet <- function(client, id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname servicePrincipalsList +#' @rdname list_service_principals +#' @alias servicePrincipalsList #' @export -servicePrincipalsList <- function(client, attributes = NULL, count = NULL, excluded_attributes = NULL, +list_service_principals <- function(client, attributes = NULL, count = NULL, excluded_attributes = NULL, filter = NULL, sort_by = NULL, sort_order = NULL, start_index = NULL) { query <- list(attributes = attributes, count = count, excludedAttributes = excluded_attributes, filter = filter, sortBy = sort_by, sortOrder = sort_order, startIndex = start_index) @@ -94,6 +110,10 @@ servicePrincipalsList <- function(client, attributes = NULL, count = NULL, exclu return(results) } + +#' @rdname list_service_principals +#' @export +servicePrincipalsList <- list_service_principals #' Update service principal details. #' #' Partially updates the details of a single service principal in the Databricks @@ -104,13 +124,18 @@ servicePrincipalsList <- function(client, attributes = NULL, count = NULL, exclu #' @param operations This field has no description yet. #' @param schemas The schema of the patch request. 
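A sketch of the service-principal helpers; the display name is a placeholder, and the filter string assumes standard SCIM filter syntax:

client <- DatabricksClient()
create_service_principal(client, display_name = "ci-runner")
list_service_principals(client, filter = "displayName eq 'ci-runner'")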
#' -#' @rdname servicePrincipalsPatch +#' @rdname patch_service_principal +#' @alias servicePrincipalsPatch #' @export -servicePrincipalsPatch <- function(client, id, operations = NULL, schemas = NULL) { +patch_service_principal <- function(client, id, operations = NULL, schemas = NULL) { body <- list(Operations = operations, schemas = schemas) client$do("PATCH", paste("/api/2.0/preview/scim/v2/ServicePrincipals/", id, sep = ""), body = body) } + +#' @rdname patch_service_principal +#' @export +servicePrincipalsPatch <- patch_service_principal #' Replace service principal. #' #' Updates the details of a single service principal. @@ -128,9 +153,10 @@ servicePrincipalsPatch <- function(client, id, operations = NULL, schemas = NULL #' @param roles Corresponds to AWS instance profile/arn role. #' @param schemas The schema of the List response. #' -#' @rdname servicePrincipalsUpdate +#' @rdname update_service_principal +#' @alias servicePrincipalsUpdate #' @export -servicePrincipalsUpdate <- function(client, id, active = NULL, application_id = NULL, +update_service_principal <- function(client, id, active = NULL, application_id = NULL, display_name = NULL, entitlements = NULL, external_id = NULL, groups = NULL, roles = NULL, schemas = NULL) { body <- list(active = active, applicationId = application_id, displayName = display_name, @@ -140,6 +166,10 @@ servicePrincipalsUpdate <- function(client, id, active = NULL, application_id = body = body) } +#' @rdname update_service_principal +#' @export +servicePrincipalsUpdate <- update_service_principal + diff --git a/R/serving_endpoints.R b/R/serving_endpoints.R index c05d1862..111d56e4 100755 --- a/R/serving_endpoints.R +++ b/R/serving_endpoints.R @@ -11,13 +11,18 @@ NULL #' @param name Required. The name of the serving endpoint that the served model belongs to. #' @param served_model_name Required. The name of the served model that build logs will be retrieved for. #' -#' @rdname servingEndpointsBuildLogs +#' @rdname build_serving_endpoint_logs +#' @alias servingEndpointsBuildLogs #' @export -servingEndpointsBuildLogs <- function(client, name, served_model_name) { +build_serving_endpoint_logs <- function(client, name, served_model_name) { client$do("GET", paste("/api/2.0/serving-endpoints/", name, "/served-models/", served_model_name, "/build-logs", , sep = "")) } + +#' @rdname build_serving_endpoint_logs +#' @export +servingEndpointsBuildLogs <- build_serving_endpoint_logs #' Create a new serving endpoint. #' @param client Required. Instance of DatabricksClient() #' @@ -26,23 +31,33 @@ servingEndpointsBuildLogs <- function(client, name, served_model_name) { #' @param rate_limits Rate limits to be applied to the serving endpoint. #' @param tags Tags to be attached to the serving endpoint and automatically propagated to billing logs. #' -#' @rdname servingEndpointsCreate +#' @rdname create_serving_endpoint +#' @alias servingEndpointsCreate #' @export -servingEndpointsCreate <- function(client, name, config, rate_limits = NULL, tags = NULL) { +create_serving_endpoint <- function(client, name, config, rate_limits = NULL, tags = NULL) { body <- list(config = config, name = name, rate_limits = rate_limits, tags = tags) client$do("POST", "/api/2.0/serving-endpoints", body = body) } + +#' @rdname create_serving_endpoint +#' @export +servingEndpointsCreate <- create_serving_endpoint #' Delete a serving endpoint. #' @param client Required. Instance of DatabricksClient() #' #' @param name Required. The name of the serving endpoint. 
#' -#' @rdname servingEndpointsDelete +#' @rdname delete_serving_endpoint +#' @alias servingEndpointsDelete #' @export -servingEndpointsDelete <- function(client, name) { +delete_serving_endpoint <- function(client, name) { client$do("DELETE", paste("/api/2.0/serving-endpoints/", name, sep = "")) } + +#' @rdname delete_serving_endpoint +#' @export +servingEndpointsDelete <- delete_serving_endpoint #' Get metrics of a serving endpoint. #' #' Retrieves the metrics associated with the provided serving endpoint in either @@ -51,12 +66,17 @@ servingEndpointsDelete <- function(client, name) { #' #' @param name Required. The name of the serving endpoint to retrieve metrics for. #' -#' @rdname servingEndpointsExportMetrics +#' @rdname export_serving_endpoint_metrics +#' @alias servingEndpointsExportMetrics #' @export -servingEndpointsExportMetrics <- function(client, name) { +export_serving_endpoint_metrics <- function(client, name) { client$do("GET", paste("/api/2.0/serving-endpoints/", name, "/metrics", , sep = "")) } + +#' @rdname export_serving_endpoint_metrics +#' @export +servingEndpointsExportMetrics <- export_serving_endpoint_metrics #' Get a single serving endpoint. #' #' Retrieves the details for a single serving endpoint. @@ -64,12 +84,17 @@ servingEndpointsExportMetrics <- function(client, name) { #' #' @param name Required. The name of the serving endpoint. #' -#' @rdname servingEndpointsGet +#' @rdname get_serving_endpoint +#' @alias servingEndpointsGet #' @export -servingEndpointsGet <- function(client, name) { +get_serving_endpoint <- function(client, name) { client$do("GET", paste("/api/2.0/serving-endpoints/", name, sep = "")) } + +#' @rdname get_serving_endpoint +#' @export +servingEndpointsGet <- get_serving_endpoint #' Get serving endpoint permission levels. #' #' Gets the permission levels that a user can have on an object. @@ -77,13 +102,18 @@ servingEndpointsGet <- function(client, name) { #' #' @param serving_endpoint_id Required. The serving endpoint for which to get or manage permissions. #' -#' @rdname servingEndpointsGetPermissionLevels +#' @rdname get_serving_endpoint_permission_levels +#' @alias servingEndpointsGetPermissionLevels #' @export -servingEndpointsGetPermissionLevels <- function(client, serving_endpoint_id) { +get_serving_endpoint_permission_levels <- function(client, serving_endpoint_id) { client$do("GET", paste("/api/2.0/permissions/serving-endpoints/", serving_endpoint_id, "/permissionLevels", , sep = "")) } + +#' @rdname get_serving_endpoint_permission_levels +#' @export +servingEndpointsGetPermissionLevels <- get_serving_endpoint_permission_levels #' Get serving endpoint permissions. #' #' Gets the permissions of a serving endpoint. Serving endpoints can inherit @@ -92,26 +122,36 @@ servingEndpointsGetPermissionLevels <- function(client, serving_endpoint_id) { #' #' @param serving_endpoint_id Required. The serving endpoint for which to get or manage permissions. #' -#' @rdname servingEndpointsGetPermissions +#' @rdname get_serving_endpoint_permissions +#' @alias servingEndpointsGetPermissions #' @export -servingEndpointsGetPermissions <- function(client, serving_endpoint_id) { +get_serving_endpoint_permissions <- function(client, serving_endpoint_id) { client$do("GET", paste("/api/2.0/permissions/serving-endpoints/", serving_endpoint_id, sep = "")) } + +#' @rdname get_serving_endpoint_permissions +#' @export +servingEndpointsGetPermissions <- get_serving_endpoint_permissions #' Get all serving endpoints. #' @param client Required. 
Instance of DatabricksClient() #' #' @return `data.frame` with all of the response pages. #' -#' @rdname servingEndpointsList +#' @rdname list_serving_endpoints +#' @alias servingEndpointsList #' @export -servingEndpointsList <- function(client) { +list_serving_endpoints <- function(client) { json <- client$do("GET", "/api/2.0/serving-endpoints") return(json$endpoints) } + +#' @rdname list_serving_endpoints +#' @export +servingEndpointsList <- list_serving_endpoints #' Get the latest logs for a served model. #' #' Retrieves the service logs associated with the provided served model. @@ -120,13 +160,18 @@ servingEndpointsList <- function(client) { #' @param name Required. The name of the serving endpoint that the served model belongs to. #' @param served_model_name Required. The name of the served model that logs will be retrieved for. #' -#' @rdname servingEndpointsLogs +#' @rdname logs_serving_endpoint +#' @alias servingEndpointsLogs #' @export -servingEndpointsLogs <- function(client, name, served_model_name) { +logs_serving_endpoint <- function(client, name, served_model_name) { client$do("GET", paste("/api/2.0/serving-endpoints/", name, "/served-models/", served_model_name, "/logs", , sep = "")) } + +#' @rdname logs_serving_endpoint +#' @export +servingEndpointsLogs <- logs_serving_endpoint #' Update tags of a serving endpoint. #' #' Used to batch add and delete tags from a serving endpoint with a single API @@ -137,13 +182,18 @@ servingEndpointsLogs <- function(client, name, served_model_name) { #' @param delete_tags List of tag keys to delete. #' @param name Required. The name of the serving endpoint who's tags to patch. #' -#' @rdname servingEndpointsPatch +#' @rdname patch_serving_endpoint +#' @alias servingEndpointsPatch #' @export -servingEndpointsPatch <- function(client, name, add_tags = NULL, delete_tags = NULL) { +patch_serving_endpoint <- function(client, name, add_tags = NULL, delete_tags = NULL) { body <- list(add_tags = add_tags, delete_tags = delete_tags) client$do("PATCH", paste("/api/2.0/serving-endpoints/", name, "/tags", , sep = ""), body = body) } + +#' @rdname patch_serving_endpoint +#' @export +servingEndpointsPatch <- patch_serving_endpoint #' Update rate limits of a serving endpoint. #' #' Used to update the rate limits of a serving endpoint. NOTE: only external and @@ -153,13 +203,18 @@ servingEndpointsPatch <- function(client, name, add_tags = NULL, delete_tags = N #' @param name Required. The name of the serving endpoint whose rate limits are being updated. #' @param rate_limits The list of endpoint rate limits. #' -#' @rdname servingEndpointsPut +#' @rdname put_serving_endpoint +#' @alias servingEndpointsPut #' @export -servingEndpointsPut <- function(client, name, rate_limits = NULL) { +put_serving_endpoint <- function(client, name, rate_limits = NULL) { body <- list(rate_limits = rate_limits) client$do("PUT", paste("/api/2.0/serving-endpoints/", name, "/rate-limits", , sep = ""), body = body) } + +#' @rdname put_serving_endpoint +#' @export +servingEndpointsPut <- put_serving_endpoint #' Query a serving endpoint. #' @param client Required. Instance of DatabricksClient() #' @@ -178,9 +233,10 @@ servingEndpointsPut <- function(client, name, rate_limits = NULL) { #' @param stream The stream field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. #' @param temperature The temperature field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints. 
#' -#' @rdname servingEndpointsQuery +#' @rdname query_serving_endpoint +#' @alias servingEndpointsQuery #' @export -servingEndpointsQuery <- function(client, name, dataframe_records = NULL, dataframe_split = NULL, +query_serving_endpoint <- function(client, name, dataframe_records = NULL, dataframe_split = NULL, extra_params = NULL, input = NULL, inputs = NULL, instances = NULL, max_tokens = NULL, messages = NULL, n = NULL, prompt = NULL, stop = NULL, stream = NULL, temperature = NULL) { body <- list(dataframe_records = dataframe_records, dataframe_split = dataframe_split, @@ -190,6 +246,10 @@ servingEndpointsQuery <- function(client, name, dataframe_records = NULL, datafr client$do("POST", paste("/serving-endpoints/", name, "/invocations", , sep = ""), body = body) } + +#' @rdname query_serving_endpoint +#' @export +servingEndpointsQuery <- query_serving_endpoint #' Set serving endpoint permissions. #' #' Sets permissions on a serving endpoint. Serving endpoints can inherit @@ -199,13 +259,18 @@ servingEndpointsQuery <- function(client, name, dataframe_records = NULL, datafr #' @param access_control_list This field has no description yet. #' @param serving_endpoint_id Required. The serving endpoint for which to get or manage permissions. #' -#' @rdname servingEndpointsSetPermissions +#' @rdname set_serving_endpoint_permissions +#' @alias servingEndpointsSetPermissions #' @export -servingEndpointsSetPermissions <- function(client, serving_endpoint_id, access_control_list = NULL) { +set_serving_endpoint_permissions <- function(client, serving_endpoint_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", paste("/api/2.0/permissions/serving-endpoints/", serving_endpoint_id, sep = ""), body = body) } + +#' @rdname set_serving_endpoint_permissions +#' @export +servingEndpointsSetPermissions <- set_serving_endpoint_permissions #' Update config of a serving endpoint. #' #' Updates any combination of the serving endpoint's served entities, the @@ -220,15 +285,20 @@ servingEndpointsSetPermissions <- function(client, serving_endpoint_id, access_c #' @param served_models (Deprecated, use served_entities instead) A list of served models for the endpoint to serve. #' @param traffic_config The traffic config defining how invocations to the serving endpoint should be routed. #' -#' @rdname servingEndpointsUpdateConfig +#' @rdname update_serving_endpoint_config +#' @alias servingEndpointsUpdateConfig #' @export -servingEndpointsUpdateConfig <- function(client, name, auto_capture_config = NULL, +update_serving_endpoint_config <- function(client, name, auto_capture_config = NULL, served_entities = NULL, served_models = NULL, traffic_config = NULL) { body <- list(auto_capture_config = auto_capture_config, served_entities = served_entities, served_models = served_models, traffic_config = traffic_config) client$do("PUT", paste("/api/2.0/serving-endpoints/", name, "/config", , sep = ""), body = body) } + +#' @rdname update_serving_endpoint_config +#' @export +servingEndpointsUpdateConfig <- update_serving_endpoint_config #' Update serving endpoint permissions. #' #' Updates the permissions on a serving endpoint. Serving endpoints can inherit @@ -238,14 +308,19 @@ servingEndpointsUpdateConfig <- function(client, name, auto_capture_config = NUL #' @param access_control_list This field has no description yet. #' @param serving_endpoint_id Required. The serving endpoint for which to get or manage permissions. 
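A sketch of querying a chat-style endpoint through the renamed wrapper; the endpoint name and the message shape are assumptions:

client <- DatabricksClient()
query_serving_endpoint(client, name = "chat-assistant",
  messages = list(list(role = "user", content = "Summarize the last run")),
  max_tokens = 256)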
#' -#' @rdname servingEndpointsUpdatePermissions +#' @rdname update_serving_endpoint_permissions +#' @alias servingEndpointsUpdatePermissions #' @export -servingEndpointsUpdatePermissions <- function(client, serving_endpoint_id, access_control_list = NULL) { +update_serving_endpoint_permissions <- function(client, serving_endpoint_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PATCH", paste("/api/2.0/permissions/serving-endpoints/", serving_endpoint_id, sep = ""), body = body) } +#' @rdname update_serving_endpoint_permissions +#' @export +servingEndpointsUpdatePermissions <- update_serving_endpoint_permissions + #' Create a new serving endpoint. #' @param client Required. Instance of DatabricksClient() @@ -263,9 +338,9 @@ servingEndpointsUpdatePermissions <- function(client, serving_endpoint_id, acces #' @param rate_limits Rate limits to be applied to the serving endpoint. #' @param tags Tags to be attached to the serving endpoint and automatically propagated to billing logs. #' -#' @rdname servingEndpointsCreateAndWait +#' @rdname create_serving_endpoint_and_wait #' @export -servingEndpointsCreateAndWait <- function(client, name, config, rate_limits = NULL, +create_serving_endpoint_and_wait <- function(client, name, config, rate_limits = NULL, tags = NULL, timeout = 20, callback = cli_reporter) { body <- list(config = config, name = name, rate_limits = rate_limits, tags = tags) op_response <- client$do("POST", "/api/2.0/serving-endpoints", body = body) @@ -340,9 +415,9 @@ servingEndpointsCreateAndWait <- function(client, name, config, rate_limits = NU #' @param served_models (Deprecated, use served_entities instead) A list of served models for the endpoint to serve. #' @param traffic_config The traffic config defining how invocations to the serving endpoint should be routed. #' -#' @rdname servingEndpointsUpdateConfigAndWait +#' @rdname update_serving_endpoint_config_and_wait #' @export -servingEndpointsUpdateConfigAndWait <- function(client, name, auto_capture_config = NULL, +update_serving_endpoint_config_and_wait <- function(client, name, auto_capture_config = NULL, served_entities = NULL, served_models = NULL, traffic_config = NULL, timeout = 20, callback = cli_reporter) { body <- list(auto_capture_config = auto_capture_config, served_entities = served_entities, diff --git a/R/shares.R b/R/shares.R index 422a47a5..9cc5c112 100755 --- a/R/shares.R +++ b/R/shares.R @@ -13,12 +13,17 @@ NULL #' @param comment User-provided free-form text description. #' @param name Required. Name of the share. #' -#' @rdname sharesCreate +#' @rdname create_share +#' @alias sharesCreate #' @export -sharesCreate <- function(client, name, comment = NULL) { +create_share <- function(client, name, comment = NULL) { body <- list(comment = comment, name = name) client$do("POST", "/api/2.1/unity-catalog/shares", body = body) } + +#' @rdname create_share +#' @export +sharesCreate <- create_share #' Delete a share. #' #' Deletes a data object share from the metastore. The caller must be an owner @@ -27,12 +32,17 @@ sharesCreate <- function(client, name, comment = NULL) { #' #' @param name Required. The name of the share. #' -#' @rdname sharesDelete +#' @rdname delete_share +#' @alias sharesDelete #' @export -sharesDelete <- function(client, name) { +delete_share <- function(client, name) { client$do("DELETE", paste("/api/2.1/unity-catalog/shares/", name, sep = "")) } + +#' @rdname delete_share +#' @export +sharesDelete <- delete_share #' Get a share. 
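A sketch of the blocking variant; the served-entities config shape is an assumption, and the call reports progress via the default cli_reporter callback until the endpoint is ready or the timeout elapses:

client <- DatabricksClient()
# Hypothetical config serving version 1 of a Unity Catalog model.
config <- list(served_entities = list(list(entity_name = "main.default.churn",
  entity_version = "1", workload_size = "Small", scale_to_zero_enabled = TRUE)))
create_serving_endpoint_and_wait(client, name = "churn-scorer", config = config)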
#' #' Gets a data object share from the metastore. The caller must be a metastore @@ -42,12 +52,17 @@ sharesDelete <- function(client, name) { #' @param include_shared_data Query for data to include in the share. #' @param name Required. The name of the share. #' -#' @rdname sharesGet +#' @rdname get_share +#' @alias sharesGet #' @export -sharesGet <- function(client, name, include_shared_data = NULL) { +get_share <- function(client, name, include_shared_data = NULL) { query <- list(include_shared_data = include_shared_data) client$do("GET", paste("/api/2.1/unity-catalog/shares/", name, sep = ""), query = query) } + +#' @rdname get_share +#' @export +sharesGet <- get_share #' List shares. #' #' Gets an array of data object shares from the metastore. The caller must be a @@ -57,14 +72,19 @@ sharesGet <- function(client, name, include_shared_data = NULL) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname sharesList +#' @rdname list_shares +#' @alias sharesList #' @export -sharesList <- function(client) { +list_shares <- function(client) { json <- client$do("GET", "/api/2.1/unity-catalog/shares") return(json$shares) } + +#' @rdname list_shares +#' @export +sharesList <- list_shares #' Get permissions. #' #' Gets the permissions for a data share from the metastore. The caller must be @@ -73,13 +93,18 @@ sharesList <- function(client) { #' #' @param name Required. The name of the share. #' -#' @rdname sharesSharePermissions +#' @rdname share_permissions +#' @alias sharesSharePermissions #' @export -sharesSharePermissions <- function(client, name) { +share_permissions <- function(client, name) { client$do("GET", paste("/api/2.1/unity-catalog/shares/", name, "/permissions", , sep = "")) } + +#' @rdname share_permissions +#' @export +sharesSharePermissions <- share_permissions #' Update a share. #' #' Updates the share with the changes and data objects in the request. The @@ -105,13 +130,18 @@ sharesSharePermissions <- function(client, name) { #' @param owner Username of current owner of share. #' @param updates Array of shared data object updates. #' -#' @rdname sharesUpdate +#' @rdname update_share +#' @alias sharesUpdate #' @export -sharesUpdate <- function(client, name, comment = NULL, new_name = NULL, owner = NULL, +update_share <- function(client, name, comment = NULL, new_name = NULL, owner = NULL, updates = NULL) { body <- list(comment = comment, new_name = new_name, owner = owner, updates = updates) client$do("PATCH", paste("/api/2.1/unity-catalog/shares/", name, sep = ""), body = body) } + +#' @rdname update_share +#' @export +sharesUpdate <- update_share #' Update permissions. #' #' Updates the permissions for a data share in the metastore. The caller must be @@ -124,14 +154,19 @@ sharesUpdate <- function(client, name, comment = NULL, new_name = NULL, owner = #' @param changes Array of permission changes. #' @param name Required. The name of the share. 
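A sketch of the share helpers; the share name and owner are placeholders:

client <- DatabricksClient()
create_share(client, name = "quarterly-metrics", comment = "partner-facing share")
update_share(client, name = "quarterly-metrics", owner = "data-eng")
share_permissions(client, name = "quarterly-metrics")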
#' -#' @rdname sharesUpdatePermissions +#' @rdname update_share_permissions +#' @alias sharesUpdatePermissions #' @export -sharesUpdatePermissions <- function(client, name, changes = NULL) { +update_share_permissions <- function(client, name, changes = NULL) { body <- list(changes = changes) client$do("PATCH", paste("/api/2.1/unity-catalog/shares/", name, "/permissions", , sep = ""), body = body) } +#' @rdname update_share_permissions +#' @export +sharesUpdatePermissions <- update_share_permissions + diff --git a/R/statement_execution.R b/R/statement_execution.R index 40b8177d..c1992878 100755 --- a/R/statement_execution.R +++ b/R/statement_execution.R @@ -11,13 +11,18 @@ NULL #' #' @param statement_id Required. The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls. #' -#' @rdname statementExecutionCancelExecution +#' @rdname cancel_statement_execution +#' @alias statementExecutionCancelExecution #' @export -statementExecutionCancelExecution <- function(client, statement_id) { +cancel_statement_execution <- function(client, statement_id) { client$do("POST", paste("/api/2.0/sql/statements/", statement_id, "/cancel", , sep = "")) } + +#' @rdname cancel_statement_execution +#' @export +statementExecutionCancelExecution <- cancel_statement_execution #' Execute a SQL statement. #' @param client Required. Instance of DatabricksClient() #' @@ -33,9 +38,10 @@ statementExecutionCancelExecution <- function(client, statement_id) { #' @param wait_timeout The time in seconds the call will wait for the statement's result set as `Ns`, where `N` can be set to 0 or to a value between 5 and 50. #' @param warehouse_id Required. Warehouse upon which to execute a statement. #' -#' @rdname statementExecutionExecuteStatement +#' @rdname execute_statement +#' @alias statementExecutionExecuteStatement #' @export -statementExecutionExecuteStatement <- function(client, statement, warehouse_id, byte_limit = NULL, +execute_statement <- function(client, statement, warehouse_id, byte_limit = NULL, catalog = NULL, disposition = NULL, format = NULL, on_wait_timeout = NULL, parameters = NULL, row_limit = NULL, schema = NULL, wait_timeout = NULL) { body <- list(byte_limit = byte_limit, catalog = catalog, disposition = disposition, @@ -44,6 +50,10 @@ statementExecutionExecuteStatement <- function(client, statement, warehouse_id, warehouse_id = warehouse_id) client$do("POST", "/api/2.0/sql/statements/", body = body) } + +#' @rdname execute_statement +#' @export +statementExecutionExecuteStatement <- execute_statement #' Get status, manifest, and result first chunk. #' #' This request can be used to poll for the statement's status. When the @@ -59,12 +69,17 @@ statementExecutionExecuteStatement <- function(client, statement, warehouse_id, #' #' @param statement_id Required. The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls. #' -#' @rdname statementExecutionGetStatement +#' @rdname get_statement_execution +#' @alias statementExecutionGetStatement #' @export -statementExecutionGetStatement <- function(client, statement_id) { +get_statement_execution <- function(client, statement_id) { client$do("GET", paste("/api/2.0/sql/statements/", statement_id, sep = "")) } + +#' @rdname get_statement_execution +#' @export +statementExecutionGetStatement <- get_statement_execution #' Get result chunk by index. 
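A sketch of the statement-execution flow; the warehouse id is a placeholder, and reading statement_id off the response follows the documented contract that submission returns a statement ID:

client <- DatabricksClient()
res <- execute_statement(client, statement = "SELECT 1 AS one",
  warehouse_id = "<warehouse-id>", wait_timeout = "30s")
# Poll status (and, once available, result chunks) by statement ID.
get_statement_execution(client, statement_id = res$statement_id)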
#' #' After the statement execution has `SUCCEEDED`, this request can be used to @@ -80,14 +95,19 @@ statementExecutionGetStatement <- function(client, statement_id) { #' @param chunk_index Required. This field has no description yet. #' @param statement_id Required. The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls. #' -#' @rdname statementExecutionGetStatementResultChunkN +#' @rdname get_statement_execution_result_chunk_n +#' @alias statementExecutionGetStatementResultChunkN #' @export -statementExecutionGetStatementResultChunkN <- function(client, statement_id, chunk_index) { +get_statement_execution_result_chunk_n <- function(client, statement_id, chunk_index) { client$do("GET", paste("/api/2.0/sql/statements/", statement_id, "/result/chunks/", chunk_index, sep = "")) } +#' @rdname get_statement_execution_result_chunk_n +#' @export +statementExecutionGetStatementResultChunkN <- get_statement_execution_result_chunk_n + diff --git a/R/storage_credentials.R b/R/storage_credentials.R index 59da4547..6c268633 100755 --- a/R/storage_credentials.R +++ b/R/storage_credentials.R @@ -18,9 +18,10 @@ NULL #' @param read_only Whether the storage credential is only usable for read operations. #' @param skip_validation Supplying true to this argument skips validation of the created credential. #' -#' @rdname storageCredentialsCreate +#' @rdname create_storage_credential +#' @alias storageCredentialsCreate #' @export -storageCredentialsCreate <- function(client, name, aws_iam_role = NULL, azure_managed_identity = NULL, +create_storage_credential <- function(client, name, aws_iam_role = NULL, azure_managed_identity = NULL, azure_service_principal = NULL, cloudflare_api_token = NULL, comment = NULL, databricks_gcp_service_account = NULL, read_only = NULL, skip_validation = NULL) { body <- list(aws_iam_role = aws_iam_role, azure_managed_identity = azure_managed_identity, @@ -29,6 +30,10 @@ storageCredentialsCreate <- function(client, name, aws_iam_role = NULL, azure_ma name = name, read_only = read_only, skip_validation = skip_validation) client$do("POST", "/api/2.1/unity-catalog/storage-credentials", body = body) } + +#' @rdname create_storage_credential +#' @export +storageCredentialsCreate <- create_storage_credential #' Delete a credential. #' #' Deletes a storage credential from the metastore. The caller must be an owner @@ -38,13 +43,18 @@ storageCredentialsCreate <- function(client, name, aws_iam_role = NULL, azure_ma #' @param force Force deletion even if there are dependent external locations or external tables. #' @param name Required. Name of the storage credential. #' -#' @rdname storageCredentialsDelete +#' @rdname delete_storage_credential +#' @alias storageCredentialsDelete #' @export -storageCredentialsDelete <- function(client, name, force = NULL) { +delete_storage_credential <- function(client, name, force = NULL) { query <- list(force = force) client$do("DELETE", paste("/api/2.1/unity-catalog/storage-credentials/", name, sep = ""), query = query) } + +#' @rdname delete_storage_credential +#' @export +storageCredentialsDelete <- delete_storage_credential #' Get a credential. #' #' Gets a storage credential from the metastore. The caller must be a metastore @@ -54,12 +64,17 @@ storageCredentialsDelete <- function(client, name, force = NULL) { #' #' @param name Required. Name of the storage credential. 
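The statement-execution entry points pair naturally: `execute_statement()` submits and `get_statement_execution()` polls. A sketch of that loop, with the warehouse ID as a placeholder and the response fields assumed to mirror the REST payload (`statement_id`, `status$state`):

client <- DatabricksClient()

resp <- execute_statement(client, statement = "SELECT 1 AS x",
  warehouse_id = "<warehouse-id>", wait_timeout = "0s")

# Poll until the statement leaves PENDING/RUNNING; calling
# cancel_statement_execution() here instead would abort it.
repeat {
  st <- get_statement_execution(client, resp$statement_id)
  if (!st$status$state %in% c("PENDING", "RUNNING")) break
  Sys.sleep(1)
}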
#' -#' @rdname storageCredentialsGet +#' @rdname get_storage_credential +#' @alias storageCredentialsGet #' @export -storageCredentialsGet <- function(client, name) { +get_storage_credential <- function(client, name) { client$do("GET", paste("/api/2.1/unity-catalog/storage-credentials/", name, sep = "")) } + +#' @rdname get_storage_credential +#' @export +storageCredentialsGet <- get_storage_credential #' List credentials. #' #' Gets an array of storage credentials (as __StorageCredentialInfo__ objects). @@ -74,9 +89,10 @@ storageCredentialsGet <- function(client, name) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname storageCredentialsList +#' @rdname list_storage_credentials +#' @alias storageCredentialsList #' @export -storageCredentialsList <- function(client, max_results = NULL, page_token = NULL) { +list_storage_credentials <- function(client, max_results = NULL, page_token = NULL) { query <- list(max_results = max_results, page_token = page_token) results <- data.frame() @@ -95,6 +111,10 @@ storageCredentialsList <- function(client, max_results = NULL, page_token = NULL return(results) } + +#' @rdname list_storage_credentials +#' @export +storageCredentialsList <- list_storage_credentials #' Update a credential. #' #' Updates a storage credential on the metastore. @@ -113,9 +133,10 @@ storageCredentialsList <- function(client, max_results = NULL, page_token = NULL #' @param read_only Whether the storage credential is only usable for read operations. #' @param skip_validation Supplying true to this argument skips validation of the updated credential. #' -#' @rdname storageCredentialsUpdate +#' @rdname update_storage_credential +#' @alias storageCredentialsUpdate #' @export -storageCredentialsUpdate <- function(client, name, aws_iam_role = NULL, azure_managed_identity = NULL, +update_storage_credential <- function(client, name, aws_iam_role = NULL, azure_managed_identity = NULL, azure_service_principal = NULL, cloudflare_api_token = NULL, comment = NULL, databricks_gcp_service_account = NULL, force = NULL, new_name = NULL, owner = NULL, read_only = NULL, skip_validation = NULL) { @@ -127,6 +148,10 @@ storageCredentialsUpdate <- function(client, name, aws_iam_role = NULL, azure_ma client$do("PATCH", paste("/api/2.1/unity-catalog/storage-credentials/", name, sep = ""), body = body) } + +#' @rdname update_storage_credential +#' @export +storageCredentialsUpdate <- update_storage_credential #' Validate a storage credential. #' #' Validates a storage credential. At least one of __external_location_name__ @@ -153,9 +178,10 @@ storageCredentialsUpdate <- function(client, name, aws_iam_role = NULL, azure_ma #' @param storage_credential_name The name of the storage credential to validate. #' @param url The external location url to validate. 
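A sketch of the renamed storage-credential lifecycle, assuming `aws_iam_role` takes a named list mirroring the REST payload; the credential name and role ARN are placeholders:

client <- DatabricksClient()

create_storage_credential(client, name = "my_cred",
  aws_iam_role = list(role_arn = "arn:aws:iam::123456789012:role/my-role"),
  comment = "created from R")
get_storage_credential(client, "my_cred")

# list_storage_credentials() walks page_token internally and returns a
# single data.frame.
creds <- list_storage_credentials(client)
update_storage_credential(client, "my_cred", comment = "rotated")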
#' -#' @rdname storageCredentialsValidate +#' @rdname validate_storage_credential +#' @alias storageCredentialsValidate #' @export -storageCredentialsValidate <- function(client, aws_iam_role = NULL, azure_managed_identity = NULL, +validate_storage_credential <- function(client, aws_iam_role = NULL, azure_managed_identity = NULL, azure_service_principal = NULL, cloudflare_api_token = NULL, databricks_gcp_service_account = NULL, external_location_name = NULL, read_only = NULL, storage_credential_name = NULL, url = NULL) { @@ -167,6 +193,10 @@ storageCredentialsValidate <- function(client, aws_iam_role = NULL, azure_manage client$do("POST", "/api/2.1/unity-catalog/validate-storage-credentials", body = body) } +#' @rdname validate_storage_credential +#' @export +storageCredentialsValidate <- validate_storage_credential + diff --git a/R/system_schemas.R b/R/system_schemas.R index fdc69ca3..bc8cb6fd 100755 --- a/R/system_schemas.R +++ b/R/system_schemas.R @@ -12,13 +12,18 @@ NULL #' @param metastore_id Required. The metastore ID under which the system schema lives. #' @param schema_name Required. Full name of the system schema. #' -#' @rdname systemSchemasDisable +#' @rdname disable_system_schema +#' @alias systemSchemasDisable #' @export -systemSchemasDisable <- function(client, metastore_id, schema_name) { +disable_system_schema <- function(client, metastore_id, schema_name) { client$do("DELETE", paste("/api/2.1/unity-catalog/metastores/", metastore_id, "/systemschemas/", schema_name, sep = "")) } + +#' @rdname disable_system_schema +#' @export +systemSchemasDisable <- disable_system_schema #' Enable a system schema. #' #' Enables the system schema and adds it to the system catalog. The caller must @@ -28,13 +33,18 @@ systemSchemasDisable <- function(client, metastore_id, schema_name) { #' @param metastore_id Required. The metastore ID under which the system schema lives. #' @param schema_name Required. Full name of the system schema. #' -#' @rdname systemSchemasEnable +#' @rdname enable_system_schema +#' @alias systemSchemasEnable #' @export -systemSchemasEnable <- function(client, metastore_id, schema_name) { +enable_system_schema <- function(client, metastore_id, schema_name) { client$do("PUT", paste("/api/2.1/unity-catalog/metastores/", metastore_id, "/systemschemas/", schema_name, sep = "")) } + +#' @rdname enable_system_schema +#' @export +systemSchemasEnable <- enable_system_schema #' List system schemas. #' #' Gets an array of system schemas for a metastore. The caller must be an @@ -45,9 +55,10 @@ systemSchemasEnable <- function(client, metastore_id, schema_name) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname systemSchemasList +#' @rdname list_system_schemas +#' @alias systemSchemasList #' @export -systemSchemasList <- function(client, metastore_id) { +list_system_schemas <- function(client, metastore_id) { json <- client$do("GET", paste("/api/2.1/unity-catalog/metastores/", metastore_id, @@ -56,5 +67,9 @@ systemSchemasList <- function(client, metastore_id) { } +#' @rdname list_system_schemas +#' @export +systemSchemasList <- list_system_schemas + diff --git a/R/table_constraints.R b/R/table_constraints.R index 782053f3..0174e741 100755 --- a/R/table_constraints.R +++ b/R/table_constraints.R @@ -20,12 +20,17 @@ NULL #' @param constraint Required. A table constraint, as defined by *one* of the following fields being set: __primary_key_constraint__, __foreign_key_constraint__, __named_table_constraint__. #' @param full_name_arg Required. 
The full name of the table referenced by the constraint. #' -#' @rdname tableConstraintsCreate +#' @rdname create_table_constraint +#' @alias tableConstraintsCreate #' @export -tableConstraintsCreate <- function(client, full_name_arg, constraint) { +create_table_constraint <- function(client, full_name_arg, constraint) { body <- list(constraint = constraint, full_name_arg = full_name_arg) client$do("POST", "/api/2.1/unity-catalog/constraints", body = body) } + +#' @rdname create_table_constraint +#' @export +tableConstraintsCreate <- create_table_constraint #' Delete a table constraint. #' #' Deletes a table constraint. @@ -43,12 +48,17 @@ tableConstraintsCreate <- function(client, full_name_arg, constraint) { #' @param constraint_name Required. The name of the constraint to delete. #' @param full_name Required. Full name of the table referenced by the constraint. #' -#' @rdname tableConstraintsDelete +#' @rdname delete_table_constraint +#' @alias tableConstraintsDelete #' @export -tableConstraintsDelete <- function(client, full_name, constraint_name, cascade) { +delete_table_constraint <- function(client, full_name, constraint_name, cascade) { query <- list(cascade = cascade, constraint_name = constraint_name) client$do("DELETE", paste("/api/2.1/unity-catalog/constraints/", full_name, sep = ""), query = query) } +#' @rdname delete_table_constraint +#' @export +tableConstraintsDelete <- delete_table_constraint + diff --git a/R/tables.R b/R/tables.R index c4b7de68..f7d94d83 100755 --- a/R/tables.R +++ b/R/tables.R @@ -14,12 +14,17 @@ NULL #' #' @param full_name Required. Full name of the table. #' -#' @rdname tablesDelete +#' @rdname delete_table +#' @alias tablesDelete #' @export -tablesDelete <- function(client, full_name) { +delete_table <- function(client, full_name) { client$do("DELETE", paste("/api/2.1/unity-catalog/tables/", full_name, sep = "")) } + +#' @rdname delete_table +#' @export +tablesDelete <- delete_table #' Get boolean reflecting if table exists. #' #' Gets if a table exists in the metastore for a specific catalog and schema. @@ -34,13 +39,18 @@ tablesDelete <- function(client, full_name) { #' #' @param full_name Required. Full name of the table. #' -#' @rdname tablesExists +#' @rdname exists_table +#' @alias tablesExists #' @export -tablesExists <- function(client, full_name) { +exists_table <- function(client, full_name) { client$do("GET", paste("/api/2.1/unity-catalog/tables/", full_name, "/exists", , sep = "")) } + +#' @rdname exists_table +#' @export +tablesExists <- exists_table #' Get a table. #' #' Gets a table from the metastore for a specific catalog and schema. The caller @@ -56,13 +66,18 @@ tablesExists <- function(client, full_name) { #' @param include_browse Whether to include tables in the response for which the principal can only access selective metadata for. #' @param include_delta_metadata Whether delta metadata should be included in the response. #' -#' @rdname tablesGet +#' @rdname get_table +#' @alias tablesGet #' @export -tablesGet <- function(client, full_name, include_browse = NULL, include_delta_metadata = NULL) { +get_table <- function(client, full_name, include_browse = NULL, include_delta_metadata = NULL) { query <- list(include_browse = include_browse, include_delta_metadata = include_delta_metadata) client$do("GET", paste("/api/2.1/unity-catalog/tables/", full_name, sep = ""), query = query) } + +#' @rdname get_table +#' @export +tablesGet <- get_table #' List tables. 
#' #' Gets an array of all tables for the current metastore under the parent @@ -84,9 +99,10 @@ tablesGet <- function(client, full_name, include_browse = NULL, include_delta_me #' #' @return `data.frame` with all of the response pages. #' -#' @rdname tablesList +#' @rdname list_tables +#' @alias tablesList #' @export -tablesList <- function(client, catalog_name, schema_name, include_browse = NULL, +list_tables <- function(client, catalog_name, schema_name, include_browse = NULL, include_delta_metadata = NULL, max_results = NULL, omit_columns = NULL, omit_properties = NULL, page_token = NULL) { query <- list(catalog_name = catalog_name, include_browse = include_browse, include_delta_metadata = include_delta_metadata, @@ -109,6 +125,10 @@ tablesList <- function(client, catalog_name, schema_name, include_browse = NULL, return(results) } + +#' @rdname list_tables +#' @export +tablesList <- list_tables #' List table summaries. #' #' Gets an array of summaries for tables for a schema and catalog within the @@ -132,9 +152,10 @@ tablesList <- function(client, catalog_name, schema_name, include_browse = NULL, #' #' @return `data.frame` with all of the response pages. #' -#' @rdname tablesListSummaries +#' @rdname list_table_summaries +#' @alias tablesListSummaries #' @export -tablesListSummaries <- function(client, catalog_name, max_results = NULL, page_token = NULL, +list_table_summaries <- function(client, catalog_name, max_results = NULL, page_token = NULL, schema_name_pattern = NULL, table_name_pattern = NULL) { query <- list(catalog_name = catalog_name, max_results = max_results, page_token = page_token, schema_name_pattern = schema_name_pattern, table_name_pattern = table_name_pattern) @@ -155,6 +176,10 @@ tablesListSummaries <- function(client, catalog_name, max_results = NULL, page_t return(results) } + +#' @rdname list_table_summaries +#' @export +tablesListSummaries <- list_table_summaries #' Update a table owner. #' #' Change the owner of the table. The caller must be the owner of the parent @@ -167,14 +192,19 @@ tablesListSummaries <- function(client, catalog_name, max_results = NULL, page_t #' @param full_name Required. Full name of the table. #' @param owner This field has no description yet. #' -#' @rdname tablesUpdate +#' @rdname update_table +#' @alias tablesUpdate #' @export -tablesUpdate <- function(client, full_name, owner = NULL) { +update_table <- function(client, full_name, owner = NULL) { body <- list(owner = owner) client$do("PATCH", paste("/api/2.1/unity-catalog/tables/", full_name, sep = ""), body = body) } +#' @rdname update_table +#' @export +tablesUpdate <- update_table + diff --git a/R/token_management.R b/R/token_management.R index 11aeba10..fe2625f4 100755 --- a/R/token_management.R +++ b/R/token_management.R @@ -12,13 +12,17 @@ NULL #' @param comment Comment that describes the purpose of the token. #' @param lifetime_seconds The number of seconds before the token expires. 
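The tables functions return eagerly paginated data.frames, so typical discovery code stays short. Catalog, schema, and table names below are placeholders:

client <- DatabricksClient()

tbls <- list_tables(client, catalog_name = "main", schema_name = "default")

# Summaries are cheaper than full table metadata and accept SQL LIKE patterns.
sums <- list_table_summaries(client, catalog_name = "main",
  table_name_pattern = "sales_%")

# Cheap existence probe before acting on a specific table.
exists_table(client, "main.default.sales_2024")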
#' -#' @rdname tokenManagementCreateOboToken +#' @rdname create_obo_token +#' @alias tokenManagementCreateOboToken #' @export -tokenManagementCreateOboToken <- function(client, application_id, comment = NULL, - lifetime_seconds = NULL) { +create_obo_token <- function(client, application_id, comment = NULL, lifetime_seconds = NULL) { body <- list(application_id = application_id, comment = comment, lifetime_seconds = lifetime_seconds) client$do("POST", "/api/2.0/token-management/on-behalf-of/tokens", body = body) } + +#' @rdname create_obo_token +#' @export +tokenManagementCreateOboToken <- create_obo_token #' Delete a token. #' #' Deletes a token, specified by its ID. @@ -26,12 +30,17 @@ tokenManagementCreateOboToken <- function(client, application_id, comment = NULL #' #' @param token_id Required. The ID of the token to get. #' -#' @rdname tokenManagementDelete +#' @rdname delete_token_management +#' @alias tokenManagementDelete #' @export -tokenManagementDelete <- function(client, token_id) { +delete_token_management <- function(client, token_id) { client$do("DELETE", paste("/api/2.0/token-management/tokens/", token_id, sep = "")) } + +#' @rdname delete_token_management +#' @export +tokenManagementDelete <- delete_token_management #' Get token info. #' #' Gets information about a token, specified by its ID. @@ -39,33 +48,48 @@ tokenManagementDelete <- function(client, token_id) { #' #' @param token_id Required. The ID of the token to get. #' -#' @rdname tokenManagementGet +#' @rdname get_token_management +#' @alias tokenManagementGet #' @export -tokenManagementGet <- function(client, token_id) { +get_token_management <- function(client, token_id) { client$do("GET", paste("/api/2.0/token-management/tokens/", token_id, sep = "")) } + +#' @rdname get_token_management +#' @export +tokenManagementGet <- get_token_management #' Get token permission levels. #' #' Gets the permission levels that a user can have on an object. #' @param client Required. Instance of DatabricksClient() #' -#' @rdname tokenManagementGetPermissionLevels +#' @rdname get_token_management_permission_levels +#' @alias tokenManagementGetPermissionLevels #' @export -tokenManagementGetPermissionLevels <- function(client) { +get_token_management_permission_levels <- function(client) { client$do("GET", "/api/2.0/permissions/authorization/tokens/permissionLevels") } + +#' @rdname get_token_management_permission_levels +#' @export +tokenManagementGetPermissionLevels <- get_token_management_permission_levels #' Get token permissions. #' #' Gets the permissions of all tokens. Tokens can inherit permissions from their #' root object. #' @param client Required. Instance of DatabricksClient() #' -#' @rdname tokenManagementGetPermissions +#' @rdname get_token_management_permissions +#' @alias tokenManagementGetPermissions #' @export -tokenManagementGetPermissions <- function(client) { +get_token_management_permissions <- function(client) { client$do("GET", "/api/2.0/permissions/authorization/tokens") } + +#' @rdname get_token_management_permissions +#' @export +tokenManagementGetPermissions <- get_token_management_permissions #' List all tokens. #' #' Lists all tokens associated with the specified workspace or user. @@ -76,15 +100,20 @@ tokenManagementGetPermissions <- function(client) { #' #' @return `data.frame` with all of the response pages. 
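A sketch of the on-behalf-of flow, assuming the create response carries `token_info$token_id` like the underlying REST API; the application ID is a placeholder and the calls need admin rights:

client <- DatabricksClient()

obo <- create_obo_token(client, application_id = "<application-id>",
  comment = "CI token", lifetime_seconds = 3600)

# Admin-side inspection and revocation go through token-management.
get_token_management(client, obo$token_info$token_id)
delete_token_management(client, obo$token_info$token_id)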
#' -#' @rdname tokenManagementList +#' @rdname list_token_management +#' @alias tokenManagementList #' @export -tokenManagementList <- function(client, created_by_id = NULL, created_by_username = NULL) { +list_token_management <- function(client, created_by_id = NULL, created_by_username = NULL) { query <- list(created_by_id = created_by_id, created_by_username = created_by_username) json <- client$do("GET", "/api/2.0/token-management/tokens", query = query) return(json$token_infos) } + +#' @rdname list_token_management +#' @export +tokenManagementList <- list_token_management #' Set token permissions. #' #' Sets permissions on all tokens. Tokens can inherit permissions from their @@ -93,12 +122,17 @@ tokenManagementList <- function(client, created_by_id = NULL, created_by_usernam #' #' @param access_control_list This field has no description yet. #' -#' @rdname tokenManagementSetPermissions +#' @rdname set_token_management_permissions +#' @alias tokenManagementSetPermissions #' @export -tokenManagementSetPermissions <- function(client, access_control_list = NULL) { +set_token_management_permissions <- function(client, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", "/api/2.0/permissions/authorization/tokens", body = body) } + +#' @rdname set_token_management_permissions +#' @export +tokenManagementSetPermissions <- set_token_management_permissions #' Update token permissions. #' #' Updates the permissions on all tokens. Tokens can inherit permissions from @@ -107,13 +141,18 @@ tokenManagementSetPermissions <- function(client, access_control_list = NULL) { #' #' @param access_control_list This field has no description yet. #' -#' @rdname tokenManagementUpdatePermissions +#' @rdname update_token_management_permissions +#' @alias tokenManagementUpdatePermissions #' @export -tokenManagementUpdatePermissions <- function(client, access_control_list = NULL) { +update_token_management_permissions <- function(client, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PATCH", "/api/2.0/permissions/authorization/tokens", body = body) } +#' @rdname update_token_management_permissions +#' @export +tokenManagementUpdatePermissions <- update_token_management_permissions + diff --git a/R/tokens.R b/R/tokens.R index 1a0eba02..5ffd6350 100755 --- a/R/tokens.R +++ b/R/tokens.R @@ -14,12 +14,17 @@ NULL #' @param comment Optional description to attach to the token. #' @param lifetime_seconds The lifetime of the token, in seconds. #' -#' @rdname tokensCreate +#' @rdname create_token +#' @alias tokensCreate #' @export -tokensCreate <- function(client, comment = NULL, lifetime_seconds = NULL) { +create_token <- function(client, comment = NULL, lifetime_seconds = NULL) { body <- list(comment = comment, lifetime_seconds = lifetime_seconds) client$do("POST", "/api/2.0/token/create", body = body) } + +#' @rdname create_token +#' @export +tokensCreate <- create_token #' Revoke token. #' #' Revokes an access token. @@ -30,12 +35,17 @@ tokensCreate <- function(client, comment = NULL, lifetime_seconds = NULL) { #' #' @param token_id Required. The ID of the token to be revoked. 
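Personal access tokens follow the same shape; a sketch, again assuming the create response exposes `token_info$token_id`:

client <- DatabricksClient()

tok <- create_token(client, comment = "scratch", lifetime_seconds = 3600)

# Valid tokens for the current user/workspace pair, as a data.frame.
list_tokens(client)

# The secret value is only returned at creation time; revoke by ID.
delete_token(client, token_id = tok$token_info$token_id)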
#' -#' @rdname tokensDelete +#' @rdname delete_token +#' @alias tokensDelete #' @export -tokensDelete <- function(client, token_id) { +delete_token <- function(client, token_id) { body <- list(token_id = token_id) client$do("POST", "/api/2.0/token/delete", body = body) } + +#' @rdname delete_token +#' @export +tokensDelete <- delete_token #' List tokens. #' #' Lists all the valid tokens for a user-workspace pair. @@ -43,14 +53,19 @@ tokensDelete <- function(client, token_id) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname tokensList +#' @rdname list_tokens +#' @alias tokensList #' @export -tokensList <- function(client) { +list_tokens <- function(client) { json <- client$do("GET", "/api/2.0/token/list") return(json$token_infos) } +#' @rdname list_tokens +#' @export +tokensList <- list_tokens + diff --git a/R/users.R b/R/users.R index d6d90c0b..41bb5908 100755 --- a/R/users.R +++ b/R/users.R @@ -21,9 +21,10 @@ NULL #' @param schemas The schema of the user. #' @param user_name Email address of the Databricks user. #' -#' @rdname usersCreate +#' @rdname create_user +#' @alias usersCreate #' @export -usersCreate <- function(client, active = NULL, display_name = NULL, emails = NULL, +create_user <- function(client, active = NULL, display_name = NULL, emails = NULL, entitlements = NULL, external_id = NULL, groups = NULL, id = NULL, name = NULL, roles = NULL, schemas = NULL, user_name = NULL) { body <- list(active = active, displayName = display_name, emails = emails, entitlements = entitlements, @@ -31,6 +32,10 @@ usersCreate <- function(client, active = NULL, display_name = NULL, emails = NUL schemas = schemas, userName = user_name) client$do("POST", "/api/2.0/preview/scim/v2/Users", body = body) } + +#' @rdname create_user +#' @export +usersCreate <- create_user #' Delete a user. #' #' Deletes a user. Deleting a user from a Databricks workspace also removes @@ -39,12 +44,17 @@ usersCreate <- function(client, active = NULL, display_name = NULL, emails = NUL #' #' @param id Required. Unique ID for a user in the Databricks workspace. #' -#' @rdname usersDelete +#' @rdname delete_user +#' @alias usersDelete #' @export -usersDelete <- function(client, id) { +delete_user <- function(client, id) { client$do("DELETE", paste("/api/2.0/preview/scim/v2/Users/", id, sep = "")) } + +#' @rdname delete_user +#' @export +usersDelete <- delete_user #' Get user details. #' #' Gets information for a specific user in Databricks workspace. @@ -59,35 +69,50 @@ usersDelete <- function(client, id) { #' @param sort_order The order to sort the results. #' @param start_index Specifies the index of the first result. #' -#' @rdname usersGet +#' @rdname get_user +#' @alias usersGet #' @export -usersGet <- function(client, id, attributes = NULL, count = NULL, excluded_attributes = NULL, +get_user <- function(client, id, attributes = NULL, count = NULL, excluded_attributes = NULL, filter = NULL, sort_by = NULL, sort_order = NULL, start_index = NULL) { query <- list(attributes = attributes, count = count, excludedAttributes = excluded_attributes, filter = filter, sortBy = sort_by, sortOrder = sort_order, startIndex = start_index) client$do("GET", paste("/api/2.0/preview/scim/v2/Users/", id, sep = ""), query = query) } + +#' @rdname get_user +#' @export +usersGet <- get_user #' Get password permission levels. #' #' Gets the permission levels that a user can have on an object. #' @param client Required. 
Instance of DatabricksClient() #' -#' @rdname usersGetPermissionLevels +#' @rdname get_user_permission_levels +#' @alias usersGetPermissionLevels #' @export -usersGetPermissionLevels <- function(client) { +get_user_permission_levels <- function(client) { client$do("GET", "/api/2.0/permissions/authorization/passwords/permissionLevels") } + +#' @rdname get_user_permission_levels +#' @export +usersGetPermissionLevels <- get_user_permission_levels #' Get password permissions. #' #' Gets the permissions of all passwords. Passwords can inherit permissions from #' their root object. #' @param client Required. Instance of DatabricksClient() #' -#' @rdname usersGetPermissions +#' @rdname get_user_permissions +#' @alias usersGetPermissions #' @export -usersGetPermissions <- function(client) { +get_user_permissions <- function(client) { client$do("GET", "/api/2.0/permissions/authorization/passwords") } + +#' @rdname get_user_permissions +#' @export +usersGetPermissions <- get_user_permissions #' List users. #' #' Gets details for all the users associated with a Databricks workspace. @@ -103,9 +128,10 @@ usersGetPermissions <- function(client) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname usersList +#' @rdname list_users +#' @alias usersList #' @export -usersList <- function(client, attributes = NULL, count = NULL, excluded_attributes = NULL, +list_users <- function(client, attributes = NULL, count = NULL, excluded_attributes = NULL, filter = NULL, sort_by = NULL, sort_order = NULL, start_index = NULL) { query <- list(attributes = attributes, count = count, excludedAttributes = excluded_attributes, filter = filter, sortBy = sort_by, sortOrder = sort_order, startIndex = start_index) @@ -126,6 +152,10 @@ usersList <- function(client, attributes = NULL, count = NULL, excluded_attribut return(results) } + +#' @rdname list_users +#' @export +usersList <- list_users #' Update user details. #' #' Partially updates a user resource by applying the supplied operations on @@ -136,12 +166,17 @@ usersList <- function(client, attributes = NULL, count = NULL, excluded_attribut #' @param operations This field has no description yet. #' @param schemas The schema of the patch request. #' -#' @rdname usersPatch +#' @rdname patch_user +#' @alias usersPatch #' @export -usersPatch <- function(client, id, operations = NULL, schemas = NULL) { +patch_user <- function(client, id, operations = NULL, schemas = NULL) { body <- list(Operations = operations, schemas = schemas) client$do("PATCH", paste("/api/2.0/preview/scim/v2/Users/", id, sep = ""), body = body) } + +#' @rdname patch_user +#' @export +usersPatch <- patch_user #' Set password permissions. #' #' Sets permissions on all passwords. Passwords can inherit permissions from @@ -150,12 +185,17 @@ usersPatch <- function(client, id, operations = NULL, schemas = NULL) { #' #' @param access_control_list This field has no description yet. #' -#' @rdname usersSetPermissions +#' @rdname set_user_permissions +#' @alias usersSetPermissions #' @export -usersSetPermissions <- function(client, access_control_list = NULL) { +set_user_permissions <- function(client, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", "/api/2.0/permissions/authorization/passwords", body = body) } + +#' @rdname set_user_permissions +#' @export +usersSetPermissions <- set_user_permissions #' Replace a user. #' #' Replaces a user's information with the data supplied in request. 
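patch_user() forwards SCIM PatchOp documents more or less verbatim. A sketch that deactivates a matching user, assuming list_users() flattens the SCIM resources into a data.frame with an `id` column; the filter string is standard SCIM syntax:

client <- DatabricksClient()

users <- list_users(client, filter = "userName co \"@example.com\"")

# RFC 7644 style partial update: flip the active flag on the first match.
patch_user(client, id = users$id[[1]],
  operations = list(list(op = "replace", path = "active", value = FALSE)),
  schemas = list("urn:ietf:params:scim:api:messages:2.0:PatchOp"))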
@@ -173,9 +213,10 @@ usersSetPermissions <- function(client, access_control_list = NULL) { #' @param schemas The schema of the user. #' @param user_name Email address of the Databricks user. #' -#' @rdname usersUpdate +#' @rdname update_user +#' @alias usersUpdate #' @export -usersUpdate <- function(client, id, active = NULL, display_name = NULL, emails = NULL, +update_user <- function(client, id, active = NULL, display_name = NULL, emails = NULL, entitlements = NULL, external_id = NULL, groups = NULL, name = NULL, roles = NULL, schemas = NULL, user_name = NULL) { body <- list(active = active, displayName = display_name, emails = emails, entitlements = entitlements, @@ -183,6 +224,10 @@ usersUpdate <- function(client, id, active = NULL, display_name = NULL, emails = schemas = schemas, userName = user_name) client$do("PUT", paste("/api/2.0/preview/scim/v2/Users/", id, sep = ""), body = body) } + +#' @rdname update_user +#' @export +usersUpdate <- update_user #' Update password permissions. #' #' Updates the permissions on all passwords. Passwords can inherit permissions @@ -191,13 +236,18 @@ usersUpdate <- function(client, id, active = NULL, display_name = NULL, emails = #' #' @param access_control_list This field has no description yet. #' -#' @rdname usersUpdatePermissions +#' @rdname update_user_permissions +#' @alias usersUpdatePermissions #' @export -usersUpdatePermissions <- function(client, access_control_list = NULL) { +update_user_permissions <- function(client, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PATCH", "/api/2.0/permissions/authorization/passwords", body = body) } +#' @rdname update_user_permissions +#' @export +usersUpdatePermissions <- update_user_permissions + diff --git a/R/vector_search_endpoints.R b/R/vector_search_endpoints.R index 6d83d643..99f2c5b8 100755 --- a/R/vector_search_endpoints.R +++ b/R/vector_search_endpoints.R @@ -11,35 +11,50 @@ NULL #' @param endpoint_type Required. Type of endpoint. #' @param name Required. Name of endpoint. #' -#' @rdname vectorSearchEndpointsCreateEndpoint +#' @rdname create_vector_search_endpoint +#' @alias vectorSearchEndpointsCreateEndpoint #' @export -vectorSearchEndpointsCreateEndpoint <- function(client, name, endpoint_type) { +create_vector_search_endpoint <- function(client, name, endpoint_type) { body <- list(endpoint_type = endpoint_type, name = name) client$do("POST", "/api/2.0/vector-search/endpoints", body = body) } + +#' @rdname create_vector_search_endpoint +#' @export +vectorSearchEndpointsCreateEndpoint <- create_vector_search_endpoint #' Delete an endpoint. #' @param client Required. Instance of DatabricksClient() #' #' @param endpoint_name Required. Name of the endpoint. #' -#' @rdname vectorSearchEndpointsDeleteEndpoint +#' @rdname delete_vector_search_endpoint +#' @alias vectorSearchEndpointsDeleteEndpoint #' @export -vectorSearchEndpointsDeleteEndpoint <- function(client, endpoint_name) { +delete_vector_search_endpoint <- function(client, endpoint_name) { client$do("DELETE", paste("/api/2.0/vector-search/endpoints/", endpoint_name, sep = "")) } + +#' @rdname delete_vector_search_endpoint +#' @export +vectorSearchEndpointsDeleteEndpoint <- delete_vector_search_endpoint #' Get an endpoint. #' @param client Required. Instance of DatabricksClient() #' #' @param endpoint_name Required. Name of the endpoint. 
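Endpoint provisioning is asynchronous: the plain create returns while the endpoint is still being provisioned, and the `_and_wait` variant generated later in this file blocks until it is ready. A sketch with placeholder names:

client <- DatabricksClient()

create_vector_search_endpoint(client, name = "my-endpoint",
  endpoint_type = "STANDARD")
delete_vector_search_endpoint(client, "my-endpoint")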
#' -#' @rdname vectorSearchEndpointsGetEndpoint +#' @rdname get_vector_search_endpoint +#' @alias vectorSearchEndpointsGetEndpoint #' @export -vectorSearchEndpointsGetEndpoint <- function(client, endpoint_name) { +get_vector_search_endpoint <- function(client, endpoint_name) { client$do("GET", paste("/api/2.0/vector-search/endpoints/", endpoint_name, sep = "")) } + +#' @rdname get_vector_search_endpoint +#' @export +vectorSearchEndpointsGetEndpoint <- get_vector_search_endpoint #' List all endpoints. #' @param client Required. Instance of DatabricksClient() #' @@ -47,9 +62,10 @@ vectorSearchEndpointsGetEndpoint <- function(client, endpoint_name) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname vectorSearchEndpointsListEndpoints +#' @rdname list_vector_search_endpoint_endpoints +#' @alias vectorSearchEndpointsListEndpoints #' @export -vectorSearchEndpointsListEndpoints <- function(client, page_token = NULL) { +list_vector_search_endpoint_endpoints <- function(client, page_token = NULL) { query <- list(page_token = page_token) results <- data.frame() @@ -68,6 +84,10 @@ vectorSearchEndpointsListEndpoints <- function(client, page_token = NULL) { return(results) } + +#' @rdname list_vector_search_endpoint_endpoints +#' @export +vectorSearchEndpointsListEndpoints <- list_vector_search_endpoint_endpoints #' Create an endpoint. #' #' Create a new endpoint. @@ -85,10 +105,10 @@ vectorSearchEndpointsListEndpoints <- function(client, page_token = NULL) { #' @param endpoint_type Required. Type of endpoint. #' @param name Required. Name of endpoint. #' -#' @rdname vectorSearchEndpointsCreateEndpointAndWait +#' @rdname create_vector_search_endpoint_and_wait #' @export -vectorSearchEndpointsCreateEndpointAndWait <- function(client, name, endpoint_type, - timeout = 20, callback = cli_reporter) { +create_vector_search_endpoint_and_wait <- function(client, name, endpoint_type, timeout = 20, + callback = cli_reporter) { body <- list(endpoint_type = endpoint_type, name = name) op_response <- client$do("POST", "/api/2.0/vector-search/endpoints", body = body) started <- as.numeric(Sys.time()) diff --git a/R/vector_search_indexes.R b/R/vector_search_indexes.R index b89b9eb0..7f790f3f 100755 --- a/R/vector_search_indexes.R +++ b/R/vector_search_indexes.R @@ -15,14 +15,19 @@ NULL #' @param name Required. Name of the index. #' @param primary_key Required. Primary key of the index. #' -#' @rdname vectorSearchIndexesCreateIndex +#' @rdname create_vector_search_index +#' @alias vectorSearchIndexesCreateIndex #' @export -vectorSearchIndexesCreateIndex <- function(client, name, endpoint_name, primary_key, +create_vector_search_index <- function(client, name, endpoint_name, primary_key, index_type, delta_sync_index_spec = NULL, direct_access_index_spec = NULL) { body <- list(delta_sync_index_spec = delta_sync_index_spec, direct_access_index_spec = direct_access_index_spec, endpoint_name = endpoint_name, index_type = index_type, name = name, primary_key = primary_key) client$do("POST", "/api/2.0/vector-search/indexes", body = body) } + +#' @rdname create_vector_search_index +#' @export +vectorSearchIndexesCreateIndex <- create_vector_search_index #' Delete data from index. #' #' Handles the deletion of data from a specified vector index. @@ -31,13 +36,18 @@ vectorSearchIndexesCreateIndex <- function(client, name, endpoint_name, primary_ #' @param index_name Required. Name of the vector index where data is to be deleted. #' @param primary_keys Required. 
List of primary keys for the data to be deleted. #' -#' @rdname vectorSearchIndexesDeleteDataVectorIndex +#' @rdname delete_vector_search_index_data +#' @alias vectorSearchIndexesDeleteDataVectorIndex #' @export -vectorSearchIndexesDeleteDataVectorIndex <- function(client, index_name, primary_keys) { +delete_vector_search_index_data <- function(client, index_name, primary_keys) { body <- list(primary_keys = primary_keys) client$do("POST", paste("/api/2.0/vector-search/indexes/", index_name, "/delete-data", , sep = ""), body = body) } + +#' @rdname delete_vector_search_index_data +#' @export +vectorSearchIndexesDeleteDataVectorIndex <- delete_vector_search_index_data #' Delete an index. #' #' Delete an index. @@ -45,12 +55,17 @@ vectorSearchIndexesDeleteDataVectorIndex <- function(client, index_name, primary #' #' @param index_name Required. Name of the index. #' -#' @rdname vectorSearchIndexesDeleteIndex +#' @rdname delete_vector_search_index +#' @alias vectorSearchIndexesDeleteIndex #' @export -vectorSearchIndexesDeleteIndex <- function(client, index_name) { +delete_vector_search_index <- function(client, index_name) { client$do("DELETE", paste("/api/2.0/vector-search/indexes/", index_name, sep = "")) } + +#' @rdname delete_vector_search_index +#' @export +vectorSearchIndexesDeleteIndex <- delete_vector_search_index #' Get an index. #' #' Get an index. @@ -58,12 +73,17 @@ vectorSearchIndexesDeleteIndex <- function(client, index_name) { #' #' @param index_name Required. Name of the index. #' -#' @rdname vectorSearchIndexesGetIndex +#' @rdname get_vector_search_index +#' @alias vectorSearchIndexesGetIndex #' @export -vectorSearchIndexesGetIndex <- function(client, index_name) { +get_vector_search_index <- function(client, index_name) { client$do("GET", paste("/api/2.0/vector-search/indexes/", index_name, sep = "")) } + +#' @rdname get_vector_search_index +#' @export +vectorSearchIndexesGetIndex <- get_vector_search_index #' List indexes. #' #' List all indexes in the given endpoint. @@ -74,9 +94,10 @@ vectorSearchIndexesGetIndex <- function(client, index_name) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname vectorSearchIndexesListIndexes +#' @rdname list_vector_search_index_indexes +#' @alias vectorSearchIndexesListIndexes #' @export -vectorSearchIndexesListIndexes <- function(client, endpoint_name, page_token = NULL) { +list_vector_search_index_indexes <- function(client, endpoint_name, page_token = NULL) { query <- list(endpoint_name = endpoint_name, page_token = page_token) results <- data.frame() @@ -95,6 +116,10 @@ vectorSearchIndexesListIndexes <- function(client, endpoint_name, page_token = N return(results) } + +#' @rdname list_vector_search_index_indexes +#' @export +vectorSearchIndexesListIndexes <- list_vector_search_index_indexes #' Query an index. #' #' Query the specified vector index. @@ -108,15 +133,20 @@ vectorSearchIndexesListIndexes <- function(client, endpoint_name, page_token = N #' @param query_vector Query vector. #' @param score_threshold Threshold for the approximate nearest neighbor search. 
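Index discovery mirrors the other list functions; a sketch with placeholder names:

client <- DatabricksClient()

# Indexes on one endpoint, across all pages, as a single data.frame.
idx <- list_vector_search_index_indexes(client, endpoint_name = "my-endpoint")

# Individual indexes are addressed by their full three-level name.
get_vector_search_index(client, "main.default.my_index")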
#' -#' @rdname vectorSearchIndexesQueryIndex +#' @rdname query_vector_search_index +#' @alias vectorSearchIndexesQueryIndex #' @export -vectorSearchIndexesQueryIndex <- function(client, index_name, columns, filters_json = NULL, +query_vector_search_index <- function(client, index_name, columns, filters_json = NULL, num_results = NULL, query_text = NULL, query_vector = NULL, score_threshold = NULL) { body <- list(columns = columns, filters_json = filters_json, num_results = num_results, query_text = query_text, query_vector = query_vector, score_threshold = score_threshold) client$do("POST", paste("/api/2.0/vector-search/indexes/", index_name, "/query", , sep = ""), body = body) } + +#' @rdname query_vector_search_index +#' @export +vectorSearchIndexesQueryIndex <- query_vector_search_index #' Synchronize an index. #' #' Triggers a synchronization process for a specified vector index. @@ -124,13 +154,18 @@ vectorSearchIndexesQueryIndex <- function(client, index_name, columns, filters_j #' #' @param index_name Required. Name of the vector index to synchronize. #' -#' @rdname vectorSearchIndexesSyncIndex +#' @rdname sync_vector_search_index +#' @alias vectorSearchIndexesSyncIndex #' @export -vectorSearchIndexesSyncIndex <- function(client, index_name) { +sync_vector_search_index <- function(client, index_name) { client$do("POST", paste("/api/2.0/vector-search/indexes/", index_name, "/sync", , sep = "")) } + +#' @rdname sync_vector_search_index +#' @export +vectorSearchIndexesSyncIndex <- sync_vector_search_index #' Upsert data into an index. #' #' Handles the upserting of data into a specified vector index. @@ -139,14 +174,19 @@ vectorSearchIndexesSyncIndex <- function(client, index_name) { #' @param index_name Required. Name of the vector index where data is to be upserted. #' @param inputs_json Required. JSON string representing the data to be upserted. #' -#' @rdname vectorSearchIndexesUpsertDataVectorIndex +#' @rdname upsert_vector_search_index_data +#' @alias vectorSearchIndexesUpsertDataVectorIndex #' @export -vectorSearchIndexesUpsertDataVectorIndex <- function(client, index_name, inputs_json) { +upsert_vector_search_index_data <- function(client, index_name, inputs_json) { body <- list(inputs_json = inputs_json) client$do("POST", paste("/api/2.0/vector-search/indexes/", index_name, "/upsert-data", , sep = ""), body = body) } +#' @rdname upsert_vector_search_index_data +#' @export +vectorSearchIndexesUpsertDataVectorIndex <- upsert_vector_search_index_data + diff --git a/R/volumes.R b/R/volumes.R index 2624f88b..e655b759 100755 --- a/R/volumes.R +++ b/R/volumes.R @@ -32,14 +32,19 @@ NULL #' @param storage_location The storage location on the cloud. #' @param volume_type Required. This field has no description yet. #' -#' @rdname volumesCreate +#' @rdname create_volume +#' @alias volumesCreate #' @export -volumesCreate <- function(client, catalog_name, schema_name, name, volume_type, comment = NULL, +create_volume <- function(client, catalog_name, schema_name, name, volume_type, comment = NULL, storage_location = NULL) { body <- list(catalog_name = catalog_name, comment = comment, name = name, schema_name = schema_name, storage_location = storage_location, volume_type = volume_type) client$do("POST", "/api/2.1/unity-catalog/volumes", body = body) } + +#' @rdname create_volume +#' @export +volumesCreate <- create_volume #' Delete a Volume. #' #' Deletes a volume from the specified parent catalog and schema. 
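Before the diff moves on to volumes, a sketch of the vector-search data-plane calls. Note that `inputs_json` is documented as a JSON string rather than an R list, so serialization is explicit; jsonlite is assumed to be available and all names and vectors are placeholders:

client <- DatabricksClient()

hits <- query_vector_search_index(client, "main.default.my_index",
  columns = list("id", "text"),
  query_vector = c(0.12, -0.34, 0.56),
  num_results = 5)

# Upsert takes a JSON string, not a list, hence the explicit toJSON().
rows <- jsonlite::toJSON(
  list(list(id = "1", text = "hello"), list(id = "2", text = "world")),
  auto_unbox = TRUE)
upsert_vector_search_index_data(client, "main.default.my_index",
  inputs_json = rows)

# Deletion by primary key takes plain keys.
delete_vector_search_index_data(client, "main.default.my_index",
  primary_keys = list("2"))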
@@ -52,12 +57,17 @@ volumesCreate <- function(client, catalog_name, schema_name, name, volume_type, #' #' @param name Required. The three-level (fully qualified) name of the volume. #' -#' @rdname volumesDelete +#' @rdname delete_volume +#' @alias volumesDelete #' @export -volumesDelete <- function(client, name) { +delete_volume <- function(client, name) { client$do("DELETE", paste("/api/2.1/unity-catalog/volumes/", name, sep = "")) } + +#' @rdname delete_volume +#' @export +volumesDelete <- delete_volume #' List Volumes. #' #' Gets an array of volumes for the current metastore under the parent catalog @@ -81,9 +91,10 @@ volumesDelete <- function(client, name) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname volumesList +#' @rdname list_volumes +#' @alias volumesList #' @export -volumesList <- function(client, catalog_name, schema_name, include_browse = NULL, +list_volumes <- function(client, catalog_name, schema_name, include_browse = NULL, max_results = NULL, page_token = NULL) { query <- list(catalog_name = catalog_name, include_browse = include_browse, max_results = max_results, page_token = page_token, schema_name = schema_name) @@ -104,6 +115,10 @@ volumesList <- function(client, catalog_name, schema_name, include_browse = NULL return(results) } + +#' @rdname list_volumes +#' @export +volumesList <- list_volumes #' Get a Volume. #' #' Gets a volume from the metastore for a specific catalog and schema. @@ -117,12 +132,17 @@ volumesList <- function(client, catalog_name, schema_name, include_browse = NULL #' @param include_browse Whether to include volumes in the response for which the principal can only access selective metadata for. #' @param name Required. The three-level (fully qualified) name of the volume. #' -#' @rdname volumesRead +#' @rdname read_volume +#' @alias volumesRead #' @export -volumesRead <- function(client, name, include_browse = NULL) { +read_volume <- function(client, name, include_browse = NULL) { query <- list(include_browse = include_browse) client$do("GET", paste("/api/2.1/unity-catalog/volumes/", name, sep = ""), query = query) } + +#' @rdname read_volume +#' @export +volumesRead <- read_volume #' Update a Volume. #' #' Updates the specified volume under the specified parent catalog and schema. @@ -141,14 +161,19 @@ volumesRead <- function(client, name, include_browse = NULL) { #' @param new_name New name for the volume. #' @param owner The identifier of the user who owns the volume. #' -#' @rdname volumesUpdate +#' @rdname update_volume +#' @alias volumesUpdate #' @export -volumesUpdate <- function(client, name, comment = NULL, new_name = NULL, owner = NULL) { +update_volume <- function(client, name, comment = NULL, new_name = NULL, owner = NULL) { body <- list(comment = comment, new_name = new_name, owner = owner) client$do("PATCH", paste("/api/2.1/unity-catalog/volumes/", name, sep = ""), body = body) } +#' @rdname update_volume +#' @export +volumesUpdate <- update_volume + diff --git a/R/warehouses.R b/R/warehouses.R index 8f6fee7e..f2a94402 100755 --- a/R/warehouses.R +++ b/R/warehouses.R @@ -22,9 +22,10 @@ NULL #' @param tags A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. #' @param warehouse_type Warehouse type: `PRO` or `CLASSIC`. 
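A sketch of the renamed volume lifecycle: a managed volume needs no `storage_location`, and the three-level name addresses it afterwards (names are placeholders):

client <- DatabricksClient()

create_volume(client, catalog_name = "main", schema_name = "default",
  name = "landing", volume_type = "MANAGED")
read_volume(client, "main.default.landing")
update_volume(client, "main.default.landing", comment = "raw file drop zone")
delete_volume(client, "main.default.landing")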
#' -#' @rdname warehousesCreate +#' @rdname create_warehouse +#' @alias warehousesCreate #' @export -warehousesCreate <- function(client, auto_stop_mins = NULL, channel = NULL, cluster_size = NULL, +create_warehouse <- function(client, auto_stop_mins = NULL, channel = NULL, cluster_size = NULL, creator_name = NULL, enable_photon = NULL, enable_serverless_compute = NULL, instance_profile_arn = NULL, max_num_clusters = NULL, min_num_clusters = NULL, name = NULL, spot_instance_policy = NULL, tags = NULL, warehouse_type = NULL) { @@ -35,6 +36,10 @@ warehousesCreate <- function(client, auto_stop_mins = NULL, channel = NULL, clus tags = tags, warehouse_type = warehouse_type) client$do("POST", "/api/2.0/sql/warehouses", body = body) } + +#' @rdname create_warehouse +#' @export +warehousesCreate <- create_warehouse #' Delete a warehouse. #' #' Deletes a SQL warehouse. @@ -42,12 +47,17 @@ warehousesCreate <- function(client, auto_stop_mins = NULL, channel = NULL, clus #' #' @param id Required. Required. #' -#' @rdname warehousesDelete +#' @rdname delete_warehouse +#' @alias warehousesDelete #' @export -warehousesDelete <- function(client, id) { +delete_warehouse <- function(client, id) { client$do("DELETE", paste("/api/2.0/sql/warehouses/", id, sep = "")) } + +#' @rdname delete_warehouse +#' @export +warehousesDelete <- delete_warehouse #' Update a warehouse. #' #' Updates the configuration for a SQL warehouse. @@ -68,9 +78,10 @@ warehousesDelete <- function(client, id) { #' @param tags A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. #' @param warehouse_type Warehouse type: `PRO` or `CLASSIC`. #' -#' @rdname warehousesEdit +#' @rdname edit_warehouse +#' @alias warehousesEdit #' @export -warehousesEdit <- function(client, id, auto_stop_mins = NULL, channel = NULL, cluster_size = NULL, +edit_warehouse <- function(client, id, auto_stop_mins = NULL, channel = NULL, cluster_size = NULL, creator_name = NULL, enable_photon = NULL, enable_serverless_compute = NULL, instance_profile_arn = NULL, max_num_clusters = NULL, min_num_clusters = NULL, name = NULL, spot_instance_policy = NULL, tags = NULL, warehouse_type = NULL) { @@ -82,6 +93,10 @@ warehousesEdit <- function(client, id, auto_stop_mins = NULL, channel = NULL, cl client$do("POST", paste("/api/2.0/sql/warehouses/", id, "/edit", , sep = ""), body = body) } + +#' @rdname edit_warehouse +#' @export +warehousesEdit <- edit_warehouse #' Get warehouse info. #' #' Gets the information for a single SQL warehouse. @@ -89,12 +104,17 @@ warehousesEdit <- function(client, id, auto_stop_mins = NULL, channel = NULL, cl #' #' @param id Required. Required. #' -#' @rdname warehousesGet +#' @rdname get_warehouse +#' @alias warehousesGet #' @export -warehousesGet <- function(client, id) { +get_warehouse <- function(client, id) { client$do("GET", paste("/api/2.0/sql/warehouses/", id, sep = "")) } + +#' @rdname get_warehouse +#' @export +warehousesGet <- get_warehouse #' Get SQL warehouse permission levels. #' #' Gets the permission levels that a user can have on an object. @@ -102,13 +122,18 @@ warehousesGet <- function(client, id) { #' #' @param warehouse_id Required. The SQL warehouse for which to get or manage permissions. 
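A sketch of the non-blocking warehouse calls, assuming the create response carries the new warehouse's `id` as in the REST API; all values are illustrative:

client <- DatabricksClient()

wh <- create_warehouse(client, name = "reports", cluster_size = "2X-Small",
  warehouse_type = "PRO", enable_serverless_compute = TRUE,
  auto_stop_mins = 10)
get_warehouse(client, wh$id)

# edit_warehouse() resubmits the configuration to the /edit endpoint.
edit_warehouse(client, wh$id, name = "reports", cluster_size = "2X-Small",
  max_num_clusters = 2)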
#' -#' @rdname warehousesGetPermissionLevels +#' @rdname get_warehouse_permission_levels +#' @alias warehousesGetPermissionLevels #' @export -warehousesGetPermissionLevels <- function(client, warehouse_id) { +get_warehouse_permission_levels <- function(client, warehouse_id) { client$do("GET", paste("/api/2.0/permissions/warehouses/", warehouse_id, "/permissionLevels", , sep = "")) } + +#' @rdname get_warehouse_permission_levels +#' @export +warehousesGetPermissionLevels <- get_warehouse_permission_levels #' Get SQL warehouse permissions. #' #' Gets the permissions of a SQL warehouse. SQL warehouses can inherit @@ -117,23 +142,33 @@ warehousesGetPermissionLevels <- function(client, warehouse_id) { #' #' @param warehouse_id Required. The SQL warehouse for which to get or manage permissions. #' -#' @rdname warehousesGetPermissions +#' @rdname get_warehouse_permissions +#' @alias warehousesGetPermissions #' @export -warehousesGetPermissions <- function(client, warehouse_id) { +get_warehouse_permissions <- function(client, warehouse_id) { client$do("GET", paste("/api/2.0/permissions/warehouses/", warehouse_id, sep = "")) } + +#' @rdname get_warehouse_permissions +#' @export +warehousesGetPermissions <- get_warehouse_permissions #' Get the workspace configuration. #' #' Gets the workspace level configuration that is shared by all SQL warehouses #' in a workspace. #' @param client Required. Instance of DatabricksClient() #' -#' @rdname warehousesGetWorkspaceWarehouseConfig +#' @rdname get_warehouse_workspace_config +#' @alias warehousesGetWorkspaceWarehouseConfig #' @export -warehousesGetWorkspaceWarehouseConfig <- function(client) { +get_warehouse_workspace_config <- function(client) { client$do("GET", "/api/2.0/sql/config/warehouses") } + +#' @rdname get_warehouse_workspace_config +#' @export +warehousesGetWorkspaceWarehouseConfig <- get_warehouse_workspace_config #' List warehouses. #' #' Lists all SQL warehouses that a user has manager permissions on. @@ -143,15 +178,20 @@ warehousesGetWorkspaceWarehouseConfig <- function(client) { #' #' @return `data.frame` with all of the response pages. #' -#' @rdname warehousesList +#' @rdname list_warehouses +#' @alias warehousesList #' @export -warehousesList <- function(client, run_as_user_id = NULL) { +list_warehouses <- function(client, run_as_user_id = NULL) { query <- list(run_as_user_id = run_as_user_id) json <- client$do("GET", "/api/2.0/sql/warehouses", query = query) return(json$warehouses) } + +#' @rdname list_warehouses +#' @export +warehousesList <- list_warehouses #' Set SQL warehouse permissions. #' #' Sets permissions on a SQL warehouse. SQL warehouses can inherit permissions @@ -161,13 +201,18 @@ warehousesList <- function(client, run_as_user_id = NULL) { #' @param access_control_list This field has no description yet. #' @param warehouse_id Required. The SQL warehouse for which to get or manage permissions. #' -#' @rdname warehousesSetPermissions +#' @rdname set_warehouse_permissions +#' @alias warehousesSetPermissions #' @export -warehousesSetPermissions <- function(client, warehouse_id, access_control_list = NULL) { +set_warehouse_permissions <- function(client, warehouse_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", paste("/api/2.0/permissions/warehouses/", warehouse_id, sep = ""), body = body) } + +#' @rdname set_warehouse_permissions +#' @export +warehousesSetPermissions <- set_warehouse_permissions #' Set the workspace configuration. 
#' #' Sets the workspace level configuration that is shared by all SQL warehouses @@ -184,9 +229,10 @@ warehousesSetPermissions <- function(client, warehouse_id, access_control_list = #' @param security_policy Security policy for warehouses. #' @param sql_configuration_parameters SQL configuration parameters. #' -#' @rdname warehousesSetWorkspaceWarehouseConfig +#' @rdname set_warehouse_workspace_config +#' @alias warehousesSetWorkspaceWarehouseConfig #' @export -warehousesSetWorkspaceWarehouseConfig <- function(client, channel = NULL, config_param = NULL, +set_warehouse_workspace_config <- function(client, channel = NULL, config_param = NULL, data_access_config = NULL, enabled_warehouse_types = NULL, global_param = NULL, google_service_account = NULL, instance_profile_arn = NULL, security_policy = NULL, sql_configuration_parameters = NULL) { @@ -196,6 +242,10 @@ warehousesSetWorkspaceWarehouseConfig <- function(client, channel = NULL, config security_policy = security_policy, sql_configuration_parameters = sql_configuration_parameters) client$do("PUT", "/api/2.0/sql/config/warehouses", body = body) } + +#' @rdname set_warehouse_workspace_config +#' @export +warehousesSetWorkspaceWarehouseConfig <- set_warehouse_workspace_config #' Start a warehouse. #' #' Starts a SQL warehouse. @@ -203,12 +253,17 @@ warehousesSetWorkspaceWarehouseConfig <- function(client, channel = NULL, config #' #' @param id Required. Required. #' -#' @rdname warehousesStart +#' @rdname start_warehouse +#' @alias warehousesStart #' @export -warehousesStart <- function(client, id) { +start_warehouse <- function(client, id) { client$do("POST", paste("/api/2.0/sql/warehouses/", id, "/start", , sep = "")) } + +#' @rdname start_warehouse +#' @export +warehousesStart <- start_warehouse #' Stop a warehouse. #' #' Stops a SQL warehouse. @@ -216,12 +271,17 @@ warehousesStart <- function(client, id) { #' #' @param id Required. Required. #' -#' @rdname warehousesStop +#' @rdname stop_warehouse +#' @alias warehousesStop #' @export -warehousesStop <- function(client, id) { +stop_warehouse <- function(client, id) { client$do("POST", paste("/api/2.0/sql/warehouses/", id, "/stop", , sep = "")) } + +#' @rdname stop_warehouse +#' @export +warehousesStop <- stop_warehouse #' Update SQL warehouse permissions. #' #' Updates the permissions on a SQL warehouse. SQL warehouses can inherit @@ -231,13 +291,18 @@ warehousesStop <- function(client, id) { #' @param access_control_list This field has no description yet. #' @param warehouse_id Required. The SQL warehouse for which to get or manage permissions. #' -#' @rdname warehousesUpdatePermissions +#' @rdname update_warehouse_permissions +#' @alias warehousesUpdatePermissions #' @export -warehousesUpdatePermissions <- function(client, warehouse_id, access_control_list = NULL) { +update_warehouse_permissions <- function(client, warehouse_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PATCH", paste("/api/2.0/permissions/warehouses/", warehouse_id, sep = ""), body = body) } + +#' @rdname update_warehouse_permissions +#' @export +warehousesUpdatePermissions <- update_warehouse_permissions #' Create a warehouse. #' #' Creates a new SQL warehouse. @@ -266,9 +331,9 @@ warehousesUpdatePermissions <- function(client, warehouse_id, access_control_lis #' @param tags A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. 
#' @param warehouse_type Warehouse type: `PRO` or `CLASSIC`. #' -#' @rdname warehousesCreateAndWait +#' @rdname create_warehouse_and_wait #' @export -warehousesCreateAndWait <- function(client, auto_stop_mins = NULL, channel = NULL, +create_warehouse_and_wait <- function(client, auto_stop_mins = NULL, channel = NULL, cluster_size = NULL, creator_name = NULL, enable_photon = NULL, enable_serverless_compute = NULL, instance_profile_arn = NULL, max_num_clusters = NULL, min_num_clusters = NULL, name = NULL, spot_instance_policy = NULL, tags = NULL, warehouse_type = NULL, @@ -348,9 +413,9 @@ warehousesCreateAndWait <- function(client, auto_stop_mins = NULL, channel = NUL #' @param tags A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse. #' @param warehouse_type Warehouse type: `PRO` or `CLASSIC`. #' -#' @rdname warehousesEditAndWait +#' @rdname edit_warehouse_and_wait #' @export -warehousesEditAndWait <- function(client, id, auto_stop_mins = NULL, channel = NULL, +edit_warehouse_and_wait <- function(client, id, auto_stop_mins = NULL, channel = NULL, cluster_size = NULL, creator_name = NULL, enable_photon = NULL, enable_serverless_compute = NULL, instance_profile_arn = NULL, max_num_clusters = NULL, min_num_clusters = NULL, name = NULL, spot_instance_policy = NULL, tags = NULL, warehouse_type = NULL, @@ -424,9 +489,9 @@ warehousesEditAndWait <- function(client, id, auto_stop_mins = NULL, channel = N #' #' @param id Required. Required. #' -#' @rdname warehousesStartAndWait +#' @rdname start_warehouse_and_wait #' @export -warehousesStartAndWait <- function(client, id, timeout = 20, callback = cli_reporter) { +start_warehouse_and_wait <- function(client, id, timeout = 20, callback = cli_reporter) { op_response <- client$do("POST", paste("/api/2.0/sql/warehouses/", id, "/start", , sep = "")) @@ -485,9 +550,9 @@ warehousesStartAndWait <- function(client, id, timeout = 20, callback = cli_repo #' #' @param id Required. Required. #' -#' @rdname warehousesStopAndWait +#' @rdname stop_warehouse_and_wait #' @export -warehousesStopAndWait <- function(client, id, timeout = 20, callback = cli_reporter) { +stop_warehouse_and_wait <- function(client, id, timeout = 20, callback = cli_reporter) { op_response <- client$do("POST", paste("/api/2.0/sql/warehouses/", id, "/stop", , sep = "")) diff --git a/R/workspace.R b/R/workspace.R index 3c269ccf..f0d5deba 100755 --- a/R/workspace.R +++ b/R/workspace.R @@ -18,12 +18,17 @@ NULL #' @param path Required. The absolute path of the notebook or directory. #' @param recursive The flag that specifies whether to delete the object recursively. #' -#' @rdname workspaceDelete +#' @rdname delete_notebook +#' @alias workspaceDelete #' @export -workspaceDelete <- function(client, path, recursive = NULL) { +delete_notebook <- function(client, path, recursive = NULL) { body <- list(path = path, recursive = recursive) client$do("POST", "/api/2.0/workspace/delete", body = body) } + +#' @rdname delete_notebook +#' @export +workspaceDelete <- delete_notebook #' Export a workspace object. #' #' Exports an object or the contents of an entire directory. @@ -39,12 +44,17 @@ workspaceDelete <- function(client, path, recursive = NULL) { #' @param format This specifies the format of the exported file. #' @param path Required. The absolute path of the object or directory. 
#' -#' @rdname workspaceExport +#' @rdname export_notebook +#' @alias workspaceExport #' @export -workspaceExport <- function(client, path, format = NULL) { +export_notebook <- function(client, path, format = NULL) { query <- list(format = format, path = path) client$do("GET", "/api/2.0/workspace/export", query = query) } + +#' @rdname export_notebook +#' @export +workspaceExport <- export_notebook #' Get workspace object permission levels. #' #' Gets the permission levels that a user can have on an object. @@ -53,13 +63,18 @@ workspaceExport <- function(client, path, format = NULL) { #' @param workspace_object_id Required. The workspace object for which to get or manage permissions. #' @param workspace_object_type Required. The workspace object type for which to get or manage permissions. #' -#' @rdname workspaceGetPermissionLevels +#' @rdname get_notebook_permission_levels +#' @alias workspaceGetPermissionLevels #' @export -workspaceGetPermissionLevels <- function(client, workspace_object_type, workspace_object_id) { +get_notebook_permission_levels <- function(client, workspace_object_type, workspace_object_id) { client$do("GET", paste("/api/2.0/permissions/", workspace_object_type, "/", workspace_object_id, "/permissionLevels", , sep = "")) } + +#' @rdname get_notebook_permission_levels +#' @export +workspaceGetPermissionLevels <- get_notebook_permission_levels #' Get workspace object permissions. #' #' Gets the permissions of a workspace object. Workspace objects can inherit @@ -69,13 +84,18 @@ workspaceGetPermissionLevels <- function(client, workspace_object_type, workspac #' @param workspace_object_id Required. The workspace object for which to get or manage permissions. #' @param workspace_object_type Required. The workspace object type for which to get or manage permissions. #' -#' @rdname workspaceGetPermissions +#' @rdname get_notebook_permissions +#' @alias workspaceGetPermissions #' @export -workspaceGetPermissions <- function(client, workspace_object_type, workspace_object_id) { +get_notebook_permissions <- function(client, workspace_object_type, workspace_object_id) { client$do("GET", paste("/api/2.0/permissions/", workspace_object_type, "/", workspace_object_id, sep = "")) } + +#' @rdname get_notebook_permissions +#' @export +workspaceGetPermissions <- get_notebook_permissions #' Get status. #' #' Gets the status of an object or a directory. If `path` does not exist, this @@ -84,12 +104,17 @@ workspaceGetPermissions <- function(client, workspace_object_type, workspace_obj #' #' @param path Required. The absolute path of the notebook or directory. #' -#' @rdname workspaceGetStatus +#' @rdname get_notebook_status +#' @alias workspaceGetStatus #' @export -workspaceGetStatus <- function(client, path) { +get_notebook_status <- function(client, path) { query <- list(path = path) client$do("GET", "/api/2.0/workspace/get-status", query = query) } + +#' @rdname get_notebook_status +#' @export +workspaceGetStatus <- get_notebook_status #' Import a workspace object. #' #' Imports a workspace object (for example, a notebook or file) or the contents @@ -106,14 +131,19 @@ workspaceGetStatus <- function(client, path) { #' @param overwrite The flag that specifies whether to overwrite existing object. #' @param path Required. The absolute path of the object or directory. 
#' -#' @rdname workspaceImport +#' @rdname import_notebook +#' @alias workspaceImport #' @export -workspaceImport <- function(client, path, content = NULL, format = NULL, language = NULL, +import_notebook <- function(client, path, content = NULL, format = NULL, language = NULL, overwrite = NULL) { body <- list(content = content, format = format, language = language, overwrite = overwrite, path = path) client$do("POST", "/api/2.0/workspace/import", body = body) } + +#' @rdname import_notebook +#' @export +workspaceImport <- import_notebook #' List contents. #' #' Lists the contents of a directory, or the object if it is not a directory. If @@ -126,15 +156,20 @@ workspaceImport <- function(client, path, content = NULL, format = NULL, languag #' #' @return `data.frame` with all of the response pages. #' -#' @rdname workspaceList +#' @rdname list_notebooks +#' @alias workspaceList #' @export -workspaceList <- function(client, path, notebooks_modified_after = NULL) { +list_notebooks <- function(client, path, notebooks_modified_after = NULL) { query <- list(notebooks_modified_after = notebooks_modified_after, path = path) json <- client$do("GET", "/api/2.0/workspace/list", query = query) return(json$objects) } + +#' @rdname list_notebooks +#' @export +workspaceList <- list_notebooks #' Create a directory. #' #' Creates the specified directory (and necessary parent directories if they do @@ -147,12 +182,17 @@ workspaceList <- function(client, path, notebooks_modified_after = NULL) { #' #' @param path Required. The absolute path of the directory. #' -#' @rdname workspaceMkdirs +#' @rdname mkdirs_notebook +#' @alias workspaceMkdirs #' @export -workspaceMkdirs <- function(client, path) { +mkdirs_notebook <- function(client, path) { body <- list(path = path) client$do("POST", "/api/2.0/workspace/mkdirs", body = body) } + +#' @rdname mkdirs_notebook +#' @export +workspaceMkdirs <- mkdirs_notebook #' Set workspace object permissions. #' #' Sets permissions on a workspace object. Workspace objects can inherit @@ -163,14 +203,19 @@ workspaceMkdirs <- function(client, path) { #' @param workspace_object_id Required. The workspace object for which to get or manage permissions. #' @param workspace_object_type Required. The workspace object type for which to get or manage permissions. #' -#' @rdname workspaceSetPermissions +#' @rdname set_notebook_permissions +#' @alias workspaceSetPermissions #' @export -workspaceSetPermissions <- function(client, workspace_object_type, workspace_object_id, +set_notebook_permissions <- function(client, workspace_object_type, workspace_object_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PUT", paste("/api/2.0/permissions/", workspace_object_type, "/", workspace_object_id, sep = ""), body = body) } + +#' @rdname set_notebook_permissions +#' @export +workspaceSetPermissions <- set_notebook_permissions #' Update workspace object permissions. #' #' Updates the permissions on a workspace object. Workspace objects can inherit @@ -181,15 +226,20 @@ workspaceSetPermissions <- function(client, workspace_object_type, workspace_obj #' @param workspace_object_id Required. The workspace object for which to get or manage permissions. #' @param workspace_object_type Required. The workspace object type for which to get or manage permissions. 
#' -#' @rdname workspaceUpdatePermissions +#' @rdname update_notebook_permissions +#' @alias workspaceUpdatePermissions #' @export -workspaceUpdatePermissions <- function(client, workspace_object_type, workspace_object_id, +update_notebook_permissions <- function(client, workspace_object_type, workspace_object_id, access_control_list = NULL) { body <- list(access_control_list = access_control_list) client$do("PATCH", paste("/api/2.0/permissions/", workspace_object_type, "/", workspace_object_id, sep = ""), body = body) } +#' @rdname update_notebook_permissions +#' @export +workspaceUpdatePermissions <- update_notebook_permissions + diff --git a/R/workspace_bindings.R b/R/workspace_bindings.R index b046b8f9..5c4ea6fa 100755 --- a/R/workspace_bindings.R +++ b/R/workspace_bindings.R @@ -11,13 +11,18 @@ NULL #' #' @param name Required. The name of the catalog. #' -#' @rdname workspaceBindingsGet +#' @rdname get_workspace_binding +#' @alias workspaceBindingsGet #' @export -workspaceBindingsGet <- function(client, name) { +get_workspace_binding <- function(client, name) { client$do("GET", paste("/api/2.1/unity-catalog/workspace-bindings/catalogs/", name, sep = "")) } + +#' @rdname get_workspace_binding +#' @export +workspaceBindingsGet <- get_workspace_binding #' Get securable workspace bindings. #' #' Gets workspace bindings of the securable. The caller must be a metastore @@ -27,13 +32,18 @@ workspaceBindingsGet <- function(client, name) { #' @param securable_name Required. The name of the securable. #' @param securable_type Required. The type of the securable. #' -#' @rdname workspaceBindingsGetBindings +#' @rdname get_workspace_binding_bindings +#' @alias workspaceBindingsGetBindings #' @export -workspaceBindingsGetBindings <- function(client, securable_type, securable_name) { +get_workspace_binding_bindings <- function(client, securable_type, securable_name) { client$do("GET", paste("/api/2.1/unity-catalog/bindings/", securable_type, "/", securable_name, sep = "")) } + +#' @rdname get_workspace_binding_bindings +#' @export +workspaceBindingsGetBindings <- get_workspace_binding_bindings #' Update catalog workspace bindings. #' #' Updates workspace bindings of the catalog. The caller must be a metastore @@ -44,13 +54,18 @@ workspaceBindingsGetBindings <- function(client, securable_type, securable_name) #' @param name Required. The name of the catalog. #' @param unassign_workspaces A list of workspace IDs. #' -#' @rdname workspaceBindingsUpdate +#' @rdname update_workspace_binding +#' @alias workspaceBindingsUpdate #' @export -workspaceBindingsUpdate <- function(client, name, assign_workspaces = NULL, unassign_workspaces = NULL) { +update_workspace_binding <- function(client, name, assign_workspaces = NULL, unassign_workspaces = NULL) { body <- list(assign_workspaces = assign_workspaces, unassign_workspaces = unassign_workspaces) client$do("PATCH", paste("/api/2.1/unity-catalog/workspace-bindings/catalogs/", name, sep = ""), body = body) } + +#' @rdname update_workspace_binding +#' @export +workspaceBindingsUpdate <- update_workspace_binding #' Update securable workspace bindings. #' #' Updates workspace bindings of the securable. The caller must be a metastore @@ -62,15 +77,20 @@ workspaceBindingsUpdate <- function(client, name, assign_workspaces = NULL, unas #' @param securable_name Required. The name of the securable. #' @param securable_type Required. The type of the securable. 
#' -#' @rdname workspaceBindingsUpdateBindings +#' @rdname update_workspace_binding_bindings +#' @alias workspaceBindingsUpdateBindings #' @export -workspaceBindingsUpdateBindings <- function(client, securable_type, securable_name, +update_workspace_binding_bindings <- function(client, securable_type, securable_name, add = NULL, remove = NULL) { body <- list(add = add, remove = remove) client$do("PATCH", paste("/api/2.1/unity-catalog/bindings/", securable_type, "/", securable_name, sep = ""), body = body) } +#' @rdname update_workspace_binding_bindings +#' @export +workspaceBindingsUpdateBindings <- update_workspace_binding_bindings + diff --git a/R/workspace_conf.R b/R/workspace_conf.R index b16e4e7f..8b21386c 100755 --- a/R/workspace_conf.R +++ b/R/workspace_conf.R @@ -10,12 +10,17 @@ NULL #' #' @param keys Required. This field has no description yet. #' -#' @rdname workspaceConfGetStatus +#' @rdname get_workspace_conf_status +#' @alias workspaceConfGetStatus #' @export -workspaceConfGetStatus <- function(client, keys) { +get_workspace_conf_status <- function(client, keys) { query <- list(keys = keys) client$do("GET", "/api/2.0/workspace-conf", query = query) } + +#' @rdname get_workspace_conf_status +#' @export +workspaceConfGetStatus <- get_workspace_conf_status #' Enable/disable features. #' #' Sets the configuration status for a workspace, including enabling or disabling it. #' @param client Required. Instance of DatabricksClient() #' #' -#' @rdname workspaceConfSetStatus +#' @rdname set_workspace_conf_status +#' @alias workspaceConfSetStatus #' @export -workspaceConfSetStatus <- function(client) { +set_workspace_conf_status <- function(client) { client$do("PATCH", "/api/2.0/workspace-conf") } +#' @rdname set_workspace_conf_status +#' @export +workspaceConfSetStatus <- set_workspace_conf_status + diff --git a/README.md b/README.md index 681e205d..35e24078 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ It's recommended that you authenticate via the `.Renviron` file using `DATABRICKS_HOST` and `DATABRICKS_TOKEN` environment variables. You can also use [Databricks CLI Configuration Profiles](https://docs.databricks.com/dev-tools/auth.html#databricks-configuration-profiles) and `DATABRICKS_CONFIG_FILE` or `DATABRICKS_CONFIG_PROFILE` environment variables, but only [PAT Authentication](https://docs.databricks.com/dev-tools/auth.html#databricks-personal-access-token-authentication) works at the moment. If you need more authentication methods, please fork this GitHub repository and send a pull request with the feature suggestion. An example of overriding the authentication profile follows; look at `databricks auth profiles` to see which ones are working:
-``` +```r client <- DatabricksClient(profile="your-cli-profile") ``` @@ -21,9 +21,9 @@ Databricks SDK for R comes with all public [workspace-level APIs](https://docs.d library(dplyr) library(databricks) client <- DatabricksClient() -running <- clustersList(client) %>% filter(state == 'RUNNING') -context <- commandExecutionCreateAndWait(client, cluster_id=running$cluster_id, language='python') -res <- commandExecutionExecuteAndWait(client, cluster_id=running$cluster_id, context_id=context$id, language='sql', command='show tables') +running <- list_clusters(client) %>% filter(state == 'RUNNING') +context <- create_command_execution_and_wait(client, cluster_id=running$cluster_id, language='python') +res <- execute_command_and_wait(client, cluster_id=running$cluster_id, context_id=context$id, language='sql', command='show tables') res ``` @@ -32,7 +32,7 @@ res All `list` methods (and those that return any list of results) consistently return a `data.frame` of all entries from all pages, regardless of the underlying implementation. ```r -> clustersList(client)[1:10,c("cluster_id", "cluster_name", "state")] +> list_clusters(client)[1:10,c("cluster_id", "cluster_name", "state")] cluster_id cluster_name state 1 1109-110110-kjfoeopq DEFAULT Test Cluster TERMINATED 2 0110-221212-oqqpodoa GO_SDK Test Cluster TERMINATED @@ -46,13 +46,13 @@ All `list` methods (and those, which return any list of results), do consistentl All long-running operations poll the Databricks backend until the entity reaches the desired state: ```r -> clustersCreateAndWait(client, spark_version = "12.x-snapshot-scala2.12", cluster_name = "r-sdk-cluster", num_workers = 1, autotermination_minutes=20, node_type_id="i3.xlarge") +> create_cluster_and_wait(client, spark_version = "12.x-snapshot-scala2.12", cluster_name = "r-sdk-cluster", num_workers = 1, autotermination_minutes=20, node_type_id="i3.xlarge") PENDING: Finding instances for new nodes, acquiring more instances if necessary ``` ## Interface stability -API clients for all services are generated from specification files that are synchronized from the main platform. Databricks may have minor [documented](https://github.com/databricks/databricks-sdk-go/blob/main/CHANGELOG.md) backward-incompatible changes, such as renaming the methods or some type names to bring more consistency. +API clients for all services are generated from specification files that are synchronized from the main platform. Databricks may have minor [documented](https://github.com/databrickslabs/databricks-sdk-r/blob/main/CHANGELOG.md) backward-incompatible changes, such as renaming the methods or some type names to bring more consistency. ## Project Support diff --git a/man/accountAccessControlProxyGetAssignableRolesForResource.Rd b/man/accountAccessControlProxyGetAssignableRolesForResource.Rd deleted file mode 100644 index e95c5e64..00000000 --- a/man/accountAccessControlProxyGetAssignableRolesForResource.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/account_access_control_proxy.R -\name{accountAccessControlProxyGetAssignableRolesForResource} -\alias{accountAccessControlProxyGetAssignableRolesForResource} -\title{Get assignable roles for a resource.} -\usage{ -accountAccessControlProxyGetAssignableRolesForResource(client, resource) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{resource}{Required.
The resource name for which assignable roles will be listed.} -} -\description{ -Gets all the roles that can be granted on an account-level resource. A role -is grantable if the rule set on the resource can contain an access rule of -the role. -} diff --git a/man/accountAccessControlProxyGetRuleSet.Rd b/man/accountAccessControlProxyGetRuleSet.Rd deleted file mode 100644 index 2a7abc1f..00000000 --- a/man/accountAccessControlProxyGetRuleSet.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/account_access_control_proxy.R -\name{accountAccessControlProxyGetRuleSet} -\alias{accountAccessControlProxyGetRuleSet} -\title{Get a rule set.} -\usage{ -accountAccessControlProxyGetRuleSet(client, name, etag) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The ruleset name associated with the request.} - -\item{etag}{Required. Etag used for versioning.} -} -\description{ -Get a rule set by its name. A rule set is always attached to a resource and -contains a list of access rules on the said resource. Currently only a -default rule set for each resource is supported. -} diff --git a/man/accountAccessControlProxyUpdateRuleSet.Rd b/man/accountAccessControlProxyUpdateRuleSet.Rd deleted file mode 100644 index 5e8d95e6..00000000 --- a/man/accountAccessControlProxyUpdateRuleSet.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/account_access_control_proxy.R -\name{accountAccessControlProxyUpdateRuleSet} -\alias{accountAccessControlProxyUpdateRuleSet} -\title{Update a rule set.} -\usage{ -accountAccessControlProxyUpdateRuleSet(client, name, rule_set) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the rule set.} - -\item{rule_set}{Required. This field has no description yet.} -} -\description{ -Replace the rules of a rule set. First, use a GET rule set request to read -the current version of the rule set before modifying it. This pattern helps -prevent conflicts between concurrent updates. -} diff --git a/man/add_dbfs_block.Rd b/man/add_dbfs_block.Rd new file mode 100644 index 00000000..8d1b17a0 --- /dev/null +++ b/man/add_dbfs_block.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dbfs.R +\name{add_dbfs_block} +\alias{add_dbfs_block} +\alias{dbfsAddBlock} +\title{Append data block.} +\usage{ +add_dbfs_block(client, handle, data) + +dbfsAddBlock(client, handle, data) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{handle}{Required. The handle on an open stream.} + +\item{data}{Required. The base64-encoded data to append to the stream.} +} +\description{ +Appends a block of data to the stream specified by the input handle. If the +handle does not exist, this call will throw an exception with +\code{RESOURCE_DOES_NOT_EXIST}. +} +\details{ +If the block of data exceeds 1 MB, this call will throw an exception with +\code{MAX_BLOCK_SIZE_EXCEEDED}. 
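Taken together with `create_dbfs()` and `close_dbfs()` (also part of this rename), `add_dbfs_block()` supports a simple chunked upload loop. A minimal sketch, assuming `create_dbfs()` mirrors the DBFS `/create` endpoint and returns a list with a `$handle` field; the local file path is hypothetical, and `base64enc` is just one way to produce the required base64 payload:

```r
library(databricks)
library(base64enc)

client <- DatabricksClient()
src <- file("local-data.bin", "rb") # hypothetical local file
stream <- create_dbfs(client, "/tmp/uploaded.bin", overwrite = TRUE)

repeat {
  # Keep each block well under the 1 MB cap to avoid MAX_BLOCK_SIZE_EXCEEDED.
  chunk <- readBin(src, what = "raw", n = 512 * 1024)
  if (length(chunk) == 0) break
  add_dbfs_block(client, handle = stream$handle, data = base64encode(chunk))
}
close(src)
close_dbfs(client, handle = stream$handle)
```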
+} diff --git a/man/add_instance_profile.Rd b/man/add_instance_profile.Rd new file mode 100644 index 00000000..b8823dde --- /dev/null +++ b/man/add_instance_profile.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/instance_profiles.R +\name{add_instance_profile} +\alias{add_instance_profile} +\alias{instanceProfilesAdd} +\title{Register an instance profile.} +\usage{ +add_instance_profile( + client, + instance_profile_arn, + iam_role_arn = NULL, + is_meta_instance_profile = NULL, + skip_validation = NULL +) + +instanceProfilesAdd( + client, + instance_profile_arn, + iam_role_arn = NULL, + is_meta_instance_profile = NULL, + skip_validation = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{instance_profile_arn}{Required. The AWS ARN of the instance profile to register with Databricks.} + +\item{iam_role_arn}{The AWS IAM role ARN of the role associated with the instance profile.} + +\item{is_meta_instance_profile}{Boolean flag indicating whether the instance profile should only be used in credential passthrough scenarios.} + +\item{skip_validation}{By default, Databricks validates that it has sufficient permissions to launch instances with the instance profile.} +} +\description{ +In the UI, you can select the instance profile when launching clusters. This +API is only available to admin users. +} diff --git a/man/alertsCreate.Rd b/man/alertsCreate.Rd deleted file mode 100644 index dc98955b..00000000 --- a/man/alertsCreate.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/alerts.R -\name{alertsCreate} -\alias{alertsCreate} -\title{Create an alert.} -\usage{ -alertsCreate(client, name, options, query_id, parent = NULL, rearm = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the alert.} - -\item{options}{Required. Alert configuration options.} - -\item{query_id}{Required. Query ID.} - -\item{parent}{The identifier of the workspace folder containing the object.} - -\item{rearm}{Number of seconds after being triggered before the alert rearms itself and can be triggered again.} -} -\description{ -Creates an alert. An alert is a Databricks SQL object that periodically runs -a query, evaluates a condition of its result, and notifies users or -notification destinations if the condition was met. -} diff --git a/man/alertsDelete.Rd b/man/alertsDelete.Rd deleted file mode 100644 index 5188f58a..00000000 --- a/man/alertsDelete.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/alerts.R -\name{alertsDelete} -\alias{alertsDelete} -\title{Delete an alert.} -\usage{ -alertsDelete(client, alert_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{alert_id}{Required. This field has no description yet.} -} -\description{ -Deletes an alert. Deleted alerts are no longer accessible and cannot be -restored. \strong{Note:} Unlike queries and dashboards, alerts cannot be moved to -the trash. -} diff --git a/man/alertsGet.Rd b/man/alertsGet.Rd deleted file mode 100644 index 76639d17..00000000 --- a/man/alertsGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/alerts.R -\name{alertsGet} -\alias{alertsGet} -\title{Get an alert.} -\usage{ -alertsGet(client, alert_id) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{alert_id}{Required. This field has no description yet.} -} -\description{ -Gets an alert. -} diff --git a/man/alertsList.Rd b/man/alertsList.Rd deleted file mode 100644 index 6b5f3827..00000000 --- a/man/alertsList.Rd +++ /dev/null @@ -1,14 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/alerts.R -\name{alertsList} -\alias{alertsList} -\title{Get alerts.} -\usage{ -alertsList(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Gets a list of alerts. -} diff --git a/man/alertsUpdate.Rd b/man/alertsUpdate.Rd deleted file mode 100644 index 1906b415..00000000 --- a/man/alertsUpdate.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/alerts.R -\name{alertsUpdate} -\alias{alertsUpdate} -\title{Update an alert.} -\usage{ -alertsUpdate(client, alert_id, name, options, query_id, rearm = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{alert_id}{Required. This field has no description yet.} - -\item{name}{Required. Name of the alert.} - -\item{options}{Required. Alert configuration options.} - -\item{query_id}{Required. Query ID.} - -\item{rearm}{Number of seconds after being triggered before the alert rearms itself and can be triggered again.} -} -\description{ -Updates an alert. -} diff --git a/man/all_cluster_library_statuses.Rd b/man/all_cluster_library_statuses.Rd new file mode 100644 index 00000000..c1b12f0d --- /dev/null +++ b/man/all_cluster_library_statuses.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/libraries.R +\name{all_cluster_library_statuses} +\alias{all_cluster_library_statuses} +\alias{librariesAllClusterStatuses} +\title{Get all statuses.} +\usage{ +all_cluster_library_statuses(client) + +librariesAllClusterStatuses(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +Get the status of all libraries on all clusters. A status will be available +for all libraries installed on this cluster via the API or the libraries UI +as well as libraries set to be installed on all clusters via the libraries +UI. +} diff --git a/man/approve_model_transition_request.Rd b/man/approve_model_transition_request.Rd new file mode 100644 index 00000000..10a5f788 --- /dev/null +++ b/man/approve_model_transition_request.Rd @@ -0,0 +1,41 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{approve_model_transition_request} +\alias{approve_model_transition_request} +\alias{modelRegistryApproveTransitionRequest} +\title{Approve transition request.} +\usage{ +approve_model_transition_request( + client, + name, + version, + stage, + archive_existing_versions, + comment = NULL +) + +modelRegistryApproveTransitionRequest( + client, + name, + version, + stage, + archive_existing_versions, + comment = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the model.} + +\item{version}{Required. Version of the model.} + +\item{stage}{Required. Target stage of the transition.} + +\item{archive_existing_versions}{Required. Specifies whether to archive all current model versions in the target stage.} + +\item{comment}{User-provided comment on the action.} +} +\description{ +Approves a model version stage transition request. 
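A short usage sketch for the call documented above; the model name, version, and comment are hypothetical, and `Production` is one of the standard model registry stages:

```r
library(databricks)

client <- DatabricksClient()
approve_model_transition_request(client,
  name = "my-model",
  version = "2",
  stage = "Production",
  archive_existing_versions = TRUE,
  comment = "Validated on the holdout set")

# The camelCase spelling is kept as an alias, so this is equivalent:
# modelRegistryApproveTransitionRequest(client, name = "my-model", ...)
```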
+} diff --git a/man/appsCreate.Rd b/man/appsCreate.Rd deleted file mode 100644 index 89f00487..00000000 --- a/man/appsCreate.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/apps.R -\name{appsCreate} -\alias{appsCreate} -\title{Create and deploy an application.} -\usage{ -appsCreate(client, manifest, resources = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{manifest}{Required. Manifest that specifies the application requirements.} - -\item{resources}{Information passed at app deployment time to fulfill app dependencies.} -} -\description{ -Creates and deploys an application. -} diff --git a/man/appsDeleteApp.Rd b/man/appsDeleteApp.Rd deleted file mode 100644 index 7d7d0ad4..00000000 --- a/man/appsDeleteApp.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/apps.R -\name{appsDeleteApp} -\alias{appsDeleteApp} -\title{Delete an application.} -\usage{ -appsDeleteApp(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of an application.} -} -\description{ -Delete an application definition -} diff --git a/man/appsGetApp.Rd b/man/appsGetApp.Rd deleted file mode 100644 index 8e174175..00000000 --- a/man/appsGetApp.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/apps.R -\name{appsGetApp} -\alias{appsGetApp} -\title{Get definition for an application.} -\usage{ -appsGetApp(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of an application.} -} -\description{ -Get an application definition -} diff --git a/man/appsGetAppDeploymentStatus.Rd b/man/appsGetAppDeploymentStatus.Rd deleted file mode 100644 index 55b193f8..00000000 --- a/man/appsGetAppDeploymentStatus.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/apps.R -\name{appsGetAppDeploymentStatus} -\alias{appsGetAppDeploymentStatus} -\title{Get deployment status for an application.} -\usage{ -appsGetAppDeploymentStatus(client, deployment_id, include_app_log = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{deployment_id}{Required. The deployment id for an application.} - -\item{include_app_log}{Boolean flag to include application logs.} -} -\description{ -Get deployment status for an application -} diff --git a/man/appsGetApps.Rd b/man/appsGetApps.Rd deleted file mode 100644 index 083b3c04..00000000 --- a/man/appsGetApps.Rd +++ /dev/null @@ -1,14 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/apps.R -\name{appsGetApps} -\alias{appsGetApps} -\title{List all applications.} -\usage{ -appsGetApps(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -List all available applications -} diff --git a/man/appsGetEvents.Rd b/man/appsGetEvents.Rd deleted file mode 100644 index 4777bbbb..00000000 --- a/man/appsGetEvents.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/apps.R -\name{appsGetEvents} -\alias{appsGetEvents} -\title{Get deployment events for an application.} -\usage{ -appsGetEvents(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. 
The name of an application.} -} -\description{ -Get deployment events for an application -} diff --git a/man/artifactAllowlistsGet.Rd b/man/artifactAllowlistsGet.Rd deleted file mode 100644 index d63426f4..00000000 --- a/man/artifactAllowlistsGet.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/artifact_allowlists.R -\name{artifactAllowlistsGet} -\alias{artifactAllowlistsGet} -\title{Get an artifact allowlist.} -\usage{ -artifactAllowlistsGet(client, artifact_type) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{artifact_type}{Required. The artifact type of the allowlist.} -} -\description{ -Get the artifact allowlist of a certain artifact type. The caller must be a -metastore admin or have the \strong{MANAGE ALLOWLIST} privilege on the metastore. -} diff --git a/man/artifactAllowlistsUpdate.Rd b/man/artifactAllowlistsUpdate.Rd deleted file mode 100644 index e02e71dd..00000000 --- a/man/artifactAllowlistsUpdate.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/artifact_allowlists.R -\name{artifactAllowlistsUpdate} -\alias{artifactAllowlistsUpdate} -\title{Set an artifact allowlist.} -\usage{ -artifactAllowlistsUpdate(client, artifact_type, artifact_matchers) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{artifact_type}{Required. The artifact type of the allowlist.} - -\item{artifact_matchers}{Required. A list of allowed artifact match patterns.} -} -\description{ -Set the artifact allowlist of a certain artifact type. The whole artifact -allowlist is replaced with the new allowlist. The caller must be a metastore -admin or have the \strong{MANAGE ALLOWLIST} privilege on the metastore. -} diff --git a/man/assign_metastore.Rd b/man/assign_metastore.Rd new file mode 100644 index 00000000..d596f9d3 --- /dev/null +++ b/man/assign_metastore.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/metastores.R +\name{assign_metastore} +\alias{assign_metastore} +\alias{metastoresAssign} +\title{Create an assignment.} +\usage{ +assign_metastore(client, workspace_id, metastore_id, default_catalog_name) + +metastoresAssign(client, workspace_id, metastore_id, default_catalog_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{workspace_id}{Required. A workspace ID.} + +\item{metastore_id}{Required. The unique ID of the metastore.} + +\item{default_catalog_name}{Required. The name of the default catalog in the metastore.} +} +\description{ +Creates a new metastore assignment. If an assignment for the same +\strong{workspace_id} exists, it will be overwritten by the new \strong{metastore_id} +and \strong{default_catalog_name}. The caller must be an account admin. +} diff --git a/man/automaticClusterUpdateGet.Rd b/man/automaticClusterUpdateGet.Rd deleted file mode 100644 index 432c83fd..00000000 --- a/man/automaticClusterUpdateGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/automatic_cluster_update.R -\name{automaticClusterUpdateGet} -\alias{automaticClusterUpdateGet} -\title{Get the automatic cluster update setting.} -\usage{ -automaticClusterUpdateGet(client, etag = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{etag}{etag used for versioning.} -} -\description{ -Gets the automatic cluster update setting. 
-} diff --git a/man/automaticClusterUpdateUpdate.Rd b/man/automaticClusterUpdateUpdate.Rd deleted file mode 100644 index 8b67120e..00000000 --- a/man/automaticClusterUpdateUpdate.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/automatic_cluster_update.R -\name{automaticClusterUpdateUpdate} -\alias{automaticClusterUpdateUpdate} -\title{Update the automatic cluster update setting.} -\usage{ -automaticClusterUpdateUpdate(client, allow_missing, setting, field_mask) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{allow_missing}{Required. This should always be set to true for Settings API.} - -\item{setting}{Required. This field has no description yet.} - -\item{field_mask}{Required. Field mask is required to be passed into the PATCH request.} -} -\description{ -Updates the automatic cluster update setting for the workspace. A fresh etag -needs to be provided in \code{PATCH} requests (as part of the setting field). The -etag can be retrieved by making a \code{GET} request before the \code{PATCH} request. -If the setting is updated concurrently, \code{PATCH} fails with 409 and the -request must be retried by using the fresh etag in the 409 response. -} diff --git a/man/build_serving_endpoint_logs.Rd b/man/build_serving_endpoint_logs.Rd new file mode 100644 index 00000000..41844394 --- /dev/null +++ b/man/build_serving_endpoint_logs.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{build_serving_endpoint_logs} +\alias{build_serving_endpoint_logs} +\alias{servingEndpointsBuildLogs} +\title{Get build logs for a served model.} +\usage{ +build_serving_endpoint_logs(client, name, served_model_name) + +servingEndpointsBuildLogs(client, name, served_model_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the serving endpoint that the served model belongs to.} + +\item{served_model_name}{Required. The name of the served model that build logs will be retrieved for.} +} +\description{ +Retrieves the build logs associated with the provided served model. +} diff --git a/man/cancel_command_execution.Rd b/man/cancel_command_execution.Rd new file mode 100644 index 00000000..fab834a5 --- /dev/null +++ b/man/cancel_command_execution.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/command_execution.R +\name{cancel_command_execution} +\alias{cancel_command_execution} +\alias{commandExecutionCancel} +\title{Cancel a command.} +\usage{ +cancel_command_execution( + client, + cluster_id = NULL, + command_id = NULL, + context_id = NULL +) + +commandExecutionCancel( + client, + cluster_id = NULL, + command_id = NULL, + context_id = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{This field has no description yet.} + +\item{command_id}{This field has no description yet.} + +\item{context_id}{This field has no description yet.} +} +\description{ +Cancels a currently running command within an execution context. +} +\details{ +The command ID is obtained from a prior successful call to \strong{execute}. 
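As the details above note, the command ID comes from a prior `execute` call. A sketch assuming `execute_command()` (the non-blocking counterpart of the `execute_command_and_wait()` used in the README) returns a list carrying the command `$id`; the cluster ID is hypothetical:

```r
library(databricks)

client <- DatabricksClient()
cluster_id <- "0123-456789-abcdefgh" # hypothetical
ctx <- create_command_execution_and_wait(client, cluster_id = cluster_id,
  language = "python")
cmd <- execute_command(client, cluster_id = cluster_id, context_id = ctx$id,
  language = "sql", command = "SELECT 1")
cancel_command_execution(client, cluster_id = cluster_id, command_id = cmd$id,
  context_id = ctx$id)
```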
+} diff --git a/man/cancel_command_execution_and_wait.Rd b/man/cancel_command_execution_and_wait.Rd new file mode 100644 index 00000000..65c0c7ed --- /dev/null +++ b/man/cancel_command_execution_and_wait.Rd @@ -0,0 +1,39 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/command_execution.R +\name{cancel_command_execution_and_wait} +\alias{cancel_command_execution_and_wait} +\title{Cancel a command.} +\usage{ +cancel_command_execution_and_wait( + client, + cluster_id = NULL, + command_id = NULL, + context_id = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{This field has no description yet.} + +\item{command_id}{This field has no description yet.} + +\item{context_id}{This field has no description yet.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Command Execution on Databricks reach +Cancelled state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Command Execution is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Cancels a currently running command within an execution context. + +The command ID is obtained from a prior successful call to \strong{execute}. +} diff --git a/man/cancel_job_all_runs.Rd b/man/cancel_job_all_runs.Rd new file mode 100644 index 00000000..160c0690 --- /dev/null +++ b/man/cancel_job_all_runs.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{cancel_job_all_runs} +\alias{cancel_job_all_runs} +\alias{jobsCancelAllRuns} +\title{Cancel all runs of a job.} +\usage{ +cancel_job_all_runs(client, all_queued_runs = NULL, job_id = NULL) + +jobsCancelAllRuns(client, all_queued_runs = NULL, job_id = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{all_queued_runs}{Optional boolean parameter to cancel all queued runs.} + +\item{job_id}{The canonical identifier of the job to cancel all runs of.} +} +\description{ +Cancels all active runs of a job. The runs are canceled asynchronously, so it +doesn't prevent new runs from being started. +} diff --git a/man/cancel_job_run.Rd b/man/cancel_job_run.Rd new file mode 100644 index 00000000..56fdea1a --- /dev/null +++ b/man/cancel_job_run.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{cancel_job_run} +\alias{cancel_job_run} +\alias{jobsCancelRun} +\title{Cancel a run.} +\usage{ +cancel_job_run(client, run_id) + +jobsCancelRun(client, run_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. This field is required.} +} +\description{ +Cancels a job run or a task run. The run is canceled asynchronously, so it +may still be running when this request completes. 
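A minimal sketch of the blocking variant documented above; the run ID is hypothetical, and the default 20-minute timeout is raised via the `timeout` parameter:

```r
library(databricks)

client <- DatabricksClient()
# Blocks until the run reaches TERMINATED or SKIPPED, reporting progress to the console.
cancel_job_run_and_wait(client, run_id = 123456, timeout = 30)
```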
+} diff --git a/man/cancel_job_run_and_wait.Rd b/man/cancel_job_run_and_wait.Rd new file mode 100644 index 00000000..3dd585e1 --- /dev/null +++ b/man/cancel_job_run_and_wait.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{cancel_job_run_and_wait} +\alias{cancel_job_run_and_wait} +\title{Cancel a run.} +\usage{ +cancel_job_run_and_wait(client, run_id, timeout = 20, callback = cli_reporter) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. This field is required.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Jobs on Databricks reach +TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Jobs is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Cancels a job run or a task run. The run is canceled asynchronously, so it +may still be running when this request completes. +} diff --git a/man/cancel_lakehouse_monitor_refresh.Rd b/man/cancel_lakehouse_monitor_refresh.Rd new file mode 100644 index 00000000..d1a451b8 --- /dev/null +++ b/man/cancel_lakehouse_monitor_refresh.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/lakehouse_monitors.R +\name{cancel_lakehouse_monitor_refresh} +\alias{cancel_lakehouse_monitor_refresh} +\alias{lakehouseMonitorsCancelRefresh} +\title{Cancel refresh.} +\usage{ +cancel_lakehouse_monitor_refresh(client, full_name, refresh_id) + +lakehouseMonitorsCancelRefresh(client, full_name, refresh_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the table.} + +\item{refresh_id}{Required. ID of the refresh.} +} +\description{ +Cancel an active monitor refresh for the given refresh ID. +} +\details{ +The caller must either: 1. be an owner of the table's parent catalog 2. have +\strong{USE_CATALOG} on the table's parent catalog and be an owner of the table's +parent schema 3. have the following permissions: - \strong{USE_CATALOG} on the +table's parent catalog - \strong{USE_SCHEMA} on the table's parent schema - be an +owner of the table + +Additionally, the call must be made from the workspace where the monitor was +created. +} diff --git a/man/cancel_statement_execution.Rd b/man/cancel_statement_execution.Rd new file mode 100644 index 00000000..83570a94 --- /dev/null +++ b/man/cancel_statement_execution.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/statement_execution.R +\name{cancel_statement_execution} +\alias{cancel_statement_execution} +\alias{statementExecutionCancelExecution} +\title{Cancel statement execution.} +\usage{ +cancel_statement_execution(client, statement_id) + +statementExecutionCancelExecution(client, statement_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{statement_id}{Required. The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls.} +} +\description{ +Requests that an executing statement be canceled. Callers must poll for +status to see the terminal state. 
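Since the cancel request is asynchronous, the caller still has to poll for a terminal state, as the description says. A sketch with a hypothetical statement ID:

```r
library(databricks)

client <- DatabricksClient()
# The ID returned when the statement was originally submitted (hypothetical value):
cancel_statement_execution(client,
  statement_id = "01eef4d0-0000-0000-0000-000000000000")
```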
+} diff --git a/man/catalogsCreate.Rd b/man/catalogsCreate.Rd deleted file mode 100644 index 9d428ff1..00000000 --- a/man/catalogsCreate.Rd +++ /dev/null @@ -1,41 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/catalogs.R -\name{catalogsCreate} -\alias{catalogsCreate} -\title{Create a catalog.} -\usage{ -catalogsCreate( - client, - name, - comment = NULL, - connection_name = NULL, - options = NULL, - properties = NULL, - provider_name = NULL, - share_name = NULL, - storage_root = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of catalog.} - -\item{comment}{User-provided free-form text description.} - -\item{connection_name}{The name of the connection to an external data source.} - -\item{options}{A map of key-value properties attached to the securable.} - -\item{properties}{A map of key-value properties attached to the securable.} - -\item{provider_name}{The name of delta sharing provider.} - -\item{share_name}{The name of the share under the share provider.} - -\item{storage_root}{Storage root URL for managed tables within catalog.} -} -\description{ -Creates a new catalog instance in the parent metastore if the caller is a -metastore admin or has the \strong{CREATE_CATALOG} privilege. -} diff --git a/man/catalogsDelete.Rd b/man/catalogsDelete.Rd deleted file mode 100644 index 3c2bb0f2..00000000 --- a/man/catalogsDelete.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/catalogs.R -\name{catalogsDelete} -\alias{catalogsDelete} -\title{Delete a catalog.} -\usage{ -catalogsDelete(client, name, force = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the catalog.} - -\item{force}{Force deletion even if the catalog is not empty.} -} -\description{ -Deletes the catalog that matches the supplied name. The caller must be a -metastore admin or the owner of the catalog. -} diff --git a/man/catalogsGet.Rd b/man/catalogsGet.Rd deleted file mode 100644 index 682a861f..00000000 --- a/man/catalogsGet.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/catalogs.R -\name{catalogsGet} -\alias{catalogsGet} -\title{Get a catalog.} -\usage{ -catalogsGet(client, name, include_browse = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the catalog.} - -\item{include_browse}{Whether to include catalogs in the response for which the principal can only access selective metadata for.} -} -\description{ -Gets the specified catalog in a metastore. The caller must be a metastore -admin, the owner of the catalog, or a user that has the \strong{USE_CATALOG} -privilege set for their account. -} diff --git a/man/catalogsList.Rd b/man/catalogsList.Rd deleted file mode 100644 index 511053d6..00000000 --- a/man/catalogsList.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/catalogs.R -\name{catalogsList} -\alias{catalogsList} -\title{List catalogs.} -\usage{ -catalogsList(client, include_browse = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{include_browse}{Whether to include catalogs in the response for which the principal can only access selective metadata for.} -} -\value{ -\code{data.frame} with all of the response pages. 
-} -\description{ -Gets an array of catalogs in the metastore. If the caller is the metastore -admin, all catalogs will be retrieved. Otherwise, only catalogs owned by the -caller (or for which the caller has the \strong{USE_CATALOG} privilege) will be -retrieved. There is no guarantee of a specific ordering of the elements in -the array. -} diff --git a/man/catalogsUpdate.Rd b/man/catalogsUpdate.Rd deleted file mode 100644 index f4ebd583..00000000 --- a/man/catalogsUpdate.Rd +++ /dev/null @@ -1,39 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/catalogs.R -\name{catalogsUpdate} -\alias{catalogsUpdate} -\title{Update a catalog.} -\usage{ -catalogsUpdate( - client, - name, - comment = NULL, - enable_predictive_optimization = NULL, - isolation_mode = NULL, - new_name = NULL, - owner = NULL, - properties = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the catalog.} - -\item{comment}{User-provided free-form text description.} - -\item{enable_predictive_optimization}{Whether predictive optimization should be enabled for this object and objects under it.} - -\item{isolation_mode}{Whether the current securable is accessible from all workspaces or a specific set of workspaces.} - -\item{new_name}{New name for the catalog.} - -\item{owner}{Username of current owner of catalog.} - -\item{properties}{A map of key-value properties attached to the securable.} -} -\description{ -Updates the catalog that matches the supplied name. The caller must be either -the owner of the catalog, or a metastore admin (when changing the owner field -of the catalog). -} diff --git a/man/change_cluster_owner.Rd b/man/change_cluster_owner.Rd new file mode 100644 index 00000000..8dba52f6 --- /dev/null +++ b/man/change_cluster_owner.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{change_cluster_owner} +\alias{change_cluster_owner} +\alias{clustersChangeOwner} +\title{Change cluster owner.} +\usage{ +change_cluster_owner(client, cluster_id, owner_username) + +clustersChangeOwner(client, cluster_id, owner_username) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. \if{html}{\out{}}.} + +\item{owner_username}{Required. New owner of the cluster_id after this RPC.} +} +\description{ +Change the owner of the cluster. You must be an admin and the cluster must be +terminated to perform this operation. The service principal application ID +can be supplied as an argument to \code{owner_username}. +} diff --git a/man/cleanRoomsCreate.Rd b/man/cleanRoomsCreate.Rd deleted file mode 100644 index 9ce937e8..00000000 --- a/man/cleanRoomsCreate.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clean_rooms.R -\name{cleanRoomsCreate} -\alias{cleanRoomsCreate} -\title{Create a clean room.} -\usage{ -cleanRoomsCreate(client, name, remote_detailed_info, comment = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the clean room.} - -\item{remote_detailed_info}{Required. Central clean room details.} - -\item{comment}{User-provided free-form text description.} -} -\description{ -Creates a new clean room with specified colaborators. The caller must be a -metastore admin or have the \strong{CREATE_CLEAN_ROOM} privilege on the metastore. 
-} diff --git a/man/cleanRoomsDelete.Rd b/man/cleanRoomsDelete.Rd deleted file mode 100644 index 2a347899..00000000 --- a/man/cleanRoomsDelete.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clean_rooms.R -\name{cleanRoomsDelete} -\alias{cleanRoomsDelete} -\title{Delete a clean room.} -\usage{ -cleanRoomsDelete(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the clean room.} -} -\description{ -Deletes a data object clean room from the metastore. The caller must be an -owner of the clean room. -} diff --git a/man/cleanRoomsGet.Rd b/man/cleanRoomsGet.Rd deleted file mode 100644 index c589a56d..00000000 --- a/man/cleanRoomsGet.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clean_rooms.R -\name{cleanRoomsGet} -\alias{cleanRoomsGet} -\title{Get a clean room.} -\usage{ -cleanRoomsGet(client, name, include_remote_details = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the clean room.} - -\item{include_remote_details}{Whether to include remote details (central) on the clean room.} -} -\description{ -Gets a data object clean room from the metastore. The caller must be a -metastore admin or the owner of the clean room. -} diff --git a/man/cleanRoomsList.Rd b/man/cleanRoomsList.Rd deleted file mode 100644 index 06157b06..00000000 --- a/man/cleanRoomsList.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clean_rooms.R -\name{cleanRoomsList} -\alias{cleanRoomsList} -\title{List clean rooms.} -\usage{ -cleanRoomsList(client, max_results = NULL, page_token = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{max_results}{Maximum number of clean rooms to return.} - -\item{page_token}{Opaque pagination token to go to next page based on previous query.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets an array of data object clean rooms from the metastore. The caller must -be a metastore admin or the owner of the clean room. There is no guarantee of -a specific ordering of the elements in the array. -} diff --git a/man/cleanRoomsUpdate.Rd b/man/cleanRoomsUpdate.Rd deleted file mode 100644 index c0dae441..00000000 --- a/man/cleanRoomsUpdate.Rd +++ /dev/null @@ -1,43 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clean_rooms.R -\name{cleanRoomsUpdate} -\alias{cleanRoomsUpdate} -\title{Update a clean room.} -\usage{ -cleanRoomsUpdate( - client, - name, - catalog_updates = NULL, - comment = NULL, - owner = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the clean room.} - -\item{catalog_updates}{Array of shared data object updates.} - -\item{comment}{User-provided free-form text description.} - -\item{owner}{Username of current owner of clean room.} -} -\description{ -Updates the clean room with the changes and data objects in the request. The -caller must be the owner of the clean room or a metastore admin. -} -\details{ -When the caller is a metastore admin, only the \strong{owner} field can be -updated. - -In the case that the clean room name is changed \strong{updateCleanRoom} requires -that the caller is both the clean room owner and a metastore admin. 
- -For each table that is added through this method, the clean room owner must -also have \strong{SELECT} privilege on the table. The privilege must be maintained -indefinitely for recipients to be able to access the table. Typically, you -should use a group as the clean room owner. - -Table removals through \strong{update} do not require additional privileges. -} diff --git a/man/close_dbfs.Rd b/man/close_dbfs.Rd new file mode 100644 index 00000000..8a66465b --- /dev/null +++ b/man/close_dbfs.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dbfs.R +\name{close_dbfs} +\alias{close_dbfs} +\alias{dbfsClose} +\title{Close the stream.} +\usage{ +close_dbfs(client, handle) + +dbfsClose(client, handle) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{handle}{Required. The handle on an open stream.} +} +\description{ +Closes the stream specified by the input handle. If the handle does not +exist, this call throws an exception with \code{RESOURCE_DOES_NOT_EXIST}. +} diff --git a/man/clusterPoliciesCreate.Rd b/man/clusterPoliciesCreate.Rd deleted file mode 100644 index 84b2ca28..00000000 --- a/man/clusterPoliciesCreate.Rd +++ /dev/null @@ -1,37 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/cluster_policies.R -\name{clusterPoliciesCreate} -\alias{clusterPoliciesCreate} -\title{Create a new policy.} -\usage{ -clusterPoliciesCreate( - client, - name, - definition = NULL, - description = NULL, - libraries = NULL, - max_clusters_per_user = NULL, - policy_family_definition_overrides = NULL, - policy_family_id = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Cluster Policy name requested by the user.} - -\item{definition}{Policy definition document expressed in \href{https://docs.databricks.com/administration-guide/clusters/policy-definition.html}{Databricks Cluster Policy Definition Language}.} - -\item{description}{Additional human-readable description of the cluster policy.} - -\item{libraries}{A list of libraries to be installed on the next cluster restart that uses this policy.} - -\item{max_clusters_per_user}{Max number of clusters per user that can be active using this policy.} - -\item{policy_family_definition_overrides}{Policy definition JSON document expressed in \href{https://docs.databricks.com/administration-guide/clusters/policy-definition.html}{Databricks Policy Definition Language}.} - -\item{policy_family_id}{ID of the policy family.} -} -\description{ -Creates a new policy with prescribed settings. -} diff --git a/man/clusterPoliciesDelete.Rd b/man/clusterPoliciesDelete.Rd deleted file mode 100644 index e221143c..00000000 --- a/man/clusterPoliciesDelete.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/cluster_policies.R -\name{clusterPoliciesDelete} -\alias{clusterPoliciesDelete} -\title{Delete a cluster policy.} -\usage{ -clusterPoliciesDelete(client, policy_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{policy_id}{Required. The ID of the policy to delete.} -} -\description{ -Delete a policy for a cluster. Clusters governed by this policy can still -run, but cannot be edited. 
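close_dbfs() above is the terminal step of the DBFS streaming-handle flow. A hedged sketch of that flow, assuming companion create_dbfs() and add_dbfs_block() helpers exist under the same flat naming scheme and that the create response exposes the handle as $handle; neither is documented on this page:

    client <- DatabricksClient()
    # create_dbfs()/add_dbfs_block() and the $handle field are assumptions here.
    h <- create_dbfs(client, path = "/tmp/example.txt", overwrite = TRUE)
    add_dbfs_block(client, handle = h$handle, data = "aGVsbG8=")  # base64 payload
    # Documented above: closing a handle that does not exist raises
    # RESOURCE_DOES_NOT_EXIST, so close exactly once.
    close_dbfs(client, handle = h$handle)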
-} diff --git a/man/clusterPoliciesEdit.Rd b/man/clusterPoliciesEdit.Rd deleted file mode 100644 index 145a8963..00000000 --- a/man/clusterPoliciesEdit.Rd +++ /dev/null @@ -1,41 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/cluster_policies.R -\name{clusterPoliciesEdit} -\alias{clusterPoliciesEdit} -\title{Update a cluster policy.} -\usage{ -clusterPoliciesEdit( - client, - policy_id, - name, - definition = NULL, - description = NULL, - libraries = NULL, - max_clusters_per_user = NULL, - policy_family_definition_overrides = NULL, - policy_family_id = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{policy_id}{Required. The ID of the policy to update.} - -\item{name}{Required. Cluster Policy name requested by the user.} - -\item{definition}{Policy definition document expressed in \href{https://docs.databricks.com/administration-guide/clusters/policy-definition.html}{Databricks Cluster Policy Definition Language}.} - -\item{description}{Additional human-readable description of the cluster policy.} - -\item{libraries}{A list of libraries to be installed on the next cluster restart that uses this policy.} - -\item{max_clusters_per_user}{Max number of clusters per user that can be active using this policy.} - -\item{policy_family_definition_overrides}{Policy definition JSON document expressed in \href{https://docs.databricks.com/administration-guide/clusters/policy-definition.html}{Databricks Policy Definition Language}.} - -\item{policy_family_id}{ID of the policy family.} -} -\description{ -Update an existing policy for a cluster. This operation may make some clusters -governed by the previous policy invalid. -} diff --git a/man/clusterPoliciesGet.Rd b/man/clusterPoliciesGet.Rd deleted file mode 100644 index 9f68d75f..00000000 --- a/man/clusterPoliciesGet.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/cluster_policies.R -\name{clusterPoliciesGet} -\alias{clusterPoliciesGet} -\title{Get a cluster policy.} -\usage{ -clusterPoliciesGet(client, policy_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{policy_id}{Required. Canonical unique identifier for the cluster policy.} -} -\description{ -Get a cluster policy entity. Creation and editing is available to admins -only. -} diff --git a/man/clusterPoliciesGetPermissionLevels.Rd b/man/clusterPoliciesGetPermissionLevels.Rd deleted file mode 100644 index 3afaba26..00000000 --- a/man/clusterPoliciesGetPermissionLevels.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/cluster_policies.R -\name{clusterPoliciesGetPermissionLevels} -\alias{clusterPoliciesGetPermissionLevels} -\title{Get cluster policy permission levels.} -\usage{ -clusterPoliciesGetPermissionLevels(client, cluster_policy_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_policy_id}{Required. The cluster policy for which to get or manage permissions.} -} -\description{ -Gets the permission levels that a user can have on an object.
-} diff --git a/man/clusterPoliciesGetPermissions.Rd b/man/clusterPoliciesGetPermissions.Rd deleted file mode 100644 index 2be07a19..00000000 --- a/man/clusterPoliciesGetPermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/cluster_policies.R -\name{clusterPoliciesGetPermissions} -\alias{clusterPoliciesGetPermissions} -\title{Get cluster policy permissions.} -\usage{ -clusterPoliciesGetPermissions(client, cluster_policy_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_policy_id}{Required. The cluster policy for which to get or manage permissions.} -} -\description{ -Gets the permissions of a cluster policy. Cluster policies can inherit -permissions from their root object. -} diff --git a/man/clusterPoliciesList.Rd b/man/clusterPoliciesList.Rd deleted file mode 100644 index cdb681ea..00000000 --- a/man/clusterPoliciesList.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/cluster_policies.R -\name{clusterPoliciesList} -\alias{clusterPoliciesList} -\title{List cluster policies.} -\usage{ -clusterPoliciesList(client, sort_column = NULL, sort_order = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{sort_column}{The cluster policy attribute to sort by.} - -\item{sort_order}{The order in which the policies get listed.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Returns a list of policies accessible by the requesting user. -} diff --git a/man/clusterPoliciesSetPermissions.Rd b/man/clusterPoliciesSetPermissions.Rd deleted file mode 100644 index e73aff44..00000000 --- a/man/clusterPoliciesSetPermissions.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/cluster_policies.R -\name{clusterPoliciesSetPermissions} -\alias{clusterPoliciesSetPermissions} -\title{Set cluster policy permissions.} -\usage{ -clusterPoliciesSetPermissions( - client, - cluster_policy_id, - access_control_list = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_policy_id}{Required. The cluster policy for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets permissions on a cluster policy. Cluster policies can inherit -permissions from their root object. -} diff --git a/man/clusterPoliciesUpdatePermissions.Rd b/man/clusterPoliciesUpdatePermissions.Rd deleted file mode 100644 index bdcf4cca..00000000 --- a/man/clusterPoliciesUpdatePermissions.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/cluster_policies.R -\name{clusterPoliciesUpdatePermissions} -\alias{clusterPoliciesUpdatePermissions} -\title{Update cluster policy permissions.} -\usage{ -clusterPoliciesUpdatePermissions( - client, - cluster_policy_id, - access_control_list = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_policy_id}{Required. The cluster policy for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Updates the permissions on a cluster policy. Cluster policies can inherit -permissions from their root object. 
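The four cluster-policy permission endpoints above share one calling shape. A sketch of reading the available levels and then granting one; since access_control_list is undocumented here ("This field has no description yet"), its structure below is an assumption modeled on the REST permissions payload:

    client <- DatabricksClient()
    levels <- clusterPoliciesGetPermissionLevels(client, cluster_policy_id = "ABC123")
    # ACL shape assumed: a list of entries pairing a principal with a level.
    clusterPoliciesSetPermissions(client, cluster_policy_id = "ABC123",
      access_control_list = list(
        list(user_name = "someone@example.com", permission_level = "CAN_USE")))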
-} diff --git a/man/cluster_library_status.Rd b/man/cluster_library_status.Rd new file mode 100644 index 00000000..9e9cbb6f --- /dev/null +++ b/man/cluster_library_status.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/libraries.R +\name{cluster_library_status} +\alias{cluster_library_status} +\alias{librariesClusterStatus} +\title{Get status.} +\usage{ +cluster_library_status(client, cluster_id) + +librariesClusterStatus(client, cluster_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. Unique identifier of the cluster whose status should be retrieved.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Get the status of libraries on a cluster. A status will be available for all +libraries installed on this cluster via the API or the libraries UI as well +as libraries set to be installed on all clusters via the libraries UI. The +order of returned libraries will be as follows. +} +\details{ +\enumerate{ +\item Libraries set to be installed on this cluster will be returned first. +Within this group, the final order will be the order in which the libraries were +added to the cluster. +\item Libraries set to be installed on all clusters are returned next. Within +this group there is no order guarantee. +\item Libraries that were previously requested on this cluster or on all +clusters, but now marked for removal. Within this group there is no order +guarantee. +} +} diff --git a/man/clustersChangeOwner.Rd b/man/clustersChangeOwner.Rd deleted file mode 100644 index b92b3d18..00000000 --- a/man/clustersChangeOwner.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersChangeOwner} -\alias{clustersChangeOwner} -\title{Change cluster owner.} -\usage{ -clustersChangeOwner(client, cluster_id, owner_username) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. \if{html}{\out{}}.} - -\item{owner_username}{Required. New owner of the cluster_id after this RPC.} -} -\description{ -Change the owner of the cluster. You must be an admin and the cluster must be -terminated to perform this operation. The service principal application ID -can be supplied as an argument to \code{owner_username}. -} diff --git a/man/clustersCreate.Rd b/man/clustersCreate.Rd deleted file mode 100644 index 1d4a4297..00000000 --- a/man/clustersCreate.Rd +++ /dev/null @@ -1,108 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersCreate} -\alias{clustersCreate} -\title{Create new cluster.} -\usage{ -clustersCreate( - client, - spark_version, - apply_policy_default_values = NULL, - autoscale = NULL, - autotermination_minutes = NULL, - aws_attributes = NULL, - azure_attributes = NULL, - cluster_log_conf = NULL, - cluster_name = NULL, - cluster_source = NULL, - custom_tags = NULL, - data_security_mode = NULL, - docker_image = NULL, - driver_instance_pool_id = NULL, - driver_node_type_id = NULL, - enable_elastic_disk = NULL, - enable_local_disk_encryption = NULL, - gcp_attributes = NULL, - init_scripts = NULL, - instance_pool_id = NULL, - node_type_id = NULL, - num_workers = NULL, - policy_id = NULL, - runtime_engine = NULL, - single_user_name = NULL, - spark_conf = NULL, - spark_env_vars = NULL, - ssh_public_keys = NULL, - workload_type = NULL -) -} -\arguments{ -\item{client}{Required.
Instance of DatabricksClient()} - -\item{spark_version}{Required. The Spark version of the cluster, e.g.} - -\item{apply_policy_default_values}{This field has no description yet.} - -\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} - -\item{autotermination_minutes}{Automatically terminates the cluster after it is inactive for this time in minutes.} - -\item{aws_attributes}{Attributes related to clusters running on Amazon Web Services.} - -\item{azure_attributes}{Attributes related to clusters running on Microsoft Azure.} - -\item{cluster_log_conf}{The configuration for delivering spark logs to a long-term storage destination.} - -\item{cluster_name}{Cluster name requested by the user.} - -\item{cluster_source}{Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.} - -\item{custom_tags}{Additional tags for cluster resources.} - -\item{data_security_mode}{Data security mode decides what data governance model to use when accessing data from a cluster.} - -\item{docker_image}{This field has no description yet.} - -\item{driver_instance_pool_id}{The optional ID of the instance pool to which the driver of the cluster belongs.} - -\item{driver_node_type_id}{The node type of the Spark driver.} - -\item{enable_elastic_disk}{Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.} - -\item{enable_local_disk_encryption}{Whether to enable LUKS on cluster VMs' local disks.} - -\item{gcp_attributes}{Attributes related to clusters running on Google Cloud Platform.} - -\item{init_scripts}{The configuration for storing init scripts.} - -\item{instance_pool_id}{The optional ID of the instance pool to which the cluster belongs.} - -\item{node_type_id}{This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.} - -\item{num_workers}{Number of worker nodes that this cluster should have.} - -\item{policy_id}{The ID of the cluster policy used to create the cluster if applicable.} - -\item{runtime_engine}{Decides which runtime engine to be used, e.g.} - -\item{single_user_name}{Single user name if data_security_mode is \code{SINGLE_USER}.} - -\item{spark_conf}{An object containing a set of optional, user-specified Spark configuration key-value pairs.} - -\item{spark_env_vars}{An object containing a set of optional, user-specified environment variable key-value pairs.} - -\item{ssh_public_keys}{SSH public key contents that will be added to each Spark node in this cluster.} - -\item{workload_type}{This field has no description yet.} -} -\description{ -Creates a new Spark cluster. This method will acquire new instances from the -cloud provider if necessary. Note: Databricks may not be able to acquire some -of the requested nodes, due to cloud provider limitations (account limits, -spot price, etc.) or transient network issues. -} -\details{ -If Databricks acquires at least 85\% of the requested on-demand nodes, cluster -creation will succeed. Otherwise the cluster will terminate with an -informative error message.
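Of the clustersCreate() arguments above, only spark_version is required; the rest are optional and serialized only when supplied. A minimal sketch, with workspace-specific placeholder strings for the version and node type:

    client <- DatabricksClient()
    resp <- clustersCreate(client,
      spark_version = "13.3.x-scala2.12",  # placeholder; see clustersSparkVersions()
      cluster_name = "docs-demo",
      node_type_id = "i3.xlarge",          # placeholder; see clustersListNodeTypes()
      num_workers = 1,
      autotermination_minutes = 30)
    # Creation is asynchronous; the AndWait variant below blocks until RUNNING.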
-} diff --git a/man/clustersCreateAndWait.Rd b/man/clustersCreateAndWait.Rd deleted file mode 100644 index 5f18438d..00000000 --- a/man/clustersCreateAndWait.Rd +++ /dev/null @@ -1,119 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersCreateAndWait} -\alias{clustersCreateAndWait} -\title{Create new cluster.} -\usage{ -clustersCreateAndWait( - client, - spark_version, - apply_policy_default_values = NULL, - autoscale = NULL, - autotermination_minutes = NULL, - aws_attributes = NULL, - azure_attributes = NULL, - cluster_log_conf = NULL, - cluster_name = NULL, - cluster_source = NULL, - custom_tags = NULL, - data_security_mode = NULL, - docker_image = NULL, - driver_instance_pool_id = NULL, - driver_node_type_id = NULL, - enable_elastic_disk = NULL, - enable_local_disk_encryption = NULL, - gcp_attributes = NULL, - init_scripts = NULL, - instance_pool_id = NULL, - node_type_id = NULL, - num_workers = NULL, - policy_id = NULL, - runtime_engine = NULL, - single_user_name = NULL, - spark_conf = NULL, - spark_env_vars = NULL, - ssh_public_keys = NULL, - workload_type = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{spark_version}{Required. The Spark version of the cluster, e.g.} - -\item{apply_policy_default_values}{This field has no description yet.} - -\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} - -\item{autotermination_minutes}{Automatically terminates the cluster after it is inactive for this time in minutes.} - -\item{aws_attributes}{Attributes related to clusters running on Amazon Web Services.} - -\item{azure_attributes}{Attributes related to clusters running on Microsoft Azure.} - -\item{cluster_log_conf}{The configuration for delivering spark logs to a long-term storage destination.} - -\item{cluster_name}{Cluster name requested by the user.} - -\item{cluster_source}{Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.} - -\item{custom_tags}{Additional tags for cluster resources.} - -\item{data_security_mode}{Data security mode decides what data governance model to use when accessing data from a cluster.} - -\item{docker_image}{This field has no description yet.} - -\item{driver_instance_pool_id}{The optional ID of the instance pool to which the driver of the cluster belongs.} - -\item{driver_node_type_id}{The node type of the Spark driver.} - -\item{enable_elastic_disk}{Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.} - -\item{enable_local_disk_encryption}{Whether to enable LUKS on cluster VMs' local disks.} - -\item{gcp_attributes}{Attributes related to clusters running on Google Cloud Platform.} - -\item{init_scripts}{The configuration for storing init scripts.} - -\item{instance_pool_id}{The optional ID of the instance pool to which the cluster belongs.} - -\item{node_type_id}{This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.} - -\item{num_workers}{Number of worker nodes that this cluster should have.} - -\item{policy_id}{The ID of the cluster policy used to create the cluster if applicable.} - -\item{runtime_engine}{Decides which runtime engine to be used, e.g.} - -\item{single_user_name}{Single user name if data_security_mode is
\code{SINGLE_USER}.} - -\item{spark_conf}{An object containing a set of optional, user-specified Spark configuration key-value pairs.} - -\item{spark_env_vars}{An object containing a set of optional, user-specified environment variable key-value pairs.} - -\item{ssh_public_keys}{SSH public key contents that will be added to each Spark node in this cluster.} - -\item{workload_type}{This field has no description yet.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Clusters on Databricks reach -RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Clusters is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Creates a new Spark cluster. This method will acquire new instances from the -cloud provider if necessary. Note: Databricks may not be able to acquire some -of the requested nodes, due to cloud provider limitations (account limits, -spot price, etc.) or transient network issues. - -If Databricks acquires at least 85\% of the requested on-demand nodes, cluster -creation will succeed. Otherwise the cluster will terminate with an -informative error message. -} diff --git a/man/clustersDelete.Rd b/man/clustersDelete.Rd deleted file mode 100644 index 06b90ac1..00000000 --- a/man/clustersDelete.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersDelete} -\alias{clustersDelete} -\title{Terminate cluster.} -\usage{ -clustersDelete(client, cluster_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The cluster to be terminated.} -} -\description{ -Terminates the Spark cluster with the specified ID. The cluster is removed -asynchronously. Once the termination has completed, the cluster will be in a -\code{TERMINATED} state. If the cluster is already in a \code{TERMINATING} or -\code{TERMINATED} state, nothing will happen. -} diff --git a/man/clustersDeleteAndWait.Rd b/man/clustersDeleteAndWait.Rd deleted file mode 100644 index 2156eed5..00000000 --- a/man/clustersDeleteAndWait.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersDeleteAndWait} -\alias{clustersDeleteAndWait} -\title{Terminate cluster.} -\usage{ -clustersDeleteAndWait( - client, - cluster_id, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The cluster to be terminated.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Clusters on Databricks reach -TERMINATED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Clusters is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Terminates the Spark cluster with the specified ID. The cluster is removed -asynchronously. Once the termination has completed, the cluster will be in a -\code{TERMINATED} state. 
If the cluster is already in a \code{TERMINATING} or -\code{TERMINATED} state, nothing will happen. -} diff --git a/man/clustersEdit.Rd b/man/clustersEdit.Rd deleted file mode 100644 index 1b212a9d..00000000 --- a/man/clustersEdit.Rd +++ /dev/null @@ -1,115 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersEdit} -\alias{clustersEdit} -\title{Update cluster configuration.} -\usage{ -clustersEdit( - client, - cluster_id, - spark_version, - apply_policy_default_values = NULL, - autoscale = NULL, - autotermination_minutes = NULL, - aws_attributes = NULL, - azure_attributes = NULL, - cluster_log_conf = NULL, - cluster_name = NULL, - cluster_source = NULL, - custom_tags = NULL, - data_security_mode = NULL, - docker_image = NULL, - driver_instance_pool_id = NULL, - driver_node_type_id = NULL, - enable_elastic_disk = NULL, - enable_local_disk_encryption = NULL, - gcp_attributes = NULL, - init_scripts = NULL, - instance_pool_id = NULL, - node_type_id = NULL, - num_workers = NULL, - policy_id = NULL, - runtime_engine = NULL, - single_user_name = NULL, - spark_conf = NULL, - spark_env_vars = NULL, - ssh_public_keys = NULL, - workload_type = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. ID of the cluster.} - -\item{spark_version}{Required. The Spark version of the cluster, e.g.} - -\item{apply_policy_default_values}{This field has no description yet.} - -\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} - -\item{autotermination_minutes}{Automatically terminates the cluster after it is inactive for this time in minutes.} - -\item{aws_attributes}{Attributes related to clusters running on Amazon Web Services.} - -\item{azure_attributes}{Attributes related to clusters running on Microsoft Azure.} - -\item{cluster_log_conf}{The configuration for delivering spark logs to a long-term storage destination.} - -\item{cluster_name}{Cluster name requested by the user.} - -\item{cluster_source}{Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.} - -\item{custom_tags}{Additional tags for cluster resources.} - -\item{data_security_mode}{Data security mode decides what data governance model to use when accessing data from a cluster.} - -\item{docker_image}{This field has no description yet.} - -\item{driver_instance_pool_id}{The optional ID of the instance pool to which the driver of the cluster belongs.} - -\item{driver_node_type_id}{The node type of the Spark driver.} - -\item{enable_elastic_disk}{Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.} - -\item{enable_local_disk_encryption}{Whether to enable LUKS on cluster VMs' local disks.} - -\item{gcp_attributes}{Attributes related to clusters running on Google Cloud Platform.} - -\item{init_scripts}{The configuration for storing init scripts.} - -\item{instance_pool_id}{The optional ID of the instance pool to which the cluster belongs.} - -\item{node_type_id}{This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.} - -\item{num_workers}{Number of worker nodes that this cluster should have.} - -\item{policy_id}{The ID of the cluster policy used to create the cluster if applicable.} - -\item{runtime_engine}{Decides which runtime engine to be used,
e.g.} - -\item{single_user_name}{Single user name if data_security_mode is \code{SINGLE_USER}.} - -\item{spark_conf}{An object containing a set of optional, user-specified Spark configuration key-value pairs.} - -\item{spark_env_vars}{An object containing a set of optional, user-specified environment variable key-value pairs.} - -\item{ssh_public_keys}{SSH public key contents that will be added to each Spark node in this cluster.} - -\item{workload_type}{This field has no description yet.} -} -\description{ -Updates the configuration of a cluster to match the provided attributes and -size. A cluster can be updated if it is in a \code{RUNNING} or \code{TERMINATED} state. -} -\details{ -If a cluster is updated while in a \code{RUNNING} state, it will be restarted so -that the new attributes can take effect. - -If a cluster is updated while in a \code{TERMINATED} state, it will remain -\code{TERMINATED}. The next time it is started using the \code{clusters/start} API, the -new attributes will take effect. Any attempt to update a cluster in any other -state will be rejected with an \code{INVALID_STATE} error code. - -Clusters created by the Databricks Jobs service cannot be edited. -} diff --git a/man/clustersEditAndWait.Rd b/man/clustersEditAndWait.Rd deleted file mode 100644 index b25a0ef3..00000000 --- a/man/clustersEditAndWait.Rd +++ /dev/null @@ -1,126 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersEditAndWait} -\alias{clustersEditAndWait} -\title{Update cluster configuration.} -\usage{ -clustersEditAndWait( - client, - cluster_id, - spark_version, - apply_policy_default_values = NULL, - autoscale = NULL, - autotermination_minutes = NULL, - aws_attributes = NULL, - azure_attributes = NULL, - cluster_log_conf = NULL, - cluster_name = NULL, - cluster_source = NULL, - custom_tags = NULL, - data_security_mode = NULL, - docker_image = NULL, - driver_instance_pool_id = NULL, - driver_node_type_id = NULL, - enable_elastic_disk = NULL, - enable_local_disk_encryption = NULL, - gcp_attributes = NULL, - init_scripts = NULL, - instance_pool_id = NULL, - node_type_id = NULL, - num_workers = NULL, - policy_id = NULL, - runtime_engine = NULL, - single_user_name = NULL, - spark_conf = NULL, - spark_env_vars = NULL, - ssh_public_keys = NULL, - workload_type = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. ID of the cluster.} - -\item{spark_version}{Required.
The Spark version of the cluster, e.g.} - -\item{apply_policy_default_values}{This field has no description yet.} - -\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} - -\item{autotermination_minutes}{Automatically terminates the cluster after it is inactive for this time in minutes.} - -\item{aws_attributes}{Attributes related to clusters running on Amazon Web Services.} - -\item{azure_attributes}{Attributes related to clusters running on Microsoft Azure.} - -\item{cluster_log_conf}{The configuration for delivering spark logs to a long-term storage destination.} - -\item{cluster_name}{Cluster name requested by the user.} - -\item{cluster_source}{Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.} - -\item{custom_tags}{Additional tags for cluster resources.} - -\item{data_security_mode}{Data security mode decides what data governance model to use when accessing data from a cluster.} - -\item{docker_image}{This field has no description yet.} - -\item{driver_instance_pool_id}{The optional ID of the instance pool to which the driver of the cluster belongs.} - -\item{driver_node_type_id}{The node type of the Spark driver.} - -\item{enable_elastic_disk}{Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.} - -\item{enable_local_disk_encryption}{Whether to enable LUKS on cluster VMs' local disks.} - -\item{gcp_attributes}{Attributes related to clusters running on Google Cloud Platform.} - -\item{init_scripts}{The configuration for storing init scripts.} - -\item{instance_pool_id}{The optional ID of the instance pool to which the cluster belongs.} - -\item{node_type_id}{This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.} - -\item{num_workers}{Number of worker nodes that this cluster should have.} - -\item{policy_id}{The ID of the cluster policy used to create the cluster if applicable.} - -\item{runtime_engine}{Decides which runtime engine to be used, e.g.} - -\item{single_user_name}{Single user name if data_security_mode is \code{SINGLE_USER}.} - -\item{spark_conf}{An object containing a set of optional, user-specified Spark configuration key-value pairs.} - -\item{spark_env_vars}{An object containing a set of optional, user-specified environment variable key-value pairs.} - -\item{ssh_public_keys}{SSH public key contents that will be added to each Spark node in this cluster.} - -\item{workload_type}{This field has no description yet.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Clusters on Databricks reach -RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Clusters is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Updates the configuration of a cluster to match the provided attributes and -size. A cluster can be updated if it is in a \code{RUNNING} or \code{TERMINATED} state. - -If a cluster is updated while in a \code{RUNNING} state, it will be restarted so -that the new attributes can take effect.
- -If a cluster is updated while in a \code{TERMINATED} state, it will remain -\code{TERMINATED}. The next time it is started using the \code{clusters/start} API, the -new attributes will take effect. Any attempt to update a cluster in any other -state will be rejected with an \code{INVALID_STATE} error code. - -Clusters created by the Databricks Jobs service cannot be edited. -} diff --git a/man/clustersEvents.Rd b/man/clustersEvents.Rd deleted file mode 100644 index c0da57ae..00000000 --- a/man/clustersEvents.Rd +++ /dev/null @@ -1,42 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersEvents} -\alias{clustersEvents} -\title{List cluster activity events.} -\usage{ -clustersEvents( - client, - cluster_id, - end_time = NULL, - event_types = NULL, - limit = NULL, - offset = NULL, - order = NULL, - start_time = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The ID of the cluster to retrieve events about.} - -\item{end_time}{The end time in epoch milliseconds.} - -\item{event_types}{An optional set of event types to filter on.} - -\item{limit}{The maximum number of events to include in a page of events.} - -\item{offset}{The offset in the result set.} - -\item{order}{The order to list events in; either 'ASC' or 'DESC'.} - -\item{start_time}{The start time in epoch milliseconds.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Retrieves a list of events about the activity of a cluster. This API is -paginated. If there are more events to read, the response includes all the -parameters necessary to request the next page of events. -} diff --git a/man/clustersGet.Rd b/man/clustersGet.Rd deleted file mode 100644 index 9334b8fa..00000000 --- a/man/clustersGet.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersGet} -\alias{clustersGet} -\title{Get cluster info.} -\usage{ -clustersGet(client, cluster_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The cluster about which to retrieve information.} -} -\description{ -Retrieves the information for a cluster given its identifier. Clusters can be -described while they are running, or up to 60 days after they are terminated. -} diff --git a/man/clustersGetPermissionLevels.Rd b/man/clustersGetPermissionLevels.Rd deleted file mode 100644 index bd2efc22..00000000 --- a/man/clustersGetPermissionLevels.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersGetPermissionLevels} -\alias{clustersGetPermissionLevels} -\title{Get cluster permission levels.} -\usage{ -clustersGetPermissionLevels(client, cluster_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The cluster for which to get or manage permissions.} -} -\description{ -Gets the permission levels that a user can have on an object.
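clustersEvents() above is one of the paginated endpoints whose pages are flattened into a single data.frame. A sketch of pulling the most recent events; the "TERMINATING" event-type string is an assumption about the API's enum values:

    client <- DatabricksClient()
    ev <- clustersEvents(client, cluster_id = "1234-567890-abcde123",
                         order = "DESC", limit = 50,
                         event_types = list("TERMINATING"))  # enum value assumed
    nrow(ev)  # all response pages arrive as one data.frame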
-} diff --git a/man/clustersGetPermissions.Rd b/man/clustersGetPermissions.Rd deleted file mode 100644 index 58d202f8..00000000 --- a/man/clustersGetPermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersGetPermissions} -\alias{clustersGetPermissions} -\title{Get cluster permissions.} -\usage{ -clustersGetPermissions(client, cluster_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The cluster for which to get or manage permissions.} -} -\description{ -Gets the permissions of a cluster. Clusters can inherit permissions from -their root object. -} diff --git a/man/clustersList.Rd b/man/clustersList.Rd deleted file mode 100644 index c75c35fc..00000000 --- a/man/clustersList.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersList} -\alias{clustersList} -\title{List all clusters.} -\usage{ -clustersList(client, can_use_client = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{can_use_client}{Filter clusters based on what type of client it can be used for.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Returns information about all pinned clusters, active clusters, up to 200 of -the most recently terminated all-purpose clusters in the past 30 days, and up -to 30 of the most recently terminated job clusters in the past 30 days. -} -\details{ -For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated -all-purpose clusters in the past 30 days, and 50 terminated job clusters in -the past 30 days, then this API returns the 1 pinned cluster, 4 active -clusters, all 45 terminated all-purpose clusters, and the 30 most recently -terminated job clusters. -} diff --git a/man/clustersListNodeTypes.Rd b/man/clustersListNodeTypes.Rd deleted file mode 100644 index d6a0a883..00000000 --- a/man/clustersListNodeTypes.Rd +++ /dev/null @@ -1,15 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersListNodeTypes} -\alias{clustersListNodeTypes} -\title{List node types.} -\usage{ -clustersListNodeTypes(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Returns a list of supported Spark node types. These node types can be used to -launch a cluster. -} diff --git a/man/clustersListZones.Rd b/man/clustersListZones.Rd deleted file mode 100644 index ef1fde04..00000000 --- a/man/clustersListZones.Rd +++ /dev/null @@ -1,15 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersListZones} -\alias{clustersListZones} -\title{List availability zones.} -\usage{ -clustersListZones(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Returns a list of availability zones where clusters can be created (for -example, us-west-2a). These zones can be used to launch a cluster.
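The three listing endpoints above supply the usual inputs to a create call; a short sketch using only documented signatures:

    client <- DatabricksClient()
    zones <- clustersListZones(client)      # e.g. us-west-2a on AWS
    nodes <- clustersListNodeTypes(client)  # candidate node_type_id values
    all_clusters <- clustersList(client)    # data.frame across all response pages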
-} diff --git a/man/clustersPermanentDelete.Rd b/man/clustersPermanentDelete.Rd deleted file mode 100644 index 07a64ffb..00000000 --- a/man/clustersPermanentDelete.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersPermanentDelete} -\alias{clustersPermanentDelete} -\title{Permanently delete cluster.} -\usage{ -clustersPermanentDelete(client, cluster_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The cluster to be deleted.} -} -\description{ -Permanently deletes a Spark cluster. This cluster is terminated and resources -are asynchronously removed. -} -\details{ -In addition, users will no longer see permanently deleted clusters in the -cluster list, and API users can no longer perform any action on permanently -deleted clusters. -} diff --git a/man/clustersPin.Rd b/man/clustersPin.Rd deleted file mode 100644 index 5ea11d3e..00000000 --- a/man/clustersPin.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersPin} -\alias{clustersPin} -\title{Pin cluster.} -\usage{ -clustersPin(client, cluster_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. \if{html}{\out{}}.} -} -\description{ -Pinning a cluster ensures that the cluster will always be returned by the -ListClusters API. Pinning a cluster that is already pinned will have no -effect. This API can only be called by workspace admins. -} diff --git a/man/clustersResize.Rd b/man/clustersResize.Rd deleted file mode 100644 index 156cb57c..00000000 --- a/man/clustersResize.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersResize} -\alias{clustersResize} -\title{Resize cluster.} -\usage{ -clustersResize(client, cluster_id, autoscale = NULL, num_workers = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The cluster to be resized.} - -\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} - -\item{num_workers}{Number of worker nodes that this cluster should have.} -} -\description{ -Resizes a cluster to have a desired number of workers. This will fail unless -the cluster is in a \code{RUNNING} state. -} diff --git a/man/clustersResizeAndWait.Rd b/man/clustersResizeAndWait.Rd deleted file mode 100644 index 5d4781eb..00000000 --- a/man/clustersResizeAndWait.Rd +++ /dev/null @@ -1,38 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersResizeAndWait} -\alias{clustersResizeAndWait} -\title{Resize cluster.} -\usage{ -clustersResizeAndWait( - client, - cluster_id, - autoscale = NULL, - num_workers = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The cluster to be resized.} - -\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} - -\item{num_workers}{Number of worker nodes that this cluster should have.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. 
By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Clusters on Databricks reach -RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Clusters is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Resizes a cluster to have a desired number of workers. This will fail unless -the cluster is in a \code{RUNNING} state. -} diff --git a/man/clustersRestart.Rd b/man/clustersRestart.Rd deleted file mode 100644 index 83ed3707..00000000 --- a/man/clustersRestart.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersRestart} -\alias{clustersRestart} -\title{Restart cluster.} -\usage{ -clustersRestart(client, cluster_id, restart_user = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The cluster to be started.} - -\item{restart_user}{\if{html}{\out{}}.} -} -\description{ -Restarts a Spark cluster with the supplied ID. If the cluster is not -currently in a \code{RUNNING} state, nothing will happen. -} diff --git a/man/clustersRestartAndWait.Rd b/man/clustersRestartAndWait.Rd deleted file mode 100644 index b2962000..00000000 --- a/man/clustersRestartAndWait.Rd +++ /dev/null @@ -1,35 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersRestartAndWait} -\alias{clustersRestartAndWait} -\title{Restart cluster.} -\usage{ -clustersRestartAndWait( - client, - cluster_id, - restart_user = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The cluster to be started.} - -\item{restart_user}{\if{html}{\out{}}.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Clusters on Databricks reach -RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Clusters is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Restarts a Spark cluster with the supplied ID. If the cluster is not -currently in a \code{RUNNING} state, nothing will happen. -} diff --git a/man/clustersSetPermissions.Rd b/man/clustersSetPermissions.Rd deleted file mode 100644 index 4f6f75f5..00000000 --- a/man/clustersSetPermissions.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersSetPermissions} -\alias{clustersSetPermissions} -\title{Set cluster permissions.} -\usage{ -clustersSetPermissions(client, cluster_id, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The cluster for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets permissions on a cluster. Clusters can inherit permissions from their -root object. 
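clustersResizeAndWait() above layers polling on top of the plain resize call. A sketch that widens the default 20-minute window; the cluster id is a placeholder:

    client <- DatabricksClient()
    # Blocks until the cluster is back in RUNNING state, up to 30 minutes,
    # reporting progress through the default cli_reporter callback.
    clustersResizeAndWait(client, cluster_id = "1234-567890-abcde123",
                          num_workers = 8, timeout = 30)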
-} diff --git a/man/clustersSparkVersions.Rd b/man/clustersSparkVersions.Rd deleted file mode 100644 index 495da3ca..00000000 --- a/man/clustersSparkVersions.Rd +++ /dev/null @@ -1,15 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersSparkVersions} -\alias{clustersSparkVersions} -\title{List available Spark versions.} -\usage{ -clustersSparkVersions(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Returns the list of available Spark versions. These versions can be used to -launch a cluster. -} diff --git a/man/clustersStart.Rd b/man/clustersStart.Rd deleted file mode 100644 index e6617bb0..00000000 --- a/man/clustersStart.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersStart} -\alias{clustersStart} -\title{Start terminated cluster.} -\usage{ -clustersStart(client, cluster_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The cluster to be started.} -} -\description{ -Starts a terminated Spark cluster with the supplied ID. This works similarly to -\code{createCluster} except: -} -\details{ -\itemize{ -\item The previous cluster id and attributes are preserved. -\item The cluster starts with the last specified cluster size. -\item If the previous cluster was an autoscaling cluster, the current cluster starts with the minimum number of nodes. -\item If the cluster is not currently in a \code{TERMINATED} state, nothing will happen. -\item Clusters launched to run a job cannot be started. -} -} diff --git a/man/clustersStartAndWait.Rd b/man/clustersStartAndWait.Rd deleted file mode 100644 index 3d0abf48..00000000 --- a/man/clustersStartAndWait.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersStartAndWait} -\alias{clustersStartAndWait} -\title{Start terminated cluster.} -\usage{ -clustersStartAndWait(client, cluster_id, timeout = 20, callback = cli_reporter) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The cluster to be started.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Clusters on Databricks reach -RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Clusters is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Starts a terminated Spark cluster with the supplied ID. This works similarly to -\code{createCluster} except: -\itemize{ -\item The previous cluster id and attributes are preserved. -\item The cluster starts with the last specified cluster size. -\item If the previous cluster was an autoscaling cluster, the current cluster starts with the minimum number of nodes. -\item If the cluster is not currently in a \code{TERMINATED} state, nothing will happen. -\item Clusters launched to run a job cannot be started.
-} -} diff --git a/man/clustersUnpin.Rd b/man/clustersUnpin.Rd deleted file mode 100644 index 22598835..00000000 --- a/man/clustersUnpin.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersUnpin} -\alias{clustersUnpin} -\title{Unpin cluster.} -\usage{ -clustersUnpin(client, cluster_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. \if{html}{\out{}}.} -} -\description{ -Unpinning a cluster will allow the cluster to eventually be removed from the -ListClusters API. Unpinning a cluster that is not pinned will have no effect. -This API can only be called by workspace admins. -} diff --git a/man/clustersUpdatePermissions.Rd b/man/clustersUpdatePermissions.Rd deleted file mode 100644 index f2dcc403..00000000 --- a/man/clustersUpdatePermissions.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/clusters.R -\name{clustersUpdatePermissions} -\alias{clustersUpdatePermissions} -\title{Update cluster permissions.} -\usage{ -clustersUpdatePermissions(client, cluster_id, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. The cluster for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Updates the permissions on a cluster. Clusters can inherit permissions from -their root object. -} diff --git a/man/commandExecutionCancel.Rd b/man/commandExecutionCancel.Rd deleted file mode 100644 index 7d4c9391..00000000 --- a/man/commandExecutionCancel.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/command_execution.R -\name{commandExecutionCancel} -\alias{commandExecutionCancel} -\title{Cancel a command.} -\usage{ -commandExecutionCancel( - client, - cluster_id = NULL, - command_id = NULL, - context_id = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{This field has no description yet.} - -\item{command_id}{This field has no description yet.} - -\item{context_id}{This field has no description yet.} -} -\description{ -Cancels a currently running command within an execution context. -} -\details{ -The command ID is obtained from a prior successful call to \strong{execute}. -} diff --git a/man/commandExecutionCancelAndWait.Rd b/man/commandExecutionCancelAndWait.Rd deleted file mode 100644 index f5ea2904..00000000 --- a/man/commandExecutionCancelAndWait.Rd +++ /dev/null @@ -1,39 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/command_execution.R -\name{commandExecutionCancelAndWait} -\alias{commandExecutionCancelAndWait} -\title{Cancel a command.} -\usage{ -commandExecutionCancelAndWait( - client, - cluster_id = NULL, - command_id = NULL, - context_id = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{This field has no description yet.} - -\item{command_id}{This field has no description yet.} - -\item{context_id}{This field has no description yet.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. 
By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Command Execution on Databricks reaches -Cancelled state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Command Execution is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Cancels a currently running command within an execution context. - -The command ID is obtained from a prior successful call to \strong{execute}. -} diff --git a/man/commandExecutionCommandStatus.Rd b/man/commandExecutionCommandStatus.Rd deleted file mode 100644 index dce67ac8..00000000 --- a/man/commandExecutionCommandStatus.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/command_execution.R -\name{commandExecutionCommandStatus} -\alias{commandExecutionCommandStatus} -\title{Get command info.} -\usage{ -commandExecutionCommandStatus(client, cluster_id, context_id, command_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. This field has no description yet.} - -\item{context_id}{Required. This field has no description yet.} - -\item{command_id}{Required. This field has no description yet.} -} -\description{ -Gets the status of and, if available, the results from a currently executing -command. -} -\details{ -The command ID is obtained from a prior successful call to \strong{execute}. -} diff --git a/man/commandExecutionContextStatus.Rd b/man/commandExecutionContextStatus.Rd deleted file mode 100644 index f1fa685c..00000000 --- a/man/commandExecutionContextStatus.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/command_execution.R -\name{commandExecutionContextStatus} -\alias{commandExecutionContextStatus} -\title{Get status.} -\usage{ -commandExecutionContextStatus(client, cluster_id, context_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. This field has no description yet.} - -\item{context_id}{Required. This field has no description yet.} -} -\description{ -Gets the status for an execution context. -} diff --git a/man/commandExecutionCreate.Rd b/man/commandExecutionCreate.Rd deleted file mode 100644 index 9fd7e516..00000000 --- a/man/commandExecutionCreate.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/command_execution.R -\name{commandExecutionCreate} -\alias{commandExecutionCreate} -\title{Create an execution context.} -\usage{ -commandExecutionCreate(client, cluster_id = NULL, language = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Running cluster id.} - -\item{language}{This field has no description yet.} -} -\description{ -Creates an execution context for running cluster commands. -} -\details{ -If successful, this method returns the ID of the new execution context.
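commandExecutionCreateAndWait() returns once the context is usable. A sketch of creating a context and checking it, assuming the response exposes the context id as $id and that "python" is an accepted language value; neither is documented here:

    client <- DatabricksClient()
    ctx <- commandExecutionCreateAndWait(client,
      cluster_id = "1234-567890-abcde123", language = "python")  # language value assumed
    commandExecutionContextStatus(client,
      cluster_id = "1234-567890-abcde123", context_id = ctx$id)  # $id field assumed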
-} diff --git a/man/commandExecutionCreateAndWait.Rd b/man/commandExecutionCreateAndWait.Rd deleted file mode 100644 index e09f15fb..00000000 --- a/man/commandExecutionCreateAndWait.Rd +++ /dev/null @@ -1,36 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/command_execution.R -\name{commandExecutionCreateAndWait} -\alias{commandExecutionCreateAndWait} -\title{Create an execution context.} -\usage{ -commandExecutionCreateAndWait( - client, - cluster_id = NULL, - language = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Running cluster id.} - -\item{language}{This field has no description yet.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Command Execution on Databricks reaches -Running state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Command Execution is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Creates an execution context for running cluster commands. - -If successful, this method returns the ID of the new execution context. -} diff --git a/man/commandExecutionDestroy.Rd b/man/commandExecutionDestroy.Rd deleted file mode 100644 index 2eb3e069..00000000 --- a/man/commandExecutionDestroy.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/command_execution.R -\name{commandExecutionDestroy} -\alias{commandExecutionDestroy} -\title{Delete an execution context.} -\usage{ -commandExecutionDestroy(client, cluster_id, context_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. This field has no description yet.} - -\item{context_id}{Required. This field has no description yet.} -} -\description{ -Deletes an execution context. -} diff --git a/man/commandExecutionExecute.Rd b/man/commandExecutionExecute.Rd deleted file mode 100644 index e994438b..00000000 --- a/man/commandExecutionExecute.Rd +++ /dev/null @@ -1,33 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/command_execution.R -\name{commandExecutionExecute} -\alias{commandExecutionExecute} -\title{Run a command.} -\usage{ -commandExecutionExecute( - client, - cluster_id = NULL, - command = NULL, - context_id = NULL, - language = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Running cluster id.} - -\item{command}{Executable code.} - -\item{context_id}{Running context id.} - -\item{language}{This field has no description yet.} -} -\description{ -Runs a cluster command in the given execution context, using the provided -language. -} -\details{ -If successful, it returns an ID for tracking the status of the command's -execution.
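The non-blocking commandExecutionExecute() above pairs naturally with the status call that is renamed to command_execution_status() just below. Continuing the context sketch above, and again assuming the returned ids live in $id:

    client <- DatabricksClient()
    res <- commandExecutionExecute(client,
      cluster_id = "1234-567890-abcde123", context_id = ctx$id,  # ctx from the sketch above
      language = "python", command = "print(1 + 1)")
    command_execution_status(client, cluster_id = "1234-567890-abcde123",
                             context_id = ctx$id, command_id = res$id)  # $id assumed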
-} diff --git a/man/commandExecutionExecuteAndWait.Rd b/man/commandExecutionExecuteAndWait.Rd deleted file mode 100644 index 0ce9e64a..00000000 --- a/man/commandExecutionExecuteAndWait.Rd +++ /dev/null @@ -1,44 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/command_execution.R -\name{commandExecutionExecuteAndWait} -\alias{commandExecutionExecuteAndWait} -\title{Run a command.} -\usage{ -commandExecutionExecuteAndWait( - client, - cluster_id = NULL, - command = NULL, - context_id = NULL, - language = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Running cluster id.} - -\item{command}{Executable code.} - -\item{context_id}{Running context id.} - -\item{language}{This field has no description yet.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Command Execution on Databricks reach -Finished or Error state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Command Execution is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Runs a cluster command in the given execution context, using the provided -language. - -If successful, it returns an ID for tracking the status of the command's -execution. -} diff --git a/man/command_execution_status.Rd b/man/command_execution_status.Rd new file mode 100644 index 00000000..1671dcf8 --- /dev/null +++ b/man/command_execution_status.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/command_execution.R +\name{command_execution_status} +\alias{command_execution_status} +\alias{commandExecutionCommandStatus} +\title{Get command info.} +\usage{ +command_execution_status(client, cluster_id, context_id, command_id) + +commandExecutionCommandStatus(client, cluster_id, context_id, command_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. This field has no description yet.} + +\item{context_id}{Required. This field has no description yet.} + +\item{command_id}{Required. This field has no description yet.} +} +\description{ +Gets the status of and, if available, the results from a currently executing +command. +} +\details{ +The command ID is obtained from a prior successful call to \strong{execute}. +} diff --git a/man/connectionsCreate.Rd b/man/connectionsCreate.Rd deleted file mode 100644 index 71e898de..00000000 --- a/man/connectionsCreate.Rd +++ /dev/null @@ -1,39 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/connections.R -\name{connectionsCreate} -\alias{connectionsCreate} -\title{Create a connection.} -\usage{ -connectionsCreate( - client, - name, - connection_type, - options, - comment = NULL, - properties = NULL, - read_only = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the connection.} - -\item{connection_type}{Required. The type of connection.} - -\item{options}{Required. 
A map of key-value properties attached to the securable.} - -\item{comment}{User-provided free-form text description.} - -\item{properties}{An object containing map of key-value properties attached to the connection.} - -\item{read_only}{If the connection is read only.} -} -\description{ -Creates a new connection -} -\details{ -Creates a new connection to an external data source. It allows users to -specify connection details and configurations for interaction with the -external server. -} diff --git a/man/connectionsDelete.Rd b/man/connectionsDelete.Rd deleted file mode 100644 index c8d5f6d7..00000000 --- a/man/connectionsDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/connections.R -\name{connectionsDelete} -\alias{connectionsDelete} -\title{Delete a connection.} -\usage{ -connectionsDelete(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the connection to be deleted.} -} -\description{ -Deletes the connection that matches the supplied name. -} diff --git a/man/connectionsGet.Rd b/man/connectionsGet.Rd deleted file mode 100644 index 5f3d4ee7..00000000 --- a/man/connectionsGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/connections.R -\name{connectionsGet} -\alias{connectionsGet} -\title{Get a connection.} -\usage{ -connectionsGet(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the connection.} -} -\description{ -Gets a connection from it's name. -} diff --git a/man/connectionsList.Rd b/man/connectionsList.Rd deleted file mode 100644 index fc0c2597..00000000 --- a/man/connectionsList.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/connections.R -\name{connectionsList} -\alias{connectionsList} -\title{List connections.} -\usage{ -connectionsList(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -List all connections. -} diff --git a/man/connectionsUpdate.Rd b/man/connectionsUpdate.Rd deleted file mode 100644 index 817439e8..00000000 --- a/man/connectionsUpdate.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/connections.R -\name{connectionsUpdate} -\alias{connectionsUpdate} -\title{Update a connection.} -\usage{ -connectionsUpdate(client, name, options, new_name = NULL, owner = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the connection.} - -\item{options}{Required. A map of key-value properties attached to the securable.} - -\item{new_name}{New name for the connection.} - -\item{owner}{Username of current owner of the connection.} -} -\description{ -Updates the connection that matches the supplied name. 
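The connections pages removed here reappear below (man/create_connection.Rd) with both spellings documented in one file: the snake_case function plus the old camelCase name as an \alias. A hedged sketch of the equivalence, with illustrative connection options:

client <- DatabricksClient()
conn <- create_connection(
  client,
  name            = "my_postgres",     # illustrative
  connection_type = "POSTGRESQL",
  options         = list(host = "db.example.com", port = "5432")
)
# The retained alias is a plain assignment to the same function object.
identical(connectionsCreate, create_connection)  # expected TRUE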
-} diff --git a/man/context_command_execution_status.Rd b/man/context_command_execution_status.Rd new file mode 100644 index 00000000..3270f116 --- /dev/null +++ b/man/context_command_execution_status.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/command_execution.R +\name{context_command_execution_status} +\alias{context_command_execution_status} +\alias{commandExecutionContextStatus} +\title{Get status.} +\usage{ +context_command_execution_status(client, cluster_id, context_id) + +commandExecutionContextStatus(client, cluster_id, context_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. This field has no description yet.} + +\item{context_id}{Required. This field has no description yet.} +} +\description{ +Gets the status for an execution context. +} diff --git a/man/create_alert.Rd b/man/create_alert.Rd new file mode 100644 index 00000000..22206bc8 --- /dev/null +++ b/man/create_alert.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/alerts.R +\name{create_alert} +\alias{create_alert} +\alias{alertsCreate} +\title{Create an alert.} +\usage{ +create_alert(client, name, options, query_id, parent = NULL, rearm = NULL) + +alertsCreate(client, name, options, query_id, parent = NULL, rearm = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the alert.} + +\item{options}{Required. Alert configuration options.} + +\item{query_id}{Required. Query ID.} + +\item{parent}{The identifier of the workspace folder containing the object.} + +\item{rearm}{Number of seconds after being triggered before the alert rearms itself and can be triggered again.} +} +\description{ +Creates an alert. An alert is a Databricks SQL object that periodically runs +a query, evaluates a condition of its result, and notifies users or +notification destinations if the condition was met. +} diff --git a/man/create_app.Rd b/man/create_app.Rd new file mode 100644 index 00000000..f29dc430 --- /dev/null +++ b/man/create_app.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/apps.R +\name{create_app} +\alias{create_app} +\alias{appsCreate} +\title{Create and deploy an application.} +\usage{ +create_app(client, manifest, resources = NULL) + +appsCreate(client, manifest, resources = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{manifest}{Required. Manifest that specifies the application requirements.} + +\item{resources}{Information passed at app deployment time to fulfill app dependencies.} +} +\description{ +Creates and deploys an application. +} diff --git a/man/create_catalog.Rd b/man/create_catalog.Rd new file mode 100644 index 00000000..e59ad93b --- /dev/null +++ b/man/create_catalog.Rd @@ -0,0 +1,54 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/catalogs.R +\name{create_catalog} +\alias{create_catalog} +\alias{catalogsCreate} +\title{Create a catalog.} +\usage{ +create_catalog( + client, + name, + comment = NULL, + connection_name = NULL, + options = NULL, + properties = NULL, + provider_name = NULL, + share_name = NULL, + storage_root = NULL +) + +catalogsCreate( + client, + name, + comment = NULL, + connection_name = NULL, + options = NULL, + properties = NULL, + provider_name = NULL, + share_name = NULL, + storage_root = NULL +) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{name}{Required. Name of catalog.} + +\item{comment}{User-provided free-form text description.} + +\item{connection_name}{The name of the connection to an external data source.} + +\item{options}{A map of key-value properties attached to the securable.} + +\item{properties}{A map of key-value properties attached to the securable.} + +\item{provider_name}{The name of the delta sharing provider.} + +\item{share_name}{The name of the share under the share provider.} + +\item{storage_root}{Storage root URL for managed tables within catalog.} +} +\description{ +Creates a new catalog instance in the parent metastore if the caller is a +metastore admin or has the \strong{CREATE_CATALOG} privilege. +} diff --git a/man/create_clean_room.Rd b/man/create_clean_room.Rd new file mode 100644 index 00000000..4f011d3d --- /dev/null +++ b/man/create_clean_room.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clean_rooms.R +\name{create_clean_room} +\alias{create_clean_room} +\alias{cleanRoomsCreate} +\title{Create a clean room.} +\usage{ +create_clean_room(client, name, remote_detailed_info, comment = NULL) + +cleanRoomsCreate(client, name, remote_detailed_info, comment = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the clean room.} + +\item{remote_detailed_info}{Required. Central clean room details.} + +\item{comment}{User-provided free-form text description.} +} +\description{ +Creates a new clean room with specified collaborators. The caller must be a +metastore admin or have the \strong{CREATE_CLEAN_ROOM} privilege on the metastore. +} diff --git a/man/create_cluster.Rd b/man/create_cluster.Rd new file mode 100644 index 00000000..9d3329d5 --- /dev/null +++ b/man/create_cluster.Rd @@ -0,0 +1,141 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{create_cluster} +\alias{create_cluster} +\alias{clustersCreate} +\title{Create new cluster.} +\usage{ +create_cluster( + client, + spark_version, + apply_policy_default_values = NULL, + autoscale = NULL, + autotermination_minutes = NULL, + aws_attributes = NULL, + azure_attributes = NULL, + cluster_log_conf = NULL, + cluster_name = NULL, + cluster_source = NULL, + custom_tags = NULL, + data_security_mode = NULL, + docker_image = NULL, + driver_instance_pool_id = NULL, + driver_node_type_id = NULL, + enable_elastic_disk = NULL, + enable_local_disk_encryption = NULL, + gcp_attributes = NULL, + init_scripts = NULL, + instance_pool_id = NULL, + node_type_id = NULL, + num_workers = NULL, + policy_id = NULL, + runtime_engine = NULL, + single_user_name = NULL, + spark_conf = NULL, + spark_env_vars = NULL, + ssh_public_keys = NULL, + workload_type = NULL +) + +clustersCreate( + client, + spark_version, + apply_policy_default_values = NULL, + autoscale = NULL, + autotermination_minutes = NULL, + aws_attributes = NULL, + azure_attributes = NULL, + cluster_log_conf = NULL, + cluster_name = NULL, + cluster_source = NULL, + custom_tags = NULL, + data_security_mode = NULL, + docker_image = NULL, + driver_instance_pool_id = NULL, + driver_node_type_id = NULL, + enable_elastic_disk = NULL, + enable_local_disk_encryption = NULL, + gcp_attributes = NULL, + init_scripts = NULL, + instance_pool_id = NULL, + node_type_id = NULL, + num_workers = NULL, + policy_id = NULL, + runtime_engine = NULL, + single_user_name = NULL, + spark_conf = NULL, + spark_env_vars = NULL, + ssh_public_keys
= NULL, + workload_type = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{spark_version}{Required. The Spark version of the cluster, e.g.} + +\item{apply_policy_default_values}{This field has no description yet.} + +\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} + +\item{autotermination_minutes}{Automatically terminates the cluster after it is inactive for this time in minutes.} + +\item{aws_attributes}{Attributes related to clusters running on Amazon Web Services.} + +\item{azure_attributes}{Attributes related to clusters running on Microsoft Azure.} + +\item{cluster_log_conf}{The configuration for delivering Spark logs to a long-term storage destination.} + +\item{cluster_name}{Cluster name requested by the user.} + +\item{cluster_source}{Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.} + +\item{custom_tags}{Additional tags for cluster resources.} + +\item{data_security_mode}{Data security mode decides what data governance model to use when accessing data from a cluster.} + +\item{docker_image}{This field has no description yet.} + +\item{driver_instance_pool_id}{The optional ID of the instance pool to which the cluster's driver belongs.} + +\item{driver_node_type_id}{The node type of the Spark driver.} + +\item{enable_elastic_disk}{Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.} + +\item{enable_local_disk_encryption}{Whether to enable LUKS on cluster VMs' local disks.} + +\item{gcp_attributes}{Attributes related to clusters running on Google Cloud Platform.} + +\item{init_scripts}{The configuration for storing init scripts.} + +\item{instance_pool_id}{The optional ID of the instance pool to which the cluster belongs.} + +\item{node_type_id}{This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.} + +\item{num_workers}{Number of worker nodes that this cluster should have.} + +\item{policy_id}{The ID of the cluster policy used to create the cluster if applicable.} + +\item{runtime_engine}{Decides which runtime engine to use, e.g.} + +\item{single_user_name}{Single user name if data_security_mode is \code{SINGLE_USER}.} + +\item{spark_conf}{An object containing a set of optional, user-specified Spark configuration key-value pairs.} + +\item{spark_env_vars}{An object containing a set of optional, user-specified environment variable key-value pairs.} + +\item{ssh_public_keys}{SSH public key contents that will be added to each Spark node in this cluster.} + +\item{workload_type}{This field has no description yet.} +} +\description{ +Creates a new Spark cluster. This method will acquire new instances from the +cloud provider if necessary. Note: Databricks may not be able to acquire some +of the requested nodes, due to cloud provider limitations (account limits, +spot price, etc.) or transient network issues. +} +\details{ +If Databricks acquires at least 85\% of the requested on-demand nodes, cluster +creation will succeed. Otherwise the cluster will terminate with an +informative error message.
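A usage sketch for the page above; all values are illustrative, not defaults, and the camelCase clustersCreate() alias accepts the same arguments:

client <- DatabricksClient()
cluster <- create_cluster(
  client,
  spark_version = "13.3.x-scala2.12",  # illustrative runtime version
  cluster_name  = "sdk-docs-example",
  node_type_id  = "i3.xlarge",         # AWS node type; pick one per cloud
  autoscale     = list(min_workers = 1, max_workers = 3),
  autotermination_minutes = 60
)
cluster$cluster_id  # returned immediately; the cluster may still be PENDING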
+} diff --git a/man/create_cluster_and_wait.Rd b/man/create_cluster_and_wait.Rd new file mode 100644 index 00000000..518c9d59 --- /dev/null +++ b/man/create_cluster_and_wait.Rd @@ -0,0 +1,119 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{create_cluster_and_wait} +\alias{create_cluster_and_wait} +\title{Create new cluster.} +\usage{ +create_cluster_and_wait( + client, + spark_version, + apply_policy_default_values = NULL, + autoscale = NULL, + autotermination_minutes = NULL, + aws_attributes = NULL, + azure_attributes = NULL, + cluster_log_conf = NULL, + cluster_name = NULL, + cluster_source = NULL, + custom_tags = NULL, + data_security_mode = NULL, + docker_image = NULL, + driver_instance_pool_id = NULL, + driver_node_type_id = NULL, + enable_elastic_disk = NULL, + enable_local_disk_encryption = NULL, + gcp_attributes = NULL, + init_scripts = NULL, + instance_pool_id = NULL, + node_type_id = NULL, + num_workers = NULL, + policy_id = NULL, + runtime_engine = NULL, + single_user_name = NULL, + spark_conf = NULL, + spark_env_vars = NULL, + ssh_public_keys = NULL, + workload_type = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{spark_version}{Required. The Spark version of the cluster, e.g.} + +\item{apply_policy_default_values}{This field has no description yet.} + +\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} + +\item{autotermination_minutes}{Automatically terminates the cluster after it is inactive for this time in minutes.} + +\item{aws_attributes}{Attributes related to clusters running on Amazon Web Services.} + +\item{azure_attributes}{Attributes related to clusters running on Microsoft Azure.} + +\item{cluster_log_conf}{The configuration for delivering Spark logs to a long-term storage destination.} + +\item{cluster_name}{Cluster name requested by the user.} + +\item{cluster_source}{Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.} + +\item{custom_tags}{Additional tags for cluster resources.} + +\item{data_security_mode}{Data security mode decides what data governance model to use when accessing data from a cluster.} + +\item{docker_image}{This field has no description yet.} + +\item{driver_instance_pool_id}{The optional ID of the instance pool to which the cluster's driver belongs.} + +\item{driver_node_type_id}{The node type of the Spark driver.} + +\item{enable_elastic_disk}{Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.} + +\item{enable_local_disk_encryption}{Whether to enable LUKS on cluster VMs' local disks.} + +\item{gcp_attributes}{Attributes related to clusters running on Google Cloud Platform.} + +\item{init_scripts}{The configuration for storing init scripts.} + +\item{instance_pool_id}{The optional ID of the instance pool to which the cluster belongs.} + +\item{node_type_id}{This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.} + +\item{num_workers}{Number of worker nodes that this cluster should have.} + +\item{policy_id}{The ID of the cluster policy used to create the cluster if applicable.} + +\item{runtime_engine}{Decides which runtime engine to use, e.g.} + +\item{single_user_name}{Single user name if data_security_mode
is \code{SINGLE_USER}.} + +\item{spark_conf}{An object containing a set of optional, user-specified Spark configuration key-value pairs.} + +\item{spark_env_vars}{An object containing a set of optional, user-specified environment variable key-value pairs.} + +\item{ssh_public_keys}{SSH public key contents that will be added to each Spark node in this cluster.} + +\item{workload_type}{This field has no description yet.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Clusters on Databricks reach +RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Clusters is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Creates a new Spark cluster. This method will acquire new instances from the +cloud provider if necessary. Note: Databricks may not be able to acquire some +of the requested nodes, due to cloud provider limitations (account limits, +spot price, etc.) or transient network issues. + +If Databricks acquires at least 85\% of the requested on-demand nodes, cluster +creation will succeed. Otherwise the cluster will terminate with an +informative error message. +} diff --git a/man/create_cluster_policy.Rd b/man/create_cluster_policy.Rd new file mode 100644 index 00000000..1f4c16c4 --- /dev/null +++ b/man/create_cluster_policy.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/cluster_policies.R +\name{create_cluster_policy} +\alias{create_cluster_policy} +\alias{clusterPoliciesCreate} +\title{Create a new policy.} +\usage{ +create_cluster_policy( + client, + name, + definition = NULL, + description = NULL, + libraries = NULL, + max_clusters_per_user = NULL, + policy_family_definition_overrides = NULL, + policy_family_id = NULL +) + +clusterPoliciesCreate( + client, + name, + definition = NULL, + description = NULL, + libraries = NULL, + max_clusters_per_user = NULL, + policy_family_definition_overrides = NULL, + policy_family_id = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Cluster Policy name requested by the user.} + +\item{definition}{Policy definition document expressed in \href{https://docs.databricks.com/administration-guide/clusters/policy-definition.html}{Databricks Cluster Policy Definition Language}.} + +\item{description}{Additional human-readable description of the cluster policy.} + +\item{libraries}{A list of libraries to be installed on the next cluster restart that uses this policy.} + +\item{max_clusters_per_user}{Max number of clusters per user that can be active using this policy.} + +\item{policy_family_definition_overrides}{Policy definition JSON document expressed in \href{https://docs.databricks.com/administration-guide/clusters/policy-definition.html}{Databricks Policy Definition Language}.} + +\item{policy_family_id}{ID of the policy family.} +} +\description{ +Creates a new policy with prescribed settings. 
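Note that create_cluster_and_wait.Rd above carries no camelCase \alias: clustersCreateAndWait() is removed from the NAMESPACE rather than aliased. A sketch of the blocking variant, reusing the illustrative values from the create_cluster example plus the wait controls documented here:

cluster <- create_cluster_and_wait(
  client,
  spark_version = "13.3.x-scala2.12",
  cluster_name  = "sdk-docs-example",
  node_type_id  = "i3.xlarge",
  num_workers   = 2,
  timeout       = 30,           # minutes; the generated default is 20
  callback      = cli_reporter  # default console status reporter
)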
+} diff --git a/man/create_command_execution.Rd b/man/create_command_execution.Rd new file mode 100644 index 00000000..c2ab692b --- /dev/null +++ b/man/create_command_execution.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/command_execution.R +\name{create_command_execution} +\alias{create_command_execution} +\alias{commandExecutionCreate} +\title{Create an execution context.} +\usage{ +create_command_execution(client, cluster_id = NULL, language = NULL) + +commandExecutionCreate(client, cluster_id = NULL, language = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Running cluster id.} + +\item{language}{This field has no description yet.} +} +\description{ +Creates an execution context for running cluster commands. +} +\details{ +If successful, this method returns the ID of the new execution context. +} diff --git a/man/create_command_execution_and_wait.Rd b/man/create_command_execution_and_wait.Rd new file mode 100644 index 00000000..2cbe9898 --- /dev/null +++ b/man/create_command_execution_and_wait.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/command_execution.R +\name{create_command_execution_and_wait} +\alias{create_command_execution_and_wait} +\title{Create an execution context.} +\usage{ +create_command_execution_and_wait( + client, + cluster_id = NULL, + language = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Running cluster id.} + +\item{language}{This field has no description yet.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Command Execution on Databricks reach +Running state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Command Execution is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Creates an execution context for running cluster commands. + +If successful, this method returns the ID of the new execution context. +} diff --git a/man/create_connection.Rd b/man/create_connection.Rd new file mode 100644 index 00000000..e4051bd3 --- /dev/null +++ b/man/create_connection.Rd @@ -0,0 +1,50 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/connections.R +\name{create_connection} +\alias{create_connection} +\alias{connectionsCreate} +\title{Create a connection.} +\usage{ +create_connection( + client, + name, + connection_type, + options, + comment = NULL, + properties = NULL, + read_only = NULL +) + +connectionsCreate( + client, + name, + connection_type, + options, + comment = NULL, + properties = NULL, + read_only = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the connection.} + +\item{connection_type}{Required. The type of connection.} + +\item{options}{Required. 
A map of key-value properties attached to the securable.} + +\item{comment}{User-provided free-form text description.} + +\item{properties}{An object containing a map of key-value properties attached to the connection.} + +\item{read_only}{If the connection is read only.} +} +\description{ +Creates a new connection. +} +\details{ +Creates a new connection to an external data source. It allows users to +specify connection details and configurations for interaction with the +external server. +} diff --git a/man/create_dashboard.Rd b/man/create_dashboard.Rd new file mode 100644 index 00000000..3e1af1ab --- /dev/null +++ b/man/create_dashboard.Rd @@ -0,0 +1,45 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dashboards.R +\name{create_dashboard} +\alias{create_dashboard} +\alias{dashboardsCreate} +\title{Create a dashboard object.} +\usage{ +create_dashboard( + client, + name, + dashboard_filters_enabled = NULL, + is_favorite = NULL, + parent = NULL, + run_as_role = NULL, + tags = NULL +) + +dashboardsCreate( + client, + name, + dashboard_filters_enabled = NULL, + is_favorite = NULL, + parent = NULL, + run_as_role = NULL, + tags = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The title of this dashboard that appears in list views and at the top of the dashboard page.} + +\item{dashboard_filters_enabled}{Indicates whether the dashboard filters are enabled.} + +\item{is_favorite}{Indicates whether this dashboard object should appear in the current user's favorites list.} + +\item{parent}{The identifier of the workspace folder containing the object.} + +\item{run_as_role}{Sets the \strong{Run as} role for the object.} + +\item{tags}{This field has no description yet.} +} +\description{ +Create a dashboard object. +} diff --git a/man/create_dashboard_widget.Rd b/man/create_dashboard_widget.Rd new file mode 100644 index 00000000..5bd2af9e --- /dev/null +++ b/man/create_dashboard_widget.Rd @@ -0,0 +1,41 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dashboard_widgets.R +\name{create_dashboard_widget} +\alias{create_dashboard_widget} +\alias{dashboardWidgetsCreate} +\title{Add widget to a dashboard.} +\usage{ +create_dashboard_widget( + client, + dashboard_id, + options, + width, + text = NULL, + visualization_id = NULL +) + +dashboardWidgetsCreate( + client, + dashboard_id, + options, + width, + text = NULL, + visualization_id = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{dashboard_id}{Required. Dashboard ID returned by :method:dashboards/create.} + +\item{options}{Required. This field has no description yet.} + +\item{width}{Required. Width of a widget.} + +\item{text}{If this is a textbox widget, the application displays this text.} + +\item{visualization_id}{Query Visualization ID returned by :method:queryvisualizations/create.} +} +\description{ +Add widget to a dashboard. +} diff --git a/man/create_dbfs.Rd b/man/create_dbfs.Rd new file mode 100644 index 00000000..b8eab0e8 --- /dev/null +++ b/man/create_dbfs.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dbfs.R +\name{create_dbfs} +\alias{create_dbfs} +\alias{dbfsCreate} +\title{Open a stream.} +\usage{ +create_dbfs(client, path, overwrite = NULL) + +dbfsCreate(client, path, overwrite = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{path}{Required.
The path of the new file.} + +\item{overwrite}{The flag that specifies whether to overwrite existing file/files.} +} +\description{ +Opens a stream to write to a file and returns a handle to this stream. There +is a 10 minute idle timeout on this handle. If a file or directory already +exists on the given path and \strong{overwrite} is set to false, this call will +throw an exception with \code{RESOURCE_ALREADY_EXISTS}. +} +\details{ +A typical workflow for file upload would be: +\enumerate{ +\item Issue a \code{create} call and get a handle. +\item Issue one or more \code{add-block} calls with the handle you have. +\item Issue a \code{close} call with the handle you have. +} +} diff --git a/man/create_experiment.Rd b/man/create_experiment.Rd new file mode 100644 index 00000000..c5a56cd2 --- /dev/null +++ b/man/create_experiment.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{create_experiment} +\alias{create_experiment} +\alias{experimentsCreateExperiment} +\title{Create experiment.} +\usage{ +create_experiment(client, name, artifact_location = NULL, tags = NULL) + +experimentsCreateExperiment( + client, + name, + artifact_location = NULL, + tags = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Experiment name.} + +\item{artifact_location}{Location where all artifacts for the experiment are stored.} + +\item{tags}{A collection of tags to set on the experiment.} +} +\description{ +Creates an experiment with a name. Returns the ID of the newly created +experiment. Validates that another experiment with the same name does not +already exist and fails if one does. +} +\details{ +Throws \code{RESOURCE_ALREADY_EXISTS} if an experiment with the given name exists. +} diff --git a/man/create_experiment_run.Rd b/man/create_experiment_run.Rd new file mode 100644 index 00000000..5a6dddc0 --- /dev/null +++ b/man/create_experiment_run.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{create_experiment_run} +\alias{create_experiment_run} +\alias{experimentsCreateRun} +\title{Create a run.} +\usage{ +create_experiment_run( + client, + experiment_id = NULL, + start_time = NULL, + tags = NULL, + user_id = NULL +) + +experimentsCreateRun( + client, + experiment_id = NULL, + start_time = NULL, + tags = NULL, + user_id = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{experiment_id}{ID of the associated experiment.} + +\item{start_time}{Unix timestamp in milliseconds of when the run started.} + +\item{tags}{Additional metadata for run.} + +\item{user_id}{ID of the user executing the run.} +} +\description{ +Creates a new run within an experiment. A run is usually a single execution +of a machine learning or data ETL pipeline. MLflow uses runs to track the +\code{mlflowParam}, \code{mlflowMetric} and \code{mlflowRunTag} associated with a single +execution.
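The create/add-block/close workflow described under create_dbfs() maps onto three of the new exports. A sketch, assuming the create response exposes the stream handle as $handle, that add_dbfs_block() takes base64-encoded content in a data argument (per the underlying DBFS API), and that the base64enc package is available:

client <- DatabricksClient()
h <- create_dbfs(client, path = "/tmp/sdk-docs-example.txt", overwrite = TRUE)$handle
payload <- base64enc::base64encode(charToRaw("hello from R\n"))  # assumed encoding helper
add_dbfs_block(client, handle = h, data = payload)
close_dbfs(client, handle = h)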
+} diff --git a/man/create_external_location.Rd b/man/create_external_location.Rd new file mode 100644 index 00000000..82d337a2 --- /dev/null +++ b/man/create_external_location.Rd @@ -0,0 +1,55 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/external_locations.R +\name{create_external_location} +\alias{create_external_location} +\alias{externalLocationsCreate} +\title{Create an external location.} +\usage{ +create_external_location( + client, + name, + url, + credential_name, + access_point = NULL, + comment = NULL, + encryption_details = NULL, + read_only = NULL, + skip_validation = NULL +) + +externalLocationsCreate( + client, + name, + url, + credential_name, + access_point = NULL, + comment = NULL, + encryption_details = NULL, + read_only = NULL, + skip_validation = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the external location.} + +\item{url}{Required. Path URL of the external location.} + +\item{credential_name}{Required. Name of the storage credential used with this location.} + +\item{access_point}{The AWS access point to use when accessing S3 for this external location.} + +\item{comment}{User-provided free-form text description.} + +\item{encryption_details}{Encryption options that apply to clients connecting to cloud storage.} + +\item{read_only}{Indicates whether the external location is read-only.} + +\item{skip_validation}{Skips validation of the storage credential associated with the external location.} +} +\description{ +Creates a new external location entry in the metastore. The caller must be a +metastore admin or have the \strong{CREATE_EXTERNAL_LOCATION} privilege on both +the metastore and the associated storage credential. +} diff --git a/man/create_file_directory.Rd b/man/create_file_directory.Rd new file mode 100644 index 00000000..ad180658 --- /dev/null +++ b/man/create_file_directory.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/files.R +\name{create_file_directory} +\alias{create_file_directory} +\alias{filesCreateDirectory} +\title{Create a directory.} +\usage{ +create_file_directory(client, directory_path) + +filesCreateDirectory(client, directory_path) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{directory_path}{Required. The absolute path of a directory.} +} +\description{ +Creates an empty directory. If necessary, also creates any parent directories +of the new, empty directory (like the shell command \code{mkdir -p}). If called on +an existing directory, returns a success response; this method is idempotent +(it will succeed if the directory already exists). +} diff --git a/man/create_function.Rd b/man/create_function.Rd new file mode 100644 index 00000000..d1f5a831 --- /dev/null +++ b/man/create_function.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/functions.R +\name{create_function} +\alias{create_function} +\alias{functionsCreate} +\title{Create a function.} +\usage{ +create_function(client, function_info) + +functionsCreate(client, function_info) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{function_info}{Required.
Partial \strong{FunctionInfo} specifying the function to be created.} +} +\description{ +Creates a new function +} +\details{ +The user must have the following permissions in order for the function to be +created: - \strong{USE_CATALOG} on the function's parent catalog - \strong{USE_SCHEMA} +and \strong{CREATE_FUNCTION} on the function's parent schema +} diff --git a/man/create_git_credential.Rd b/man/create_git_credential.Rd new file mode 100644 index 00000000..8e55630f --- /dev/null +++ b/man/create_git_credential.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/git_credentials.R +\name{create_git_credential} +\alias{create_git_credential} +\alias{gitCredentialsCreate} +\title{Create a credential entry.} +\usage{ +create_git_credential( + client, + git_provider, + git_username = NULL, + personal_access_token = NULL +) + +gitCredentialsCreate( + client, + git_provider, + git_username = NULL, + personal_access_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{git_provider}{Required. Git provider.} + +\item{git_username}{Git username.} + +\item{personal_access_token}{The personal access token used to authenticate to the corresponding Git provider.} +} +\description{ +Creates a Git credential entry for the user. Only one Git credential per user +is supported, so any attempts to create credentials if an entry already +exists will fail. Use the PATCH endpoint to update existing credentials, or +the DELETE endpoint to delete existing credentials. +} diff --git a/man/create_global_init_script.Rd b/man/create_global_init_script.Rd new file mode 100644 index 00000000..b7fd4cad --- /dev/null +++ b/man/create_global_init_script.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/global_init_scripts.R +\name{create_global_init_script} +\alias{create_global_init_script} +\alias{globalInitScriptsCreate} +\title{Create init script.} +\usage{ +create_global_init_script( + client, + name, + script, + enabled = NULL, + position = NULL +) + +globalInitScriptsCreate(client, name, script, enabled = NULL, position = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the script.} + +\item{script}{Required. The Base64-encoded content of the script.} + +\item{enabled}{Specifies whether the script is enabled.} + +\item{position}{The position of a global init script, where 0 represents the first script to run, 1 is the second script to run, in ascending order.} +} +\description{ +Creates a new global init script in this workspace. +} diff --git a/man/create_group.Rd b/man/create_group.Rd new file mode 100644 index 00000000..c8bdbc89 --- /dev/null +++ b/man/create_group.Rd @@ -0,0 +1,58 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/groups.R +\name{create_group} +\alias{create_group} +\alias{groupsCreate} +\title{Create a new group.} +\usage{ +create_group( + client, + display_name = NULL, + entitlements = NULL, + external_id = NULL, + groups = NULL, + id = NULL, + members = NULL, + meta = NULL, + roles = NULL, + schemas = NULL +) + +groupsCreate( + client, + display_name = NULL, + entitlements = NULL, + external_id = NULL, + groups = NULL, + id = NULL, + members = NULL, + meta = NULL, + roles = NULL, + schemas = NULL +) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{display_name}{String that represents a human-readable group name.} + +\item{entitlements}{Entitlements assigned to the group.} + +\item{external_id}{This field has no description yet.} + +\item{groups}{This field has no description yet.} + +\item{id}{Databricks group ID.} + +\item{members}{This field has no description yet.} + +\item{meta}{Container for the group identifier.} + +\item{roles}{Corresponds to AWS instance profile/arn role.} + +\item{schemas}{The schema of the group.} +} +\description{ +Creates a group in the Databricks workspace with a unique name, using the +supplied group details. +} diff --git a/man/create_instance_pool.Rd b/man/create_instance_pool.Rd new file mode 100644 index 00000000..c892aff8 --- /dev/null +++ b/man/create_instance_pool.Rd @@ -0,0 +1,73 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/instance_pools.R +\name{create_instance_pool} +\alias{create_instance_pool} +\alias{instancePoolsCreate} +\title{Create a new instance pool.} +\usage{ +create_instance_pool( + client, + instance_pool_name, + node_type_id, + aws_attributes = NULL, + azure_attributes = NULL, + custom_tags = NULL, + disk_spec = NULL, + enable_elastic_disk = NULL, + gcp_attributes = NULL, + idle_instance_autotermination_minutes = NULL, + max_capacity = NULL, + min_idle_instances = NULL, + preloaded_docker_images = NULL, + preloaded_spark_versions = NULL +) + +instancePoolsCreate( + client, + instance_pool_name, + node_type_id, + aws_attributes = NULL, + azure_attributes = NULL, + custom_tags = NULL, + disk_spec = NULL, + enable_elastic_disk = NULL, + gcp_attributes = NULL, + idle_instance_autotermination_minutes = NULL, + max_capacity = NULL, + min_idle_instances = NULL, + preloaded_docker_images = NULL, + preloaded_spark_versions = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{instance_pool_name}{Required. Pool name requested by the user.} + +\item{node_type_id}{Required. This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.} + +\item{aws_attributes}{Attributes related to instance pools running on Amazon Web Services.} + +\item{azure_attributes}{Attributes related to instance pools running on Azure.} + +\item{custom_tags}{Additional tags for pool resources.} + +\item{disk_spec}{Defines the specification of the disks that will be attached to all Spark containers.} + +\item{enable_elastic_disk}{Autoscaling Local Storage: when enabled, the instances in this pool will dynamically acquire additional disk space when their Spark workers are running low on disk space.} + +\item{gcp_attributes}{Attributes related to instance pools running on Google Cloud Platform.} + +\item{idle_instance_autotermination_minutes}{Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.} + +\item{max_capacity}{Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.} + +\item{min_idle_instances}{Minimum number of idle instances to keep in the instance pool.} + +\item{preloaded_docker_images}{Custom Docker Image BYOC.} + +\item{preloaded_spark_versions}{A list containing at most one preloaded Spark image version for the pool.} +} +\description{ +Creates a new instance pool using idle and ready-to-use cloud instances.
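A usage sketch for the page above, with illustrative sizing; instancePoolsCreate() remains available as the alias:

client <- DatabricksClient()
pool <- create_instance_pool(
  client,
  instance_pool_name = "sdk-docs-pool",
  node_type_id       = "i3.xlarge",  # illustrative node type
  min_idle_instances = 1,
  idle_instance_autotermination_minutes = 15
)
pool$instance_pool_id  # response field assumed from the Instance Pools API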
+} diff --git a/man/create_ip_access_list.Rd b/man/create_ip_access_list.Rd new file mode 100644 index 00000000..2f3627fb --- /dev/null +++ b/man/create_ip_access_list.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ip_access_lists.R +\name{create_ip_access_list} +\alias{create_ip_access_list} +\alias{ipAccessListsCreate} +\title{Create access list.} +\usage{ +create_ip_access_list(client, label, list_type, ip_addresses = NULL) + +ipAccessListsCreate(client, label, list_type, ip_addresses = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{label}{Required. Label for the IP access list.} + +\item{list_type}{Required. Type of IP access list.} + +\item{ip_addresses}{This field has no description yet.} +} +\description{ +Creates an IP access list for this workspace. +} +\details{ +A list can be an allow list or a block list. See the top of this file for a +description of how the server treats allow lists and block lists at runtime. + +When creating or updating an IP access list: +\itemize{ +\item For all allow lists and block lists combined, the API supports a maximum of +1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to +exceed that number return error 400 with \code{error_code} value \code{QUOTA_EXCEEDED}. +\item If the new list would block the calling user's current IP, error 400 is +returned with \code{error_code} value \code{INVALID_STATE}. +} + +It can take a few minutes for the changes to take effect. \strong{Note}: Your new +IP access list has no effect until you enable the feature. See +:method:workspaceconf/setStatus +} diff --git a/man/create_job.Rd b/man/create_job.Rd new file mode 100644 index 00000000..f7e01d56 --- /dev/null +++ b/man/create_job.Rd @@ -0,0 +1,113 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{create_job} +\alias{create_job} +\alias{jobsCreate} +\title{Create a new job.} +\usage{ +create_job( + client, + access_control_list = NULL, + compute = NULL, + continuous = NULL, + deployment = NULL, + description = NULL, + edit_mode = NULL, + email_notifications = NULL, + format = NULL, + git_source = NULL, + health = NULL, + job_clusters = NULL, + max_concurrent_runs = NULL, + name = NULL, + notification_settings = NULL, + parameters = NULL, + queue = NULL, + run_as = NULL, + schedule = NULL, + tags = NULL, + tasks = NULL, + timeout_seconds = NULL, + trigger = NULL, + webhook_notifications = NULL +) + +jobsCreate( + client, + access_control_list = NULL, + compute = NULL, + continuous = NULL, + deployment = NULL, + description = NULL, + edit_mode = NULL, + email_notifications = NULL, + format = NULL, + git_source = NULL, + health = NULL, + job_clusters = NULL, + max_concurrent_runs = NULL, + name = NULL, + notification_settings = NULL, + parameters = NULL, + queue = NULL, + run_as = NULL, + schedule = NULL, + tags = NULL, + tasks = NULL, + timeout_seconds = NULL, + trigger = NULL, + webhook_notifications = NULL +) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{access_control_list}{List of permissions to set on the job.} + +\item{compute}{A list of compute requirements that can be referenced by tasks of this job.} + +\item{continuous}{An optional continuous property for this job.} + +\item{deployment}{Deployment information for jobs managed by external sources.} + +\item{description}{An optional description for the job.} + +\item{edit_mode}{Edit mode of the job.} + +\item{email_notifications}{An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.} + +\item{format}{Used to tell what is the format of the job.} + +\item{git_source}{An optional specification for a remote Git repository containing the source code used by tasks.} + +\item{health}{An optional set of health rules that can be defined for this job.} + +\item{job_clusters}{A list of job cluster specifications that can be shared and reused by tasks of this job.} + +\item{max_concurrent_runs}{An optional maximum allowed number of concurrent runs of the job.} + +\item{name}{An optional name for the job.} + +\item{notification_settings}{Optional notification settings that are used when sending notifications to each of the \code{email_notifications} and \code{webhook_notifications} for this job.} + +\item{parameters}{Job-level parameter definitions.} + +\item{queue}{The queue settings of the job.} + +\item{run_as}{Write-only setting, available only in Create/Update/Reset and Submit calls.} + +\item{schedule}{An optional periodic schedule for this job.} + +\item{tags}{A map of tags associated with the job.} + +\item{tasks}{A list of task specifications to be executed by this job.} + +\item{timeout_seconds}{An optional timeout applied to each run of this job.} + +\item{trigger}{A configuration to trigger a run when certain conditions are met.} + +\item{webhook_notifications}{A collection of system notification IDs to notify when runs of this job begin or complete.} +} +\description{ +Create a new job. +} diff --git a/man/create_lakehouse_monitor.Rd b/man/create_lakehouse_monitor.Rd new file mode 100644 index 00000000..55a401bf --- /dev/null +++ b/man/create_lakehouse_monitor.Rd @@ -0,0 +1,89 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/lakehouse_monitors.R +\name{create_lakehouse_monitor} +\alias{create_lakehouse_monitor} +\alias{lakehouseMonitorsCreate} +\title{Create a table monitor.} +\usage{ +create_lakehouse_monitor( + client, + full_name, + assets_dir, + output_schema_name, + baseline_table_name = NULL, + custom_metrics = NULL, + data_classification_config = NULL, + inference_log = NULL, + notifications = NULL, + schedule = NULL, + skip_builtin_dashboard = NULL, + slicing_exprs = NULL, + snapshot = NULL, + time_series = NULL, + warehouse_id = NULL +) + +lakehouseMonitorsCreate( + client, + full_name, + assets_dir, + output_schema_name, + baseline_table_name = NULL, + custom_metrics = NULL, + data_classification_config = NULL, + inference_log = NULL, + notifications = NULL, + schedule = NULL, + skip_builtin_dashboard = NULL, + slicing_exprs = NULL, + snapshot = NULL, + time_series = NULL, + warehouse_id = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the table.} + +\item{assets_dir}{Required. The directory to store monitoring assets (e.g.} + +\item{output_schema_name}{Required. 
Schema where output metric tables are created.} + +\item{baseline_table_name}{Name of the baseline table from which drift metrics are computed.} + +\item{custom_metrics}{Custom metrics to compute on the monitored table.} + +\item{data_classification_config}{The data classification config for the monitor.} + +\item{inference_log}{Configuration for monitoring inference logs.} + +\item{notifications}{The notification settings for the monitor.} + +\item{schedule}{The schedule for automatically updating and refreshing metric tables.} + +\item{skip_builtin_dashboard}{Whether to skip creating a default dashboard summarizing data quality metrics.} + +\item{slicing_exprs}{List of column expressions to slice data with for targeted analysis.} + +\item{snapshot}{Configuration for monitoring snapshot tables.} + +\item{time_series}{Configuration for monitoring time series tables.} + +\item{warehouse_id}{Optional argument to specify the warehouse for dashboard creation.} +} +\description{ +Creates a new monitor for the specified table. +} +\details{ +The caller must either: 1. be an owner of the table's parent catalog, have +\strong{USE_SCHEMA} on the table's parent schema, and have \strong{SELECT} access on +the table 2. have \strong{USE_CATALOG} on the table's parent catalog, be an owner +of the table's parent schema, and have \strong{SELECT} access on the table. 3. +have the following permissions: - \strong{USE_CATALOG} on the table's parent +catalog - \strong{USE_SCHEMA} on the table's parent schema - be an owner of the +table. + +Workspace assets, such as the dashboard, will be created in the workspace +where this call was made. +} diff --git a/man/create_lakeview.Rd b/man/create_lakeview.Rd new file mode 100644 index 00000000..d39e2096 --- /dev/null +++ b/man/create_lakeview.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/lakeview.R +\name{create_lakeview} +\alias{create_lakeview} +\alias{lakeviewCreate} +\title{Create dashboard.} +\usage{ +create_lakeview( + client, + display_name, + parent_path = NULL, + serialized_dashboard = NULL, + warehouse_id = NULL +) + +lakeviewCreate( + client, + display_name, + parent_path = NULL, + serialized_dashboard = NULL, + warehouse_id = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{display_name}{Required. The display name of the dashboard.} + +\item{parent_path}{The workspace path of the folder containing the dashboard.} + +\item{serialized_dashboard}{The contents of the dashboard in serialized string form.} + +\item{warehouse_id}{The warehouse ID used to run the dashboard.} +} +\description{ +Create a draft dashboard. +} diff --git a/man/create_metastore.Rd b/man/create_metastore.Rd new file mode 100644 index 00000000..4e4ce2b9 --- /dev/null +++ b/man/create_metastore.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/metastores.R +\name{create_metastore} +\alias{create_metastore} +\alias{metastoresCreate} +\title{Create a metastore.} +\usage{ +create_metastore(client, name, region = NULL, storage_root = NULL) + +metastoresCreate(client, name, region = NULL, storage_root = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required.
The user-specified name of the metastore.} + +\item{region}{Cloud region which the metastore serves (e.g., \code{us-west-2}, \code{westus}).} + +\item{storage_root}{The storage root URL for metastore.} +} +\description{ +Creates a new metastore based on a provided name and optional storage root +path. By default (if the \strong{owner} field is not set), the owner of the new +metastore is the user calling the \strong{createMetastore} API. If the \strong{owner} +field is set to the empty string (\strong{''}), the ownership is assigned to the +System User instead. +} diff --git a/man/create_model.Rd b/man/create_model.Rd new file mode 100644 index 00000000..2e85e47e --- /dev/null +++ b/man/create_model.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{create_model} +\alias{create_model} +\alias{modelRegistryCreateModel} +\title{Create a model.} +\usage{ +create_model(client, name, description = NULL, tags = NULL) + +modelRegistryCreateModel(client, name, description = NULL, tags = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Register models under this name.} + +\item{description}{Optional description for registered model.} + +\item{tags}{Additional metadata for registered model.} +} +\description{ +Creates a new registered model with the name specified in the request body. +} +\details{ +Throws \code{RESOURCE_ALREADY_EXISTS} if a registered model with the given name +exists. +} diff --git a/man/create_model_comment.Rd b/man/create_model_comment.Rd new file mode 100644 index 00000000..422ec839 --- /dev/null +++ b/man/create_model_comment.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{create_model_comment} +\alias{create_model_comment} +\alias{modelRegistryCreateComment} +\title{Post a comment.} +\usage{ +create_model_comment(client, name, version, comment) + +modelRegistryCreateComment(client, name, version, comment) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the model.} + +\item{version}{Required. Version of the model.} + +\item{comment}{Required. User-provided comment on the action.} +} +\description{ +Posts a comment on a model version. A comment can be submitted either by a +user or programmatically to display relevant information about the model. For +example, test results or deployment errors. +} diff --git a/man/create_model_transition_request.Rd b/man/create_model_transition_request.Rd new file mode 100644 index 00000000..c531e720 --- /dev/null +++ b/man/create_model_transition_request.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{create_model_transition_request} +\alias{create_model_transition_request} +\alias{modelRegistryCreateTransitionRequest} +\title{Make a transition request.} +\usage{ +create_model_transition_request(client, name, version, stage, comment = NULL) + +modelRegistryCreateTransitionRequest( + client, + name, + version, + stage, + comment = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the model.} + +\item{version}{Required. Version of the model.} + +\item{stage}{Required. Target stage of the transition.} + +\item{comment}{User-provided comment on the action.} +} +\description{ +Creates a model version stage transition request. 
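The renamed model-registry calls chain together: register a model, attach a version, then open a stage transition request (the NAMESPACE also gains approve_model_transition_request() for the reviewing side). A sketch with illustrative names; the $model_version$version field access is assumed from the REST response shape:

client <- DatabricksClient()
create_model(client, name = "sdk-docs-model")
mv <- create_model_version(client, name = "sdk-docs-model", source = "dbfs:/tmp/model")
create_model_transition_request(
  client,
  name    = "sdk-docs-model",
  version = mv$model_version$version,  # assumed response field
  stage   = "Staging",
  comment = "request promotion to Staging"
)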
+} diff --git a/man/create_model_version.Rd b/man/create_model_version.Rd new file mode 100644 index 00000000..a5d52982 --- /dev/null +++ b/man/create_model_version.Rd @@ -0,0 +1,45 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{create_model_version} +\alias{create_model_version} +\alias{modelRegistryCreateModelVersion} +\title{Create a model version.} +\usage{ +create_model_version( + client, + name, + source, + description = NULL, + run_id = NULL, + run_link = NULL, + tags = NULL +) + +modelRegistryCreateModelVersion( + client, + name, + source, + description = NULL, + run_id = NULL, + run_link = NULL, + tags = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Register model under this name.} + +\item{source}{Required. URI indicating the location of the model artifacts.} + +\item{description}{Optional description for model version.} + +\item{run_id}{MLflow run ID for correlation, if \code{source} was generated by an experiment run in MLflow tracking server.} + +\item{run_link}{MLflow run link - this is the exact link of the run that generated this model version, potentially hosted at another instance of MLflow.} + +\item{tags}{Additional metadata for model version.} +} +\description{ +Creates a model version. +} diff --git a/man/create_model_webhook.Rd b/man/create_model_webhook.Rd new file mode 100644 index 00000000..97f62ef3 --- /dev/null +++ b/man/create_model_webhook.Rd @@ -0,0 +1,48 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{create_model_webhook} +\alias{create_model_webhook} +\alias{modelRegistryCreateWebhook} +\title{Create a webhook.} +\usage{ +create_model_webhook( + client, + events, + description = NULL, + http_url_spec = NULL, + job_spec = NULL, + model_name = NULL, + status = NULL +) + +modelRegistryCreateWebhook( + client, + events, + description = NULL, + http_url_spec = NULL, + job_spec = NULL, + model_name = NULL, + status = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{events}{Required. Events that can trigger a registry webhook: * \code{MODEL_VERSION_CREATED}: A new model version was created for the associated model.} + +\item{description}{User-specified description for the webhook.} + +\item{http_url_spec}{This field has no description yet.} + +\item{job_spec}{This field has no description yet.} + +\item{model_name}{Name of the model whose events would trigger this webhook.} + +\item{status}{Enable or disable triggering the webhook, or put the webhook into test mode.} +} +\description{ +\strong{NOTE}: This endpoint is in Public Preview. +} +\details{ +Creates a registry webhook. +} diff --git a/man/create_obo_token.Rd b/man/create_obo_token.Rd new file mode 100644 index 00000000..7cdef2d1 --- /dev/null +++ b/man/create_obo_token.Rd @@ -0,0 +1,33 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token_management.R +\name{create_obo_token} +\alias{create_obo_token} +\alias{tokenManagementCreateOboToken} +\title{Create on-behalf token.} +\usage{ +create_obo_token( + client, + application_id, + comment = NULL, + lifetime_seconds = NULL +) + +tokenManagementCreateOboToken( + client, + application_id, + comment = NULL, + lifetime_seconds = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{application_id}{Required. 
Application ID of the service principal.} + +\item{comment}{Comment that describes the purpose of the token.} + +\item{lifetime_seconds}{The number of seconds before the token expires.} +} +\description{ +Creates a token on behalf of a service principal. +} diff --git a/man/create_online_table.Rd b/man/create_online_table.Rd new file mode 100644 index 00000000..750f7975 --- /dev/null +++ b/man/create_online_table.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/online_tables.R +\name{create_online_table} +\alias{create_online_table} +\alias{onlineTablesCreate} +\title{Create an Online Table.} +\usage{ +create_online_table(client, name = NULL, spec = NULL) + +onlineTablesCreate(client, name = NULL, spec = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Full three-part (catalog, schema, table) name of the table.} + +\item{spec}{Specification of the online table.} +} +\description{ +Create a new Online Table. +} diff --git a/man/create_pipeline.Rd b/man/create_pipeline.Rd new file mode 100644 index 00000000..669d8329 --- /dev/null +++ b/man/create_pipeline.Rd @@ -0,0 +1,98 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{create_pipeline} +\alias{create_pipeline} +\alias{pipelinesCreate} +\title{Create a pipeline.} +\usage{ +create_pipeline( + client, + allow_duplicate_names = NULL, + catalog = NULL, + channel = NULL, + clusters = NULL, + configuration = NULL, + continuous = NULL, + development = NULL, + dry_run = NULL, + edition = NULL, + filters = NULL, + id = NULL, + libraries = NULL, + name = NULL, + notifications = NULL, + photon = NULL, + serverless = NULL, + storage = NULL, + target = NULL, + trigger = NULL +) + +pipelinesCreate( + client, + allow_duplicate_names = NULL, + catalog = NULL, + channel = NULL, + clusters = NULL, + configuration = NULL, + continuous = NULL, + development = NULL, + dry_run = NULL, + edition = NULL, + filters = NULL, + id = NULL, + libraries = NULL, + name = NULL, + notifications = NULL, + photon = NULL, + serverless = NULL, + storage = NULL, + target = NULL, + trigger = NULL +) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{allow_duplicate_names}{If false, deployment will fail if the name conflicts with that of another pipeline.} + +\item{catalog}{A catalog in Unity Catalog to publish data from this pipeline to.} + +\item{channel}{DLT Release Channel that specifies which version to use.} + +\item{clusters}{Cluster settings for this pipeline deployment.} + +\item{configuration}{String-String configuration for this pipeline execution.} + +\item{continuous}{Whether the pipeline is continuous or triggered.} + +\item{development}{Whether the pipeline is in Development mode.} + +\item{dry_run}{This field has no description yet.} + +\item{edition}{Pipeline product edition.} + +\item{filters}{Filters on which Pipeline packages to include in the deployed graph.} + +\item{id}{Unique identifier for this pipeline.} + +\item{libraries}{Libraries or code needed by this deployment.} + +\item{name}{Friendly identifier for this pipeline.} + +\item{notifications}{List of notification settings for this pipeline.} + +\item{photon}{Whether Photon is enabled for this pipeline.} + +\item{serverless}{Whether serverless compute is enabled for this pipeline.} + +\item{storage}{DBFS root directory for storing checkpoints and tables.} + +\item{target}{Target schema (database) to add tables in this pipeline to.} + +\item{trigger}{Which pipeline trigger to use.} +} +\description{ +Creates a new data processing pipeline based on the requested configuration. +If successful, this method returns the ID of the new pipeline. +} diff --git a/man/create_provider.Rd b/man/create_provider.Rd new file mode 100644 index 00000000..5588d620 --- /dev/null +++ b/man/create_provider.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/providers.R +\name{create_provider} +\alias{create_provider} +\alias{providersCreate} +\title{Create an auth provider.} +\usage{ +create_provider( + client, + name, + authentication_type, + comment = NULL, + recipient_profile_str = NULL +) + +providersCreate( + client, + name, + authentication_type, + comment = NULL, + recipient_profile_str = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the Provider.} + +\item{authentication_type}{Required. The delta sharing authentication type.} + +\item{comment}{Description of the provider.} + +\item{recipient_profile_str}{This field is required when the \strong{authentication_type} is \strong{TOKEN} or not provided.} +} +\description{ +Creates a new authentication provider minimally based on a name and +authentication type. The caller must be an admin on the metastore. +} diff --git a/man/create_query.Rd b/man/create_query.Rd new file mode 100644 index 00000000..fce0e3e6 --- /dev/null +++ b/man/create_query.Rd @@ -0,0 +1,58 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/queries.R +\name{create_query} +\alias{create_query} +\alias{queriesCreate} +\title{Create a new query definition.} +\usage{ +create_query( + client, + data_source_id = NULL, + description = NULL, + name = NULL, + options = NULL, + parent = NULL, + query = NULL, + run_as_role = NULL +) + +queriesCreate( + client, + data_source_id = NULL, + description = NULL, + name = NULL, + options = NULL, + parent = NULL, + query = NULL, + run_as_role = NULL +) +} +\arguments{ +\item{client}{Required.
Instance of DatabricksClient()} + +\item{data_source_id}{Data source ID maps to the ID of the data source used by the resource and is distinct from the warehouse ID.} + +\item{description}{General description that conveys additional information about this query such as usage notes.} + +\item{name}{The title of this query that appears in list views, widget headings, and on the query page.} + +\item{options}{Exclusively used for storing a list of parameter definitions.} + +\item{parent}{The identifier of the workspace folder containing the object.} + +\item{query}{The text of the query to be run.} + +\item{run_as_role}{Sets the \strong{Run as} role for the object.} +} +\description{ +Creates a new query definition. Queries created with this endpoint belong to +the authenticated user making the request. +} +\details{ +The \code{data_source_id} field specifies the ID of the SQL warehouse to run this +query against. You can use the Data Sources API to see a complete list of +available SQL warehouses, or you can copy the \code{data_source_id} from an +existing query. + +\strong{Note}: You cannot add a visualization until you create the query. +} diff --git a/man/create_query_visualization.Rd b/man/create_query_visualization.Rd new file mode 100644 index 00000000..f5d19682 --- /dev/null +++ b/man/create_query_visualization.Rd @@ -0,0 +1,41 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/query_visualizations.R +\name{create_query_visualization} +\alias{create_query_visualization} +\alias{queryVisualizationsCreate} +\title{Add visualization to a query.} +\usage{ +create_query_visualization( + client, + query_id, + type, + options, + description = NULL, + name = NULL +) + +queryVisualizationsCreate( + client, + query_id, + type, + options, + description = NULL, + name = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{query_id}{Required. The identifier returned by :method:queries/create.} + +\item{type}{Required. The type of visualization: chart, table, pivot table, and so on.} + +\item{options}{Required. The options object varies widely from one visualization type to the next and is unsupported.} + +\item{description}{A short description of this visualization.} + +\item{name}{The name of the visualization that appears on dashboards and the query screen.} +} +\description{ +Add visualization to a query. +} diff --git a/man/create_recipient.Rd b/man/create_recipient.Rd new file mode 100644 index 00000000..09b837fe --- /dev/null +++ b/man/create_recipient.Rd @@ -0,0 +1,55 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/recipients.R +\name{create_recipient} +\alias{create_recipient} +\alias{recipientsCreate} +\title{Create a share recipient.} +\usage{ +create_recipient( + client, + name, + authentication_type, + comment = NULL, + data_recipient_global_metastore_id = NULL, + ip_access_list = NULL, + owner = NULL, + properties_kvpairs = NULL, + sharing_code = NULL +) + +recipientsCreate( + client, + name, + authentication_type, + comment = NULL, + data_recipient_global_metastore_id = NULL, + ip_access_list = NULL, + owner = NULL, + properties_kvpairs = NULL, + sharing_code = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of Recipient.} + +\item{authentication_type}{Required.
The delta sharing authentication type.} + +\item{comment}{Description of the recipient.} + +\item{data_recipient_global_metastore_id}{The global Unity Catalog metastore id provided by the data recipient.} + +\item{ip_access_list}{IP Access List.} + +\item{owner}{Username of the recipient owner.} + +\item{properties_kvpairs}{Recipient properties as a map of string key-value pairs.} + +\item{sharing_code}{The one-time sharing code provided by the data recipient.} +} +\description{ +Creates a new recipient with the delta sharing authentication type in the +metastore. The caller must be a metastore admin or have the +\strong{CREATE_RECIPIENT} privilege on the metastore. +} diff --git a/man/create_registered_model.Rd b/man/create_registered_model.Rd new file mode 100644 index 00000000..55123e31 --- /dev/null +++ b/man/create_registered_model.Rd @@ -0,0 +1,53 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/registered_models.R +\name{create_registered_model} +\alias{create_registered_model} +\alias{registeredModelsCreate} +\title{Create a Registered Model.} +\usage{ +create_registered_model( + client, + catalog_name, + schema_name, + name, + comment = NULL, + storage_location = NULL +) + +registeredModelsCreate( + client, + catalog_name, + schema_name, + name, + comment = NULL, + storage_location = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{catalog_name}{Required. The name of the catalog where the schema and the registered model reside.} + +\item{schema_name}{Required. The name of the schema where the registered model resides.} + +\item{name}{Required. The name of the registered model.} + +\item{comment}{The comment attached to the registered model.} + +\item{storage_location}{The storage location on the cloud under which model version data files are stored.} +} +\description{ +Creates a new registered model in Unity Catalog. +} +\details{ +File storage for model versions in the registered model will be located in +the default location which is specified by the parent schema, or the parent +catalog, or the Metastore. + +For registered model creation to succeed, the user must satisfy the following +conditions: - The caller must be a metastore admin, or be the owner of the +parent catalog and schema, or have the \strong{USE_CATALOG} privilege on the +parent catalog and the \strong{USE_SCHEMA} privilege on the parent schema. - The +caller must have the \strong{CREATE MODEL} or \strong{CREATE FUNCTION} privilege on the +parent schema. +} diff --git a/man/create_repo.Rd b/man/create_repo.Rd new file mode 100644 index 00000000..0299e01c --- /dev/null +++ b/man/create_repo.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/repos.R +\name{create_repo} +\alias{create_repo} +\alias{reposCreate} +\title{Create a repo.} +\usage{ +create_repo(client, url, provider, path = NULL, sparse_checkout = NULL) + +reposCreate(client, url, provider, path = NULL, sparse_checkout = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{url}{Required. URL of the Git repository to be linked.} + +\item{provider}{Required. Git provider.} + +\item{path}{Desired path for the repo in the workspace.} + +\item{sparse_checkout}{If specified, the repo will be created with sparse checkout enabled.} +} +\description{ +Creates a repo in the workspace and links it to the remote Git repo +specified.
Note that repos created programmatically must be linked to a +remote Git repo, unlike repos created in the browser. +} diff --git a/man/create_schema.Rd b/man/create_schema.Rd new file mode 100644 index 00000000..2d5d90c6 --- /dev/null +++ b/man/create_schema.Rd @@ -0,0 +1,43 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/schemas.R +\name{create_schema} +\alias{create_schema} +\alias{schemasCreate} +\title{Create a schema.} +\usage{ +create_schema( + client, + name, + catalog_name, + comment = NULL, + properties = NULL, + storage_root = NULL +) + +schemasCreate( + client, + name, + catalog_name, + comment = NULL, + properties = NULL, + storage_root = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of schema, relative to parent catalog.} + +\item{catalog_name}{Required. Name of parent catalog.} + +\item{comment}{User-provided free-form text description.} + +\item{properties}{A map of key-value properties attached to the securable.} + +\item{storage_root}{Storage root URL for managed tables within schema.} +} +\description{ +Creates a new schema for a catalog in the Metastore. The caller must be a +metastore admin, or have the \strong{CREATE_SCHEMA} privilege in the parent +catalog. +} diff --git a/man/create_secret_scope.Rd b/man/create_secret_scope.Rd new file mode 100644 index 00000000..8260c3ec --- /dev/null +++ b/man/create_secret_scope.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/secrets.R +\name{create_secret_scope} +\alias{create_secret_scope} +\alias{secretsCreateScope} +\title{Create a new secret scope.} +\usage{ +create_secret_scope( + client, + scope, + backend_azure_keyvault = NULL, + initial_manage_principal = NULL, + scope_backend_type = NULL +) + +secretsCreateScope( + client, + scope, + backend_azure_keyvault = NULL, + initial_manage_principal = NULL, + scope_backend_type = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{scope}{Required. Scope name requested by the user.} + +\item{backend_azure_keyvault}{The metadata for the secret scope if the type is \code{AZURE_KEYVAULT}.} + +\item{initial_manage_principal}{The principal that is initially granted \code{MANAGE} permission to the created scope.} + +\item{scope_backend_type}{The backend type the scope will be created with.} +} +\description{ +The scope name must consist of alphanumeric characters, dashes, underscores, +and periods, and may not exceed 128 characters. +} diff --git a/man/create_service_principal.Rd b/man/create_service_principal.Rd new file mode 100644 index 00000000..a261ed44 --- /dev/null +++ b/man/create_service_principal.Rd @@ -0,0 +1,57 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/service_principals.R +\name{create_service_principal} +\alias{create_service_principal} +\alias{servicePrincipalsCreate} +\title{Create a service principal.} +\usage{ +create_service_principal( + client, + active = NULL, + application_id = NULL, + display_name = NULL, + entitlements = NULL, + external_id = NULL, + groups = NULL, + id = NULL, + roles = NULL, + schemas = NULL +) + +servicePrincipalsCreate( + client, + active = NULL, + application_id = NULL, + display_name = NULL, + entitlements = NULL, + external_id = NULL, + groups = NULL, + id = NULL, + roles = NULL, + schemas = NULL +) +} +\arguments{ +\item{client}{Required.
Instance of DatabricksClient()} + +\item{active}{If this user is active.} + +\item{application_id}{UUID relating to the service principal.} + +\item{display_name}{String that represents a concatenation of given and family names.} + +\item{entitlements}{Entitlements assigned to the service principal.} + +\item{external_id}{This field has no description yet.} + +\item{groups}{This field has no description yet.} + +\item{id}{Databricks service principal ID.} + +\item{roles}{Corresponds to AWS instance profile/arn role.} + +\item{schemas}{The schema of the List response.} +} +\description{ +Creates a new service principal in the Databricks workspace. +} diff --git a/man/create_serving_endpoint.Rd b/man/create_serving_endpoint.Rd new file mode 100644 index 00000000..5cdd713f --- /dev/null +++ b/man/create_serving_endpoint.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{create_serving_endpoint} +\alias{create_serving_endpoint} +\alias{servingEndpointsCreate} +\title{Create a new serving endpoint.} +\usage{ +create_serving_endpoint(client, name, config, rate_limits = NULL, tags = NULL) + +servingEndpointsCreate(client, name, config, rate_limits = NULL, tags = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the serving endpoint.} + +\item{config}{Required. The core config of the serving endpoint.} + +\item{rate_limits}{Rate limits to be applied to the serving endpoint.} + +\item{tags}{Tags to be attached to the serving endpoint and automatically propagated to billing logs.} +} +\description{ +Create a new serving endpoint. +} diff --git a/man/create_serving_endpoint_and_wait.Rd b/man/create_serving_endpoint_and_wait.Rd new file mode 100644 index 00000000..85360347 --- /dev/null +++ b/man/create_serving_endpoint_and_wait.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{create_serving_endpoint_and_wait} +\alias{create_serving_endpoint_and_wait} +\title{Create a new serving endpoint.} +\usage{ +create_serving_endpoint_and_wait( + client, + name, + config, + rate_limits = NULL, + tags = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the serving endpoint.} + +\item{config}{Required. The core config of the serving endpoint.} + +\item{rate_limits}{Rate limits to be applied to the serving endpoint.} + +\item{tags}{Tags to be attached to the serving endpoint and automatically propagated to billing logs.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to the console.} +} +\description{ +This is a long-running operation, which blocks until Serving Endpoints on Databricks reach the +NOT_UPDATING state, with a default timeout of 20 minutes that you can change via the \code{timeout} parameter. +By default, the state of Databricks Serving Endpoints is reported to the console; you can change this behavior +via the \code{callback} parameter.
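As a hedged sketch of the blocking variant (the shape of \code{config} with a \code{served_models} list is an assumption based on the serving endpoints API and may differ by SDK version; all names are placeholders):

client <- DatabricksClient()
endpoint <- create_serving_endpoint_and_wait(
  client,
  name = "my-endpoint",
  config = list(
    served_models = list(list(
      model_name = "my_model",
      model_version = "2",
      workload_size = "Small",
      scale_to_zero_enabled = TRUE
    ))
  ),
  timeout = 45 # minutes; overrides the 20-minute default
)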
+} diff --git a/man/create_share.Rd b/man/create_share.Rd new file mode 100644 index 00000000..abb675bf --- /dev/null +++ b/man/create_share.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/shares.R +\name{create_share} +\alias{create_share} +\alias{sharesCreate} +\title{Create a share.} +\usage{ +create_share(client, name, comment = NULL) + +sharesCreate(client, name, comment = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the share.} + +\item{comment}{User-provided free-form text description.} +} +\description{ +Creates a new share for data objects. Data objects can be added after +creation with \strong{update}. The caller must be a metastore admin or have the +\strong{CREATE_SHARE} privilege on the metastore. +} diff --git a/man/create_storage_credential.Rd b/man/create_storage_credential.Rd new file mode 100644 index 00000000..a8fbfb39 --- /dev/null +++ b/man/create_storage_credential.Rd @@ -0,0 +1,57 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/storage_credentials.R +\name{create_storage_credential} +\alias{create_storage_credential} +\alias{storageCredentialsCreate} +\title{Create a storage credential.} +\usage{ +create_storage_credential( + client, + name, + aws_iam_role = NULL, + azure_managed_identity = NULL, + azure_service_principal = NULL, + cloudflare_api_token = NULL, + comment = NULL, + databricks_gcp_service_account = NULL, + read_only = NULL, + skip_validation = NULL +) + +storageCredentialsCreate( + client, + name, + aws_iam_role = NULL, + azure_managed_identity = NULL, + azure_service_principal = NULL, + cloudflare_api_token = NULL, + comment = NULL, + databricks_gcp_service_account = NULL, + read_only = NULL, + skip_validation = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The credential name.} + +\item{aws_iam_role}{The AWS IAM role configuration.} + +\item{azure_managed_identity}{The Azure managed identity configuration.} + +\item{azure_service_principal}{The Azure service principal configuration.} + +\item{cloudflare_api_token}{The Cloudflare API token configuration.} + +\item{comment}{Comment associated with the credential.} + +\item{databricks_gcp_service_account}{The Databricks managed GCP service account configuration.} + +\item{read_only}{Whether the storage credential is only usable for read operations.} + +\item{skip_validation}{Supplying true to this argument skips validation of the created credential.} +} +\description{ +Creates a new storage credential. +} diff --git a/man/create_table_constraint.Rd b/man/create_table_constraint.Rd new file mode 100644 index 00000000..98797da8 --- /dev/null +++ b/man/create_table_constraint.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/table_constraints.R +\name{create_table_constraint} +\alias{create_table_constraint} +\alias{tableConstraintsCreate} +\title{Create a table constraint.} +\usage{ +create_table_constraint(client, full_name_arg, constraint) + +tableConstraintsCreate(client, full_name_arg, constraint) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name_arg}{Required. The full name of the table referenced by the constraint.} + +\item{constraint}{Required.
A table constraint, as defined by \emph{one} of the following fields being set: \strong{primary_key_constraint}, \strong{foreign_key_constraint}, \strong{named_table_constraint}.} +} +\description{ +Creates a new table constraint. +} +\details{ +For the table constraint creation to succeed, the user must satisfy both of +these conditions: - the user must have the \strong{USE_CATALOG} privilege on the +table's parent catalog, the \strong{USE_SCHEMA} privilege on the table's parent +schema, and be the owner of the table. - if the new constraint is a +\strong{ForeignKeyConstraint}, the user must have the \strong{USE_CATALOG} privilege on +the referenced parent table's catalog, the \strong{USE_SCHEMA} privilege on the +referenced parent table's schema, and be the owner of the referenced parent +table. +} diff --git a/man/create_token.Rd b/man/create_token.Rd new file mode 100644 index 00000000..9ce064f0 --- /dev/null +++ b/man/create_token.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tokens.R +\name{create_token} +\alias{create_token} +\alias{tokensCreate} +\title{Create a user token.} +\usage{ +create_token(client, comment = NULL, lifetime_seconds = NULL) + +tokensCreate(client, comment = NULL, lifetime_seconds = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{comment}{Optional description to attach to the token.} + +\item{lifetime_seconds}{The lifetime of the token, in seconds.} +} +\description{ +Creates and returns a token for a user. If this call is made through token +authentication, it creates a token with the same client ID as the +authenticated token. If the user's token quota is exceeded, this call returns +an error \strong{QUOTA_EXCEEDED}. +} diff --git a/man/create_user.Rd b/man/create_user.Rd new file mode 100644 index 00000000..285686bc --- /dev/null +++ b/man/create_user.Rd @@ -0,0 +1,66 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/users.R +\name{create_user} +\alias{create_user} +\alias{usersCreate} +\title{Create a new user.} +\usage{ +create_user( + client, + active = NULL, + display_name = NULL, + emails = NULL, + entitlements = NULL, + external_id = NULL, + groups = NULL, + id = NULL, + name = NULL, + roles = NULL, + schemas = NULL, + user_name = NULL +) + +usersCreate( + client, + active = NULL, + display_name = NULL, + emails = NULL, + entitlements = NULL, + external_id = NULL, + groups = NULL, + id = NULL, + name = NULL, + roles = NULL, + schemas = NULL, + user_name = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{active}{If this user is active.} + +\item{display_name}{String that represents a concatenation of given and family names.} + +\item{emails}{All the emails associated with the Databricks user.} + +\item{entitlements}{Entitlements assigned to the user.} + +\item{external_id}{External ID is not currently supported.} + +\item{groups}{This field has no description yet.} + +\item{id}{Databricks user ID.} + +\item{name}{This field has no description yet.} + +\item{roles}{Corresponds to AWS instance profile/arn role.} + +\item{schemas}{The schema of the user.} + +\item{user_name}{Email address of the Databricks user.} +} +\description{ +Creates a new user in the Databricks workspace. This new user will also be +added to the Databricks account. 
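A minimal sketch of creating a workspace user (the email and display name below are placeholders):

client <- DatabricksClient()
create_user(
  client,
  user_name = "jane.doe@example.com",
  display_name = "Jane Doe"
)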
+} diff --git a/man/create_vector_search_endpoint.Rd b/man/create_vector_search_endpoint.Rd new file mode 100644 index 00000000..365188d2 --- /dev/null +++ b/man/create_vector_search_endpoint.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/vector_search_endpoints.R +\name{create_vector_search_endpoint} +\alias{create_vector_search_endpoint} +\alias{vectorSearchEndpointsCreateEndpoint} +\title{Create an endpoint.} +\usage{ +create_vector_search_endpoint(client, name, endpoint_type) + +vectorSearchEndpointsCreateEndpoint(client, name, endpoint_type) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of endpoint.} + +\item{endpoint_type}{Required. Type of endpoint.} +} +\description{ +Create a new endpoint. +} diff --git a/man/create_vector_search_endpoint_and_wait.Rd b/man/create_vector_search_endpoint_and_wait.Rd new file mode 100644 index 00000000..858f0385 --- /dev/null +++ b/man/create_vector_search_endpoint_and_wait.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/vector_search_endpoints.R +\name{create_vector_search_endpoint_and_wait} +\alias{create_vector_search_endpoint_and_wait} +\title{Create an endpoint.} +\usage{ +create_vector_search_endpoint_and_wait( + client, + name, + endpoint_type, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of endpoint.} + +\item{endpoint_type}{Required. Type of endpoint.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to the console.} +} +\description{ +This is a long-running operation, which blocks until Vector Search Endpoints on Databricks reach the +ONLINE state, with a default timeout of 20 minutes that you can change via the \code{timeout} parameter. +By default, the state of Databricks Vector Search Endpoints is reported to the console; you can change this behavior +via the \code{callback} parameter. +} +\details{ +Create a new endpoint. +} diff --git a/man/create_vector_search_index.Rd b/man/create_vector_search_index.Rd new file mode 100644 index 00000000..e2d040a0 --- /dev/null +++ b/man/create_vector_search_index.Rd @@ -0,0 +1,45 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/vector_search_indexes.R +\name{create_vector_search_index} +\alias{create_vector_search_index} +\alias{vectorSearchIndexesCreateIndex} +\title{Create an index.} +\usage{ +create_vector_search_index( + client, + name, + endpoint_name, + primary_key, + index_type, + delta_sync_index_spec = NULL, + direct_access_index_spec = NULL
) + +vectorSearchIndexesCreateIndex( + client, + name, + endpoint_name, + primary_key, + index_type, + delta_sync_index_spec = NULL, + direct_access_index_spec = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the index.} + +\item{endpoint_name}{Required. Name of the endpoint to be used for serving the index.} + +\item{primary_key}{Required. Primary key of the index.} + +\item{index_type}{Required.
There are two types of Vector Search indexes: - \code{DELTA_SYNC}: An index that automatically syncs with a source Delta Table, incrementally updating the index as the underlying data in the Delta Table changes.} + +\item{delta_sync_index_spec}{Specification for Delta Sync Index.} + +\item{direct_access_index_spec}{Specification for Direct Vector Access Index.} +} +\description{ +Create a new index. +} diff --git a/man/create_volume.Rd b/man/create_volume.Rd new file mode 100644 index 00000000..64b0d139 --- /dev/null +++ b/man/create_volume.Rd @@ -0,0 +1,63 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/volumes.R +\name{create_volume} +\alias{create_volume} +\alias{volumesCreate} +\title{Create a Volume.} +\usage{ +create_volume( + client, + catalog_name, + schema_name, + name, + volume_type, + comment = NULL, + storage_location = NULL +) + +volumesCreate( + client, + catalog_name, + schema_name, + name, + volume_type, + comment = NULL, + storage_location = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{catalog_name}{Required. The name of the catalog where the schema and the volume are.} + +\item{schema_name}{Required. The name of the schema where the volume is.} + +\item{name}{Required. The name of the volume.} + +\item{volume_type}{Required. This field has no description yet.} + +\item{comment}{The comment attached to the volume.} + +\item{storage_location}{The storage location on the cloud.} +} +\description{ +Creates a new volume. +} +\details{ +The user can create either an external volume or a managed volume. An +external volume will be created in the specified external location, while a +managed volume will be located in the default location which is specified by +the parent schema, or the parent catalog, or the Metastore. + +For the volume creation to succeed, the user must satisfy the following +conditions: - The caller must be a metastore admin, or be the owner of the +parent catalog and schema, or have the \strong{USE_CATALOG} privilege on the +parent catalog and the \strong{USE_SCHEMA} privilege on the parent schema. - The +caller must have \strong{CREATE VOLUME} privilege on the parent schema. + +For an external volume, the following conditions must also be satisfied: - The +caller must have \strong{CREATE EXTERNAL VOLUME} privilege on the external +location. - No other tables or volumes exist in the specified +storage location. - The specified storage location is not under the location +of other tables, volumes, catalogs, or schemas.
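For illustration, both flavors might be created as follows (a sketch only: the \code{MANAGED} and \code{EXTERNAL} values for the undocumented \code{volume_type} field are assumptions, as are all names and the storage path):

client <- DatabricksClient()
# Managed volume: storage is derived from the parent schema, catalog, or metastore.
create_volume(client, catalog_name = "main", schema_name = "default",
  name = "raw_files", volume_type = "MANAGED")
# External volume: additionally requires an explicit storage location.
create_volume(client, catalog_name = "main", schema_name = "default",
  name = "landing", volume_type = "EXTERNAL",
  storage_location = "s3://my-bucket/landing")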
+} diff --git a/man/create_warehouse.Rd b/man/create_warehouse.Rd new file mode 100644 index 00000000..2c696a9d --- /dev/null +++ b/man/create_warehouse.Rd @@ -0,0 +1,73 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{create_warehouse} +\alias{create_warehouse} +\alias{warehousesCreate} +\title{Create a warehouse.} +\usage{ +create_warehouse( + client, + auto_stop_mins = NULL, + channel = NULL, + cluster_size = NULL, + creator_name = NULL, + enable_photon = NULL, + enable_serverless_compute = NULL, + instance_profile_arn = NULL, + max_num_clusters = NULL, + min_num_clusters = NULL, + name = NULL, + spot_instance_policy = NULL, + tags = NULL, + warehouse_type = NULL +) + +warehousesCreate( + client, + auto_stop_mins = NULL, + channel = NULL, + cluster_size = NULL, + creator_name = NULL, + enable_photon = NULL, + enable_serverless_compute = NULL, + instance_profile_arn = NULL, + max_num_clusters = NULL, + min_num_clusters = NULL, + name = NULL, + spot_instance_policy = NULL, + tags = NULL, + warehouse_type = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{auto_stop_mins}{The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped.} + +\item{channel}{Channel Details.} + +\item{cluster_size}{Size of the clusters allocated for this warehouse.} + +\item{creator_name}{Warehouse creator name.} + +\item{enable_photon}{Configures whether the warehouse should use Photon optimized clusters.} + +\item{enable_serverless_compute}{Configures whether the warehouse should use serverless compute.} + +\item{instance_profile_arn}{Deprecated.} + +\item{max_num_clusters}{Maximum number of clusters that the autoscaler will create to handle concurrent queries.} + +\item{min_num_clusters}{Minimum number of available clusters that will be maintained for this SQL warehouse.} + +\item{name}{Logical name for the cluster.} + +\item{spot_instance_policy}{Configures whether the warehouse should use spot instances.} + +\item{tags}{A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse.} + +\item{warehouse_type}{Warehouse type: \code{PRO} or \code{CLASSIC}.} +} +\description{ +Creates a new SQL warehouse. +} diff --git a/man/create_warehouse_and_wait.Rd b/man/create_warehouse_and_wait.Rd new file mode 100644 index 00000000..dce6cd89 --- /dev/null +++ b/man/create_warehouse_and_wait.Rd @@ -0,0 +1,67 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{create_warehouse_and_wait} +\alias{create_warehouse_and_wait} +\title{Create a warehouse.} +\usage{ +create_warehouse_and_wait( + client, + auto_stop_mins = NULL, + channel = NULL, + cluster_size = NULL, + creator_name = NULL, + enable_photon = NULL, + enable_serverless_compute = NULL, + instance_profile_arn = NULL, + max_num_clusters = NULL, + min_num_clusters = NULL, + name = NULL, + spot_instance_policy = NULL, + tags = NULL, + warehouse_type = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required.
Instance of DatabricksClient()} + +\item{auto_stop_mins}{The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped.} + +\item{channel}{Channel Details.} + +\item{cluster_size}{Size of the clusters allocated for this warehouse.} + +\item{creator_name}{Warehouse creator name.} + +\item{enable_photon}{Configures whether the warehouse should use Photon optimized clusters.} + +\item{enable_serverless_compute}{Configures whether the warehouse should use serverless compute.} + +\item{instance_profile_arn}{Deprecated.} + +\item{max_num_clusters}{Maximum number of clusters that the autoscaler will create to handle concurrent queries.} + +\item{min_num_clusters}{Minimum number of available clusters that will be maintained for this SQL warehouse.} + +\item{name}{Logical name for the cluster.} + +\item{spot_instance_policy}{Configures whether the warehouse should use spot instances.} + +\item{tags}{A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse.} + +\item{warehouse_type}{Warehouse type: \code{PRO} or \code{CLASSIC}.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to the console.} +} +\description{ +This is a long-running operation, which blocks until Warehouses on Databricks reach the +RUNNING state, with a default timeout of 20 minutes that you can change via the \code{timeout} parameter. +By default, the state of Databricks Warehouses is reported to the console; you can change this behavior +via the \code{callback} parameter. +} +\details{ +Creates a new SQL warehouse. +} diff --git a/man/credentialsManagerExchangeToken.Rd b/man/credentialsManagerExchangeToken.Rd deleted file mode 100644 index c41cdce5..00000000 --- a/man/credentialsManagerExchangeToken.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/credentials_manager.R -\name{credentialsManagerExchangeToken} -\alias{credentialsManagerExchangeToken} -\title{Exchange token.} -\usage{ -credentialsManagerExchangeToken(client, partition_id, token_type, scopes) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{partition_id}{Required. The partition of Credentials store.} - -\item{token_type}{Required. A list of token types being requested.} - -\item{scopes}{Required. Array of scopes for the token request.} -} -\description{ -Exchange tokens with an Identity Provider to get a new access token. It -allows specifying scopes to determine token permissions. -} diff --git a/man/cspEnablementGet.Rd b/man/cspEnablementGet.Rd deleted file mode 100644 index f1fc4af1..00000000 --- a/man/cspEnablementGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/csp_enablement.R -\name{cspEnablementGet} -\alias{cspEnablementGet} -\title{Get the compliance security profile setting.} -\usage{ -cspEnablementGet(client, etag = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{etag}{etag used for versioning.} -} -\description{ -Gets the compliance security profile setting.
-} diff --git a/man/cspEnablementUpdate.Rd b/man/cspEnablementUpdate.Rd deleted file mode 100644 index 711b3887..00000000 --- a/man/cspEnablementUpdate.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/csp_enablement.R -\name{cspEnablementUpdate} -\alias{cspEnablementUpdate} -\title{Update the compliance security profile setting.} -\usage{ -cspEnablementUpdate(client, allow_missing, setting, field_mask) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{allow_missing}{Required. This should always be set to true for Settings API.} - -\item{setting}{Required. This field has no description yet.} - -\item{field_mask}{Required. Field mask is required to be passed into the PATCH request.} -} -\description{ -Updates the compliance security profile setting for the workspace. A fresh -etag needs to be provided in \code{PATCH} requests (as part of the setting field). -The etag can be retrieved by making a \code{GET} request before the \code{PATCH} -request. If the setting is updated concurrently, \code{PATCH} fails with 409 and -the request must be retried by using the fresh etag in the 409 response. -} diff --git a/man/currentUserMe.Rd b/man/currentUserMe.Rd deleted file mode 100644 index 92e7723c..00000000 --- a/man/currentUserMe.Rd +++ /dev/null @@ -1,14 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/current_user.R -\name{currentUserMe} -\alias{currentUserMe} -\title{Get current user info.} -\usage{ -currentUserMe(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Get details about the current method caller's identity. -} diff --git a/man/current_metastore.Rd b/man/current_metastore.Rd new file mode 100644 index 00000000..4165c3dd --- /dev/null +++ b/man/current_metastore.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/metastores.R +\name{current_metastore} +\alias{current_metastore} +\alias{metastoresCurrent} +\title{Get metastore assignment for workspace.} +\usage{ +current_metastore(client) + +metastoresCurrent(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +Gets the metastore assignment for the workspace being accessed. +} diff --git a/man/dashboardWidgetsCreate.Rd b/man/dashboardWidgetsCreate.Rd deleted file mode 100644 index b00dd167..00000000 --- a/man/dashboardWidgetsCreate.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dashboard_widgets.R -\name{dashboardWidgetsCreate} -\alias{dashboardWidgetsCreate} -\title{Add widget to a dashboard.} -\usage{ -dashboardWidgetsCreate( - client, - dashboard_id, - options, - width, - text = NULL, - visualization_id = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{dashboard_id}{Required. Dashboard ID returned by :method:dashboards/create.} - -\item{options}{Required. This field has no description yet.} - -\item{width}{Required. Width of a widget.} - -\item{text}{If this is a textbox widget, the application displays this text.} - -\item{visualization_id}{Query Vizualization ID returned by :method:queryvisualizations/create.} -} -\description{ -Add widget to a dashboard. 
-} diff --git a/man/dashboardWidgetsDelete.Rd b/man/dashboardWidgetsDelete.Rd deleted file mode 100644 index 0d7c58b7..00000000 --- a/man/dashboardWidgetsDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dashboard_widgets.R -\name{dashboardWidgetsDelete} -\alias{dashboardWidgetsDelete} -\title{Remove widget.} -\usage{ -dashboardWidgetsDelete(client, id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Widget ID returned by :method:dashboardwidgets/create.} -} -\description{ -Remove widget. -} diff --git a/man/dashboardWidgetsUpdate.Rd b/man/dashboardWidgetsUpdate.Rd deleted file mode 100644 index 2bccc282..00000000 --- a/man/dashboardWidgetsUpdate.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dashboard_widgets.R -\name{dashboardWidgetsUpdate} -\alias{dashboardWidgetsUpdate} -\title{Update existing widget.} -\usage{ -dashboardWidgetsUpdate( - client, - id, - dashboard_id, - options, - width, - text = NULL, - visualization_id = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Widget ID returned by :method:dashboardwidgets/create.} - -\item{dashboard_id}{Required. Dashboard ID returned by :method:dashboards/create.} - -\item{options}{Required. This field has no description yet.} - -\item{width}{Required. Width of a widget.} - -\item{text}{If this is a textbox widget, the application displays this text.} - -\item{visualization_id}{Query Vizualization ID returned by :method:queryvisualizations/create.} -} -\description{ -Update existing widget. -} diff --git a/man/dashboardsCreate.Rd b/man/dashboardsCreate.Rd deleted file mode 100644 index 51cc81dc..00000000 --- a/man/dashboardsCreate.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dashboards.R -\name{dashboardsCreate} -\alias{dashboardsCreate} -\title{Create a dashboard object.} -\usage{ -dashboardsCreate( - client, - name, - dashboard_filters_enabled = NULL, - is_favorite = NULL, - parent = NULL, - run_as_role = NULL, - tags = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The title of this dashboard that appears in list views and at the top of the dashboard page.} - -\item{dashboard_filters_enabled}{Indicates whether the dashboard filters are enabled.} - -\item{is_favorite}{Indicates whether this dashboard object should appear in the current user's favorites list.} - -\item{parent}{The identifier of the workspace folder containing the object.} - -\item{run_as_role}{Sets the \strong{Run as} role for the object.} - -\item{tags}{This field has no description yet.} -} -\description{ -Create a dashboard object. -} diff --git a/man/dashboardsDelete.Rd b/man/dashboardsDelete.Rd deleted file mode 100644 index 98fc4f06..00000000 --- a/man/dashboardsDelete.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dashboards.R -\name{dashboardsDelete} -\alias{dashboardsDelete} -\title{Remove a dashboard.} -\usage{ -dashboardsDelete(client, dashboard_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{dashboard_id}{Required. This field has no description yet.} -} -\description{ -Moves a dashboard to the trash. Trashed dashboards do not appear in list -views or searches, and cannot be shared. 
-} diff --git a/man/dashboardsGet.Rd b/man/dashboardsGet.Rd deleted file mode 100644 index bc6a7faf..00000000 --- a/man/dashboardsGet.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dashboards.R -\name{dashboardsGet} -\alias{dashboardsGet} -\title{Retrieve a definition.} -\usage{ -dashboardsGet(client, dashboard_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{dashboard_id}{Required. This field has no description yet.} -} -\description{ -Returns a JSON representation of a dashboard object, including its -visualization and query objects. -} diff --git a/man/dashboardsList.Rd b/man/dashboardsList.Rd deleted file mode 100644 index 9d75cd8c..00000000 --- a/man/dashboardsList.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dashboards.R -\name{dashboardsList} -\alias{dashboardsList} -\title{Get dashboard objects.} -\usage{ -dashboardsList(client, order = NULL, page = NULL, page_size = NULL, q = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{order}{Name of dashboard attribute to order by.} - -\item{page}{Page number to retrieve.} - -\item{page_size}{Number of dashboards to return per page.} - -\item{q}{Full text search term.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Fetch a paginated list of dashboard objects. -} -\details{ -\subsection{**Warning: Calling this API concurrently 10 or more times could result in}{ - -throttling, service degradation, or a temporary ban.** -} -} diff --git a/man/dashboardsRestore.Rd b/man/dashboardsRestore.Rd deleted file mode 100644 index 6f524f8c..00000000 --- a/man/dashboardsRestore.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dashboards.R -\name{dashboardsRestore} -\alias{dashboardsRestore} -\title{Restore a dashboard.} -\usage{ -dashboardsRestore(client, dashboard_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{dashboard_id}{Required. This field has no description yet.} -} -\description{ -A restored dashboard appears in list views and searches and can be shared. -} diff --git a/man/dashboardsUpdate.Rd b/man/dashboardsUpdate.Rd deleted file mode 100644 index 49d47532..00000000 --- a/man/dashboardsUpdate.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dashboards.R -\name{dashboardsUpdate} -\alias{dashboardsUpdate} -\title{Change a dashboard definition.} -\usage{ -dashboardsUpdate(client, dashboard_id, name = NULL, run_as_role = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{dashboard_id}{Required. This field has no description yet.} - -\item{name}{The title of this dashboard that appears in list views and at the top of the dashboard page.} - -\item{run_as_role}{Sets the \strong{Run as} role for the object.} -} -\description{ -Modify this dashboard definition. This operation only affects attributes of -the dashboard object. It does not add, modify, or remove widgets. -} -\details{ -\strong{Note}: You cannot undo this operation. 
-} diff --git a/man/dataSourcesList.Rd b/man/dataSourcesList.Rd deleted file mode 100644 index e4951710..00000000 --- a/man/dataSourcesList.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/data_sources.R -\name{dataSourcesList} -\alias{dataSourcesList} -\title{Get a list of SQL warehouses.} -\usage{ -dataSourcesList(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Retrieves a full list of SQL warehouses available in this workspace. All -fields that appear in this API response are enumerated for clarity. However, -you need only a SQL warehouse's \code{id} to create new queries against it. -} diff --git a/man/dbfsAddBlock.Rd b/man/dbfsAddBlock.Rd deleted file mode 100644 index 702f9bf0..00000000 --- a/man/dbfsAddBlock.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dbfs.R -\name{dbfsAddBlock} -\alias{dbfsAddBlock} -\title{Append data block.} -\usage{ -dbfsAddBlock(client, handle, data) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{handle}{Required. The handle on an open stream.} - -\item{data}{Required. The base64-encoded data to append to the stream.} -} -\description{ -Appends a block of data to the stream specified by the input handle. If the -handle does not exist, this call will throw an exception with -\code{RESOURCE_DOES_NOT_EXIST}. -} -\details{ -If the block of data exceeds 1 MB, this call will throw an exception with -\code{MAX_BLOCK_SIZE_EXCEEDED}. -} diff --git a/man/dbfsClose.Rd b/man/dbfsClose.Rd deleted file mode 100644 index d097beb8..00000000 --- a/man/dbfsClose.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dbfs.R -\name{dbfsClose} -\alias{dbfsClose} -\title{Close the stream.} -\usage{ -dbfsClose(client, handle) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{handle}{Required. The handle on an open stream.} -} -\description{ -Closes the stream specified by the input handle. If the handle does not -exist, this call throws an exception with \code{RESOURCE_DOES_NOT_EXIST}. -} diff --git a/man/dbfsCreate.Rd b/man/dbfsCreate.Rd deleted file mode 100644 index cd4ca47a..00000000 --- a/man/dbfsCreate.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dbfs.R -\name{dbfsCreate} -\alias{dbfsCreate} -\title{Open a stream.} -\usage{ -dbfsCreate(client, path, overwrite = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{path}{Required. The path of the new file.} - -\item{overwrite}{The flag that specifies whether to overwrite existing file/files.} -} -\description{ -Opens a stream to write to a file and returns a handle to this stream. There -is a 10 minute idle timeout on this handle. If a file or directory already -exists on the given path and \strong{overwrite} is set to false, this call will -throw an exception with \code{RESOURCE_ALREADY_EXISTS}. -} -\details{ -A typical workflow for file upload would be: -\enumerate{ -\item Issue a \code{create} call and get a handle. 2. Issue one or more -\code{add-block} calls with the handle you have. 3. Issue a \code{close} call with -the handle you have. 
-} -} diff --git a/man/dbfsDelete.Rd b/man/dbfsDelete.Rd deleted file mode 100644 index 281687ff..00000000 --- a/man/dbfsDelete.Rd +++ /dev/null @@ -1,35 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dbfs.R -\name{dbfsDelete} -\alias{dbfsDelete} -\title{Delete a file/directory.} -\usage{ -dbfsDelete(client, path, recursive = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{path}{Required. The path of the file or directory to delete.} - -\item{recursive}{Whether or not to recursively delete the directory's contents.} -} -\description{ -Delete the file or directory (optionally recursively delete all files in the -directory). This call throws an exception with \code{IO_ERROR} if the path is a -non-empty directory and \code{recursive} is set to \code{false} or on other similar -errors. -} -\details{ -When you delete a large number of files, the delete operation is done in -increments. The call returns a response after approximately 45 seconds with -an error message (503 Service Unavailable) asking you to re-invoke the delete -operation until the directory structure is fully deleted. - -For operations that delete more than 10K files, we discourage using the DBFS -REST API, but advise you to perform such operations in the context of a -cluster, using the \href{/dev-tools/databricks-utils.html#dbutils-fs}{File system utility (dbutils.fs)}. \code{dbutils.fs} -covers the functional scope of the DBFS REST API, but from notebooks. Running -such operations using notebooks provides better control and manageability, -such as selective deletes, and the possibility to automate periodic delete -jobs. -} diff --git a/man/dbfsGetStatus.Rd b/man/dbfsGetStatus.Rd deleted file mode 100644 index c6e6994d..00000000 --- a/man/dbfsGetStatus.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dbfs.R -\name{dbfsGetStatus} -\alias{dbfsGetStatus} -\title{Get the information of a file or directory.} -\usage{ -dbfsGetStatus(client, path) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{path}{Required. The path of the file or directory.} -} -\description{ -Gets the file information for a file or directory. If the file or directory -does not exist, this call throws an exception with \code{RESOURCE_DOES_NOT_EXIST}. -} diff --git a/man/dbfsList.Rd b/man/dbfsList.Rd deleted file mode 100644 index 35d9521a..00000000 --- a/man/dbfsList.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dbfs.R -\name{dbfsList} -\alias{dbfsList} -\title{List directory contents or file details.} -\usage{ -dbfsList(client, path) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{path}{Required. The path of the file or directory.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -List the contents of a directory, or details of the file. If the file or -directory does not exist, this call throws an exception with -\code{RESOURCE_DOES_NOT_EXIST}. -} -\details{ -When calling list on a large directory, the list operation will time out -after approximately 60 seconds. We strongly recommend using list only on -directories containing less than 10K files and discourage using the DBFS REST -API for operations that list more than 10K files. 
Instead, we recommend that -you perform such operations in the context of a cluster, using the \href{/dev-tools/databricks-utils.html#dbutils-fs}{File system utility (dbutils.fs)}, -which provides the same functionality without timing out. -} diff --git a/man/dbfsMkdirs.Rd b/man/dbfsMkdirs.Rd deleted file mode 100644 index 15af11a8..00000000 --- a/man/dbfsMkdirs.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dbfs.R -\name{dbfsMkdirs} -\alias{dbfsMkdirs} -\title{Create a directory.} -\usage{ -dbfsMkdirs(client, path) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{path}{Required. The path of the new directory.} -} -\description{ -Creates the given directory and necessary parent directories if they do not -exist. If a file (not a directory) exists at any prefix of the input path, -this call throws an exception with \code{RESOURCE_ALREADY_EXISTS}. \strong{Note}: If -this operation fails, it might have succeeded in creating some of the -necessary parent directories. -} diff --git a/man/dbfsMove.Rd b/man/dbfsMove.Rd deleted file mode 100644 index 4cabce2b..00000000 --- a/man/dbfsMove.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dbfs.R -\name{dbfsMove} -\alias{dbfsMove} -\title{Move a file.} -\usage{ -dbfsMove(client, source_path, destination_path) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{source_path}{Required. The source path of the file or directory.} - -\item{destination_path}{Required. The destination path of the file or directory.} -} -\description{ -Moves a file from one location to another location within DBFS. If the source -file does not exist, this call throws an exception with -\code{RESOURCE_DOES_NOT_EXIST}. If a file already exists in the destination path, -this call throws an exception with \code{RESOURCE_ALREADY_EXISTS}. If the given -source path is a directory, this call always recursively moves all files. -} diff --git a/man/dbfsPut.Rd b/man/dbfsPut.Rd deleted file mode 100644 index ef2ecc26..00000000 --- a/man/dbfsPut.Rd +++ /dev/null @@ -1,32 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dbfs.R -\name{dbfsPut} -\alias{dbfsPut} -\title{Upload a file.} -\usage{ -dbfsPut(client, path, contents = NULL, overwrite = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{path}{Required. The path of the new file.} - -\item{contents}{This parameter might be absent, and instead a posted file will be used.} - -\item{overwrite}{The flag that specifies whether to overwrite existing file/files.} -} -\description{ -Uploads a file through the use of multipart form post. It is mainly used for -streaming uploads, but can also be used as a convenient single call for data -upload. -} -\details{ -Alternatively you can pass contents as base64 string. - -The amount of data that can be passed (when not streaming) using the -\strong{contents} parameter is limited to 1 MB. \code{MAX_BLOCK_SIZE_EXCEEDED} will be -thrown if this limit is exceeded. - -If you want to upload large files, use the streaming upload. For details, see -:method:dbfs/create, :method:dbfs/addBlock, :method:dbfs/close. 
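For small payloads, the single-call upload documented above can be sketched as follows; this assumes the camelCase name stays callable (alongside its snake_case equivalent) and uses jsonlite only to produce the base64 contents.

# Inline contents are capped at 1 MB; larger files should use the streaming
# create/add-block/close workflow sketched earlier.
dbfsPut(client, path = "/tmp/config.json",
        contents = jsonlite::base64_enc(charToRaw('{"retries": 3}')),
        overwrite = TRUE)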
-} diff --git a/man/dbfsRead.Rd b/man/dbfsRead.Rd deleted file mode 100644 index 7d158f5a..00000000 --- a/man/dbfsRead.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dbfs.R -\name{dbfsRead} -\alias{dbfsRead} -\title{Get the contents of a file.} -\usage{ -dbfsRead(client, path, length = NULL, offset = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{path}{Required. The path of the file to read.} - -\item{length}{The number of bytes to read starting from the offset.} - -\item{offset}{The offset to read from in bytes.} -} -\description{ -Returns the contents of a file. If the file does not exist, this call throws -an exception with \code{RESOURCE_DOES_NOT_EXIST}. If the path is a directory, the -read length is negative, or if the offset is negative, this call throws an -exception with \code{INVALID_PARAMETER_VALUE}. If the read length exceeds 1 MB, -this call throws an exception with \code{MAX_READ_SIZE_EXCEEDED}. -} -\details{ -If \code{offset + length} exceeds the number of bytes in a file, it reads the -contents until the end of file. -} diff --git a/man/dbsqlPermissionsGet.Rd b/man/dbsqlPermissionsGet.Rd deleted file mode 100644 index 91d5831e..00000000 --- a/man/dbsqlPermissionsGet.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dbsql_permissions.R -\name{dbsqlPermissionsGet} -\alias{dbsqlPermissionsGet} -\title{Get object ACL.} -\usage{ -dbsqlPermissionsGet(client, object_type, object_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{object_type}{Required. The type of object permissions to check.} - -\item{object_id}{Required. Object ID.} -} -\description{ -Gets a JSON representation of the access control list (ACL) for a specified -object. -} diff --git a/man/dbsqlPermissionsSet.Rd b/man/dbsqlPermissionsSet.Rd deleted file mode 100644 index ab832651..00000000 --- a/man/dbsqlPermissionsSet.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dbsql_permissions.R -\name{dbsqlPermissionsSet} -\alias{dbsqlPermissionsSet} -\title{Set object ACL.} -\usage{ -dbsqlPermissionsSet(client, object_type, object_id, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{object_type}{Required. The type of object permission to set.} - -\item{object_id}{Required. Object ID.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets the access control list (ACL) for a specified object. This operation -will complete rewrite the ACL. -} diff --git a/man/dbsqlPermissionsTransferOwnership.Rd b/man/dbsqlPermissionsTransferOwnership.Rd deleted file mode 100644 index 0e8e830a..00000000 --- a/man/dbsqlPermissionsTransferOwnership.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/dbsql_permissions.R -\name{dbsqlPermissionsTransferOwnership} -\alias{dbsqlPermissionsTransferOwnership} -\title{Transfer object ownership.} -\usage{ -dbsqlPermissionsTransferOwnership( - client, - object_type, - object_id, - new_owner = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{object_type}{Required. The type of object on which to change ownership.} - -\item{object_id}{Required. 
The ID of the object on which to change ownership.} - -\item{new_owner}{Email address for the new owner, who must exist in the workspace.} -} -\description{ -Transfers ownership of a dashboard, query, or alert to an active user. -Requires an admin API key. -} diff --git a/man/defaultNamespaceDelete.Rd b/man/defaultNamespaceDelete.Rd deleted file mode 100644 index 94a57be6..00000000 --- a/man/defaultNamespaceDelete.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/default_namespace.R -\name{defaultNamespaceDelete} -\alias{defaultNamespaceDelete} -\title{Delete the default namespace setting.} -\usage{ -defaultNamespaceDelete(client, etag = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{etag}{etag used for versioning.} -} -\description{ -Deletes the default namespace setting for the workspace. A fresh etag needs -to be provided in \code{DELETE} requests (as a query parameter). The etag can be -retrieved by making a \code{GET} request before the \code{DELETE} request. If the -setting is updated/deleted concurrently, \code{DELETE} fails with 409 and the -request must be retried by using the fresh etag in the 409 response. -} diff --git a/man/defaultNamespaceGet.Rd b/man/defaultNamespaceGet.Rd deleted file mode 100644 index 2c4e1b31..00000000 --- a/man/defaultNamespaceGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/default_namespace.R -\name{defaultNamespaceGet} -\alias{defaultNamespaceGet} -\title{Get the default namespace setting.} -\usage{ -defaultNamespaceGet(client, etag = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{etag}{etag used for versioning.} -} -\description{ -Gets the default namespace setting. -} diff --git a/man/defaultNamespaceUpdate.Rd b/man/defaultNamespaceUpdate.Rd deleted file mode 100644 index 94a471da..00000000 --- a/man/defaultNamespaceUpdate.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/default_namespace.R -\name{defaultNamespaceUpdate} -\alias{defaultNamespaceUpdate} -\title{Update the default namespace setting.} -\usage{ -defaultNamespaceUpdate(client, allow_missing, setting, field_mask) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{allow_missing}{Required. This should always be set to true for Settings API.} - -\item{setting}{Required. This represents the setting configuration for the default namespace in the Databricks workspace.} - -\item{field_mask}{Required. Field mask is required to be passed into the PATCH request.} -} -\description{ -Updates the default namespace setting for the workspace. A fresh etag needs -to be provided in \code{PATCH} requests (as part of the setting field). The etag -can be retrieved by making a \code{GET} request before the \code{PATCH} request. Note -that if the setting does not exist, \code{GET} returns a NOT_FOUND error and the -etag is present in the error response, which should be set in the \code{PATCH} -request. If the setting is updated concurrently, \code{PATCH} fails with 409 and -the request must be retried by using the fresh etag in the 409 response. 
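The etag handshake described above amounts to a GET followed by a PATCH. A hedged sketch, using the signatures documented here; the shape of the setting payload is an assumption based on the parameter descriptions, not something this diff specifies.

current <- defaultNamespaceGet(client)  # fetch a fresh etag first
defaultNamespaceUpdate(client,
  allow_missing = TRUE,
  # Assumed payload shape: the new namespace value plus the etag just fetched.
  setting = list(namespace = list(value = "main"), etag = current$etag),
  field_mask = "namespace.value")
# On a 409, re-run the GET and retry the PATCH with the etag from the 409 response.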
-} diff --git a/man/delete_alert.Rd b/man/delete_alert.Rd new file mode 100644 index 00000000..15a1dfa3 --- /dev/null +++ b/man/delete_alert.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/alerts.R +\name{delete_alert} +\alias{delete_alert} +\alias{alertsDelete} +\title{Delete an alert.} +\usage{ +delete_alert(client, alert_id) + +alertsDelete(client, alert_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{alert_id}{Required. This field has no description yet.} +} +\description{ +Deletes an alert. Deleted alerts are no longer accessible and cannot be +restored. \strong{Note:} Unlike queries and dashboards, alerts cannot be moved to +the trash. +} diff --git a/man/delete_app.Rd b/man/delete_app.Rd new file mode 100644 index 00000000..7836da25 --- /dev/null +++ b/man/delete_app.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/apps.R +\name{delete_app} +\alias{delete_app} +\alias{appsDeleteApp} +\title{Delete an application.} +\usage{ +delete_app(client, name) + +appsDeleteApp(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of an application.} +} +\description{ +Delete an application definition +} diff --git a/man/delete_catalog.Rd b/man/delete_catalog.Rd new file mode 100644 index 00000000..fa0dc365 --- /dev/null +++ b/man/delete_catalog.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/catalogs.R +\name{delete_catalog} +\alias{delete_catalog} +\alias{catalogsDelete} +\title{Delete a catalog.} +\usage{ +delete_catalog(client, name, force = NULL) + +catalogsDelete(client, name, force = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the catalog.} + +\item{force}{Force deletion even if the catalog is not empty.} +} +\description{ +Deletes the catalog that matches the supplied name. The caller must be a +metastore admin or the owner of the catalog. +} diff --git a/man/delete_clean_room.Rd b/man/delete_clean_room.Rd new file mode 100644 index 00000000..0305e9e3 --- /dev/null +++ b/man/delete_clean_room.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clean_rooms.R +\name{delete_clean_room} +\alias{delete_clean_room} +\alias{cleanRoomsDelete} +\title{Delete a clean room.} +\usage{ +delete_clean_room(client, name) + +cleanRoomsDelete(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the clean room.} +} +\description{ +Deletes a data object clean room from the metastore. The caller must be an +owner of the clean room. +} diff --git a/man/delete_cluster.Rd b/man/delete_cluster.Rd new file mode 100644 index 00000000..880920dd --- /dev/null +++ b/man/delete_cluster.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{delete_cluster} +\alias{delete_cluster} +\alias{clustersDelete} +\title{Terminate cluster.} +\usage{ +delete_cluster(client, cluster_id) + +clustersDelete(client, cluster_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster to be terminated.} +} +\description{ +Terminates the Spark cluster with the specified ID. The cluster is removed +asynchronously. 
Once the termination has completed, the cluster will be in a +\code{TERMINATED} state. If the cluster is already in a \code{TERMINATING} or +\code{TERMINATED} state, nothing will happen. +} diff --git a/man/delete_cluster_and_wait.Rd b/man/delete_cluster_and_wait.Rd new file mode 100644 index 00000000..579101fd --- /dev/null +++ b/man/delete_cluster_and_wait.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{delete_cluster_and_wait} +\alias{delete_cluster_and_wait} +\title{Terminate cluster.} +\usage{ +delete_cluster_and_wait( + client, + cluster_id, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster to be terminated.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation that blocks until the cluster reaches the +\code{TERMINATED} state, with a timeout of 20 minutes that you can change via the +\code{timeout} parameter. By default, progress is reported to the console; you can +change this behavior via the \code{callback} parameter. +} +\details{ +Terminates the Spark cluster with the specified ID. The cluster is removed +asynchronously. Once the termination has completed, the cluster will be in a +\code{TERMINATED} state. If the cluster is already in a \code{TERMINATING} or +\code{TERMINATED} state, nothing will happen. +} diff --git a/man/delete_cluster_policy.Rd b/man/delete_cluster_policy.Rd new file mode 100644 index 00000000..2e63e0d4 --- /dev/null +++ b/man/delete_cluster_policy.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/cluster_policies.R +\name{delete_cluster_policy} +\alias{delete_cluster_policy} +\alias{clusterPoliciesDelete} +\title{Delete a cluster policy.} +\usage{ +delete_cluster_policy(client, policy_id) + +clusterPoliciesDelete(client, policy_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{policy_id}{Required. The ID of the policy to delete.} +} +\description{ +Delete a policy for a cluster. Clusters governed by this policy can still +run, but cannot be edited. +} diff --git a/man/delete_connection.Rd b/man/delete_connection.Rd new file mode 100644 index 00000000..12082bd5 --- /dev/null +++ b/man/delete_connection.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/connections.R +\name{delete_connection} +\alias{delete_connection} +\alias{connectionsDelete} +\title{Delete a connection.} +\usage{ +delete_connection(client, name) + +connectionsDelete(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the connection to be deleted.} +} +\description{ +Deletes the connection that matches the supplied name.
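A minimal sketch of the blocking variant documented above, using the signature shown there; the cluster ID is illustrative.

delete_cluster_and_wait(client,
  cluster_id = "0123-456789-abcdef00",  # illustrative ID
  timeout = 30)  # minutes; defaults to 20, with progress printed by cli_reporter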
+} diff --git a/man/delete_dashboard.Rd b/man/delete_dashboard.Rd new file mode 100644 index 00000000..82ac7218 --- /dev/null +++ b/man/delete_dashboard.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dashboards.R +\name{delete_dashboard} +\alias{delete_dashboard} +\alias{dashboardsDelete} +\title{Remove a dashboard.} +\usage{ +delete_dashboard(client, dashboard_id) + +dashboardsDelete(client, dashboard_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{dashboard_id}{Required. This field has no description yet.} +} +\description{ +Moves a dashboard to the trash. Trashed dashboards do not appear in list +views or searches, and cannot be shared. +} diff --git a/man/delete_dashboard_widget.Rd b/man/delete_dashboard_widget.Rd new file mode 100644 index 00000000..7b42bb37 --- /dev/null +++ b/man/delete_dashboard_widget.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dashboard_widgets.R +\name{delete_dashboard_widget} +\alias{delete_dashboard_widget} +\alias{dashboardWidgetsDelete} +\title{Remove widget.} +\usage{ +delete_dashboard_widget(client, id) + +dashboardWidgetsDelete(client, id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Widget ID returned by :method:dashboardwidgets/create.} +} +\description{ +Remove widget. +} diff --git a/man/delete_dbfs.Rd b/man/delete_dbfs.Rd new file mode 100644 index 00000000..cdf4d971 --- /dev/null +++ b/man/delete_dbfs.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dbfs.R +\name{delete_dbfs} +\alias{delete_dbfs} +\alias{dbfsDelete} +\title{Delete a file/directory.} +\usage{ +delete_dbfs(client, path, recursive = NULL) + +dbfsDelete(client, path, recursive = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{path}{Required. The path of the file or directory to delete.} + +\item{recursive}{Whether or not to recursively delete the directory's contents.} +} +\description{ +Delete the file or directory (optionally recursively delete all files in the +directory). This call throws an exception with \code{IO_ERROR} if the path is a +non-empty directory and \code{recursive} is set to \code{false} or on other similar +errors. +} +\details{ +When you delete a large number of files, the delete operation is done in +increments. The call returns a response after approximately 45 seconds with +an error message (503 Service Unavailable) asking you to re-invoke the delete +operation until the directory structure is fully deleted. + +For operations that delete more than 10K files, we discourage using the DBFS +REST API, but advise you to perform such operations in the context of a +cluster, using the \href{/dev-tools/databricks-utils.html#dbutils-fs}{File system utility (dbutils.fs)}. \code{dbutils.fs} +covers the functional scope of the DBFS REST API, but from notebooks. Running +such operations using notebooks provides better control and manageability, +such as selective deletes, and the possibility to automate periodic delete +jobs. 
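As a sketch, a recursive delete with the delete_dbfs signature above looks like this; per the details, trees with more than ~10K files are better handled with dbutils.fs from a cluster.

# Recursively remove a small DBFS directory; path is illustrative.
delete_dbfs(client, path = "/tmp/old-exports", recursive = TRUE)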
+} diff --git a/man/delete_default_namespace.Rd b/man/delete_default_namespace.Rd new file mode 100644 index 00000000..ac19b0cc --- /dev/null +++ b/man/delete_default_namespace.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/default_namespace.R +\name{delete_default_namespace} +\alias{delete_default_namespace} +\alias{defaultNamespaceDelete} +\title{Delete the default namespace setting.} +\usage{ +delete_default_namespace(client, etag = NULL) + +defaultNamespaceDelete(client, etag = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{etag}{etag used for versioning.} +} +\description{ +Deletes the default namespace setting for the workspace. A fresh etag needs +to be provided in \code{DELETE} requests (as a query parameter). The etag can be +retrieved by making a \code{GET} request before the \code{DELETE} request. If the +setting is updated/deleted concurrently, \code{DELETE} fails with 409 and the +request must be retried by using the fresh etag in the 409 response. +} diff --git a/man/delete_experiment.Rd b/man/delete_experiment.Rd new file mode 100644 index 00000000..a1c29c79 --- /dev/null +++ b/man/delete_experiment.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{delete_experiment} +\alias{delete_experiment} +\alias{experimentsDeleteExperiment} +\title{Delete an experiment.} +\usage{ +delete_experiment(client, experiment_id) + +experimentsDeleteExperiment(client, experiment_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{experiment_id}{Required. ID of the associated experiment.} +} +\description{ +Marks an experiment and associated metadata, runs, metrics, params, and tags +for deletion. If the experiment uses FileStore, artifacts associated with the +experiment are also deleted. +} diff --git a/man/delete_experiment_run.Rd b/man/delete_experiment_run.Rd new file mode 100644 index 00000000..acb07b10 --- /dev/null +++ b/man/delete_experiment_run.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{delete_experiment_run} +\alias{delete_experiment_run} +\alias{experimentsDeleteRun} +\title{Delete a run.} +\usage{ +delete_experiment_run(client, run_id) + +experimentsDeleteRun(client, run_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. ID of the run to delete.} +} +\description{ +Marks a run for deletion. +} diff --git a/man/delete_experiment_runs.Rd b/man/delete_experiment_runs.Rd new file mode 100644 index 00000000..6fb9ce9f --- /dev/null +++ b/man/delete_experiment_runs.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{delete_experiment_runs} +\alias{delete_experiment_runs} +\alias{experimentsDeleteRuns} +\title{Delete runs by creation time.} +\usage{ +delete_experiment_runs( + client, + experiment_id, + max_timestamp_millis, + max_runs = NULL +) + +experimentsDeleteRuns( + client, + experiment_id, + max_timestamp_millis, + max_runs = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{experiment_id}{Required. The ID of the experiment containing the runs to delete.} + +\item{max_timestamp_millis}{Required.
The maximum creation timestamp in milliseconds since the UNIX epoch for deleting runs.} + +\item{max_runs}{An optional positive integer indicating the maximum number of runs to delete.} +} +\description{ +Bulk delete runs in an experiment that were created prior to or at the +specified timestamp. Deletes at most max_runs per request. To call this API +from a Databricks Notebook in Python, you can use the client code snippet on +https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-delete. +} diff --git a/man/delete_experiment_tag.Rd b/man/delete_experiment_tag.Rd new file mode 100644 index 00000000..dde52d56 --- /dev/null +++ b/man/delete_experiment_tag.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{delete_experiment_tag} +\alias{delete_experiment_tag} +\alias{experimentsDeleteTag} +\title{Delete a tag.} +\usage{ +delete_experiment_tag(client, run_id, key) + +experimentsDeleteTag(client, run_id, key) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. ID of the run that the tag was logged under.} + +\item{key}{Required. Name of the tag.} +} +\description{ +Deletes a tag on a run. Tags are run metadata that can be updated during a +run and after a run completes. +} diff --git a/man/delete_external_location.Rd b/man/delete_external_location.Rd new file mode 100644 index 00000000..a46259db --- /dev/null +++ b/man/delete_external_location.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/external_locations.R +\name{delete_external_location} +\alias{delete_external_location} +\alias{externalLocationsDelete} +\title{Delete an external location.} +\usage{ +delete_external_location(client, name, force = NULL) + +externalLocationsDelete(client, name, force = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the external location.} + +\item{force}{Force deletion even if there are dependent external tables or mounts.} +} +\description{ +Deletes the specified external location from the metastore. The caller must +be the owner of the external location. +} diff --git a/man/delete_file.Rd b/man/delete_file.Rd new file mode 100644 index 00000000..0352e686 --- /dev/null +++ b/man/delete_file.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/files.R +\name{delete_file} +\alias{delete_file} +\alias{filesDelete} +\title{Delete a file.} +\usage{ +delete_file(client, file_path) + +filesDelete(client, file_path) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{file_path}{Required. The absolute path of the file.} +} +\description{ +Deletes a file. If the request is successful, there is no response body. +} diff --git a/man/delete_file_directory.Rd b/man/delete_file_directory.Rd new file mode 100644 index 00000000..fecb8ac7 --- /dev/null +++ b/man/delete_file_directory.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/files.R +\name{delete_file_directory} +\alias{delete_file_directory} +\alias{filesDeleteDirectory} +\title{Delete a directory.} +\usage{ +delete_file_directory(client, directory_path) + +filesDeleteDirectory(client, directory_path) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{directory_path}{Required. The absolute path of a directory.} +} +\description{ +Deletes an empty directory. 
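Since max_timestamp_millis in the bulk-delete entry above is epoch milliseconds, a sketch of the call converts a cutoff date first; the experiment ID is illustrative.

# Delete runs created at or before 2024-01-01 UTC, at most 1000 per request.
cutoff_ms <- as.numeric(as.POSIXct("2024-01-01", tz = "UTC")) * 1000
delete_experiment_runs(client,
  experiment_id = "123456789",       # illustrative
  max_timestamp_millis = cutoff_ms,
  max_runs = 1000)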
+} +\details{ +To delete a non-empty directory, first delete all of its contents. This can +be done by listing the directory contents and deleting each file and +subdirectory recursively. +} diff --git a/man/delete_function.Rd b/man/delete_function.Rd new file mode 100644 index 00000000..2edd2ae1 --- /dev/null +++ b/man/delete_function.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/functions.R +\name{delete_function} +\alias{delete_function} +\alias{functionsDelete} +\title{Delete a function.} +\usage{ +delete_function(client, name, force = NULL) + +functionsDelete(client, name, force = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The fully-qualified name of the function (of the form \strong{catalog_name}.\strong{schema_name}.\strong{function_name}).} + +\item{force}{Force deletion even if the function is not empty.} +} +\description{ +Deletes the function that matches the supplied name. For the deletion to +succeed, the user must satisfy one of the following conditions: - Is the +owner of the function's parent catalog - Is the owner of the function's +parent schema and have the \strong{USE_CATALOG} privilege on its parent catalog - +Is the owner of the function itself and have both the \strong{USE_CATALOG} +privilege on its parent catalog and the \strong{USE_SCHEMA} privilege on its +parent schema +} diff --git a/man/delete_git_credential.Rd b/man/delete_git_credential.Rd new file mode 100644 index 00000000..b8ea21b4 --- /dev/null +++ b/man/delete_git_credential.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/git_credentials.R +\name{delete_git_credential} +\alias{delete_git_credential} +\alias{gitCredentialsDelete} +\title{Delete a credential.} +\usage{ +delete_git_credential(client, credential_id) + +gitCredentialsDelete(client, credential_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{credential_id}{Required. The ID for the corresponding credential to access.} +} +\description{ +Deletes the specified Git credential. +} diff --git a/man/delete_global_init_script.Rd b/man/delete_global_init_script.Rd new file mode 100644 index 00000000..752c637a --- /dev/null +++ b/man/delete_global_init_script.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/global_init_scripts.R +\name{delete_global_init_script} +\alias{delete_global_init_script} +\alias{globalInitScriptsDelete} +\title{Delete init script.} +\usage{ +delete_global_init_script(client, script_id) + +globalInitScriptsDelete(client, script_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{script_id}{Required. The ID of the global init script.} +} +\description{ +Deletes a global init script. +} diff --git a/man/delete_group.Rd b/man/delete_group.Rd new file mode 100644 index 00000000..dcdebb7c --- /dev/null +++ b/man/delete_group.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/groups.R +\name{delete_group} +\alias{delete_group} +\alias{groupsDelete} +\title{Delete a group.} +\usage{ +delete_group(client, id) + +groupsDelete(client, id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Unique ID for a group in the Databricks workspace.} +} +\description{ +Deletes a group from the Databricks workspace.
+} diff --git a/man/delete_instance_pool.Rd b/man/delete_instance_pool.Rd new file mode 100644 index 00000000..a314ce6c --- /dev/null +++ b/man/delete_instance_pool.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/instance_pools.R +\name{delete_instance_pool} +\alias{delete_instance_pool} +\alias{instancePoolsDelete} +\title{Delete an instance pool.} +\usage{ +delete_instance_pool(client, instance_pool_id) + +instancePoolsDelete(client, instance_pool_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{instance_pool_id}{Required. The instance pool to be terminated.} +} +\description{ +Deletes the instance pool permanently. The idle instances in the pool are +terminated asynchronously. +} diff --git a/man/delete_ip_access_list.Rd b/man/delete_ip_access_list.Rd new file mode 100644 index 00000000..f72f2ed8 --- /dev/null +++ b/man/delete_ip_access_list.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ip_access_lists.R +\name{delete_ip_access_list} +\alias{delete_ip_access_list} +\alias{ipAccessListsDelete} +\title{Delete access list.} +\usage{ +delete_ip_access_list(client, ip_access_list_id) + +ipAccessListsDelete(client, ip_access_list_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{ip_access_list_id}{Required. The ID for the corresponding IP access list.} +} +\description{ +Deletes an IP access list, specified by its list ID. +} diff --git a/man/delete_job.Rd b/man/delete_job.Rd new file mode 100644 index 00000000..b7827a3f --- /dev/null +++ b/man/delete_job.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{delete_job} +\alias{delete_job} +\alias{jobsDelete} +\title{Delete a job.} +\usage{ +delete_job(client, job_id) + +jobsDelete(client, job_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{job_id}{Required. The canonical identifier of the job to delete.} +} +\description{ +Deletes a job. +} diff --git a/man/delete_job_run.Rd b/man/delete_job_run.Rd new file mode 100644 index 00000000..fd4c06ff --- /dev/null +++ b/man/delete_job_run.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{delete_job_run} +\alias{delete_job_run} +\alias{jobsDeleteRun} +\title{Delete a job run.} +\usage{ +delete_job_run(client, run_id) + +jobsDeleteRun(client, run_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. The canonical identifier of the run for which to retrieve the metadata.} +} +\description{ +Deletes a non-active run. Returns an error if the run is active. +} diff --git a/man/delete_lakehouse_monitor.Rd b/man/delete_lakehouse_monitor.Rd new file mode 100644 index 00000000..2ea152a7 --- /dev/null +++ b/man/delete_lakehouse_monitor.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/lakehouse_monitors.R +\name{delete_lakehouse_monitor} +\alias{delete_lakehouse_monitor} +\alias{lakehouseMonitorsDelete} +\title{Delete a table monitor.} +\usage{ +delete_lakehouse_monitor(client, full_name) + +lakehouseMonitorsDelete(client, full_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the table.} +} +\description{ +Deletes a monitor for the specified table. +} +\details{ +The caller must either: 1. 
be an owner of the table's parent catalog 2. have +\strong{USE_CATALOG} on the table's parent catalog and be an owner of the table's +parent schema 3. have the following permissions: - \strong{USE_CATALOG} on the +table's parent catalog - \strong{USE_SCHEMA} on the table's parent schema - be an +owner of the table. + +Additionally, the call must be made from the workspace where the monitor was +created. + +Note that the metric tables and dashboard will not be deleted as part of this +call; those assets must be manually cleaned up (if desired). +} diff --git a/man/delete_metastore.Rd b/man/delete_metastore.Rd new file mode 100644 index 00000000..3cbe0a8b --- /dev/null +++ b/man/delete_metastore.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/metastores.R +\name{delete_metastore} +\alias{delete_metastore} +\alias{metastoresDelete} +\title{Delete a metastore.} +\usage{ +delete_metastore(client, id, force = NULL) + +metastoresDelete(client, id, force = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Unique ID of the metastore.} + +\item{force}{Force deletion even if the metastore is not empty.} +} +\description{ +Deletes a metastore. The caller must be a metastore admin. +} diff --git a/man/delete_model.Rd b/man/delete_model.Rd new file mode 100644 index 00000000..f332b79e --- /dev/null +++ b/man/delete_model.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{delete_model} +\alias{delete_model} +\alias{modelRegistryDeleteModel} +\title{Delete a model.} +\usage{ +delete_model(client, name) + +modelRegistryDeleteModel(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Registered model unique name identifier.} +} +\description{ +Deletes a registered model. +} diff --git a/man/delete_model_comment.Rd b/man/delete_model_comment.Rd new file mode 100644 index 00000000..8e238b28 --- /dev/null +++ b/man/delete_model_comment.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{delete_model_comment} +\alias{delete_model_comment} +\alias{modelRegistryDeleteComment} +\title{Delete a comment.} +\usage{ +delete_model_comment(client, id) + +modelRegistryDeleteComment(client, id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. This field has no description yet.} +} +\description{ +Deletes a comment on a model version. +} diff --git a/man/delete_model_tag.Rd b/man/delete_model_tag.Rd new file mode 100644 index 00000000..dc8ce643 --- /dev/null +++ b/man/delete_model_tag.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{delete_model_tag} +\alias{delete_model_tag} +\alias{modelRegistryDeleteModelTag} +\title{Delete a model tag.} +\usage{ +delete_model_tag(client, name, key) + +modelRegistryDeleteModelTag(client, name, key) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the registered model that the tag was logged under.} + +\item{key}{Required. Name of the tag.} +} +\description{ +Deletes the tag for a registered model. 
+} diff --git a/man/delete_model_transition_request.Rd b/man/delete_model_transition_request.Rd new file mode 100644 index 00000000..80ccf898 --- /dev/null +++ b/man/delete_model_transition_request.Rd @@ -0,0 +1,41 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{delete_model_transition_request} +\alias{delete_model_transition_request} +\alias{modelRegistryDeleteTransitionRequest} +\title{Delete a transition request.} +\usage{ +delete_model_transition_request( + client, + name, + version, + stage, + creator, + comment = NULL +) + +modelRegistryDeleteTransitionRequest( + client, + name, + version, + stage, + creator, + comment = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the model.} + +\item{version}{Required. Version of the model.} + +\item{stage}{Required. Target stage of the transition request.} + +\item{creator}{Required. Username of the user who created this request.} + +\item{comment}{User-provided comment on the action.} +} +\description{ +Cancels a model version stage transition request. +} diff --git a/man/delete_model_version.Rd b/man/delete_model_version.Rd new file mode 100644 index 00000000..d964e5ec --- /dev/null +++ b/man/delete_model_version.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R, R/model_versions.R +\name{delete_model_version} +\alias{delete_model_version} +\alias{modelRegistryDeleteModelVersion} +\alias{modelVersionsDelete} +\title{Delete a model version.} +\usage{ +delete_model_version(client, full_name, version) + +modelRegistryDeleteModelVersion(client, name, version) + +delete_model_version(client, full_name, version) + +modelVersionsDelete(client, full_name, version) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. The three-level (fully qualified) name of the model version.} + +\item{version}{Required. The integer version number of the model version.} + +\item{name}{Required. Name of the registered model.} +} +\description{ +Deletes a model version. + +Deletes a model version from the specified registered model. Any aliases +assigned to the model version will also be deleted. +} +\details{ +The caller must be a metastore admin or an owner of the parent registered +model. For the latter case, the caller must also be the owner or have the +\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} +privilege on the parent schema. +} diff --git a/man/delete_model_version_tag.Rd b/man/delete_model_version_tag.Rd new file mode 100644 index 00000000..e8f800f1 --- /dev/null +++ b/man/delete_model_version_tag.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{delete_model_version_tag} +\alias{delete_model_version_tag} +\alias{modelRegistryDeleteModelVersionTag} +\title{Delete a model version tag.} +\usage{ +delete_model_version_tag(client, name, version, key) + +modelRegistryDeleteModelVersionTag(client, name, version, key) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the registered model that the tag was logged under.} + +\item{version}{Required. Model version number that the tag was logged under.} + +\item{key}{Required. Name of the tag.} +} +\description{ +Deletes a model version tag. 
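A sketch of cancelling a pending stage transition request with the delete_model_transition_request signature shown above; every value is illustrative.

delete_model_transition_request(client,
  name = "churn_model", version = "4",
  stage = "Production",
  creator = "someone@example.com",      # user who opened the request
  comment = "superseded by version 5")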
+} diff --git a/man/delete_model_webhook.Rd b/man/delete_model_webhook.Rd new file mode 100644 index 00000000..72bdc632 --- /dev/null +++ b/man/delete_model_webhook.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{delete_model_webhook} +\alias{delete_model_webhook} +\alias{modelRegistryDeleteWebhook} +\title{Delete a webhook.} +\usage{ +delete_model_webhook(client, id = NULL) + +modelRegistryDeleteWebhook(client, id = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Webhook ID required to delete a registry webhook.} +} +\description{ +\strong{NOTE:} This endpoint is in Public Preview. +} +\details{ +Deletes a registry webhook. +} diff --git a/man/delete_notebook.Rd b/man/delete_notebook.Rd new file mode 100644 index 00000000..1aa2dca7 --- /dev/null +++ b/man/delete_notebook.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{delete_notebook} +\alias{delete_notebook} +\alias{workspaceDelete} +\title{Delete a workspace object.} +\usage{ +delete_notebook(client, path, recursive = NULL) + +workspaceDelete(client, path, recursive = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{path}{Required. The absolute path of the notebook or directory.} + +\item{recursive}{The flag that specifies whether to delete the object recursively.} +} +\description{ +Deletes an object or a directory (and optionally recursively deletes all +objects in the directory). * If \code{path} does not exist, this call returns an +error \code{RESOURCE_DOES_NOT_EXIST}. * If \code{path} is a non-empty directory and +\code{recursive} is set to \code{false}, this call returns an error +\code{DIRECTORY_NOT_EMPTY}. +} +\details{ +Object deletion cannot be undone and deleting a directory recursively is not +atomic. +} diff --git a/man/delete_online_table.Rd b/man/delete_online_table.Rd new file mode 100644 index 00000000..93f43b0f --- /dev/null +++ b/man/delete_online_table.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/online_tables.R +\name{delete_online_table} +\alias{delete_online_table} +\alias{onlineTablesDelete} +\title{Delete an Online Table.} +\usage{ +delete_online_table(client, name) + +onlineTablesDelete(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Full three-part (catalog, schema, table) name of the table.} +} +\description{ +Delete an online table. Warning: This will delete all the data in the online +table. If the source Delta table was deleted or modified since this Online +Table was created, this will lose the data forever! +} diff --git a/man/delete_pipeline.Rd b/man/delete_pipeline.Rd new file mode 100644 index 00000000..3b215380 --- /dev/null +++ b/man/delete_pipeline.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{delete_pipeline} +\alias{delete_pipeline} +\alias{pipelinesDelete} +\title{Delete a pipeline.} +\usage{ +delete_pipeline(client, pipeline_id) + +pipelinesDelete(client, pipeline_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{pipeline_id}{Required. This field has no description yet.} +} +\description{ +Deletes a pipeline. 
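A sketch of a recursive workspace delete with the delete_notebook signature above; since the details note that recursive deletion is not atomic and cannot be undone, the path deserves a careful check first.

delete_notebook(client,
  path = "/Users/someone@example.com/scratch",  # illustrative path
  recursive = TRUE)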
+} diff --git a/man/delete_provider.Rd b/man/delete_provider.Rd new file mode 100644 index 00000000..6bf61da9 --- /dev/null +++ b/man/delete_provider.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/providers.R +\name{delete_provider} +\alias{delete_provider} +\alias{providersDelete} +\title{Delete a provider.} +\usage{ +delete_provider(client, name) + +providersDelete(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the provider.} +} +\description{ +Deletes an authentication provider, if the caller is a metastore admin or is +the owner of the provider. +} diff --git a/man/delete_query.Rd b/man/delete_query.Rd new file mode 100644 index 00000000..33815d94 --- /dev/null +++ b/man/delete_query.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/queries.R +\name{delete_query} +\alias{delete_query} +\alias{queriesDelete} +\title{Delete a query.} +\usage{ +delete_query(client, query_id) + +queriesDelete(client, query_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{query_id}{Required. This field has no description yet.} +} +\description{ +Moves a query to the trash. Trashed queries immediately disappear from +searches and list views, and they cannot be used for alerts. The trash is +deleted after 30 days. +} diff --git a/man/delete_query_visualization.Rd b/man/delete_query_visualization.Rd new file mode 100644 index 00000000..c95763b0 --- /dev/null +++ b/man/delete_query_visualization.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/query_visualizations.R +\name{delete_query_visualization} +\alias{delete_query_visualization} +\alias{queryVisualizationsDelete} +\title{Remove visualization.} +\usage{ +delete_query_visualization(client, id) + +queryVisualizationsDelete(client, id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Widget ID returned by :method:queryvisualizations/create.} +} +\description{ +Remove visualization. +} diff --git a/man/delete_recipient.Rd b/man/delete_recipient.Rd new file mode 100644 index 00000000..612b81f0 --- /dev/null +++ b/man/delete_recipient.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/recipients.R +\name{delete_recipient} +\alias{delete_recipient} +\alias{recipientsDelete} +\title{Delete a share recipient.} +\usage{ +delete_recipient(client, name) + +recipientsDelete(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the recipient.} +} +\description{ +Deletes the specified recipient from the metastore. The caller must be the +owner of the recipient. +} diff --git a/man/delete_registered_model.Rd b/man/delete_registered_model.Rd new file mode 100644 index 00000000..10b5f875 --- /dev/null +++ b/man/delete_registered_model.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/registered_models.R +\name{delete_registered_model} +\alias{delete_registered_model} +\alias{registeredModelsDelete} +\title{Delete a Registered Model.} +\usage{ +delete_registered_model(client, full_name) + +registeredModelsDelete(client, full_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required.
The three-level (fully qualified) name of the registered model.} +} +\description{ +Deletes a registered model and all its model versions from the specified +parent catalog and schema. +} +\details{ +The caller must be a metastore admin or an owner of the registered model. For +the latter case, the caller must also be the owner or have the +\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} +privilege on the parent schema. +} diff --git a/man/delete_registered_model_alias.Rd b/man/delete_registered_model_alias.Rd new file mode 100644 index 00000000..5a18112f --- /dev/null +++ b/man/delete_registered_model_alias.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/registered_models.R +\name{delete_registered_model_alias} +\alias{delete_registered_model_alias} +\alias{registeredModelsDeleteAlias} +\title{Delete a Registered Model Alias.} +\usage{ +delete_registered_model_alias(client, full_name, alias) + +registeredModelsDeleteAlias(client, full_name, alias) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. The three-level (fully qualified) name of the registered model.} + +\item{alias}{Required. The name of the alias.} +} +\description{ +Deletes a registered model alias. +} +\details{ +The caller must be a metastore admin or an owner of the registered model. For +the latter case, the caller must also be the owner or have the +\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} +privilege on the parent schema. +} diff --git a/man/delete_repo.Rd b/man/delete_repo.Rd new file mode 100644 index 00000000..5da27611 --- /dev/null +++ b/man/delete_repo.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/repos.R +\name{delete_repo} +\alias{delete_repo} +\alias{reposDelete} +\title{Delete a repo.} +\usage{ +delete_repo(client, repo_id) + +reposDelete(client, repo_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{repo_id}{Required. The ID for the corresponding repo to access.} +} +\description{ +Deletes the specified repo. +} diff --git a/man/delete_restrict_workspace_admin.Rd b/man/delete_restrict_workspace_admin.Rd new file mode 100644 index 00000000..8f9db373 --- /dev/null +++ b/man/delete_restrict_workspace_admin.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/restrict_workspace_admins.R +\name{delete_restrict_workspace_admin} +\alias{delete_restrict_workspace_admin} +\alias{restrictWorkspaceAdminsDelete} +\title{Delete the restrict workspace admins setting.} +\usage{ +delete_restrict_workspace_admin(client, etag = NULL) + +restrictWorkspaceAdminsDelete(client, etag = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{etag}{etag used for versioning.} +} +\description{ +Reverts the restrict workspace admins setting status for the workspace. A +fresh etag needs to be provided in \code{DELETE} requests (as a query parameter). +The etag can be retrieved by making a \code{GET} request before the DELETE +request. If the setting is updated/deleted concurrently, \code{DELETE} fails with +409 and the request must be retried by using the fresh etag in the 409 +response. 
+} diff --git a/man/delete_schema.Rd b/man/delete_schema.Rd new file mode 100644 index 00000000..c376bfce --- /dev/null +++ b/man/delete_schema.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/schemas.R +\name{delete_schema} +\alias{delete_schema} +\alias{schemasDelete} +\title{Delete a schema.} +\usage{ +delete_schema(client, full_name) + +schemasDelete(client, full_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the schema.} +} +\description{ +Deletes the specified schema from the parent catalog. The caller must be the +owner of the schema or an owner of the parent catalog. +} diff --git a/man/delete_secret.Rd b/man/delete_secret.Rd new file mode 100644 index 00000000..82a7ed2a --- /dev/null +++ b/man/delete_secret.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/secrets.R +\name{delete_secret} +\alias{delete_secret} +\alias{secretsDeleteSecret} +\title{Delete a secret.} +\usage{ +delete_secret(client, scope, key) + +secretsDeleteSecret(client, scope, key) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{scope}{Required. The name of the scope that contains the secret to delete.} + +\item{key}{Required. Name of the secret to delete.} +} +\description{ +Deletes the secret stored in this secret scope. You must have \code{WRITE} or +\code{MANAGE} permission on the secret scope. +} +\details{ +Throws \code{RESOURCE_DOES_NOT_EXIST} if no such secret scope or secret exists. +Throws \code{PERMISSION_DENIED} if the user does not have permission to make this +API call. +} diff --git a/man/delete_secret_acl.Rd b/man/delete_secret_acl.Rd new file mode 100644 index 00000000..2ae8e954 --- /dev/null +++ b/man/delete_secret_acl.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/secrets.R +\name{delete_secret_acl} +\alias{delete_secret_acl} +\alias{secretsDeleteAcl} +\title{Delete an ACL.} +\usage{ +delete_secret_acl(client, scope, principal) + +secretsDeleteAcl(client, scope, principal) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{scope}{Required. The name of the scope to remove permissions from.} + +\item{principal}{Required. The principal to remove an existing ACL from.} +} +\description{ +Deletes the given ACL on the given scope. +} +\details{ +Users must have the \code{MANAGE} permission to invoke this API. Throws +\code{RESOURCE_DOES_NOT_EXIST} if no such secret scope, principal, or ACL exists. +Throws \code{PERMISSION_DENIED} if the user does not have permission to make this +API call. +} diff --git a/man/delete_secret_scope.Rd b/man/delete_secret_scope.Rd new file mode 100644 index 00000000..8f08b48a --- /dev/null +++ b/man/delete_secret_scope.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/secrets.R +\name{delete_secret_scope} +\alias{delete_secret_scope} +\alias{secretsDeleteScope} +\title{Delete a secret scope.} +\usage{ +delete_secret_scope(client, scope) + +secretsDeleteScope(client, scope) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{scope}{Required. Name of the scope to delete.} +} +\description{ +Deletes a secret scope. +} +\details{ +Throws \code{RESOURCE_DOES_NOT_EXIST} if the scope does not exist. Throws +\code{PERMISSION_DENIED} if the user does not have permission to make this API +call. 
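A sketch tying the secrets calls above together: remove a secret, then drop the now-empty scope. Names are illustrative, and both calls require the permissions noted in the details.

delete_secret(client, scope = "etl-secrets", key = "db-password")
delete_secret_scope(client, scope = "etl-secrets")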
+} diff --git a/man/delete_service_principal.Rd b/man/delete_service_principal.Rd new file mode 100644 index 00000000..b6231ef4 --- /dev/null +++ b/man/delete_service_principal.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/service_principals.R +\name{delete_service_principal} +\alias{delete_service_principal} +\alias{servicePrincipalsDelete} +\title{Delete a service principal.} +\usage{ +delete_service_principal(client, id) + +servicePrincipalsDelete(client, id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Unique ID for a service principal in the Databricks workspace.} +} +\description{ +Delete a single service principal in the Databricks workspace. +} diff --git a/man/delete_serving_endpoint.Rd b/man/delete_serving_endpoint.Rd new file mode 100644 index 00000000..d05e212a --- /dev/null +++ b/man/delete_serving_endpoint.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{delete_serving_endpoint} +\alias{delete_serving_endpoint} +\alias{servingEndpointsDelete} +\title{Delete a serving endpoint.} +\usage{ +delete_serving_endpoint(client, name) + +servingEndpointsDelete(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the serving endpoint.} +} +\description{ +Delete a serving endpoint. +} diff --git a/man/delete_share.Rd b/man/delete_share.Rd new file mode 100644 index 00000000..71db97f6 --- /dev/null +++ b/man/delete_share.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/shares.R +\name{delete_share} +\alias{delete_share} +\alias{sharesDelete} +\title{Delete a share.} +\usage{ +delete_share(client, name) + +sharesDelete(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the share.} +} +\description{ +Deletes a data object share from the metastore. The caller must be an owner +of the share. +} diff --git a/man/delete_storage_credential.Rd b/man/delete_storage_credential.Rd new file mode 100644 index 00000000..3a29e63c --- /dev/null +++ b/man/delete_storage_credential.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/storage_credentials.R +\name{delete_storage_credential} +\alias{delete_storage_credential} +\alias{storageCredentialsDelete} +\title{Delete a credential.} +\usage{ +delete_storage_credential(client, name, force = NULL) + +storageCredentialsDelete(client, name, force = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the storage credential.} + +\item{force}{Force deletion even if there are dependent external locations or external tables.} +} +\description{ +Deletes a storage credential from the metastore. The caller must be an owner +of the storage credential. +} diff --git a/man/delete_table.Rd b/man/delete_table.Rd new file mode 100644 index 00000000..6e3b931a --- /dev/null +++ b/man/delete_table.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tables.R +\name{delete_table} +\alias{delete_table} +\alias{tablesDelete} +\title{Delete a table.} +\usage{ +delete_table(client, full_name) + +tablesDelete(client, full_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. 
Full name of the table.} +} +\description{ +Deletes a table from the specified parent catalog and schema. The caller must +be the owner of the parent catalog, have the \strong{USE_CATALOG} privilege on the +parent catalog and be the owner of the parent schema, or be the owner of the +table and have the \strong{USE_CATALOG} privilege on the parent catalog and the +\strong{USE_SCHEMA} privilege on the parent schema. +} diff --git a/man/delete_table_constraint.Rd b/man/delete_table_constraint.Rd new file mode 100644 index 00000000..ccc8b910 --- /dev/null +++ b/man/delete_table_constraint.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/table_constraints.R +\name{delete_table_constraint} +\alias{delete_table_constraint} +\alias{tableConstraintsDelete} +\title{Delete a table constraint.} +\usage{ +delete_table_constraint(client, full_name, constraint_name, cascade) + +tableConstraintsDelete(client, full_name, constraint_name, cascade) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the table referenced by the constraint.} + +\item{constraint_name}{Required. The name of the constraint to delete.} + +\item{cascade}{Required. If true, try deleting all child constraints of the current constraint.} +} +\description{ +Deletes a table constraint. +} +\details{ +For the table constraint deletion to succeed, the user must satisfy both of +these conditions: - the user must have the \strong{USE_CATALOG} privilege on the +table's parent catalog, the \strong{USE_SCHEMA} privilege on the table's parent +schema, and be the owner of the table. - if the \strong{cascade} argument is \strong{true}, +the user must have the following permissions on all of the child tables: the +\strong{USE_CATALOG} privilege on the table's catalog, the \strong{USE_SCHEMA} +privilege on the table's schema, and be the owner of the table. +} diff --git a/man/delete_token.Rd b/man/delete_token.Rd new file mode 100644 index 00000000..8a745829 --- /dev/null +++ b/man/delete_token.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tokens.R +\name{delete_token} +\alias{delete_token} +\alias{tokensDelete} +\title{Revoke token.} +\usage{ +delete_token(client, token_id) + +tokensDelete(client, token_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{token_id}{Required. The ID of the token to be revoked.} +} +\description{ +Revokes an access token. +} +\details{ +If a token with the specified ID is not valid, this call returns an error +\strong{RESOURCE_DOES_NOT_EXIST}. +} diff --git a/man/delete_token_management.Rd b/man/delete_token_management.Rd new file mode 100644 index 00000000..0004db41 --- /dev/null +++ b/man/delete_token_management.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token_management.R +\name{delete_token_management} +\alias{delete_token_management} +\alias{tokenManagementDelete} +\title{Delete a token.} +\usage{ +delete_token_management(client, token_id) + +tokenManagementDelete(client, token_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{token_id}{Required. The ID of the token to delete.} +} +\description{ +Deletes a token, specified by its ID.
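delete_token comes from the Tokens API, while delete_token_management belongs to the Token Management API (typically used by workspace admins); both take a token ID. A sketch with a placeholder ID:

```r
client <- DatabricksClient()

# Revoke one of your own access tokens by ID (the ID is a placeholder).
delete_token(client, token_id = "5684c955822ac792")

# Admins can instead delete any workspace token via the Token Management API.
delete_token_management(client, token_id = "5684c955822ac792")
```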
+} diff --git a/man/delete_user.Rd b/man/delete_user.Rd new file mode 100644 index 00000000..3b7d388d --- /dev/null +++ b/man/delete_user.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/users.R +\name{delete_user} +\alias{delete_user} +\alias{usersDelete} +\title{Delete a user.} +\usage{ +delete_user(client, id) + +usersDelete(client, id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Unique ID for a user in the Databricks workspace.} +} +\description{ +Deletes a user. Deleting a user from a Databricks workspace also removes +objects associated with the user. +} diff --git a/man/delete_vector_search_endpoint.Rd b/man/delete_vector_search_endpoint.Rd new file mode 100644 index 00000000..13107169 --- /dev/null +++ b/man/delete_vector_search_endpoint.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/vector_search_endpoints.R +\name{delete_vector_search_endpoint} +\alias{delete_vector_search_endpoint} +\alias{vectorSearchEndpointsDeleteEndpoint} +\title{Delete an endpoint.} +\usage{ +delete_vector_search_endpoint(client, endpoint_name) + +vectorSearchEndpointsDeleteEndpoint(client, endpoint_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{endpoint_name}{Required. Name of the endpoint.} +} +\description{ +Delete an endpoint. +} diff --git a/man/delete_vector_search_index.Rd b/man/delete_vector_search_index.Rd new file mode 100644 index 00000000..f18e5b7e --- /dev/null +++ b/man/delete_vector_search_index.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/vector_search_indexes.R +\name{delete_vector_search_index} +\alias{delete_vector_search_index} +\alias{vectorSearchIndexesDeleteIndex} +\title{Delete an index.} +\usage{ +delete_vector_search_index(client, index_name) + +vectorSearchIndexesDeleteIndex(client, index_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{index_name}{Required. Name of the index.} +} +\description{ +Delete an index. +} diff --git a/man/delete_vector_search_index_data.Rd b/man/delete_vector_search_index_data.Rd new file mode 100644 index 00000000..b3963719 --- /dev/null +++ b/man/delete_vector_search_index_data.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/vector_search_indexes.R +\name{delete_vector_search_index_data} +\alias{delete_vector_search_index_data} +\alias{vectorSearchIndexesDeleteDataVectorIndex} +\title{Delete data from index.} +\usage{ +delete_vector_search_index_data(client, index_name, primary_keys) + +vectorSearchIndexesDeleteDataVectorIndex(client, index_name, primary_keys) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{index_name}{Required. Name of the vector index where data is to be deleted.} + +\item{primary_keys}{Required. List of primary keys for the data to be deleted.} +} +\description{ +Handles the deletion of data from a specified vector index. 
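The vector search helpers above compose naturally for cleanup: rows can be removed from an index by primary key before the index and its endpoint are dropped. A minimal sketch; all names are placeholders:

```r
client <- DatabricksClient()

# Delete a few rows from an index by primary key, then drop the index
# and finally the endpoint that served it.
delete_vector_search_index_data(client,
  index_name = "main.default.my_index",
  primary_keys = list("id-1", "id-2"))
delete_vector_search_index(client, index_name = "main.default.my_index")
delete_vector_search_endpoint(client, endpoint_name = "my-endpoint")
```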
+} diff --git a/man/delete_volume.Rd b/man/delete_volume.Rd new file mode 100644 index 00000000..cc22f275 --- /dev/null +++ b/man/delete_volume.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/volumes.R +\name{delete_volume} +\alias{delete_volume} +\alias{volumesDelete} +\title{Delete a Volume.} +\usage{ +delete_volume(client, name) + +volumesDelete(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The three-level (fully qualified) name of the volume.} +} +\description{ +Deletes a volume from the specified parent catalog and schema. +} +\details{ +The caller must be a metastore admin or an owner of the volume. For the +latter case, the caller must also be the owner or have the \strong{USE_CATALOG} +privilege on the parent catalog and the \strong{USE_SCHEMA} privilege on the +parent schema. +} diff --git a/man/delete_warehouse.Rd b/man/delete_warehouse.Rd new file mode 100644 index 00000000..1b96f287 --- /dev/null +++ b/man/delete_warehouse.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{delete_warehouse} +\alias{delete_warehouse} +\alias{warehousesDelete} +\title{Delete a warehouse.} +\usage{ +delete_warehouse(client, id) + +warehousesDelete(client, id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. ID of the SQL warehouse.} +} +\description{ +Deletes a SQL warehouse. +} diff --git a/man/destroy_command_execution.Rd b/man/destroy_command_execution.Rd new file mode 100644 index 00000000..a501fedf --- /dev/null +++ b/man/destroy_command_execution.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/command_execution.R +\name{destroy_command_execution} +\alias{destroy_command_execution} +\alias{commandExecutionDestroy} +\title{Delete an execution context.} +\usage{ +destroy_command_execution(client, cluster_id, context_id) + +commandExecutionDestroy(client, cluster_id, context_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. This field has no description yet.} + +\item{context_id}{Required. This field has no description yet.} +} +\description{ +Deletes an execution context. +} diff --git a/man/disable_system_schema.Rd b/man/disable_system_schema.Rd new file mode 100644 index 00000000..b877ceb7 --- /dev/null +++ b/man/disable_system_schema.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/system_schemas.R +\name{disable_system_schema} +\alias{disable_system_schema} +\alias{systemSchemasDisable} +\title{Disable a system schema.} +\usage{ +disable_system_schema(client, metastore_id, schema_name) + +systemSchemasDisable(client, metastore_id, schema_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{metastore_id}{Required. The metastore ID under which the system schema lives.} + +\item{schema_name}{Required. Full name of the system schema.} +} +\description{ +Disables the system schema and removes it from the system catalog. The caller +must be an account admin or a metastore admin.
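A sketch of disabling a system schema, assuming the metastore ID has already been looked up; both values below are placeholders:

```r
client <- DatabricksClient()

# Turn off the "billing" system schema for a metastore.
disable_system_schema(client,
  metastore_id = "11111111-2222-3333-4444-555555555555",
  schema_name = "billing")
```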
+} diff --git a/man/download_file.Rd b/man/download_file.Rd new file mode 100644 index 00000000..c6a043fb --- /dev/null +++ b/man/download_file.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/files.R +\name{download_file} +\alias{download_file} +\alias{filesDownload} +\title{Download a file.} +\usage{ +download_file(client, file_path) + +filesDownload(client, file_path) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{file_path}{Required. The absolute path of the file.} +} +\description{ +Downloads a file of up to 5 GiB. The file contents are the response body. +This is a standard HTTP file download, not a JSON RPC. +} diff --git a/man/edit_cluster.Rd b/man/edit_cluster.Rd new file mode 100644 index 00000000..b87242f7 --- /dev/null +++ b/man/edit_cluster.Rd @@ -0,0 +1,149 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{edit_cluster} +\alias{edit_cluster} +\alias{clustersEdit} +\title{Update cluster configuration.} +\usage{ +edit_cluster( + client, + cluster_id, + spark_version, + apply_policy_default_values = NULL, + autoscale = NULL, + autotermination_minutes = NULL, + aws_attributes = NULL, + azure_attributes = NULL, + cluster_log_conf = NULL, + cluster_name = NULL, + cluster_source = NULL, + custom_tags = NULL, + data_security_mode = NULL, + docker_image = NULL, + driver_instance_pool_id = NULL, + driver_node_type_id = NULL, + enable_elastic_disk = NULL, + enable_local_disk_encryption = NULL, + gcp_attributes = NULL, + init_scripts = NULL, + instance_pool_id = NULL, + node_type_id = NULL, + num_workers = NULL, + policy_id = NULL, + runtime_engine = NULL, + single_user_name = NULL, + spark_conf = NULL, + spark_env_vars = NULL, + ssh_public_keys = NULL, + workload_type = NULL +) + +clustersEdit( + client, + cluster_id, + spark_version, + apply_policy_default_values = NULL, + autoscale = NULL, + autotermination_minutes = NULL, + aws_attributes = NULL, + azure_attributes = NULL, + cluster_log_conf = NULL, + cluster_name = NULL, + cluster_source = NULL, + custom_tags = NULL, + data_security_mode = NULL, + docker_image = NULL, + driver_instance_pool_id = NULL, + driver_node_type_id = NULL, + enable_elastic_disk = NULL, + enable_local_disk_encryption = NULL, + gcp_attributes = NULL, + init_scripts = NULL, + instance_pool_id = NULL, + node_type_id = NULL, + num_workers = NULL, + policy_id = NULL, + runtime_engine = NULL, + single_user_name = NULL, + spark_conf = NULL, + spark_env_vars = NULL, + ssh_public_keys = NULL, + workload_type = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. ID of the cluster.} + +\item{spark_version}{Required.
The Spark version of the cluster, e.g.} + +\item{apply_policy_default_values}{This field has no description yet.} + +\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} + +\item{autotermination_minutes}{Automatically terminates the cluster after it is inactive for this time in minutes.} + +\item{aws_attributes}{Attributes related to clusters running on Amazon Web Services.} + +\item{azure_attributes}{Attributes related to clusters running on Microsoft Azure.} + +\item{cluster_log_conf}{The configuration for delivering Spark logs to a long-term storage destination.} + +\item{cluster_name}{Cluster name requested by the user.} + +\item{cluster_source}{Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.} + +\item{custom_tags}{Additional tags for cluster resources.} + +\item{data_security_mode}{Data security mode decides what data governance model to use when accessing data from a cluster.} + +\item{docker_image}{This field has no description yet.} + +\item{driver_instance_pool_id}{The optional ID of the instance pool to which the driver of the cluster belongs.} + +\item{driver_node_type_id}{The node type of the Spark driver.} + +\item{enable_elastic_disk}{Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.} + +\item{enable_local_disk_encryption}{Whether to enable LUKS on cluster VMs' local disks.} + +\item{gcp_attributes}{Attributes related to clusters running on Google Cloud Platform.} + +\item{init_scripts}{The configuration for storing init scripts.} + +\item{instance_pool_id}{The optional ID of the instance pool to which the cluster belongs.} + +\item{node_type_id}{This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.} + +\item{num_workers}{Number of worker nodes that this cluster should have.} + +\item{policy_id}{The ID of the cluster policy used to create the cluster if applicable.} + +\item{runtime_engine}{Decides which runtime engine to use, e.g.} + +\item{single_user_name}{Single user name if data_security_mode is \code{SINGLE_USER}.} + +\item{spark_conf}{An object containing a set of optional, user-specified Spark configuration key-value pairs.} + +\item{spark_env_vars}{An object containing a set of optional, user-specified environment variable key-value pairs.} + +\item{ssh_public_keys}{SSH public key contents that will be added to each Spark node in this cluster.} + +\item{workload_type}{This field has no description yet.} +} +\description{ +Updates the configuration of a cluster to match the provided attributes and +size. A cluster can be updated if it is in a \code{RUNNING} or \code{TERMINATED} state. +} +\details{ +If a cluster is updated while in a \code{RUNNING} state, it will be restarted so +that the new attributes can take effect. + +If a cluster is updated while in a \code{TERMINATED} state, it will remain +\code{TERMINATED}. The next time it is started using the \code{clusters/start} API, the +new attributes will take effect. Any attempt to update a cluster in any other +state will be rejected with an \code{INVALID_STATE} error code. + +Clusters created by the Databricks Jobs service cannot be edited.
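Because spark_version is a required argument, even a simple resize must restate it. A minimal sketch; the cluster ID and Spark version are placeholders:

```r
client <- DatabricksClient()

# Resize an existing cluster to eight workers; spark_version must be
# restated because the generated wrapper marks it as required.
edit_cluster(client,
  cluster_id = "1234-567890-abcde123",
  spark_version = "14.3.x-scala2.12",
  num_workers = 8)
```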
+} diff --git a/man/edit_cluster_and_wait.Rd b/man/edit_cluster_and_wait.Rd new file mode 100644 index 00000000..e0fdc30c --- /dev/null +++ b/man/edit_cluster_and_wait.Rd @@ -0,0 +1,126 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{edit_cluster_and_wait} +\alias{edit_cluster_and_wait} +\title{Update cluster configuration.} +\usage{ +edit_cluster_and_wait( + client, + cluster_id, + spark_version, + apply_policy_default_values = NULL, + autoscale = NULL, + autotermination_minutes = NULL, + aws_attributes = NULL, + azure_attributes = NULL, + cluster_log_conf = NULL, + cluster_name = NULL, + cluster_source = NULL, + custom_tags = NULL, + data_security_mode = NULL, + docker_image = NULL, + driver_instance_pool_id = NULL, + driver_node_type_id = NULL, + enable_elastic_disk = NULL, + enable_local_disk_encryption = NULL, + gcp_attributes = NULL, + init_scripts = NULL, + instance_pool_id = NULL, + node_type_id = NULL, + num_workers = NULL, + policy_id = NULL, + runtime_engine = NULL, + single_user_name = NULL, + spark_conf = NULL, + spark_env_vars = NULL, + ssh_public_keys = NULL, + workload_type = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. ID of the cluster.} + +\item{spark_version}{Required. The Spark version of the cluster, e.g.} + +\item{apply_policy_default_values}{This field has no description yet.} + +\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} + +\item{autotermination_minutes}{Automatically terminates the cluster after it is inactive for this time in minutes.} + +\item{aws_attributes}{Attributes related to clusters running on Amazon Web Services.} + +\item{azure_attributes}{Attributes related to clusters running on Microsoft Azure.} + +\item{cluster_log_conf}{The configuration for delivering Spark logs to a long-term storage destination.} + +\item{cluster_name}{Cluster name requested by the user.} + +\item{cluster_source}{Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.} + +\item{custom_tags}{Additional tags for cluster resources.} + +\item{data_security_mode}{Data security mode decides what data governance model to use when accessing data from a cluster.} + +\item{docker_image}{This field has no description yet.} + +\item{driver_instance_pool_id}{The optional ID of the instance pool to which the driver of the cluster belongs.} + +\item{driver_node_type_id}{The node type of the Spark driver.} + +\item{enable_elastic_disk}{Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.} + +\item{enable_local_disk_encryption}{Whether to enable LUKS on cluster VMs' local disks.} + +\item{gcp_attributes}{Attributes related to clusters running on Google Cloud Platform.} + +\item{init_scripts}{The configuration for storing init scripts.} + +\item{instance_pool_id}{The optional ID of the instance pool to which the cluster belongs.} + +\item{node_type_id}{This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.} + +\item{num_workers}{Number of worker nodes that this cluster should have.} + +\item{policy_id}{The ID of the cluster policy used to create the cluster if applicable.} + +\item{runtime_engine}{Decides which runtime engine to use, e.g.} +
+\item{single_user_name}{Single user name if data_security_mode is \code{SINGLE_USER}.} + +\item{spark_conf}{An object containing a set of optional, user-specified Spark configuration key-value pairs.} + +\item{spark_env_vars}{An object containing a set of optional, user-specified environment variable key-value pairs.} + +\item{ssh_public_keys}{SSH public key contents that will be added to each Spark node in this cluster.} + +\item{workload_type}{This field has no description yet.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Clusters on Databricks reach +RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Clusters is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Updates the configuration of a cluster to match the provided attributes and +size. A cluster can be updated if it is in a \code{RUNNING} or \code{TERMINATED} state. + +If a cluster is updated while in a \code{RUNNING} state, it will be restarted so +that the new attributes can take effect. + +If a cluster is updated while in a \code{TERMINATED} state, it will remain +\code{TERMINATED}. The next time it is started using the \code{clusters/start} API, the +new attributes will take effect. Any attempt to update a cluster in any other +state will be rejected with an \code{INVALID_STATE} error code. + +Clusters created by the Databricks Jobs service cannot be edited. +} diff --git a/man/edit_cluster_policy.Rd b/man/edit_cluster_policy.Rd new file mode 100644 index 00000000..f8471d1f --- /dev/null +++ b/man/edit_cluster_policy.Rd @@ -0,0 +1,54 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/cluster_policies.R +\name{edit_cluster_policy} +\alias{edit_cluster_policy} +\alias{clusterPoliciesEdit} +\title{Update a cluster policy.} +\usage{ +edit_cluster_policy( + client, + policy_id, + name, + definition = NULL, + description = NULL, + libraries = NULL, + max_clusters_per_user = NULL, + policy_family_definition_overrides = NULL, + policy_family_id = NULL +) + +clusterPoliciesEdit( + client, + policy_id, + name, + definition = NULL, + description = NULL, + libraries = NULL, + max_clusters_per_user = NULL, + policy_family_definition_overrides = NULL, + policy_family_id = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{policy_id}{Required. The ID of the policy to update.} + +\item{name}{Required. 
Cluster Policy name requested by the user.} + +\item{definition}{Policy definition document expressed in \href{https://docs.databricks.com/administration-guide/clusters/policy-definition.html}{Databricks Cluster Policy Definition Language}.} + +\item{description}{Additional human-readable description of the cluster policy.} + +\item{libraries}{A list of libraries to be installed on the next cluster restart that uses this policy.} + +\item{max_clusters_per_user}{Max number of clusters per user that can be active using this policy.} + +\item{policy_family_definition_overrides}{Policy definition JSON document expressed in \href{https://docs.databricks.com/administration-guide/clusters/policy-definition.html}{Databricks Policy Definition Language}.} + +\item{policy_family_id}{ID of the policy family.} +} +\description{ +Update an existing cluster policy. This operation may make some clusters +governed by the previous policy invalid. +} diff --git a/man/edit_instance_pool.Rd b/man/edit_instance_pool.Rd new file mode 100644 index 00000000..bfbc41a6 --- /dev/null +++ b/man/edit_instance_pool.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/instance_pools.R +\name{edit_instance_pool} +\alias{edit_instance_pool} +\alias{instancePoolsEdit} +\title{Edit an existing instance pool.} +\usage{ +edit_instance_pool( + client, + instance_pool_id, + instance_pool_name, + node_type_id, + custom_tags = NULL, + idle_instance_autotermination_minutes = NULL, + max_capacity = NULL, + min_idle_instances = NULL +) + +instancePoolsEdit( + client, + instance_pool_id, + instance_pool_name, + node_type_id, + custom_tags = NULL, + idle_instance_autotermination_minutes = NULL, + max_capacity = NULL, + min_idle_instances = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{instance_pool_id}{Required. Instance pool ID.} + +\item{instance_pool_name}{Required. Pool name requested by the user.} + +\item{node_type_id}{Required. This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.} + +\item{custom_tags}{Additional tags for pool resources.} + +\item{idle_instance_autotermination_minutes}{Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.} + +\item{max_capacity}{Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.} + +\item{min_idle_instances}{Minimum number of idle instances to keep in the instance pool.} +} +\description{ +Modifies the configuration of an existing instance pool. +} diff --git a/man/edit_instance_profile.Rd b/man/edit_instance_profile.Rd new file mode 100644 index 00000000..bb683ec9 --- /dev/null +++ b/man/edit_instance_profile.Rd @@ -0,0 +1,46 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/instance_profiles.R +\name{edit_instance_profile} +\alias{edit_instance_profile} +\alias{instanceProfilesEdit} +\title{Edit an instance profile.} +\usage{ +edit_instance_profile( + client, + instance_profile_arn, + iam_role_arn = NULL, + is_meta_instance_profile = NULL +) + +instanceProfilesEdit( + client, + instance_profile_arn, + iam_role_arn = NULL, + is_meta_instance_profile = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{instance_profile_arn}{Required.
The AWS ARN of the instance profile to register with Databricks.} + +\item{iam_role_arn}{The AWS IAM role ARN of the role associated with the instance profile.} + +\item{is_meta_instance_profile}{Boolean flag indicating whether the instance profile should only be used in credential passthrough scenarios.} +} +\description{ +The only supported field to change is the optional IAM role ARN associated +with the instance profile. It is required to specify the IAM role ARN if both +of the following are true: +} +\details{ +\itemize{ +\item Your role name and instance profile name do not match. The name is the part after the last slash in each ARN. +\item You want to use the instance profile with \href{https://docs.databricks.com/sql/admin/serverless.html}{Databricks SQL Serverless}. +} + +To understand where these fields are in the AWS console, see \href{https://docs.databricks.com/sql/admin/serverless.html}{Enable serverless SQL warehouses}. + +This API is only available to admin users. +} diff --git a/man/edit_warehouse.Rd b/man/edit_warehouse.Rd new file mode 100644 index 00000000..6d163b91 --- /dev/null +++ b/man/edit_warehouse.Rd @@ -0,0 +1,77 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{edit_warehouse} +\alias{edit_warehouse} +\alias{warehousesEdit} +\title{Update a warehouse.} +\usage{ +edit_warehouse( + client, + id, + auto_stop_mins = NULL, + channel = NULL, + cluster_size = NULL, + creator_name = NULL, + enable_photon = NULL, + enable_serverless_compute = NULL, + instance_profile_arn = NULL, + max_num_clusters = NULL, + min_num_clusters = NULL, + name = NULL, + spot_instance_policy = NULL, + tags = NULL, + warehouse_type = NULL +) + +warehousesEdit( + client, + id, + auto_stop_mins = NULL, + channel = NULL, + cluster_size = NULL, + creator_name = NULL, + enable_photon = NULL, + enable_serverless_compute = NULL, + instance_profile_arn = NULL, + max_num_clusters = NULL, + min_num_clusters = NULL, + name = NULL, + spot_instance_policy = NULL, + tags = NULL, + warehouse_type = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. ID of the SQL warehouse.} + +\item{auto_stop_mins}{The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped.} + +\item{channel}{Channel Details.} + +\item{cluster_size}{Size of the clusters allocated for this warehouse.} + +\item{creator_name}{Warehouse creator name.} + +\item{enable_photon}{Configures whether the warehouse should use Photon optimized clusters.} + +\item{enable_serverless_compute}{Configures whether the warehouse should use serverless compute.} + +\item{instance_profile_arn}{Deprecated.} + +\item{max_num_clusters}{Maximum number of clusters that the autoscaler will create to handle concurrent queries.} + +\item{min_num_clusters}{Minimum number of available clusters that will be maintained for this SQL warehouse.} + +\item{name}{Logical name for the cluster.} + +\item{spot_instance_policy}{Configures whether the warehouse should use spot instances.} + +\item{tags}{A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse.} + +\item{warehouse_type}{Warehouse type: \code{PRO} or \code{CLASSIC}.} +} +\description{ +Updates the configuration for a SQL warehouse.
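Only the fields you pass are changed; everything else keeps its current value. A minimal sketch shrinking a warehouse and tightening its idle auto-stop, with a placeholder ID:

```r
client <- DatabricksClient()

# Downsize the warehouse and stop it after 15 idle minutes.
edit_warehouse(client,
  id = "abcd1234efgh5678",
  cluster_size = "Small",
  auto_stop_mins = 15)
```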
+} diff --git a/man/edit_warehouse_and_wait.Rd b/man/edit_warehouse_and_wait.Rd new file mode 100644 index 00000000..7bc844f2 --- /dev/null +++ b/man/edit_warehouse_and_wait.Rd @@ -0,0 +1,70 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{edit_warehouse_and_wait} +\alias{edit_warehouse_and_wait} +\title{Update a warehouse.} +\usage{ +edit_warehouse_and_wait( + client, + id, + auto_stop_mins = NULL, + channel = NULL, + cluster_size = NULL, + creator_name = NULL, + enable_photon = NULL, + enable_serverless_compute = NULL, + instance_profile_arn = NULL, + max_num_clusters = NULL, + min_num_clusters = NULL, + name = NULL, + spot_instance_policy = NULL, + tags = NULL, + warehouse_type = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. ID of the SQL warehouse.} + +\item{auto_stop_mins}{The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped.} + +\item{channel}{Channel Details.} + +\item{cluster_size}{Size of the clusters allocated for this warehouse.} + +\item{creator_name}{Warehouse creator name.} + +\item{enable_photon}{Configures whether the warehouse should use Photon optimized clusters.} + +\item{enable_serverless_compute}{Configures whether the warehouse should use serverless compute.} + +\item{instance_profile_arn}{Deprecated.} + +\item{max_num_clusters}{Maximum number of clusters that the autoscaler will create to handle concurrent queries.} + +\item{min_num_clusters}{Minimum number of available clusters that will be maintained for this SQL warehouse.} + +\item{name}{Logical name for the cluster.} + +\item{spot_instance_policy}{Configures whether the warehouse should use spot instances.} + +\item{tags}{A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse.} + +\item{warehouse_type}{Warehouse type: \code{PRO} or \code{CLASSIC}.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Warehouses on Databricks reach +RUNNING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Warehouses is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Updates the configuration for a SQL warehouse. +} diff --git a/man/enable_system_schema.Rd b/man/enable_system_schema.Rd new file mode 100644 index 00000000..506e297b --- /dev/null +++ b/man/enable_system_schema.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/system_schemas.R +\name{enable_system_schema} +\alias{enable_system_schema} +\alias{systemSchemasEnable} +\title{Enable a system schema.} +\usage{ +enable_system_schema(client, metastore_id, schema_name) + +systemSchemasEnable(client, metastore_id, schema_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{metastore_id}{Required. The metastore ID under which the system schema lives.} + +\item{schema_name}{Required. Full name of the system schema.} +} +\description{ +Enables the system schema and adds it to the system catalog. The caller must +be an account admin or a metastore admin.
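The edit_warehouse_and_wait variant documented above wraps the same edit in a poller that blocks until the warehouse is RUNNING again. A minimal sketch, assuming the default cli_reporter callback is acceptable; the ID and size are placeholders:

```r
client <- DatabricksClient()

# Resize and block until the warehouse is RUNNING, allowing up to
# 30 minutes instead of the default 20.
edit_warehouse_and_wait(client,
  id = "abcd1234efgh5678",
  cluster_size = "Medium",
  timeout = 30)
```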
+} diff --git a/man/esmEnablementGet.Rd b/man/esmEnablementGet.Rd deleted file mode 100644 index d5a287de..00000000 --- a/man/esmEnablementGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/esm_enablement.R -\name{esmEnablementGet} -\alias{esmEnablementGet} -\title{Get the enhanced security monitoring setting.} -\usage{ -esmEnablementGet(client, etag = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{etag}{etag used for versioning.} -} -\description{ -Gets the enhanced security monitoring setting. -} diff --git a/man/esmEnablementUpdate.Rd b/man/esmEnablementUpdate.Rd deleted file mode 100644 index 6ab4529d..00000000 --- a/man/esmEnablementUpdate.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/esm_enablement.R -\name{esmEnablementUpdate} -\alias{esmEnablementUpdate} -\title{Update the enhanced security monitoring setting.} -\usage{ -esmEnablementUpdate(client, allow_missing, setting, field_mask) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{allow_missing}{Required. This should always be set to true for Settings API.} - -\item{setting}{Required. This field has no description yet.} - -\item{field_mask}{Required. Field mask is required to be passed into the PATCH request.} -} -\description{ -Updates the enhanced security monitoring setting for the workspace. A fresh -etag needs to be provided in \code{PATCH} requests (as part of the setting field). -The etag can be retrieved by making a \code{GET} request before the \code{PATCH} -request. If the setting is updated concurrently, \code{PATCH} fails with 409 and -the request must be retried by using the fresh etag in the 409 response. -} diff --git a/man/events_cluster.Rd b/man/events_cluster.Rd new file mode 100644 index 00000000..3c0bbd17 --- /dev/null +++ b/man/events_cluster.Rd @@ -0,0 +1,54 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{events_cluster} +\alias{events_cluster} +\alias{clustersEvents} +\title{List cluster activity events.} +\usage{ +events_cluster( + client, + cluster_id, + end_time = NULL, + event_types = NULL, + limit = NULL, + offset = NULL, + order = NULL, + start_time = NULL +) + +clustersEvents( + client, + cluster_id, + end_time = NULL, + event_types = NULL, + limit = NULL, + offset = NULL, + order = NULL, + start_time = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The ID of the cluster to retrieve events about.} + +\item{end_time}{The end time in epoch milliseconds.} + +\item{event_types}{An optional set of event types to filter on.} + +\item{limit}{The maximum number of events to include in a page of events.} + +\item{offset}{The offset in the result set.} + +\item{order}{The order to list events in; either 'ASC' or 'DESC'.} + +\item{start_time}{The start time in epoch milliseconds.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Retrieves a list of events about the activity of a cluster. This API is +paginated. If there are more events to read, the response includes all the +parameters necessary to request the next page of events.
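Because the paginated wrappers return a single data.frame covering all response pages, no manual offset handling is needed. A minimal sketch with a placeholder cluster ID:

```r
client <- DatabricksClient()

# Fetch recent activity events; pagination is handled internally and a
# single data.frame spanning all pages is returned.
events <- events_cluster(client,
  cluster_id = "1234-567890-abcde123",
  limit = 50)
nrow(events)
```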
+} diff --git a/man/exchange_credentials_manager_token.Rd b/man/exchange_credentials_manager_token.Rd new file mode 100644 index 00000000..017fa476 --- /dev/null +++ b/man/exchange_credentials_manager_token.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/credentials_manager.R +\name{exchange_credentials_manager_token} +\alias{exchange_credentials_manager_token} +\alias{credentialsManagerExchangeToken} +\title{Exchange token.} +\usage{ +exchange_credentials_manager_token(client, partition_id, token_type, scopes) + +credentialsManagerExchangeToken(client, partition_id, token_type, scopes) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{partition_id}{Required. The partition of Credentials store.} + +\item{token_type}{Required. A list of token types being requested.} + +\item{scopes}{Required. Array of scopes for the token request.} +} +\description{ +Exchange tokens with an Identity Provider to get a new access token. It +allows specifying scopes to determine token permissions. +} diff --git a/man/execute_command.Rd b/man/execute_command.Rd new file mode 100644 index 00000000..e921d889 --- /dev/null +++ b/man/execute_command.Rd @@ -0,0 +1,42 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/command_execution.R +\name{execute_command} +\alias{execute_command} +\alias{commandExecutionExecute} +\title{Run a command.} +\usage{ +execute_command( + client, + cluster_id = NULL, + command = NULL, + context_id = NULL, + language = NULL +) + +commandExecutionExecute( + client, + cluster_id = NULL, + command = NULL, + context_id = NULL, + language = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Running cluster id.} + +\item{command}{Executable code.} + +\item{context_id}{Running context id.} + +\item{language}{This field has no description yet.} +} +\description{ +Runs a cluster command in the given execution context, using the provided +language. +} +\details{ +If successful, it returns an ID for tracking the status of the command's +execution. +} diff --git a/man/execute_command_and_wait.Rd b/man/execute_command_and_wait.Rd new file mode 100644 index 00000000..b12b2142 --- /dev/null +++ b/man/execute_command_and_wait.Rd @@ -0,0 +1,44 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/command_execution.R +\name{execute_command_and_wait} +\alias{execute_command_and_wait} +\title{Run a command.} +\usage{ +execute_command_and_wait( + client, + cluster_id = NULL, + command = NULL, + context_id = NULL, + language = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Running cluster id.} + +\item{command}{Executable code.} + +\item{context_id}{Running context id.} + +\item{language}{This field has no description yet.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Command Execution on Databricks reach +Finished or Error state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Command Execution is reported to console. You can change this behavior +by changing the \code{callback} parameter. 
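A typical command-execution flow creates a context, runs a snippet, and destroys the context. This sketch assumes the response from create_command_execution_and_wait exposes the context ID as $id (an assumption about the response shape); all IDs are placeholders:

```r
client <- DatabricksClient()

# Create an execution context on a running cluster, blocking until ready.
ctx <- create_command_execution_and_wait(client,
  cluster_id = "1234-567890-abcde123",
  language = "python")

# Run a snippet in that context and wait for it to finish or fail.
res <- execute_command_and_wait(client,
  cluster_id = "1234-567890-abcde123",
  context_id = ctx$id,  # assumed field name on the response
  command = "print(1 + 1)",
  language = "python")

# Clean up the context when done.
destroy_command_execution(client,
  cluster_id = "1234-567890-abcde123",
  context_id = ctx$id)
```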
+} +\details{ +Runs a cluster command in the given execution context, using the provided +language. + +If successful, it returns an ID for tracking the status of the command's +execution. +} diff --git a/man/execute_statement.Rd b/man/execute_statement.Rd new file mode 100644 index 00000000..05e2f486 --- /dev/null +++ b/man/execute_statement.Rd @@ -0,0 +1,65 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/statement_execution.R +\name{execute_statement} +\alias{execute_statement} +\alias{statementExecutionExecuteStatement} +\title{Execute a SQL statement.} +\usage{ +execute_statement( + client, + statement, + warehouse_id, + byte_limit = NULL, + catalog = NULL, + disposition = NULL, + format = NULL, + on_wait_timeout = NULL, + parameters = NULL, + row_limit = NULL, + schema = NULL, + wait_timeout = NULL +) + +statementExecutionExecuteStatement( + client, + statement, + warehouse_id, + byte_limit = NULL, + catalog = NULL, + disposition = NULL, + format = NULL, + on_wait_timeout = NULL, + parameters = NULL, + row_limit = NULL, + schema = NULL, + wait_timeout = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{statement}{Required. The SQL statement to execute.} + +\item{warehouse_id}{Required. Warehouse upon which to execute a statement.} + +\item{byte_limit}{Applies the given byte limit to the statement's result size.} + +\item{catalog}{Sets default catalog for statement execution, similar to \href{https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html}{\verb{USE CATALOG}} in SQL.} + +\item{disposition}{The fetch disposition provides two modes of fetching results: \code{INLINE} and \code{EXTERNAL_LINKS}.} + +\item{format}{Statement execution supports three result formats: \code{JSON_ARRAY} (default), \code{ARROW_STREAM}, and \code{CSV}.} + +\item{on_wait_timeout}{When \verb{wait_timeout > 0s}, the call will block up to the specified time.} + +\item{parameters}{A list of parameters to pass into a SQL statement containing parameter markers.} + +\item{row_limit}{Applies the given row limit to the statement's result set, but unlike the \code{LIMIT} clause in SQL, it also sets the \code{truncated} field in the response to indicate whether the result was trimmed due to the limit or not.} + +\item{schema}{Sets default schema for statement execution, similar to \href{https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-schema.html}{\verb{USE SCHEMA}} in SQL.} + +\item{wait_timeout}{The time in seconds the call will wait for the statement's result set as \code{Ns}, where \code{N} can be set to 0 or to a value between 5 and 50.} +} +\description{ +Execute a SQL statement. +} diff --git a/man/exists_table.Rd b/man/exists_table.Rd new file mode 100644 index 00000000..a92278f1 --- /dev/null +++ b/man/exists_table.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tables.R +\name{exists_table} +\alias{exists_table} +\alias{tablesExists} +\title{Get boolean reflecting if table exists.} +\usage{ +exists_table(client, full_name) + +tablesExists(client, full_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the table.} +} +\description{ +Gets if a table exists in the metastore for a specific catalog and schema. 
The caller must satisfy one of the following requirements: +\itemize{ +\item Be a metastore admin +\item Be the owner of the parent catalog +\item Be the owner of the parent schema and have the USE_CATALOG privilege on the parent catalog +\item Have the \strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} privilege on the parent schema, and either be the table owner or have the SELECT privilege on the table +\item Have BROWSE privilege on the parent catalog +\item Have BROWSE privilege on the parent schema +} +} diff --git a/man/experimentsCreateExperiment.Rd b/man/experimentsCreateExperiment.Rd deleted file mode 100644 index 0d7e6823..00000000 --- a/man/experimentsCreateExperiment.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsCreateExperiment} -\alias{experimentsCreateExperiment} -\title{Create experiment.} -\usage{ -experimentsCreateExperiment( - client, - name, - artifact_location = NULL, - tags = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Experiment name.} - -\item{artifact_location}{Location where all artifacts for the experiment are stored.} - -\item{tags}{A collection of tags to set on the experiment.} -} -\description{ -Creates an experiment with a name. Returns the ID of the newly created -experiment. Validates that another experiment with the same name does not -already exist and fails if another experiment with the same name already -exists. -} -\details{ -Throws \code{RESOURCE_ALREADY_EXISTS} if a experiment with the given name exists. -} diff --git a/man/experimentsCreateRun.Rd b/man/experimentsCreateRun.Rd deleted file mode 100644 index 0320ee41..00000000 --- a/man/experimentsCreateRun.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsCreateRun} -\alias{experimentsCreateRun} -\title{Create a run.} -\usage{ -experimentsCreateRun( - client, - experiment_id = NULL, - start_time = NULL, - tags = NULL, - user_id = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{experiment_id}{ID of the associated experiment.} - -\item{start_time}{Unix timestamp in milliseconds of when the run started.} - -\item{tags}{Additional metadata for run.} - -\item{user_id}{ID of the user executing the run.} -} -\description{ -Creates a new run within an experiment. A run is usually a single execution -of a machine learning or data ETL pipeline. MLflow uses runs to track the -\code{mlflowParam}, \code{mlflowMetric} and \code{mlflowRunTag} associated with a single -execution. -} diff --git a/man/experimentsDeleteExperiment.Rd b/man/experimentsDeleteExperiment.Rd deleted file mode 100644 index b2f07ffd..00000000 --- a/man/experimentsDeleteExperiment.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsDeleteExperiment} -\alias{experimentsDeleteExperiment} -\title{Delete an experiment.} -\usage{ -experimentsDeleteExperiment(client, experiment_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{experiment_id}{Required. ID of the associated experiment.} -} -\description{ -Marks an experiment and associated metadata, runs, metrics, params, and tags -for deletion. If the experiment uses FileStore, artifacts associated with -experiment are also deleted.
-} diff --git a/man/experimentsDeleteRun.Rd b/man/experimentsDeleteRun.Rd deleted file mode 100644 index 1178b544..00000000 --- a/man/experimentsDeleteRun.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsDeleteRun} -\alias{experimentsDeleteRun} -\title{Delete a run.} -\usage{ -experimentsDeleteRun(client, run_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{run_id}{Required. ID of the run to delete.} -} -\description{ -Marks a run for deletion. -} diff --git a/man/experimentsDeleteRuns.Rd b/man/experimentsDeleteRuns.Rd deleted file mode 100644 index 22648703..00000000 --- a/man/experimentsDeleteRuns.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsDeleteRuns} -\alias{experimentsDeleteRuns} -\title{Delete runs by creation time.} -\usage{ -experimentsDeleteRuns( - client, - experiment_id, - max_timestamp_millis, - max_runs = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{experiment_id}{Required. The ID of the experiment containing the runs to delete.} - -\item{max_timestamp_millis}{Required. The maximum creation timestamp in milliseconds since the UNIX epoch for deleting runs.} - -\item{max_runs}{An optional positive integer indicating the maximum number of runs to delete.} -} -\description{ -Bulk delete runs in an experiment that were created prior to or at the -specified timestamp. Deletes at most max_runs per request. To call this API -from a Databricks Notebook in Python, you can use the client code snippet on -https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-delete. -} diff --git a/man/experimentsDeleteTag.Rd b/man/experimentsDeleteTag.Rd deleted file mode 100644 index 61fbd4b4..00000000 --- a/man/experimentsDeleteTag.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsDeleteTag} -\alias{experimentsDeleteTag} -\title{Delete a tag.} -\usage{ -experimentsDeleteTag(client, run_id, key) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{run_id}{Required. ID of the run that the tag was logged under.} - -\item{key}{Required. Name of the tag.} -} -\description{ -Deletes a tag on a run. Tags are run metadata that can be updated during a -run and after a run completes. -} diff --git a/man/experimentsGetByName.Rd b/man/experimentsGetByName.Rd deleted file mode 100644 index 9b2fcc99..00000000 --- a/man/experimentsGetByName.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsGetByName} -\alias{experimentsGetByName} -\title{Get metadata.} -\usage{ -experimentsGetByName(client, experiment_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{experiment_name}{Required. Name of the associated experiment.} -} -\description{ -Gets metadata for an experiment. -} -\details{ -This endpoint will return deleted experiments, but prefers the active -experiment if an active and deleted experiment share the same name. If -multiple deleted experiments share the same name, the API will return one of -them. - -Throws \code{RESOURCE_DOES_NOT_EXIST} if no experiment with the specified name -exists. 
-} diff --git a/man/experimentsGetExperiment.Rd b/man/experimentsGetExperiment.Rd deleted file mode 100644 index d0c1f4eb..00000000 --- a/man/experimentsGetExperiment.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsGetExperiment} -\alias{experimentsGetExperiment} -\title{Get an experiment.} -\usage{ -experimentsGetExperiment(client, experiment_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{experiment_id}{Required. ID of the associated experiment.} -} -\description{ -Gets metadata for an experiment. This method works on deleted experiments. -} diff --git a/man/experimentsGetHistory.Rd b/man/experimentsGetHistory.Rd deleted file mode 100644 index 46719f96..00000000 --- a/man/experimentsGetHistory.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsGetHistory} -\alias{experimentsGetHistory} -\title{Get history of a given metric within a run.} -\usage{ -experimentsGetHistory( - client, - metric_key, - max_results = NULL, - page_token = NULL, - run_id = NULL, - run_uuid = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{metric_key}{Required. Name of the metric.} - -\item{max_results}{Maximum number of Metric records to return per paginated request.} - -\item{page_token}{Token indicating the page of metric histories to fetch.} - -\item{run_id}{ID of the run from which to fetch metric values.} - -\item{run_uuid}{Deprecated, use run_id instead. ID of the run from which to fetch metric values.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets a list of all values for the specified metric for a given run. -} diff --git a/man/experimentsGetPermissionLevels.Rd b/man/experimentsGetPermissionLevels.Rd deleted file mode 100644 index 447ef6de..00000000 --- a/man/experimentsGetPermissionLevels.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsGetPermissionLevels} -\alias{experimentsGetPermissionLevels} -\title{Get experiment permission levels.} -\usage{ -experimentsGetPermissionLevels(client, experiment_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{experiment_id}{Required. The experiment for which to get or manage permissions.} -} -\description{ -Gets the permission levels that a user can have on an object. -} diff --git a/man/experimentsGetPermissions.Rd b/man/experimentsGetPermissions.Rd deleted file mode 100644 index c6c67966..00000000 --- a/man/experimentsGetPermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsGetPermissions} -\alias{experimentsGetPermissions} -\title{Get experiment permissions.} -\usage{ -experimentsGetPermissions(client, experiment_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{experiment_id}{Required. The experiment for which to get or manage permissions.} -} -\description{ -Gets the permissions of an experiment. Experiments can inherit permissions -from their root object. 
-} diff --git a/man/experimentsGetRun.Rd b/man/experimentsGetRun.Rd deleted file mode 100644 index 706f6371..00000000 --- a/man/experimentsGetRun.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsGetRun} -\alias{experimentsGetRun} -\title{Get a run.} -\usage{ -experimentsGetRun(client, run_id, run_uuid = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{run_id}{Required. ID of the run to fetch.} - -\item{run_uuid}{Deprecated, use run_id instead. ID of the run to fetch.} -} -\description{ -Gets the metadata, metrics, params, and tags for a run. In the case where -multiple metrics with the same key are logged for a run, return only the -value with the latest timestamp. -} -\details{ -If there are multiple values with the latest timestamp, return the maximum of -these values. -} diff --git a/man/experimentsListArtifacts.Rd b/man/experimentsListArtifacts.Rd deleted file mode 100644 index 145a38fd..00000000 --- a/man/experimentsListArtifacts.Rd +++ /dev/null @@ -1,32 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsListArtifacts} -\alias{experimentsListArtifacts} -\title{Get all artifacts.} -\usage{ -experimentsListArtifacts( - client, - page_token = NULL, - path = NULL, - run_id = NULL, - run_uuid = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{page_token}{Token indicating the page of artifact results to fetch.} - -\item{path}{Filter artifacts matching this path (a relative path from the root artifact directory).} - -\item{run_id}{ID of the run whose artifacts to list.} - -\item{run_uuid}{Deprecated, use run_id instead. ID of the run whose artifacts to list.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -List artifacts for a run. Takes an optional \code{artifact_path} prefix. If it is -specified, the response contains only artifacts with the specified prefix.', -} diff --git a/man/experimentsListExperiments.Rd b/man/experimentsListExperiments.Rd deleted file mode 100644 index afa9640c..00000000 --- a/man/experimentsListExperiments.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsListExperiments} -\alias{experimentsListExperiments} -\title{List experiments.} -\usage{ -experimentsListExperiments( - client, - max_results = NULL, - page_token = NULL, - view_type = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{max_results}{Maximum number of experiments desired.} - -\item{page_token}{Token indicating the page of experiments to fetch.} - -\item{view_type}{Qualifier for type of experiments to be returned.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets a list of all experiments. -} diff --git a/man/experimentsLogBatch.Rd b/man/experimentsLogBatch.Rd deleted file mode 100644 index ebf8525a..00000000 --- a/man/experimentsLogBatch.Rd +++ /dev/null @@ -1,66 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsLogBatch} -\alias{experimentsLogBatch} -\title{Log a batch.} -\usage{ -experimentsLogBatch( - client, - metrics = NULL, - params = NULL, - run_id = NULL, - tags = NULL -) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{metrics}{Metrics to log.} - -\item{params}{Params to log.} - -\item{run_id}{ID of the run to log under.} - -\item{tags}{Tags to log.} -} -\description{ -Logs a batch of metrics, params, and tags for a run. If any data failed to be -persisted, the server will respond with an error (non-200 status code). -} -\details{ -In case of error (due to internal server error or an invalid request), -partial data may be written. - -You can write metrics, params, and tags in interleaving fashion, but within a -given entity type are guaranteed to follow the order specified in the request -body. - -The overwrite behavior for metrics, params, and tags is as follows: -\itemize{ -\item Metrics: metric values are never overwritten. Logging a metric (key, value, -timestamp) appends to the set of values for the metric with the provided key. -\item Tags: tag values can be overwritten by successive writes to the same tag -key. That is, if multiple tag values with the same key are provided in the -same API request, the last-provided tag value is written. Logging the same -tag (key, value) is permitted. Specifically, logging a tag is idempotent. -\item Parameters: once written, param values cannot be changed (attempting to -overwrite a param value will result in an error). However, logging the same -param (key, value) is permitted. Specifically, logging a param is idempotent. -} - -Request Limits ------------------------------- A single JSON-serialized API -request may be up to 1 MB in size and contain: -\itemize{ -\item No more than 1000 metrics, params, and tags in total * Up to 1000 metrics * -Up to 100 params * Up to 100 tags -} - -For example, a valid request might contain 900 metrics, 50 params, and 50 -tags, but logging 900 metrics, 50 params, and 51 tags is invalid. - -The following limits also apply to metric, param, and tag keys and values: -\itemize{ -\item Metric keys, param keys, and tag keys can be up to 250 characters in length -\item Parameter and tag values can be up to 250 characters in length -} -} diff --git a/man/experimentsLogInputs.Rd b/man/experimentsLogInputs.Rd deleted file mode 100644 index 9f584d01..00000000 --- a/man/experimentsLogInputs.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsLogInputs} -\alias{experimentsLogInputs} -\title{Log inputs to a run.} -\usage{ -experimentsLogInputs(client, datasets = NULL, run_id = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{datasets}{Dataset inputs.} - -\item{run_id}{ID of the run to log under.} -} -\description{ -\strong{NOTE:} Experimental: This API may change or be removed in a future release -without warning. -} diff --git a/man/experimentsLogMetric.Rd b/man/experimentsLogMetric.Rd deleted file mode 100644 index 08811edd..00000000 --- a/man/experimentsLogMetric.Rd +++ /dev/null @@ -1,36 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsLogMetric} -\alias{experimentsLogMetric} -\title{Log a metric.} -\usage{ -experimentsLogMetric( - client, - key, - value, - timestamp, - run_id = NULL, - run_uuid = NULL, - step = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{key}{Required. Name of the metric.} - -\item{value}{Required. Double value of the metric being logged.} - -\item{timestamp}{Required. 
Unix timestamp in milliseconds at the time metric was logged.} - -\item{run_id}{ID of the run under which to log the metric.} - -\item{run_uuid}{Deprecated, use run_id instead. ID of the run under which to log the metric.} - -\item{step}{Step at which to log the metric.} -} -\description{ -Logs a metric for a run. A metric is a key-value pair (string key, float -value) with an associated timestamp. Examples include the various metrics -that represent ML model accuracy. A metric can be logged multiple times. -} diff --git a/man/experimentsLogModel.Rd b/man/experimentsLogModel.Rd deleted file mode 100644 index c4eec774..00000000 --- a/man/experimentsLogModel.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsLogModel} -\alias{experimentsLogModel} -\title{Log a model.} -\usage{ -experimentsLogModel(client, model_json = NULL, run_id = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{model_json}{MLmodel file in json format.} - -\item{run_id}{ID of the run to log under.} -} -\description{ -\strong{NOTE:} Experimental: This API may change or be removed in a future release -without warning. -} diff --git a/man/experimentsLogParam.Rd b/man/experimentsLogParam.Rd deleted file mode 100644 index 0fbe4883..00000000 --- a/man/experimentsLogParam.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsLogParam} -\alias{experimentsLogParam} -\title{Log a param.} -\usage{ -experimentsLogParam(client, key, value, run_id = NULL, run_uuid = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{key}{Required. Name of the param.} - -\item{value}{Required. String value of the param being logged.} - -\item{run_id}{ID of the run under which to log the param.} - -\item{run_uuid}{Deprecated, use run_id instead. ID of the run under which to log the param.} -} -\description{ -Logs a param used for a run. A param is a key-value pair (string key, string -value). Examples include hyperparameters used for ML model training and -constant dates and values used in an ETL pipeline. A param can be logged only -once for a run. -} diff --git a/man/experimentsRestoreExperiment.Rd b/man/experimentsRestoreExperiment.Rd deleted file mode 100644 index 24cff68f..00000000 --- a/man/experimentsRestoreExperiment.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsRestoreExperiment} -\alias{experimentsRestoreExperiment} -\title{Restores an experiment.} -\usage{ -experimentsRestoreExperiment(client, experiment_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{experiment_id}{Required. ID of the associated experiment.} -} -\description{ -Restore an experiment marked for deletion. This also restores associated -metadata, runs, metrics, params, and tags. If experiment uses FileStore, -underlying artifacts associated with experiment are also restored. -} -\details{ -Throws \code{RESOURCE_DOES_NOT_EXIST} if experiment was never created or was -permanently deleted. 
-} diff --git a/man/experimentsRestoreRun.Rd b/man/experimentsRestoreRun.Rd deleted file mode 100644 index aeae0c44..00000000 --- a/man/experimentsRestoreRun.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsRestoreRun} -\alias{experimentsRestoreRun} -\title{Restore a run.} -\usage{ -experimentsRestoreRun(client, run_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{run_id}{Required. ID of the run to restore.} -} -\description{ -Restores a deleted run. -} diff --git a/man/experimentsRestoreRuns.Rd b/man/experimentsRestoreRuns.Rd deleted file mode 100644 index 7206be31..00000000 --- a/man/experimentsRestoreRuns.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsRestoreRuns} -\alias{experimentsRestoreRuns} -\title{Restore runs by deletion time.} -\usage{ -experimentsRestoreRuns( - client, - experiment_id, - min_timestamp_millis, - max_runs = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{experiment_id}{Required. The ID of the experiment containing the runs to restore.} - -\item{min_timestamp_millis}{Required. The minimum deletion timestamp in milliseconds since the UNIX epoch for restoring runs.} - -\item{max_runs}{An optional positive integer indicating the maximum number of runs to restore.} -} -\description{ -Bulk restore runs in an experiment that were deleted no earlier than the -specified timestamp. Restores at most max_runs per request. To call this API -from a Databricks Notebook in Python, you can use the client code snippet on -https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-restore. -} diff --git a/man/experimentsSearchExperiments.Rd b/man/experimentsSearchExperiments.Rd deleted file mode 100644 index 368af707..00000000 --- a/man/experimentsSearchExperiments.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsSearchExperiments} -\alias{experimentsSearchExperiments} -\title{Search experiments.} -\usage{ -experimentsSearchExperiments( - client, - filter = NULL, - max_results = NULL, - order_by = NULL, - page_token = NULL, - view_type = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{filter}{String representing a SQL filter condition (e.g.} - -\item{max_results}{Maximum number of experiments desired.} - -\item{order_by}{List of columns for ordering search results, which can include experiment name and last updated timestamp with an optional 'DESC' or 'ASC' annotation, where 'ASC' is the default.} - -\item{page_token}{Token indicating the page of experiments to fetch.} - -\item{view_type}{Qualifier for type of experiments to be returned.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Searches for experiments that satisfy specified search criteria. 
-} diff --git a/man/experimentsSearchRuns.Rd b/man/experimentsSearchRuns.Rd deleted file mode 100644 index d4390ea5..00000000 --- a/man/experimentsSearchRuns.Rd +++ /dev/null @@ -1,40 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsSearchRuns} -\alias{experimentsSearchRuns} -\title{Search for runs.} -\usage{ -experimentsSearchRuns( - client, - experiment_ids = NULL, - filter = NULL, - max_results = NULL, - order_by = NULL, - page_token = NULL, - run_view_type = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{experiment_ids}{List of experiment IDs to search over.} - -\item{filter}{A filter expression over params, metrics, and tags, that allows returning a subset of runs.} - -\item{max_results}{Maximum number of runs desired.} - -\item{order_by}{List of columns to be ordered by, including attributes, params, metrics, and tags with an optional 'DESC' or 'ASC' annotation, where 'ASC' is the default.} - -\item{page_token}{Token for the current page of runs.} - -\item{run_view_type}{Whether to display only active, only deleted, or all runs.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Searches for runs that satisfy expressions. -} -\details{ -Search expressions can use \code{mlflowMetric} and \code{mlflowParam} keys.', -} diff --git a/man/experimentsSetExperimentTag.Rd b/man/experimentsSetExperimentTag.Rd deleted file mode 100644 index 3eacea8a..00000000 --- a/man/experimentsSetExperimentTag.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsSetExperimentTag} -\alias{experimentsSetExperimentTag} -\title{Set a tag.} -\usage{ -experimentsSetExperimentTag(client, experiment_id, key, value) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{experiment_id}{Required. ID of the experiment under which to log the tag.} - -\item{key}{Required. Name of the tag.} - -\item{value}{Required. String value of the tag being logged.} -} -\description{ -Sets a tag on an experiment. Experiment tags are metadata that can be -updated. -} diff --git a/man/experimentsSetPermissions.Rd b/man/experimentsSetPermissions.Rd deleted file mode 100644 index 9b3df7de..00000000 --- a/man/experimentsSetPermissions.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsSetPermissions} -\alias{experimentsSetPermissions} -\title{Set experiment permissions.} -\usage{ -experimentsSetPermissions(client, experiment_id, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{experiment_id}{Required. The experiment for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets permissions on an experiment. Experiments can inherit permissions from -their root object. -} diff --git a/man/experimentsSetTag.Rd b/man/experimentsSetTag.Rd deleted file mode 100644 index dd0843e2..00000000 --- a/man/experimentsSetTag.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsSetTag} -\alias{experimentsSetTag} -\title{Set a tag.} -\usage{ -experimentsSetTag(client, key, value, run_id = NULL, run_uuid = NULL) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{key}{Required. Name of the tag.} - -\item{value}{Required. String value of the tag being logged.} - -\item{run_id}{ID of the run under which to log the tag.} - -\item{run_uuid}{Deprecated, use run_id instead. ID of the run under which to log the tag.} -} -\description{ -Sets a tag on a run. Tags are run metadata that can be updated during a run -and after a run completes. -} diff --git a/man/experimentsUpdateExperiment.Rd b/man/experimentsUpdateExperiment.Rd deleted file mode 100644 index 5c3ce54b..00000000 --- a/man/experimentsUpdateExperiment.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsUpdateExperiment} -\alias{experimentsUpdateExperiment} -\title{Update an experiment.} -\usage{ -experimentsUpdateExperiment(client, experiment_id, new_name = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{experiment_id}{Required. ID of the associated experiment.} - -\item{new_name}{If provided, the experiment's name is changed to the new name.} -} -\description{ -Updates experiment metadata. -} diff --git a/man/experimentsUpdatePermissions.Rd b/man/experimentsUpdatePermissions.Rd deleted file mode 100644 index 5fb8acfe..00000000 --- a/man/experimentsUpdatePermissions.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsUpdatePermissions} -\alias{experimentsUpdatePermissions} -\title{Update experiment permissions.} -\usage{ -experimentsUpdatePermissions(client, experiment_id, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{experiment_id}{Required. The experiment for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Updates the permissions on an experiment. Experiments can inherit permissions -from their root object. -} diff --git a/man/experimentsUpdateRun.Rd b/man/experimentsUpdateRun.Rd deleted file mode 100644 index 0cbb95d3..00000000 --- a/man/experimentsUpdateRun.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/experiments.R -\name{experimentsUpdateRun} -\alias{experimentsUpdateRun} -\title{Update a run.} -\usage{ -experimentsUpdateRun( - client, - end_time = NULL, - run_id = NULL, - run_uuid = NULL, - status = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{end_time}{Unix timestamp in milliseconds of when the run ended.} - -\item{run_id}{ID of the run to update.} - -\item{run_uuid}{Deprecated, use run_id instead. ID of the run to update.} - -\item{status}{Updated status of the run.} -} -\description{ -Updates run metadata. -} diff --git a/man/export_job_run.Rd b/man/export_job_run.Rd new file mode 100644 index 00000000..06c51616 --- /dev/null +++ b/man/export_job_run.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{export_job_run} +\alias{export_job_run} +\alias{jobsExportRun} +\title{Export and retrieve a job run.} +\usage{ +export_job_run(client, run_id, views_to_export = NULL) + +jobsExportRun(client, run_id, views_to_export = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. 
The canonical identifier for the run.} + +\item{views_to_export}{Which views to export (CODE, DASHBOARDS, or ALL).} +} +\description{ +Export and retrieve the job run task. +} diff --git a/man/export_notebook.Rd b/man/export_notebook.Rd new file mode 100644 index 00000000..1c48fb53 --- /dev/null +++ b/man/export_notebook.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{export_notebook} +\alias{export_notebook} +\alias{workspaceExport} +\title{Export a workspace object.} +\usage{ +export_notebook(client, path, format = NULL) + +workspaceExport(client, path, format = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{path}{Required. The absolute path of the object or directory.} + +\item{format}{This specifies the format of the exported file.} +} +\description{ +Exports an object or the contents of an entire directory. +} +\details{ +If \code{path} does not exist, this call returns an error +\code{RESOURCE_DOES_NOT_EXIST}. + +If the exported data would exceed the size limit, this call returns +\code{MAX_NOTEBOOK_SIZE_EXCEEDED}. Currently, this API does not support exporting +a library. +} diff --git a/man/export_serving_endpoint_metrics.Rd b/man/export_serving_endpoint_metrics.Rd new file mode 100644 index 00000000..638b9413 --- /dev/null +++ b/man/export_serving_endpoint_metrics.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{export_serving_endpoint_metrics} +\alias{export_serving_endpoint_metrics} +\alias{servingEndpointsExportMetrics} +\title{Get metrics of a serving endpoint.} +\usage{ +export_serving_endpoint_metrics(client, name) + +servingEndpointsExportMetrics(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the serving endpoint to retrieve metrics for.} +} +\description{ +Retrieves the metrics associated with the provided serving endpoint in either +Prometheus or OpenMetrics exposition format. +} diff --git a/man/externalLocationsCreate.Rd b/man/externalLocationsCreate.Rd deleted file mode 100644 index 8778b88f..00000000 --- a/man/externalLocationsCreate.Rd +++ /dev/null @@ -1,42 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/external_locations.R -\name{externalLocationsCreate} -\alias{externalLocationsCreate} -\title{Create an external location.} -\usage{ -externalLocationsCreate( - client, - name, - url, - credential_name, - access_point = NULL, - comment = NULL, - encryption_details = NULL, - read_only = NULL, - skip_validation = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the external location.} - -\item{url}{Required. Path URL of the external location.} - -\item{credential_name}{Required. Name of the storage credential used with this location.} - -\item{access_point}{The AWS access point to use when accesing s3 for this external location.} - -\item{comment}{User-provided free-form text description.} - -\item{encryption_details}{Encryption options that apply to clients connecting to cloud storage.} - -\item{read_only}{Indicates whether the external location is read-only.} - -\item{skip_validation}{Skips validation of the storage credential associated with the external location.} -} -\description{ -Creates a new external location entry in the metastore.
The caller must be a -metastore admin or have the \strong{CREATE_EXTERNAL_LOCATION} privilege on both -the metastore and the associated storage credential. -} diff --git a/man/externalLocationsDelete.Rd b/man/externalLocationsDelete.Rd deleted file mode 100644 index 0269a0d9..00000000 --- a/man/externalLocationsDelete.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/external_locations.R -\name{externalLocationsDelete} -\alias{externalLocationsDelete} -\title{Delete an external location.} -\usage{ -externalLocationsDelete(client, name, force = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the external location.} - -\item{force}{Force deletion even if there are dependent external tables or mounts.} -} -\description{ -Deletes the specified external location from the metastore. The caller must -be the owner of the external location. -} diff --git a/man/externalLocationsGet.Rd b/man/externalLocationsGet.Rd deleted file mode 100644 index 5a5e95c4..00000000 --- a/man/externalLocationsGet.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/external_locations.R -\name{externalLocationsGet} -\alias{externalLocationsGet} -\title{Get an external location.} -\usage{ -externalLocationsGet(client, name, include_browse = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the external location.} - -\item{include_browse}{Whether to include external locations in the response for which the principal can only access selective metadata for.} -} -\description{ -Gets an external location from the metastore. The caller must be either a -metastore admin, the owner of the external location, or a user that has some -privilege on the external location. -} diff --git a/man/externalLocationsList.Rd b/man/externalLocationsList.Rd deleted file mode 100644 index e878eddf..00000000 --- a/man/externalLocationsList.Rd +++ /dev/null @@ -1,32 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/external_locations.R -\name{externalLocationsList} -\alias{externalLocationsList} -\title{List external locations.} -\usage{ -externalLocationsList( - client, - include_browse = NULL, - max_results = NULL, - page_token = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{include_browse}{Whether to include external locations in the response for which the principal can only access selective metadata for.} - -\item{max_results}{Maximum number of external locations to return.} - -\item{page_token}{Opaque pagination token to go to next page based on previous query.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets an array of external locations (\strong{ExternalLocationInfo} objects) from -the metastore. The caller must be a metastore admin, the owner of the -external location, or a user that has some privilege on the external -location. There is no guarantee of a specific ordering of the elements in the -array. 
-} diff --git a/man/externalLocationsUpdate.Rd b/man/externalLocationsUpdate.Rd deleted file mode 100644 index 4ebfb0f8..00000000 --- a/man/externalLocationsUpdate.Rd +++ /dev/null @@ -1,51 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/external_locations.R -\name{externalLocationsUpdate} -\alias{externalLocationsUpdate} -\title{Update an external location.} -\usage{ -externalLocationsUpdate( - client, - name, - access_point = NULL, - comment = NULL, - credential_name = NULL, - encryption_details = NULL, - force = NULL, - new_name = NULL, - owner = NULL, - read_only = NULL, - skip_validation = NULL, - url = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the external location.} - -\item{access_point}{The AWS access point to use when accesing s3 for this external location.} - -\item{comment}{User-provided free-form text description.} - -\item{credential_name}{Name of the storage credential used with this location.} - -\item{encryption_details}{Encryption options that apply to clients connecting to cloud storage.} - -\item{force}{Force update even if changing url invalidates dependent external tables or mounts.} - -\item{new_name}{New name for the external location.} - -\item{owner}{The owner of the external location.} - -\item{read_only}{Indicates whether the external location is read-only.} - -\item{skip_validation}{Skips validation of the storage credential associated with the external location.} - -\item{url}{Path URL of the external location.} -} -\description{ -Updates an external location in the metastore. The caller must be the owner -of the external location, or be a metastore admin. In the second case, the -admin can only update the name of the external location. -} diff --git a/man/filesCreateDirectory.Rd b/man/filesCreateDirectory.Rd deleted file mode 100644 index 13bb642a..00000000 --- a/man/filesCreateDirectory.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/files.R -\name{filesCreateDirectory} -\alias{filesCreateDirectory} -\title{Create a directory.} -\usage{ -filesCreateDirectory(client, directory_path) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{directory_path}{Required. The absolute path of a directory.} -} -\description{ -Creates an empty directory. If necessary, also creates any parent directories -of the new, empty directory (like the shell command \code{mkdir -p}). If called on -an existing directory, returns a success response; this method is idempotent -(it will succeed if the directory already exists). -} diff --git a/man/filesDelete.Rd b/man/filesDelete.Rd deleted file mode 100644 index 2dd4c67a..00000000 --- a/man/filesDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/files.R -\name{filesDelete} -\alias{filesDelete} -\title{Delete a file.} -\usage{ -filesDelete(client, file_path) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{file_path}{Required. The absolute path of the file.} -} -\description{ -Deletes a file. If the request is successful, there is no response body. 
-} diff --git a/man/filesDeleteDirectory.Rd b/man/filesDeleteDirectory.Rd deleted file mode 100644 index 36c32d96..00000000 --- a/man/filesDeleteDirectory.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/files.R -\name{filesDeleteDirectory} -\alias{filesDeleteDirectory} -\title{Delete a directory.} -\usage{ -filesDeleteDirectory(client, directory_path) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{directory_path}{Required. The absolute path of a directory.} -} -\description{ -Deletes an empty directory. -} -\details{ -To delete a non-empty directory, first delete all of its contents. This can -be done by listing the directory contents and deleting each file and -subdirectory recursively. -} diff --git a/man/filesDownload.Rd b/man/filesDownload.Rd deleted file mode 100644 index b53a5e46..00000000 --- a/man/filesDownload.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/files.R -\name{filesDownload} -\alias{filesDownload} -\title{Download a file.} -\usage{ -filesDownload(client, file_path) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{file_path}{Required. The absolute path of the file.} -} -\description{ -Downloads a file of up to 5 GiB. The file contents are the response body. -This is a standard HTTP file download, not a JSON RPC. -} diff --git a/man/filesGetDirectoryMetadata.Rd b/man/filesGetDirectoryMetadata.Rd deleted file mode 100644 index 30cbeb4e..00000000 --- a/man/filesGetDirectoryMetadata.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/files.R -\name{filesGetDirectoryMetadata} -\alias{filesGetDirectoryMetadata} -\title{Get directory metadata.} -\usage{ -filesGetDirectoryMetadata(client, directory_path) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{directory_path}{Required. The absolute path of a directory.} -} -\description{ -Get the metadata of a directory. The response HTTP headers contain the -metadata. There is no response body. -} -\details{ -This method is useful to check if a directory exists and the caller has -access to it. - -If you wish to ensure the directory exists, you can instead use \code{PUT}, which -will create the directory if it does not exist, and is idempotent (it will -succeed if the directory already exists). -} diff --git a/man/filesGetMetadata.Rd b/man/filesGetMetadata.Rd deleted file mode 100644 index 5bf5d399..00000000 --- a/man/filesGetMetadata.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/files.R -\name{filesGetMetadata} -\alias{filesGetMetadata} -\title{Get file metadata.} -\usage{ -filesGetMetadata(client, file_path) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{file_path}{Required. The absolute path of the file.} -} -\description{ -Get the metadata of a file. The response HTTP headers contain the metadata. -There is no response body. 
-} diff --git a/man/filesListDirectoryContents.Rd b/man/filesListDirectoryContents.Rd deleted file mode 100644 index c8d6496e..00000000 --- a/man/filesListDirectoryContents.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/files.R -\name{filesListDirectoryContents} -\alias{filesListDirectoryContents} -\title{List directory contents.} -\usage{ -filesListDirectoryContents( - client, - directory_path, - page_size = NULL, - page_token = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{directory_path}{Required. The absolute path of a directory.} - -\item{page_size}{The maximum number of directory entries to return.} - -\item{page_token}{An opaque page token which was the \code{next_page_token} in the response of the previous request to list the contents of this directory.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Returns the contents of a directory. If there is no directory at the -specified path, the API returns a HTTP 404 error. -} diff --git a/man/filesUpload.Rd b/man/filesUpload.Rd deleted file mode 100644 index c581457f..00000000 --- a/man/filesUpload.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/files.R -\name{filesUpload} -\alias{filesUpload} -\title{Upload a file.} -\usage{ -filesUpload(client, file_path, contents, overwrite = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{file_path}{Required. The absolute path of the file.} - -\item{contents}{This field has no description yet.} - -\item{overwrite}{If true, an existing file will be overwritten.} -} -\description{ -Uploads a file of up to 5 GiB. The file contents should be sent as the -request body as raw bytes (an octet stream); do not encode or otherwise -modify the bytes before sending. The contents of the resulting file will be -exactly the bytes sent in the request body. If the request is successful, -there is no response body. -} diff --git a/man/functionsCreate.Rd b/man/functionsCreate.Rd deleted file mode 100644 index c75a40c8..00000000 --- a/man/functionsCreate.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/functions.R -\name{functionsCreate} -\alias{functionsCreate} -\title{Create a function.} -\usage{ -functionsCreate(client, function_info) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{function_info}{Required. Partial \strong{FunctionInfo} specifying the function to be created.} -} -\description{ -Creates a new function -} -\details{ -The user must have the following permissions in order for the function to be -created: - \strong{USE_CATALOG} on the function's parent catalog - \strong{USE_SCHEMA} -and \strong{CREATE_FUNCTION} on the function's parent schema -} diff --git a/man/functionsDelete.Rd b/man/functionsDelete.Rd deleted file mode 100644 index 8129c1f9..00000000 --- a/man/functionsDelete.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/functions.R -\name{functionsDelete} -\alias{functionsDelete} -\title{Delete a function.} -\usage{ -functionsDelete(client, name, force = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. 
The fully-qualified name of the function (of the form \strong{catalog_name}.\strong{schema_name}.\strong{function__name}).} - -\item{force}{Force deletion even if the function is notempty.} -} -\description{ -Deletes the function that matches the supplied name. For the deletion to -succeed, the user must satisfy one of the following conditions: - Is the -owner of the function's parent catalog - Is the owner of the function's -parent schema and have the \strong{USE_CATALOG} privilege on its parent catalog - -Is the owner of the function itself and have both the \strong{USE_CATALOG} -privilege on its parent catalog and the \strong{USE_SCHEMA} privilege on its -parent schema -} diff --git a/man/functionsGet.Rd b/man/functionsGet.Rd deleted file mode 100644 index 1a0693d1..00000000 --- a/man/functionsGet.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/functions.R -\name{functionsGet} -\alias{functionsGet} -\title{Get a function.} -\usage{ -functionsGet(client, name, include_browse = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The fully-qualified name of the function (of the form \strong{catalog_name}.\strong{schema_name}.\strong{function__name}).} - -\item{include_browse}{Whether to include functions in the response for which the principal can only access selective metadata for.} -} -\description{ -Gets a function from within a parent catalog and schema. For the fetch to -succeed, the user must satisfy one of the following requirements: - Is a -metastore admin - Is an owner of the function's parent catalog - Have the -\strong{USE_CATALOG} privilege on the function's parent catalog and be the owner -of the function - Have the \strong{USE_CATALOG} privilege on the function's parent -catalog, the \strong{USE_SCHEMA} privilege on the function's parent schema, and -the \strong{EXECUTE} privilege on the function itself -} diff --git a/man/functionsList.Rd b/man/functionsList.Rd deleted file mode 100644 index ca03b5f5..00000000 --- a/man/functionsList.Rd +++ /dev/null @@ -1,40 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/functions.R -\name{functionsList} -\alias{functionsList} -\title{List functions.} -\usage{ -functionsList( - client, - catalog_name, - schema_name, - include_browse = NULL, - max_results = NULL, - page_token = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{catalog_name}{Required. Name of parent catalog for functions of interest.} - -\item{schema_name}{Required. Parent schema of functions.} - -\item{include_browse}{Whether to include functions in the response for which the principal can only access selective metadata for.} - -\item{max_results}{Maximum number of functions to return.} - -\item{page_token}{Opaque pagination token to go to next page based on previous query.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -List functions within the specified parent catalog and schema. If the user is -a metastore admin, all functions are returned in the output list. Otherwise, -the user must have the \strong{USE_CATALOG} privilege on the catalog and the -\strong{USE_SCHEMA} privilege on the schema, and the output list contains only -functions for which either the user has the \strong{EXECUTE} privilege or the user -is the owner. There is no guarantee of a specific ordering of the elements in -the array. 
-} diff --git a/man/functionsUpdate.Rd b/man/functionsUpdate.Rd deleted file mode 100644 index 98586c41..00000000 --- a/man/functionsUpdate.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/functions.R -\name{functionsUpdate} -\alias{functionsUpdate} -\title{Update a function.} -\usage{ -functionsUpdate(client, name, owner = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The fully-qualified name of the function (of the form \strong{catalog_name}.\strong{schema_name}.\strong{function__name}).} - -\item{owner}{Username of current owner of function.} -} -\description{ -Updates the function that matches the supplied name. Only the owner of the -function can be updated. If the user is not a metastore admin, the user must -be a member of the group that is the new function owner. - Is a metastore -admin - Is the owner of the function's parent catalog - Is the owner of the -function's parent schema and has the \strong{USE_CATALOG} privilege on its parent -catalog - Is the owner of the function itself and has the \strong{USE_CATALOG} -privilege on its parent catalog as well as the \strong{USE_SCHEMA} privilege on -the function's parent schema. -} diff --git a/man/get_account_access_control_proxy_assignable_roles_for_resource.Rd b/man/get_account_access_control_proxy_assignable_roles_for_resource.Rd new file mode 100644 index 00000000..16c9ee84 --- /dev/null +++ b/man/get_account_access_control_proxy_assignable_roles_for_resource.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/account_access_control_proxy.R +\name{get_account_access_control_proxy_assignable_roles_for_resource} +\alias{get_account_access_control_proxy_assignable_roles_for_resource} +\alias{accountAccessControlProxyGetAssignableRolesForResource} +\title{Get assignable roles for a resource.} +\usage{ +get_account_access_control_proxy_assignable_roles_for_resource( + client, + resource +) + +accountAccessControlProxyGetAssignableRolesForResource(client, resource) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{resource}{Required. The resource name for which assignable roles will be listed.} +} +\description{ +Gets all the roles that can be granted on an account-level resource. A role +is grantable if the rule set on the resource can contain an access rule of +the role. +} diff --git a/man/get_account_access_control_proxy_rule_set.Rd b/man/get_account_access_control_proxy_rule_set.Rd new file mode 100644 index 00000000..5231337b --- /dev/null +++ b/man/get_account_access_control_proxy_rule_set.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/account_access_control_proxy.R +\name{get_account_access_control_proxy_rule_set} +\alias{get_account_access_control_proxy_rule_set} +\alias{accountAccessControlProxyGetRuleSet} +\title{Get a rule set.} +\usage{ +get_account_access_control_proxy_rule_set(client, name, etag) + +accountAccessControlProxyGetRuleSet(client, name, etag) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The ruleset name associated with the request.} + +\item{etag}{Required. Etag used for versioning.} +} +\description{ +Get a rule set by its name. A rule set is always attached to a resource and +contains a list of access rules on the said resource. Currently only a +default rule set for each resource is supported. 
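+
+As a hedged usage sketch (the rule-set \code{name} below is an illustrative
+placeholder, not a value shipped with this package; an empty \code{etag}
+requests the latest version):
+
+\preformatted{
+# Assumes authentication is already configured in the environment.
+client <- DatabricksClient()
+rule_set <- get_account_access_control_proxy_rule_set(
+  client,
+  name = "accounts/<account-id>/ruleSets/default",  # hypothetical name
+  etag = ""
+)
+}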
+} diff --git a/man/get_alert.Rd b/man/get_alert.Rd new file mode 100644 index 00000000..5600e6df --- /dev/null +++ b/man/get_alert.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/alerts.R +\name{get_alert} +\alias{get_alert} +\alias{alertsGet} +\title{Get an alert.} +\usage{ +get_alert(client, alert_id) + +alertsGet(client, alert_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{alert_id}{Required. This field has no description yet.} +} +\description{ +Gets an alert. +} diff --git a/man/get_app.Rd b/man/get_app.Rd new file mode 100644 index 00000000..5bbc2197 --- /dev/null +++ b/man/get_app.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/apps.R +\name{get_app} +\alias{get_app} +\alias{appsGetApp} +\title{Get definition for an application.} +\usage{ +get_app(client, name) + +appsGetApp(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of an application.} +} +\description{ +Get an application definition +} diff --git a/man/get_app_apps.Rd b/man/get_app_apps.Rd new file mode 100644 index 00000000..3bee1b50 --- /dev/null +++ b/man/get_app_apps.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/apps.R +\name{get_app_apps} +\alias{get_app_apps} +\alias{appsGetApps} +\title{List all applications.} +\usage{ +get_app_apps(client) + +appsGetApps(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +List all available applications +} diff --git a/man/get_app_deployment_status.Rd b/man/get_app_deployment_status.Rd new file mode 100644 index 00000000..99285848 --- /dev/null +++ b/man/get_app_deployment_status.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/apps.R +\name{get_app_deployment_status} +\alias{get_app_deployment_status} +\alias{appsGetAppDeploymentStatus} +\title{Get deployment status for an application.} +\usage{ +get_app_deployment_status(client, deployment_id, include_app_log = NULL) + +appsGetAppDeploymentStatus(client, deployment_id, include_app_log = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{deployment_id}{Required. The deployment id for an application.} + +\item{include_app_log}{Boolean flag to include application logs.} +} +\description{ +Get deployment status for an application +} diff --git a/man/get_app_events.Rd b/man/get_app_events.Rd new file mode 100644 index 00000000..c64e364e --- /dev/null +++ b/man/get_app_events.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/apps.R +\name{get_app_events} +\alias{get_app_events} +\alias{appsGetEvents} +\title{Get deployment events for an application.} +\usage{ +get_app_events(client, name) + +appsGetEvents(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. 
The name of an application.} +} +\description{ +Get deployment events for an application +} diff --git a/man/get_artifact_allowlist.Rd b/man/get_artifact_allowlist.Rd new file mode 100644 index 00000000..30b6c46c --- /dev/null +++ b/man/get_artifact_allowlist.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/artifact_allowlists.R +\name{get_artifact_allowlist} +\alias{get_artifact_allowlist} +\alias{artifactAllowlistsGet} +\title{Get an artifact allowlist.} +\usage{ +get_artifact_allowlist(client, artifact_type) + +artifactAllowlistsGet(client, artifact_type) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{artifact_type}{Required. The artifact type of the allowlist.} +} +\description{ +Get the artifact allowlist of a certain artifact type. The caller must be a +metastore admin or have the \strong{MANAGE ALLOWLIST} privilege on the metastore. +} diff --git a/man/get_automatic_cluster_update.Rd b/man/get_automatic_cluster_update.Rd new file mode 100644 index 00000000..4d6113e5 --- /dev/null +++ b/man/get_automatic_cluster_update.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/automatic_cluster_update.R +\name{get_automatic_cluster_update} +\alias{get_automatic_cluster_update} +\alias{automaticClusterUpdateGet} +\title{Get the automatic cluster update setting.} +\usage{ +get_automatic_cluster_update(client, etag = NULL) + +automaticClusterUpdateGet(client, etag = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{etag}{etag used for versioning.} +} +\description{ +Gets the automatic cluster update setting. +} diff --git a/man/get_catalog.Rd b/man/get_catalog.Rd new file mode 100644 index 00000000..631ddb6e --- /dev/null +++ b/man/get_catalog.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/catalogs.R +\name{get_catalog} +\alias{get_catalog} +\alias{catalogsGet} +\title{Get a catalog.} +\usage{ +get_catalog(client, name, include_browse = NULL) + +catalogsGet(client, name, include_browse = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the catalog.} + +\item{include_browse}{Whether to include catalogs in the response for which the principal can only access selective metadata.} +} +\description{ +Gets the specified catalog in a metastore. The caller must be a metastore +admin, the owner of the catalog, or a user that has the \strong{USE_CATALOG} +privilege set for their account. +} diff --git a/man/get_clean_room.Rd b/man/get_clean_room.Rd new file mode 100644 index 00000000..5a373738 --- /dev/null +++ b/man/get_clean_room.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clean_rooms.R +\name{get_clean_room} +\alias{get_clean_room} +\alias{cleanRoomsGet} +\title{Get a clean room.} +\usage{ +get_clean_room(client, name, include_remote_details = NULL) + +cleanRoomsGet(client, name, include_remote_details = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the clean room.} + +\item{include_remote_details}{Whether to include remote details (central) on the clean room.} +} +\description{ +Gets a data object clean room from the metastore. The caller must be a +metastore admin or the owner of the clean room.
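+
+A minimal sketch, assuming a clean room named 'my-clean-room' already exists:
+
+\preformatted{
+client <- DatabricksClient()
+# Ask for the central (remote) details alongside the local metadata.
+room <- get_clean_room(client, name = "my-clean-room",
+  include_remote_details = TRUE)
+}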
+} diff --git a/man/get_cluster.Rd b/man/get_cluster.Rd new file mode 100644 index 00000000..7ed2ef31 --- /dev/null +++ b/man/get_cluster.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{get_cluster} +\alias{get_cluster} +\alias{clustersGet} +\title{Get cluster info.} +\usage{ +get_cluster(client, cluster_id) + +clustersGet(client, cluster_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster about which to retrieve information.} +} +\description{ +Retrieves the information for a cluster given its identifier. Clusters can be +described while they are running, or up to 60 days after they are terminated. +} diff --git a/man/get_cluster_permission_levels.Rd b/man/get_cluster_permission_levels.Rd new file mode 100644 index 00000000..c3b95d61 --- /dev/null +++ b/man/get_cluster_permission_levels.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{get_cluster_permission_levels} +\alias{get_cluster_permission_levels} +\alias{clustersGetPermissionLevels} +\title{Get cluster permission levels.} +\usage{ +get_cluster_permission_levels(client, cluster_id) + +clustersGetPermissionLevels(client, cluster_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster for which to get or manage permissions.} +} +\description{ +Gets the permission levels that a user can have on an object. +} diff --git a/man/get_cluster_permissions.Rd b/man/get_cluster_permissions.Rd new file mode 100644 index 00000000..d13c78f8 --- /dev/null +++ b/man/get_cluster_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{get_cluster_permissions} +\alias{get_cluster_permissions} +\alias{clustersGetPermissions} +\title{Get cluster permissions.} +\usage{ +get_cluster_permissions(client, cluster_id) + +clustersGetPermissions(client, cluster_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster for which to get or manage permissions.} +} +\description{ +Gets the permissions of a cluster. Clusters can inherit permissions from +their root object. +} diff --git a/man/get_cluster_policy.Rd b/man/get_cluster_policy.Rd new file mode 100644 index 00000000..cb317f09 --- /dev/null +++ b/man/get_cluster_policy.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/cluster_policies.R +\name{get_cluster_policy} +\alias{get_cluster_policy} +\alias{clusterPoliciesGet} +\title{Get a cluster policy.} +\usage{ +get_cluster_policy(client, policy_id) + +clusterPoliciesGet(client, policy_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{policy_id}{Required. Canonical unique identifier for the cluster policy.} +} +\description{ +Get a cluster policy entity. Creation and editing is available to admins +only. 
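+
+For illustration only (the policy ID below is a placeholder):
+
+\preformatted{
+client <- DatabricksClient()
+policy <- get_cluster_policy(client, policy_id = "ABC123DEF4567890")
+# The returned list mirrors the REST response; the rules are typically
+# carried as a JSON document in its 'definition' element.
+policy$definition
+}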
+} diff --git a/man/get_cluster_policy_family.Rd b/man/get_cluster_policy_family.Rd new file mode 100644 index 00000000..15a636c2 --- /dev/null +++ b/man/get_cluster_policy_family.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/policy_families.R +\name{get_cluster_policy_family} +\alias{get_cluster_policy_family} +\alias{policyFamiliesGet} +\title{Get policy family information.} +\usage{ +get_cluster_policy_family(client, policy_family_id) + +policyFamiliesGet(client, policy_family_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{policy_family_id}{Required. This field has no description yet.} +} +\description{ +Retrieve the information for a policy family based on its identifier. +} diff --git a/man/get_cluster_policy_permission_levels.Rd b/man/get_cluster_policy_permission_levels.Rd new file mode 100644 index 00000000..cd876632 --- /dev/null +++ b/man/get_cluster_policy_permission_levels.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/cluster_policies.R +\name{get_cluster_policy_permission_levels} +\alias{get_cluster_policy_permission_levels} +\alias{clusterPoliciesGetPermissionLevels} +\title{Get cluster policy permission levels.} +\usage{ +get_cluster_policy_permission_levels(client, cluster_policy_id) + +clusterPoliciesGetPermissionLevels(client, cluster_policy_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_policy_id}{Required. The cluster policy for which to get or manage permissions.} +} +\description{ +Gets the permission levels that a user can have on an object. +} diff --git a/man/get_cluster_policy_permissions.Rd b/man/get_cluster_policy_permissions.Rd new file mode 100644 index 00000000..5e823d31 --- /dev/null +++ b/man/get_cluster_policy_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/cluster_policies.R +\name{get_cluster_policy_permissions} +\alias{get_cluster_policy_permissions} +\alias{clusterPoliciesGetPermissions} +\title{Get cluster policy permissions.} +\usage{ +get_cluster_policy_permissions(client, cluster_policy_id) + +clusterPoliciesGetPermissions(client, cluster_policy_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_policy_id}{Required. The cluster policy for which to get or manage permissions.} +} +\description{ +Gets the permissions of a cluster policy. Cluster policies can inherit +permissions from their root object. +} diff --git a/man/get_connection.Rd b/man/get_connection.Rd new file mode 100644 index 00000000..3e28bd55 --- /dev/null +++ b/man/get_connection.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/connections.R +\name{get_connection} +\alias{get_connection} +\alias{connectionsGet} +\title{Get a connection.} +\usage{ +get_connection(client, name) + +connectionsGet(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the connection.} +} +\description{ +Gets a connection from its name.
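+
+A minimal sketch, assuming a connection named 'my_connection' exists in the
+workspace:
+
+\preformatted{
+client <- DatabricksClient()
+conn <- get_connection(client, name = "my_connection")
+str(conn)
+}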
+} diff --git a/man/get_csp_enablement.Rd b/man/get_csp_enablement.Rd new file mode 100644 index 00000000..c3d138e6 --- /dev/null +++ b/man/get_csp_enablement.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/csp_enablement.R +\name{get_csp_enablement} +\alias{get_csp_enablement} +\alias{cspEnablementGet} +\title{Get the compliance security profile setting.} +\usage{ +get_csp_enablement(client, etag = NULL) + +cspEnablementGet(client, etag = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{etag}{etag used for versioning.} +} +\description{ +Gets the compliance security profile setting. +} diff --git a/man/get_dashboard.Rd b/man/get_dashboard.Rd new file mode 100644 index 00000000..8b4b903b --- /dev/null +++ b/man/get_dashboard.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dashboards.R +\name{get_dashboard} +\alias{get_dashboard} +\alias{dashboardsGet} +\title{Retrieve a definition.} +\usage{ +get_dashboard(client, dashboard_id) + +dashboardsGet(client, dashboard_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{dashboard_id}{Required. This field has no description yet.} +} +\description{ +Returns a JSON representation of a dashboard object, including its +visualization and query objects. +} diff --git a/man/get_dbfs_status.Rd b/man/get_dbfs_status.Rd new file mode 100644 index 00000000..46a00ec9 --- /dev/null +++ b/man/get_dbfs_status.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dbfs.R +\name{get_dbfs_status} +\alias{get_dbfs_status} +\alias{dbfsGetStatus} +\title{Get the information of a file or directory.} +\usage{ +get_dbfs_status(client, path) + +dbfsGetStatus(client, path) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{path}{Required. The path of the file or directory.} +} +\description{ +Gets the file information for a file or directory. If the file or directory +does not exist, this call throws an exception with \code{RESOURCE_DOES_NOT_EXIST}. +} diff --git a/man/get_dbsql_permission.Rd b/man/get_dbsql_permission.Rd new file mode 100644 index 00000000..0f2a8855 --- /dev/null +++ b/man/get_dbsql_permission.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dbsql_permissions.R +\name{get_dbsql_permission} +\alias{get_dbsql_permission} +\alias{dbsqlPermissionsGet} +\title{Get object ACL.} +\usage{ +get_dbsql_permission(client, object_type, object_id) + +dbsqlPermissionsGet(client, object_type, object_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{object_type}{Required. The type of object permissions to check.} + +\item{object_id}{Required. Object ID.} +} +\description{ +Gets a JSON representation of the access control list (ACL) for a specified +object. +} diff --git a/man/get_default_namespace.Rd b/man/get_default_namespace.Rd new file mode 100644 index 00000000..2b4f2b31 --- /dev/null +++ b/man/get_default_namespace.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/default_namespace.R +\name{get_default_namespace} +\alias{get_default_namespace} +\alias{defaultNamespaceGet} +\title{Get the default namespace setting.} +\usage{ +get_default_namespace(client, etag = NULL) + +defaultNamespaceGet(client, etag = NULL) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{etag}{etag used for versioning.} +} +\description{ +Gets the default namespace setting. +} diff --git a/man/get_esm_enablement.Rd b/man/get_esm_enablement.Rd new file mode 100644 index 00000000..fe840bac --- /dev/null +++ b/man/get_esm_enablement.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/esm_enablement.R +\name{get_esm_enablement} +\alias{get_esm_enablement} +\alias{esmEnablementGet} +\title{Get the enhanced security monitoring setting.} +\usage{ +get_esm_enablement(client, etag = NULL) + +esmEnablementGet(client, etag = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{etag}{etag used for versioning.} +} +\description{ +Gets the enhanced security monitoring setting. +} diff --git a/man/get_experiment.Rd b/man/get_experiment.Rd new file mode 100644 index 00000000..3cd6a2a2 --- /dev/null +++ b/man/get_experiment.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{get_experiment} +\alias{get_experiment} +\alias{experimentsGetExperiment} +\title{Get an experiment.} +\usage{ +get_experiment(client, experiment_id) + +experimentsGetExperiment(client, experiment_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{experiment_id}{Required. ID of the associated experiment.} +} +\description{ +Gets metadata for an experiment. This method works on deleted experiments. +} diff --git a/man/get_experiment_by_name.Rd b/man/get_experiment_by_name.Rd new file mode 100644 index 00000000..cbb86613 --- /dev/null +++ b/man/get_experiment_by_name.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{get_experiment_by_name} +\alias{get_experiment_by_name} +\alias{experimentsGetByName} +\title{Get metadata.} +\usage{ +get_experiment_by_name(client, experiment_name) + +experimentsGetByName(client, experiment_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{experiment_name}{Required. Name of the associated experiment.} +} +\description{ +Gets metadata for an experiment. +} +\details{ +This endpoint will return deleted experiments, but prefers the active +experiment if an active and deleted experiment share the same name. If +multiple deleted experiments share the same name, the API will return one of +them. + +Throws \code{RESOURCE_DOES_NOT_EXIST} if no experiment with the specified name +exists. +} diff --git a/man/get_experiment_history.Rd b/man/get_experiment_history.Rd new file mode 100644 index 00000000..9f88ae4f --- /dev/null +++ b/man/get_experiment_history.Rd @@ -0,0 +1,44 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{get_experiment_history} +\alias{get_experiment_history} +\alias{experimentsGetHistory} +\title{Get history of a given metric within a run.} +\usage{ +get_experiment_history( + client, + metric_key, + max_results = NULL, + page_token = NULL, + run_id = NULL, + run_uuid = NULL +) + +experimentsGetHistory( + client, + metric_key, + max_results = NULL, + page_token = NULL, + run_id = NULL, + run_uuid = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{metric_key}{Required. 
Name of the metric.} + +\item{max_results}{Maximum number of Metric records to return per paginated request.} + +\item{page_token}{Token indicating the page of metric histories to fetch.} + +\item{run_id}{ID of the run from which to fetch metric values.} + +\item{run_uuid}{Deprecated, use run_id instead. ID of the run from which to fetch metric values.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets a list of all values for the specified metric for a given run. +} diff --git a/man/get_experiment_permission_levels.Rd b/man/get_experiment_permission_levels.Rd new file mode 100644 index 00000000..7ea6fb7f --- /dev/null +++ b/man/get_experiment_permission_levels.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{get_experiment_permission_levels} +\alias{get_experiment_permission_levels} +\alias{experimentsGetPermissionLevels} +\title{Get experiment permission levels.} +\usage{ +get_experiment_permission_levels(client, experiment_id) + +experimentsGetPermissionLevels(client, experiment_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{experiment_id}{Required. The experiment for which to get or manage permissions.} +} +\description{ +Gets the permission levels that a user can have on an object. +} diff --git a/man/get_experiment_permissions.Rd b/man/get_experiment_permissions.Rd new file mode 100644 index 00000000..f0ad0be5 --- /dev/null +++ b/man/get_experiment_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{get_experiment_permissions} +\alias{get_experiment_permissions} +\alias{experimentsGetPermissions} +\title{Get experiment permissions.} +\usage{ +get_experiment_permissions(client, experiment_id) + +experimentsGetPermissions(client, experiment_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{experiment_id}{Required. The experiment for which to get or manage permissions.} +} +\description{ +Gets the permissions of an experiment. Experiments can inherit permissions +from their root object. +} diff --git a/man/get_experiment_run.Rd b/man/get_experiment_run.Rd new file mode 100644 index 00000000..db7d8d61 --- /dev/null +++ b/man/get_experiment_run.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{get_experiment_run} +\alias{get_experiment_run} +\alias{experimentsGetRun} +\title{Get a run.} +\usage{ +get_experiment_run(client, run_id, run_uuid = NULL) + +experimentsGetRun(client, run_id, run_uuid = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. ID of the run to fetch.} + +\item{run_uuid}{Deprecated, use run_id instead. ID of the run to fetch.} +} +\description{ +Gets the metadata, metrics, params, and tags for a run. In the case where +multiple metrics with the same key are logged for a run, return only the +value with the latest timestamp. +} +\details{ +If there are multiple values with the latest timestamp, return the maximum of +these values. 
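+
+A usage sketch (the run ID below is a placeholder):
+
+\preformatted{
+client <- DatabricksClient()
+run <- get_experiment_run(client, run_id = "a1b2c3d4e5f6")
+# The result mirrors the MLflow REST payload, so metrics, params, and tags
+# are typically nested under run$run$data.
+run$run$data
+}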
+} diff --git a/man/get_external_location.Rd b/man/get_external_location.Rd new file mode 100644 index 00000000..321c7f52 --- /dev/null +++ b/man/get_external_location.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/external_locations.R +\name{get_external_location} +\alias{get_external_location} +\alias{externalLocationsGet} +\title{Get an external location.} +\usage{ +get_external_location(client, name, include_browse = NULL) + +externalLocationsGet(client, name, include_browse = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the external location.} + +\item{include_browse}{Whether to include external locations in the response for which the principal can only access selective metadata for.} +} +\description{ +Gets an external location from the metastore. The caller must be either a +metastore admin, the owner of the external location, or a user that has some +privilege on the external location. +} diff --git a/man/get_file_directory_metadata.Rd b/man/get_file_directory_metadata.Rd new file mode 100644 index 00000000..5f21befc --- /dev/null +++ b/man/get_file_directory_metadata.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/files.R +\name{get_file_directory_metadata} +\alias{get_file_directory_metadata} +\alias{filesGetDirectoryMetadata} +\title{Get directory metadata.} +\usage{ +get_file_directory_metadata(client, directory_path) + +filesGetDirectoryMetadata(client, directory_path) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{directory_path}{Required. The absolute path of a directory.} +} +\description{ +Get the metadata of a directory. The response HTTP headers contain the +metadata. There is no response body. +} +\details{ +This method is useful to check if a directory exists and the caller has +access to it. + +If you wish to ensure the directory exists, you can instead use \code{PUT}, which +will create the directory if it does not exist, and is idempotent (it will +succeed if the directory already exists). +} diff --git a/man/get_file_metadata.Rd b/man/get_file_metadata.Rd new file mode 100644 index 00000000..d7d75e23 --- /dev/null +++ b/man/get_file_metadata.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/files.R +\name{get_file_metadata} +\alias{get_file_metadata} +\alias{filesGetMetadata} +\title{Get file metadata.} +\usage{ +get_file_metadata(client, file_path) + +filesGetMetadata(client, file_path) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{file_path}{Required. The absolute path of the file.} +} +\description{ +Get the metadata of a file. The response HTTP headers contain the metadata. +There is no response body. +} diff --git a/man/get_function.Rd b/man/get_function.Rd new file mode 100644 index 00000000..769430c6 --- /dev/null +++ b/man/get_function.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/functions.R +\name{get_function} +\alias{get_function} +\alias{functionsGet} +\title{Get a function.} +\usage{ +get_function(client, name, include_browse = NULL) + +functionsGet(client, name, include_browse = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. 
The fully-qualified name of the function (of the form \strong{catalog_name}.\strong{schema_name}.\strong{function_name}).} + +\item{include_browse}{Whether to include functions in the response for which the principal can only access selective metadata for.} +} +\description{ +Gets a function from within a parent catalog and schema. For the fetch to +succeed, the user must satisfy one of the following requirements: - Is a +metastore admin - Is an owner of the function's parent catalog - Have the +\strong{USE_CATALOG} privilege on the function's parent catalog and be the owner +of the function - Have the \strong{USE_CATALOG} privilege on the function's parent +catalog, the \strong{USE_SCHEMA} privilege on the function's parent schema, and +the \strong{EXECUTE} privilege on the function itself +} diff --git a/man/get_git_credential.Rd b/man/get_git_credential.Rd new file mode 100644 index 00000000..1c33ea79 --- /dev/null +++ b/man/get_git_credential.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/git_credentials.R +\name{get_git_credential} +\alias{get_git_credential} +\alias{gitCredentialsGet} +\title{Get a credential entry.} +\usage{ +get_git_credential(client, credential_id) + +gitCredentialsGet(client, credential_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{credential_id}{Required. The ID for the corresponding credential to access.} +} +\description{ +Gets the Git credential with the specified credential ID. +} diff --git a/man/get_global_init_script.Rd b/man/get_global_init_script.Rd new file mode 100644 index 00000000..acce8c1e --- /dev/null +++ b/man/get_global_init_script.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/global_init_scripts.R +\name{get_global_init_script} +\alias{get_global_init_script} +\alias{globalInitScriptsGet} +\title{Get an init script.} +\usage{ +get_global_init_script(client, script_id) + +globalInitScriptsGet(client, script_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{script_id}{Required. The ID of the global init script.} +} +\description{ +Gets all the details of a script, including its Base64-encoded contents. +} diff --git a/man/get_grant.Rd b/man/get_grant.Rd new file mode 100644 index 00000000..a37cf959 --- /dev/null +++ b/man/get_grant.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/grants.R +\name{get_grant} +\alias{get_grant} +\alias{grantsGet} +\title{Get permissions.} +\usage{ +get_grant(client, securable_type, full_name, principal = NULL) + +grantsGet(client, securable_type, full_name, principal = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{securable_type}{Required. Type of securable.} + +\item{full_name}{Required. Full name of securable.} + +\item{principal}{If provided, only the permissions for the specified principal (user or group) are returned.} +} +\description{ +Gets the permissions for a securable. 
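+ +A brief illustrative sketch (the securable type and table name are placeholders): +\preformatted{client <- DatabricksClient() +# permissions on one table; pass principal = "<user-or-group>" to filter +get_grant(client, securable_type = "table", full_name = "main.default.my_table") +}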
+} diff --git a/man/get_grant_effective.Rd b/man/get_grant_effective.Rd new file mode 100644 index 00000000..0f7ce127 --- /dev/null +++ b/man/get_grant_effective.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/grants.R +\name{get_grant_effective} +\alias{get_grant_effective} +\alias{grantsGetEffective} +\title{Get effective permissions.} +\usage{ +get_grant_effective(client, securable_type, full_name, principal = NULL) + +grantsGetEffective(client, securable_type, full_name, principal = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{securable_type}{Required. Type of securable.} + +\item{full_name}{Required. Full name of securable.} + +\item{principal}{If provided, only the effective permissions for the specified principal (user or group) are returned.} +} +\description{ +Gets the effective permissions for a securable. +} diff --git a/man/get_group.Rd b/man/get_group.Rd new file mode 100644 index 00000000..e720a401 --- /dev/null +++ b/man/get_group.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/groups.R +\name{get_group} +\alias{get_group} +\alias{groupsGet} +\title{Get group details.} +\usage{ +get_group(client, id) + +groupsGet(client, id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Unique ID for a group in the Databricks workspace.} +} +\description{ +Gets the information for a specific group in the Databricks workspace. +} diff --git a/man/get_instance_pool.Rd b/man/get_instance_pool.Rd new file mode 100644 index 00000000..f9ff7b5e --- /dev/null +++ b/man/get_instance_pool.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/instance_pools.R +\name{get_instance_pool} +\alias{get_instance_pool} +\alias{instancePoolsGet} +\title{Get instance pool information.} +\usage{ +get_instance_pool(client, instance_pool_id) + +instancePoolsGet(client, instance_pool_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{instance_pool_id}{Required. The canonical unique identifier for the instance pool.} +} +\description{ +Retrieve the information for an instance pool based on its identifier. +} diff --git a/man/get_instance_pool_permission_levels.Rd b/man/get_instance_pool_permission_levels.Rd new file mode 100644 index 00000000..f1f2ca05 --- /dev/null +++ b/man/get_instance_pool_permission_levels.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/instance_pools.R +\name{get_instance_pool_permission_levels} +\alias{get_instance_pool_permission_levels} +\alias{instancePoolsGetPermissionLevels} +\title{Get instance pool permission levels.} +\usage{ +get_instance_pool_permission_levels(client, instance_pool_id) + +instancePoolsGetPermissionLevels(client, instance_pool_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{instance_pool_id}{Required. The instance pool for which to get or manage permissions.} +} +\description{ +Gets the permission levels that a user can have on an object. 
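+ +A minimal sketch (the pool ID is a placeholder): +\preformatted{client <- DatabricksClient() +levels <- get_instance_pool_permission_levels(client, instance_pool_id = "<instance-pool-id>") +}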
+} diff --git a/man/get_instance_pool_permissions.Rd b/man/get_instance_pool_permissions.Rd new file mode 100644 index 00000000..7d6ad0d1 --- /dev/null +++ b/man/get_instance_pool_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/instance_pools.R +\name{get_instance_pool_permissions} +\alias{get_instance_pool_permissions} +\alias{instancePoolsGetPermissions} +\title{Get instance pool permissions.} +\usage{ +get_instance_pool_permissions(client, instance_pool_id) + +instancePoolsGetPermissions(client, instance_pool_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{instance_pool_id}{Required. The instance pool for which to get or manage permissions.} +} +\description{ +Gets the permissions of an instance pool. Instance pools can inherit +permissions from their root object. +} diff --git a/man/get_ip_access_list.Rd b/man/get_ip_access_list.Rd new file mode 100644 index 00000000..04213dbe --- /dev/null +++ b/man/get_ip_access_list.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ip_access_lists.R +\name{get_ip_access_list} +\alias{get_ip_access_list} +\alias{ipAccessListsGet} +\title{Get access list.} +\usage{ +get_ip_access_list(client, ip_access_list_id) + +ipAccessListsGet(client, ip_access_list_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{ip_access_list_id}{Required. The ID for the corresponding IP access list.} +} +\description{ +Gets an IP access list, specified by its list ID. +} diff --git a/man/get_job.Rd b/man/get_job.Rd new file mode 100644 index 00000000..e124015d --- /dev/null +++ b/man/get_job.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{get_job} +\alias{get_job} +\alias{jobsGet} +\title{Get a single job.} +\usage{ +get_job(client, job_id) + +jobsGet(client, job_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{job_id}{Required. The canonical identifier of the job to retrieve information about.} +} +\description{ +Retrieves the details for a single job. +} diff --git a/man/get_job_permission_levels.Rd b/man/get_job_permission_levels.Rd new file mode 100644 index 00000000..16b7b605 --- /dev/null +++ b/man/get_job_permission_levels.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{get_job_permission_levels} +\alias{get_job_permission_levels} +\alias{jobsGetPermissionLevels} +\title{Get job permission levels.} +\usage{ +get_job_permission_levels(client, job_id) + +jobsGetPermissionLevels(client, job_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{job_id}{Required. The job for which to get or manage permissions.} +} +\description{ +Gets the permission levels that a user can have on an object. +} diff --git a/man/get_job_permissions.Rd b/man/get_job_permissions.Rd new file mode 100644 index 00000000..a25efb91 --- /dev/null +++ b/man/get_job_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{get_job_permissions} +\alias{get_job_permissions} +\alias{jobsGetPermissions} +\title{Get job permissions.} +\usage{ +get_job_permissions(client, job_id) + +jobsGetPermissions(client, job_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{job_id}{Required. 
The job for which to get or manage permissions.} +} +\description{ +Gets the permissions of a job. Jobs can inherit permissions from their root +object. +} diff --git a/man/get_job_run.Rd b/man/get_job_run.Rd new file mode 100644 index 00000000..9894f243 --- /dev/null +++ b/man/get_job_run.Rd @@ -0,0 +1,33 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{get_job_run} +\alias{get_job_run} +\alias{jobsGetRun} +\title{Get a single job run.} +\usage{ +get_job_run( + client, + run_id, + include_history = NULL, + include_resolved_values = NULL +) + +jobsGetRun( + client, + run_id, + include_history = NULL, + include_resolved_values = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. The canonical identifier of the run for which to retrieve the metadata.} + +\item{include_history}{Whether to include the repair history in the response.} + +\item{include_resolved_values}{Whether to include resolved parameter values in the response.} +} +\description{ +Retrieve the metadata of a run. +} diff --git a/man/get_job_run_and_wait.Rd b/man/get_job_run_and_wait.Rd new file mode 100644 index 00000000..6f7512b8 --- /dev/null +++ b/man/get_job_run_and_wait.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{get_job_run_and_wait} +\alias{get_job_run_and_wait} +\title{Get a single job run.} +\usage{ +get_job_run_and_wait( + client, + run_id, + include_history = NULL, + include_resolved_values = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. The canonical identifier of the run for which to retrieve the metadata.} + +\item{include_history}{Whether to include the repair history in the response.} + +\item{include_resolved_values}{Whether to include resolved parameter values in the response.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to console.} +} +\description{ +This is a long-running operation, which blocks until Jobs on Databricks reach +TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. +By default, the state of Databricks Jobs is reported to console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Retrieve the metadata of a run. +} diff --git a/man/get_job_run_output.Rd b/man/get_job_run_output.Rd new file mode 100644 index 00000000..ae111a2f --- /dev/null +++ b/man/get_job_run_output.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{get_job_run_output} +\alias{get_job_run_output} +\alias{jobsGetRunOutput} +\title{Get the output for a single run.} +\usage{ +get_job_run_output(client, run_id) + +jobsGetRunOutput(client, run_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. The canonical identifier for the run.} +} +\description{ +Retrieve the output and metadata of a single task run. When a notebook task +returns a value through the \code{dbutils.notebook.exit()} call, you can use this +endpoint to retrieve that value. Databricks restricts this API to returning +the first 5 MB of the output. To return a larger result, you can store job +results in a cloud storage service. 
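+ +A hedged sketch (the run ID is illustrative; the notebook_output field is assumed to be present only for notebook tasks that called dbutils.notebook.exit()): +\preformatted{client <- DatabricksClient() +out <- get_job_run_output(client, run_id = 952777) +out$notebook_output +}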
+} +\details{ +This endpoint validates that the \strong{run_id} parameter is valid and returns an +HTTP status code 400 if the \strong{run_id} parameter is invalid. Runs are +automatically removed after 60 days. If you want to reference them beyond +60 days, you must save old run results before they expire. +} diff --git a/man/get_lakehouse_monitor.Rd b/man/get_lakehouse_monitor.Rd new file mode 100644 index 00000000..5cea1480 --- /dev/null +++ b/man/get_lakehouse_monitor.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/lakehouse_monitors.R +\name{get_lakehouse_monitor} +\alias{get_lakehouse_monitor} +\alias{lakehouseMonitorsGet} +\title{Get a table monitor.} +\usage{ +get_lakehouse_monitor(client, full_name) + +lakehouseMonitorsGet(client, full_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the table.} +} +\description{ +Gets a monitor for the specified table. +} +\details{ +The caller must either: 1. be an owner of the table's parent catalog 2. have +\strong{USE_CATALOG} on the table's parent catalog and be an owner of the table's +parent schema. 3. have the following permissions: - \strong{USE_CATALOG} on the +table's parent catalog - \strong{USE_SCHEMA} on the table's parent schema - +\strong{SELECT} privilege on the table. + +The returned information includes configuration values, as well as +information on assets created by the monitor. Some information (e.g., +dashboard) may be filtered out if the caller is in a different workspace than +where the monitor was created. +} diff --git a/man/get_lakehouse_monitor_refresh.Rd b/man/get_lakehouse_monitor_refresh.Rd new file mode 100644 index 00000000..df0ed178 --- /dev/null +++ b/man/get_lakehouse_monitor_refresh.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/lakehouse_monitors.R +\name{get_lakehouse_monitor_refresh} +\alias{get_lakehouse_monitor_refresh} +\alias{lakehouseMonitorsGetRefresh} +\title{Get refresh.} +\usage{ +get_lakehouse_monitor_refresh(client, full_name, refresh_id) + +lakehouseMonitorsGetRefresh(client, full_name, refresh_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the table.} + +\item{refresh_id}{Required. ID of the refresh.} +} +\description{ +Gets info about a specific monitor refresh using the given refresh ID. +} +\details{ +The caller must either: 1. be an owner of the table's parent catalog 2. have +\strong{USE_CATALOG} on the table's parent catalog and be an owner of the table's +parent schema 3. have the following permissions: - \strong{USE_CATALOG} on the +table's parent catalog - \strong{USE_SCHEMA} on the table's parent schema - +\strong{SELECT} privilege on the table. + +Additionally, the call must be made from the workspace where the monitor was +created. +} diff --git a/man/get_lakeview.Rd b/man/get_lakeview.Rd new file mode 100644 index 00000000..12234994 --- /dev/null +++ b/man/get_lakeview.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/lakeview.R +\name{get_lakeview} +\alias{get_lakeview} +\alias{lakeviewGet} +\title{Get dashboard.} +\usage{ +get_lakeview(client, dashboard_id) + +lakeviewGet(client, dashboard_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{dashboard_id}{Required. UUID identifying the dashboard.} +} +\description{ +Get a draft dashboard. 
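+ +For example (the dashboard UUID is a placeholder): +\preformatted{client <- DatabricksClient() +draft <- get_lakeview(client, dashboard_id = "<dashboard-uuid>") +}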
+} diff --git a/man/get_lakeview_published.Rd b/man/get_lakeview_published.Rd new file mode 100644 index 00000000..d51dc166 --- /dev/null +++ b/man/get_lakeview_published.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/lakeview.R +\name{get_lakeview_published} +\alias{get_lakeview_published} +\alias{lakeviewGetPublished} +\title{Get published dashboard.} +\usage{ +get_lakeview_published(client, dashboard_id) + +lakeviewGetPublished(client, dashboard_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{dashboard_id}{Required. UUID identifying the dashboard to be published.} +} +\description{ +Get the current published dashboard. +} diff --git a/man/get_metastore.Rd b/man/get_metastore.Rd new file mode 100644 index 00000000..1e99a152 --- /dev/null +++ b/man/get_metastore.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/metastores.R +\name{get_metastore} +\alias{get_metastore} +\alias{metastoresGet} +\title{Get a metastore.} +\usage{ +get_metastore(client, id) + +metastoresGet(client, id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Unique ID of the metastore.} +} +\description{ +Gets a metastore that matches the supplied ID. The caller must be a metastore +admin to retrieve this info. +} diff --git a/man/get_model.Rd b/man/get_model.Rd new file mode 100644 index 00000000..7d9700ff --- /dev/null +++ b/man/get_model.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{get_model} +\alias{get_model} +\alias{modelRegistryGetModel} +\title{Get model.} +\usage{ +get_model(client, name) + +modelRegistryGetModel(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Registered model unique name identifier.} +} +\description{ +Get the details of a model. This is a Databricks workspace version of the +MLflow endpoint that also returns the model's Databricks workspace ID and +the permission level of the requesting user on the model. +} +\details{ +MLflow endpoint: https://www.mlflow.org/docs/latest/rest-api.html#get-registeredmodel +} diff --git a/man/get_model_latest_versions.Rd b/man/get_model_latest_versions.Rd new file mode 100644 index 00000000..7ec0799e --- /dev/null +++ b/man/get_model_latest_versions.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{get_model_latest_versions} +\alias{get_model_latest_versions} +\alias{modelRegistryGetLatestVersions} +\title{Get the latest version.} +\usage{ +get_model_latest_versions(client, name, stages = NULL) + +modelRegistryGetLatestVersions(client, name, stages = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Registered model unique name identifier.} + +\item{stages}{List of stages.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets the latest version of a registered model. 
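+ +A short sketch (the model name is illustrative; "Production" is one of the standard MLflow registry stages): +\preformatted{client <- DatabricksClient() +# one row per latest version in each requested stage +latest <- get_model_latest_versions(client, name = "my_model", stages = list("Production")) +}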
+} diff --git a/man/get_model_permission_levels.Rd b/man/get_model_permission_levels.Rd new file mode 100644 index 00000000..d088598f --- /dev/null +++ b/man/get_model_permission_levels.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{get_model_permission_levels} +\alias{get_model_permission_levels} +\alias{modelRegistryGetPermissionLevels} +\title{Get registered model permission levels.} +\usage{ +get_model_permission_levels(client, registered_model_id) + +modelRegistryGetPermissionLevels(client, registered_model_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{registered_model_id}{Required. The registered model for which to get or manage permissions.} +} +\description{ +Gets the permission levels that a user can have on an object. +} diff --git a/man/get_model_permissions.Rd b/man/get_model_permissions.Rd new file mode 100644 index 00000000..db3ec3eb --- /dev/null +++ b/man/get_model_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{get_model_permissions} +\alias{get_model_permissions} +\alias{modelRegistryGetPermissions} +\title{Get registered model permissions.} +\usage{ +get_model_permissions(client, registered_model_id) + +modelRegistryGetPermissions(client, registered_model_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{registered_model_id}{Required. The registered model for which to get or manage permissions.} +} +\description{ +Gets the permissions of a registered model. Registered models can inherit +permissions from their root object. +} diff --git a/man/get_model_version.Rd b/man/get_model_version.Rd new file mode 100644 index 00000000..9477924c --- /dev/null +++ b/man/get_model_version.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R, R/model_versions.R +\name{get_model_version} +\alias{get_model_version} +\alias{modelRegistryGetModelVersion} +\alias{modelVersionsGet} +\title{Get a model version.} +\usage{ +get_model_version(client, full_name, version, include_browse = NULL) + +modelRegistryGetModelVersion(client, name, version) + +modelVersionsGet(client, full_name, version, include_browse = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. The three-level (fully qualified) name of the model version.} + +\item{version}{Required. The integer version number of the model version.} + +\item{include_browse}{Whether to include model versions in the response for which the principal can only access selective metadata for.} + +\item{name}{Required. Name of the registered model.} +} +\description{ +Get a model version. +} +\details{ +The caller must be a metastore admin or an owner of (or have the \strong{EXECUTE} +privilege on) the parent registered model. For the latter case, the caller +must also be the owner or have the \strong{USE_CATALOG} privilege on the parent +catalog and the \strong{USE_SCHEMA} privilege on the parent schema. 
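+ +For example (the three-level name and version number are placeholders): +\preformatted{client <- DatabricksClient() +mv <- get_model_version(client, full_name = "main.default.my_model", version = 1) +}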
+} diff --git a/man/get_model_version_by_alias.Rd b/man/get_model_version_by_alias.Rd new file mode 100644 index 00000000..a62b9cf9 --- /dev/null +++ b/man/get_model_version_by_alias.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_versions.R +\name{get_model_version_by_alias} +\alias{get_model_version_by_alias} +\alias{modelVersionsGetByAlias} +\title{Get Model Version By Alias.} +\usage{ +get_model_version_by_alias(client, full_name, alias) + +modelVersionsGetByAlias(client, full_name, alias) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. The three-level (fully qualified) name of the registered model.} + +\item{alias}{Required. The name of the alias.} +} +\description{ +Get a model version by alias. +} +\details{ +The caller must be a metastore admin or an owner of (or have the \strong{EXECUTE} +privilege on) the registered model. For the latter case, the caller must also +be the owner or have the \strong{USE_CATALOG} privilege on the parent catalog and +the \strong{USE_SCHEMA} privilege on the parent schema. +} diff --git a/man/get_model_version_download_uri.Rd b/man/get_model_version_download_uri.Rd new file mode 100644 index 00000000..2981a505 --- /dev/null +++ b/man/get_model_version_download_uri.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{get_model_version_download_uri} +\alias{get_model_version_download_uri} +\alias{modelRegistryGetModelVersionDownloadUri} +\title{Get a model version URI.} +\usage{ +get_model_version_download_uri(client, name, version) + +modelRegistryGetModelVersionDownloadUri(client, name, version) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the registered model.} + +\item{version}{Required. Model version number.} +} +\description{ +Gets a URI to download the model version. +} diff --git a/man/get_notebook_permission_levels.Rd b/man/get_notebook_permission_levels.Rd new file mode 100644 index 00000000..701849f7 --- /dev/null +++ b/man/get_notebook_permission_levels.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{get_notebook_permission_levels} +\alias{get_notebook_permission_levels} +\alias{workspaceGetPermissionLevels} +\title{Get workspace object permission levels.} +\usage{ +get_notebook_permission_levels( + client, + workspace_object_type, + workspace_object_id +) + +workspaceGetPermissionLevels( + client, + workspace_object_type, + workspace_object_id +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{workspace_object_type}{Required. The workspace object type for which to get or manage permissions.} + +\item{workspace_object_id}{Required. The workspace object for which to get or manage permissions.} +} +\description{ +Gets the permission levels that a user can have on an object. 
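+ +A hedged sketch (the object ID is a placeholder; "notebooks" is assumed here as the object type used for notebooks): +\preformatted{client <- DatabricksClient() +levels <- get_notebook_permission_levels(client, workspace_object_type = "notebooks", workspace_object_id = "<object-id>") +}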
+} diff --git a/man/get_notebook_permissions.Rd b/man/get_notebook_permissions.Rd new file mode 100644 index 00000000..0c56cf20 --- /dev/null +++ b/man/get_notebook_permissions.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{get_notebook_permissions} +\alias{get_notebook_permissions} +\alias{workspaceGetPermissions} +\title{Get workspace object permissions.} +\usage{ +get_notebook_permissions(client, workspace_object_type, workspace_object_id) + +workspaceGetPermissions(client, workspace_object_type, workspace_object_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{workspace_object_type}{Required. The workspace object type for which to get or manage permissions.} + +\item{workspace_object_id}{Required. The workspace object for which to get or manage permissions.} +} +\description{ +Gets the permissions of a workspace object. Workspace objects can inherit +permissions from their parent objects or root object. +} diff --git a/man/get_notebook_status.Rd b/man/get_notebook_status.Rd new file mode 100644 index 00000000..1fb60b23 --- /dev/null +++ b/man/get_notebook_status.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{get_notebook_status} +\alias{get_notebook_status} +\alias{workspaceGetStatus} +\title{Get status.} +\usage{ +get_notebook_status(client, path) + +workspaceGetStatus(client, path) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{path}{Required. The absolute path of the notebook or directory.} +} +\description{ +Gets the status of an object or a directory. If \code{path} does not exist, this +call returns an error \code{RESOURCE_DOES_NOT_EXIST}. +} diff --git a/man/get_online_table.Rd b/man/get_online_table.Rd new file mode 100644 index 00000000..3cc103c9 --- /dev/null +++ b/man/get_online_table.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/online_tables.R +\name{get_online_table} +\alias{get_online_table} +\alias{onlineTablesGet} +\title{Get an Online Table.} +\usage{ +get_online_table(client, name) + +onlineTablesGet(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Full three-part (catalog, schema, table) name of the table.} +} +\description{ +Get information about an existing online table and its status. +} diff --git a/man/get_permission.Rd b/man/get_permission.Rd new file mode 100644 index 00000000..f57f1444 --- /dev/null +++ b/man/get_permission.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/permissions.R +\name{get_permission} +\alias{get_permission} +\alias{permissionsGet} +\title{Get object permissions.} +\usage{ +get_permission(client, request_object_type, request_object_id) + +permissionsGet(client, request_object_type, request_object_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{request_object_type}{Required. The type of the request object.} + +\item{request_object_id}{Required. The id of the request object.} +} +\description{ +Gets the permissions of an object. Objects can inherit permissions from their +parent objects or root object. 
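+ +For instance (the object type and ID are illustrative; "jobs" is assumed as a valid request object type): +\preformatted{client <- DatabricksClient() +perms <- get_permission(client, request_object_type = "jobs", request_object_id = "<job-id>") +}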
+} diff --git a/man/get_permission_levels.Rd b/man/get_permission_levels.Rd new file mode 100644 index 00000000..bd271431 --- /dev/null +++ b/man/get_permission_levels.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/permissions.R +\name{get_permission_levels} +\alias{get_permission_levels} +\alias{permissionsGetPermissionLevels} +\title{Get object permission levels.} +\usage{ +get_permission_levels(client, request_object_type, request_object_id) + +permissionsGetPermissionLevels(client, request_object_type, request_object_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{request_object_type}{Required. \if{html}{\out{}}.} + +\item{request_object_id}{Required. \if{html}{\out{}}.} +} +\description{ +Gets the permission levels that a user can have on an object. +} diff --git a/man/get_pipeline.Rd b/man/get_pipeline.Rd new file mode 100644 index 00000000..c7165e98 --- /dev/null +++ b/man/get_pipeline.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{get_pipeline} +\alias{get_pipeline} +\alias{pipelinesGet} +\title{Get a pipeline.} +\usage{ +get_pipeline(client, pipeline_id) + +pipelinesGet(client, pipeline_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{pipeline_id}{Required. This field has no description yet.} +} +\description{ +Get a pipeline. +} diff --git a/man/get_pipeline_permission_levels.Rd b/man/get_pipeline_permission_levels.Rd new file mode 100644 index 00000000..76607ec9 --- /dev/null +++ b/man/get_pipeline_permission_levels.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{get_pipeline_permission_levels} +\alias{get_pipeline_permission_levels} +\alias{pipelinesGetPermissionLevels} +\title{Get pipeline permission levels.} +\usage{ +get_pipeline_permission_levels(client, pipeline_id) + +pipelinesGetPermissionLevels(client, pipeline_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{pipeline_id}{Required. The pipeline for which to get or manage permissions.} +} +\description{ +Gets the permission levels that a user can have on an object. +} diff --git a/man/get_pipeline_permissions.Rd b/man/get_pipeline_permissions.Rd new file mode 100644 index 00000000..8c725ec6 --- /dev/null +++ b/man/get_pipeline_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{get_pipeline_permissions} +\alias{get_pipeline_permissions} +\alias{pipelinesGetPermissions} +\title{Get pipeline permissions.} +\usage{ +get_pipeline_permissions(client, pipeline_id) + +pipelinesGetPermissions(client, pipeline_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{pipeline_id}{Required. The pipeline for which to get or manage permissions.} +} +\description{ +Gets the permissions of a pipeline. Pipelines can inherit permissions from +their root object. 
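+ +A minimal sketch (the pipeline ID is a placeholder): +\preformatted{client <- DatabricksClient() +get_pipeline_permissions(client, pipeline_id = "<pipeline-id>") +}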
+} diff --git a/man/get_pipeline_update.Rd b/man/get_pipeline_update.Rd new file mode 100644 index 00000000..ab17795e --- /dev/null +++ b/man/get_pipeline_update.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{get_pipeline_update} +\alias{get_pipeline_update} +\alias{pipelinesGetUpdate} +\title{Get a pipeline update.} +\usage{ +get_pipeline_update(client, pipeline_id, update_id) + +pipelinesGetUpdate(client, pipeline_id, update_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{pipeline_id}{Required. The ID of the pipeline.} + +\item{update_id}{Required. The ID of the update.} +} +\description{ +Gets an update from an active pipeline. +} diff --git a/man/get_provider.Rd b/man/get_provider.Rd new file mode 100644 index 00000000..47f20349 --- /dev/null +++ b/man/get_provider.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/providers.R +\name{get_provider} +\alias{get_provider} +\alias{providersGet} +\title{Get a provider.} +\usage{ +get_provider(client, name) + +providersGet(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the provider.} +} +\description{ +Gets a specific authentication provider. The caller must supply the name of +the provider, and must either be a metastore admin or the owner of the +provider. +} diff --git a/man/get_query.Rd b/man/get_query.Rd new file mode 100644 index 00000000..954ff304 --- /dev/null +++ b/man/get_query.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/queries.R +\name{get_query} +\alias{get_query} +\alias{queriesGet} +\title{Get a query definition.} +\usage{ +get_query(client, query_id) + +queriesGet(client, query_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{query_id}{Required. This field has no description yet.} +} +\description{ +Retrieve a query object definition along with contextual permissions +information about the currently authenticated user. +} diff --git a/man/get_recipient.Rd b/man/get_recipient.Rd new file mode 100644 index 00000000..4b39531d --- /dev/null +++ b/man/get_recipient.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/recipients.R +\name{get_recipient} +\alias{get_recipient} +\alias{recipientsGet} +\title{Get a share recipient.} +\usage{ +get_recipient(client, name) + +recipientsGet(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the recipient.} +} +\description{ +Gets a share recipient from the metastore if: +} +\details{ +\itemize{ +\item the caller is the owner of the share recipient, or +\item the caller is a metastore admin +} +} diff --git a/man/get_recipient_activation_url_info.Rd b/man/get_recipient_activation_url_info.Rd new file mode 100644 index 00000000..b41c2bac --- /dev/null +++ b/man/get_recipient_activation_url_info.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/recipient_activation.R +\name{get_recipient_activation_url_info} +\alias{get_recipient_activation_url_info} +\alias{recipientActivationGetActivationUrlInfo} +\title{Get a share activation URL.} +\usage{ +get_recipient_activation_url_info(client, activation_url) + +recipientActivationGetActivationUrlInfo(client, activation_url) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{activation_url}{Required. The one time activation url.} +} +\description{ +Gets an activation URL for a share. +} diff --git a/man/get_registered_model.Rd b/man/get_registered_model.Rd new file mode 100644 index 00000000..91b70a78 --- /dev/null +++ b/man/get_registered_model.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/registered_models.R +\name{get_registered_model} +\alias{get_registered_model} +\alias{registeredModelsGet} +\title{Get a Registered Model.} +\usage{ +get_registered_model(client, full_name, include_browse = NULL) + +registeredModelsGet(client, full_name, include_browse = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. The three-level (fully qualified) name of the registered model.} + +\item{include_browse}{Whether to include registered models in the response for which the principal can only access selective metadata for.} +} +\description{ +Get a registered model. +} +\details{ +The caller must be a metastore admin or an owner of (or have the \strong{EXECUTE} +privilege on) the registered model. For the latter case, the caller must also +be the owner or have the \strong{USE_CATALOG} privilege on the parent catalog and +the \strong{USE_SCHEMA} privilege on the parent schema. +} diff --git a/man/get_repo.Rd b/man/get_repo.Rd new file mode 100644 index 00000000..e334e293 --- /dev/null +++ b/man/get_repo.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/repos.R +\name{get_repo} +\alias{get_repo} +\alias{reposGet} +\title{Get a repo.} +\usage{ +get_repo(client, repo_id) + +reposGet(client, repo_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{repo_id}{Required. The ID for the corresponding repo to access.} +} +\description{ +Returns the repo with the given repo ID. +} diff --git a/man/get_repo_permission_levels.Rd b/man/get_repo_permission_levels.Rd new file mode 100644 index 00000000..53d8845a --- /dev/null +++ b/man/get_repo_permission_levels.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/repos.R +\name{get_repo_permission_levels} +\alias{get_repo_permission_levels} +\alias{reposGetPermissionLevels} +\title{Get repo permission levels.} +\usage{ +get_repo_permission_levels(client, repo_id) + +reposGetPermissionLevels(client, repo_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{repo_id}{Required. The repo for which to get or manage permissions.} +} +\description{ +Gets the permission levels that a user can have on an object. +} diff --git a/man/get_repo_permissions.Rd b/man/get_repo_permissions.Rd new file mode 100644 index 00000000..f8abd6e4 --- /dev/null +++ b/man/get_repo_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/repos.R +\name{get_repo_permissions} +\alias{get_repo_permissions} +\alias{reposGetPermissions} +\title{Get repo permissions.} +\usage{ +get_repo_permissions(client, repo_id) + +reposGetPermissions(client, repo_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{repo_id}{Required. The repo for which to get or manage permissions.} +} +\description{ +Gets the permissions of a repo. Repos can inherit permissions from their root +object. 
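+ +For example (the repo ID is a placeholder): +\preformatted{client <- DatabricksClient() +perms <- get_repo_permissions(client, repo_id = "<repo-id>") +# inspect the returned access-control entries +str(perms) +}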
+} diff --git a/man/get_restrict_workspace_admin.Rd b/man/get_restrict_workspace_admin.Rd new file mode 100644 index 00000000..7245a3de --- /dev/null +++ b/man/get_restrict_workspace_admin.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/restrict_workspace_admins.R +\name{get_restrict_workspace_admin} +\alias{get_restrict_workspace_admin} +\alias{restrictWorkspaceAdminsGet} +\title{Get the restrict workspace admins setting.} +\usage{ +get_restrict_workspace_admin(client, etag = NULL) + +restrictWorkspaceAdminsGet(client, etag = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{etag}{etag used for versioning.} +} +\description{ +Gets the restrict workspace admins setting. +} diff --git a/man/get_schema.Rd b/man/get_schema.Rd new file mode 100644 index 00000000..6b2f429f --- /dev/null +++ b/man/get_schema.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/schemas.R +\name{get_schema} +\alias{get_schema} +\alias{schemasGet} +\title{Get a schema.} +\usage{ +get_schema(client, full_name, include_browse = NULL) + +schemasGet(client, full_name, include_browse = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the schema.} + +\item{include_browse}{Whether to include schemas in the response for which the principal can only access selective metadata for.} +} +\description{ +Gets the specified schema within the metastore. The caller must be a +metastore admin, the owner of the schema, or a user that has the +\strong{USE_SCHEMA} privilege on the schema. +} diff --git a/man/get_secret.Rd b/man/get_secret.Rd new file mode 100644 index 00000000..7c06dd5b --- /dev/null +++ b/man/get_secret.Rd @@ -0,0 +1,33 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/secrets.R +\name{get_secret} +\alias{get_secret} +\alias{secretsGetSecret} +\title{Get a secret.} +\usage{ +get_secret(client, scope, key) + +secretsGetSecret(client, scope, key) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{scope}{Required. The name of the scope to fetch secret information from.} + +\item{key}{Required. The key to fetch secret for.} +} +\description{ +Gets the bytes representation of a secret value for the specified scope and +key. +} +\details{ +Users need the READ permission to make this call. + +Note that the secret value returned is in bytes. The interpretation of the +bytes is determined by the caller in DBUtils and the type the data is decoded +into. + +Throws \code{PERMISSION_DENIED} if the user does not have permission to make +this API call. Throws \code{RESOURCE_DOES_NOT_EXIST} if no such secret or secret +scope exists. +} diff --git a/man/get_secret_acl.Rd b/man/get_secret_acl.Rd new file mode 100644 index 00000000..da364846 --- /dev/null +++ b/man/get_secret_acl.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/secrets.R +\name{get_secret_acl} +\alias{get_secret_acl} +\alias{secretsGetAcl} +\title{Get secret ACL details.} +\usage{ +get_secret_acl(client, scope, principal) + +secretsGetAcl(client, scope, principal) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{scope}{Required. The name of the scope to fetch ACL information from.} + +\item{principal}{Required. 
The principal to fetch ACL information for.} +} +\description{ +Gets the details about the given ACL, such as the group and permission. Users +must have the \code{MANAGE} permission to invoke this API. +} +\details{ +Throws \code{RESOURCE_DOES_NOT_EXIST} if no such secret scope exists. Throws +\code{PERMISSION_DENIED} if the user does not have permission to make this API +call. +} diff --git a/man/get_service_principal.Rd b/man/get_service_principal.Rd new file mode 100644 index 00000000..de10425e --- /dev/null +++ b/man/get_service_principal.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/service_principals.R +\name{get_service_principal} +\alias{get_service_principal} +\alias{servicePrincipalsGet} +\title{Get service principal details.} +\usage{ +get_service_principal(client, id) + +servicePrincipalsGet(client, id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Unique ID for a service principal in the Databricks workspace.} +} +\description{ +Gets the details for a single service principal defined in the Databricks +workspace. +} diff --git a/man/get_serving_endpoint.Rd b/man/get_serving_endpoint.Rd new file mode 100644 index 00000000..5449df29 --- /dev/null +++ b/man/get_serving_endpoint.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{get_serving_endpoint} +\alias{get_serving_endpoint} +\alias{servingEndpointsGet} +\title{Get a single serving endpoint.} +\usage{ +get_serving_endpoint(client, name) + +servingEndpointsGet(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the serving endpoint.} +} +\description{ +Retrieves the details for a single serving endpoint. +} diff --git a/man/get_serving_endpoint_permission_levels.Rd b/man/get_serving_endpoint_permission_levels.Rd new file mode 100644 index 00000000..08724b4a --- /dev/null +++ b/man/get_serving_endpoint_permission_levels.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{get_serving_endpoint_permission_levels} +\alias{get_serving_endpoint_permission_levels} +\alias{servingEndpointsGetPermissionLevels} +\title{Get serving endpoint permission levels.} +\usage{ +get_serving_endpoint_permission_levels(client, serving_endpoint_id) + +servingEndpointsGetPermissionLevels(client, serving_endpoint_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{serving_endpoint_id}{Required. The serving endpoint for which to get or manage permissions.} +} +\description{ +Gets the permission levels that a user can have on an object. +} diff --git a/man/get_serving_endpoint_permissions.Rd b/man/get_serving_endpoint_permissions.Rd new file mode 100644 index 00000000..997908e7 --- /dev/null +++ b/man/get_serving_endpoint_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{get_serving_endpoint_permissions} +\alias{get_serving_endpoint_permissions} +\alias{servingEndpointsGetPermissions} +\title{Get serving endpoint permissions.} +\usage{ +get_serving_endpoint_permissions(client, serving_endpoint_id) + +servingEndpointsGetPermissions(client, serving_endpoint_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{serving_endpoint_id}{Required. 
The serving endpoint for which to get or manage permissions.} +} +\description{ +Gets the permissions of a serving endpoint. Serving endpoints can inherit +permissions from their root object. +} diff --git a/man/get_share.Rd b/man/get_share.Rd new file mode 100644 index 00000000..bdfffdc7 --- /dev/null +++ b/man/get_share.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/shares.R +\name{get_share} +\alias{get_share} +\alias{sharesGet} +\title{Get a share.} +\usage{ +get_share(client, name, include_shared_data = NULL) + +sharesGet(client, name, include_shared_data = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the share.} + +\item{include_shared_data}{Query for data to include in the share.} +} +\description{ +Gets a data object share from the metastore. The caller must be a metastore +admin or the owner of the share. +} diff --git a/man/get_statement_execution.Rd b/man/get_statement_execution.Rd new file mode 100644 index 00000000..8827d223 --- /dev/null +++ b/man/get_statement_execution.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/statement_execution.R +\name{get_statement_execution} +\alias{get_statement_execution} +\alias{statementExecutionGetStatement} +\title{Get status, manifest, and result first chunk.} +\usage{ +get_statement_execution(client, statement_id) + +statementExecutionGetStatement(client, statement_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{statement_id}{Required. The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls.} +} +\description{ +This request can be used to poll for the statement's status. When the +\code{status.state} field is \code{SUCCEEDED} it will also return the result manifest +and the first chunk of the result data. When the statement is in the terminal +states \code{CANCELED}, \code{CLOSED} or \code{FAILED}, it returns HTTP 200 with the state +set. After at least 12 hours in terminal state, the statement is removed from +the warehouse and further calls will receive an HTTP 404 response. +} +\details{ +\strong{NOTE} This call currently might take up to 5 seconds to get the latest +status and result. +} diff --git a/man/get_statement_execution_result_chunk_n.Rd b/man/get_statement_execution_result_chunk_n.Rd new file mode 100644 index 00000000..b7df987b --- /dev/null +++ b/man/get_statement_execution_result_chunk_n.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/statement_execution.R +\name{get_statement_execution_result_chunk_n} +\alias{get_statement_execution_result_chunk_n} +\alias{statementExecutionGetStatementResultChunkN} +\title{Get result chunk by index.} +\usage{ +get_statement_execution_result_chunk_n(client, statement_id, chunk_index) + +statementExecutionGetStatementResultChunkN(client, statement_id, chunk_index) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{statement_id}{Required. The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls.} + +\item{chunk_index}{Required. This field has no description yet.} +} +\description{ +After the statement execution has \code{SUCCEEDED}, this request can be used to +fetch any chunk by index. 
Whereas the first chunk with \code{chunk_index=0} is +typically fetched with :method:statementexecution/executeStatement or +:method:statementexecution/getStatement, this request can be used to fetch +subsequent chunks. The response structure is identical to the nested \code{result} +element described in the :method:statementexecution/getStatement request, and +similarly includes the \code{next_chunk_index} and \code{next_chunk_internal_link} +fields for simple iteration through the result set. +} diff --git a/man/get_storage_credential.Rd b/man/get_storage_credential.Rd new file mode 100644 index 00000000..d379624d --- /dev/null +++ b/man/get_storage_credential.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/storage_credentials.R +\name{get_storage_credential} +\alias{get_storage_credential} +\alias{storageCredentialsGet} +\title{Get a credential.} +\usage{ +get_storage_credential(client, name) + +storageCredentialsGet(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the storage credential.} +} +\description{ +Gets a storage credential from the metastore. The caller must be a metastore +admin, the owner of the storage credential, or have some permission on the +storage credential. +} diff --git a/man/get_table.Rd b/man/get_table.Rd new file mode 100644 index 00000000..e5e2f9eb --- /dev/null +++ b/man/get_table.Rd @@ -0,0 +1,39 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tables.R +\name{get_table} +\alias{get_table} +\alias{tablesGet} +\title{Get a table.} +\usage{ +get_table( + client, + full_name, + include_browse = NULL, + include_delta_metadata = NULL +) + +tablesGet( + client, + full_name, + include_browse = NULL, + include_delta_metadata = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the table.} + +\item{include_browse}{Whether to include tables in the response for which the principal can only access selective metadata for.} + +\item{include_delta_metadata}{Whether delta metadata should be included in the response.} +} +\description{ +Gets a table from the metastore for a specific catalog and schema. The caller +must satisfy one of the following requirements: * Be a metastore admin * Be +the owner of the parent catalog * Be the owner of the parent schema and have +the USE_CATALOG privilege on the parent catalog * Have the \strong{USE_CATALOG} +privilege on the parent catalog and the \strong{USE_SCHEMA} privilege on the +parent schema, and either be the table owner or have the SELECT privilege on +the table. +} diff --git a/man/get_token_management.Rd b/man/get_token_management.Rd new file mode 100644 index 00000000..ec108b51 --- /dev/null +++ b/man/get_token_management.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token_management.R +\name{get_token_management} +\alias{get_token_management} +\alias{tokenManagementGet} +\title{Get token info.} +\usage{ +get_token_management(client, token_id) + +tokenManagementGet(client, token_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{token_id}{Required. The ID of the token to get.} +} +\description{ +Gets information about a token, specified by its ID. 
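+ +A brief sketch (the token ID is a placeholder): +\preformatted{client <- DatabricksClient() +info <- get_token_management(client, token_id = "<token-id>") +}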
+} diff --git a/man/get_token_management_permission_levels.Rd b/man/get_token_management_permission_levels.Rd new file mode 100644 index 00000000..b0618e20 --- /dev/null +++ b/man/get_token_management_permission_levels.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token_management.R +\name{get_token_management_permission_levels} +\alias{get_token_management_permission_levels} +\alias{tokenManagementGetPermissionLevels} +\title{Get token permission levels.} +\usage{ +get_token_management_permission_levels(client) + +tokenManagementGetPermissionLevels(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +Gets the permission levels that a user can have on an object. +} diff --git a/man/get_token_management_permissions.Rd b/man/get_token_management_permissions.Rd new file mode 100644 index 00000000..0dcd3be6 --- /dev/null +++ b/man/get_token_management_permissions.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token_management.R +\name{get_token_management_permissions} +\alias{get_token_management_permissions} +\alias{tokenManagementGetPermissions} +\title{Get token permissions.} +\usage{ +get_token_management_permissions(client) + +tokenManagementGetPermissions(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +Gets the permissions of all tokens. Tokens can inherit permissions from their +root object. +} diff --git a/man/get_user.Rd b/man/get_user.Rd new file mode 100644 index 00000000..ef0c3a6c --- /dev/null +++ b/man/get_user.Rd @@ -0,0 +1,53 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/users.R +\name{get_user} +\alias{get_user} +\alias{usersGet} +\title{Get user details.} +\usage{ +get_user( + client, + id, + attributes = NULL, + count = NULL, + excluded_attributes = NULL, + filter = NULL, + sort_by = NULL, + sort_order = NULL, + start_index = NULL +) + +usersGet( + client, + id, + attributes = NULL, + count = NULL, + excluded_attributes = NULL, + filter = NULL, + sort_by = NULL, + sort_order = NULL, + start_index = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Unique ID for a user in the Databricks workspace.} + +\item{attributes}{Comma-separated list of attributes to return in response.} + +\item{count}{Desired number of results per page.} + +\item{excluded_attributes}{Comma-separated list of attributes to exclude in response.} + +\item{filter}{Query by which the results have to be filtered.} + +\item{sort_by}{Attribute to sort the results.} + +\item{sort_order}{The order to sort the results.} + +\item{start_index}{Specifies the index of the first result.} +} +\description{ +Gets information for a specific user in Databricks workspace. +} diff --git a/man/get_user_permission_levels.Rd b/man/get_user_permission_levels.Rd new file mode 100644 index 00000000..c6f25d32 --- /dev/null +++ b/man/get_user_permission_levels.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/users.R +\name{get_user_permission_levels} +\alias{get_user_permission_levels} +\alias{usersGetPermissionLevels} +\title{Get password permission levels.} +\usage{ +get_user_permission_levels(client) + +usersGetPermissionLevels(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +Gets the permission levels that a user can have on an object. 
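As a usage sketch for get_user above: the id value is hypothetical, and userName/active are merely illustrative SCIM attribute names, not values taken from these docs.

# Fetch a single user, trimming the response to two attributes.
u <- get_user(client, id = "1234567890", attributes = "userName,active")

# The camelCase alias kept by this change set is interchangeable:
u <- usersGet(client, id = "1234567890", attributes = "userName,active")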
+}
diff --git a/man/get_user_permissions.Rd b/man/get_user_permissions.Rd
new file mode 100644
index 00000000..bf4a7d14
--- /dev/null
+++ b/man/get_user_permissions.Rd
@@ -0,0 +1,18 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/users.R
+\name{get_user_permissions}
+\alias{get_user_permissions}
+\alias{usersGetPermissions}
+\title{Get password permissions.}
+\usage{
+get_user_permissions(client)
+
+usersGetPermissions(client)
+}
+\arguments{
+\item{client}{Required. Instance of DatabricksClient()}
+}
+\description{
+Gets the permissions of all passwords. Passwords can inherit permissions from
+their root object.
+}
diff --git a/man/get_vector_search_endpoint.Rd b/man/get_vector_search_endpoint.Rd
new file mode 100644
index 00000000..ae8bf95b
--- /dev/null
+++ b/man/get_vector_search_endpoint.Rd
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/vector_search_endpoints.R
+\name{get_vector_search_endpoint}
+\alias{get_vector_search_endpoint}
+\alias{vectorSearchEndpointsGetEndpoint}
+\title{Get an endpoint.}
+\usage{
+get_vector_search_endpoint(client, endpoint_name)
+
+vectorSearchEndpointsGetEndpoint(client, endpoint_name)
+}
+\arguments{
+\item{client}{Required. Instance of DatabricksClient()}
+
+\item{endpoint_name}{Required. Name of the endpoint.}
+}
+\description{
+Get an endpoint.
+}
diff --git a/man/get_vector_search_index.Rd b/man/get_vector_search_index.Rd
new file mode 100644
index 00000000..8665a541
--- /dev/null
+++ b/man/get_vector_search_index.Rd
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/vector_search_indexes.R
+\name{get_vector_search_index}
+\alias{get_vector_search_index}
+\alias{vectorSearchIndexesGetIndex}
+\title{Get an index.}
+\usage{
+get_vector_search_index(client, index_name)
+
+vectorSearchIndexesGetIndex(client, index_name)
+}
+\arguments{
+\item{client}{Required. Instance of DatabricksClient()}
+
+\item{index_name}{Required. Name of the index.}
+}
+\description{
+Get an index.
+}
diff --git a/man/get_warehouse.Rd b/man/get_warehouse.Rd
new file mode 100644
index 00000000..6f232a7a
--- /dev/null
+++ b/man/get_warehouse.Rd
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/warehouses.R
+\name{get_warehouse}
+\alias{get_warehouse}
+\alias{warehousesGet}
+\title{Get warehouse info.}
+\usage{
+get_warehouse(client, id)
+
+warehousesGet(client, id)
+}
+\arguments{
+\item{client}{Required. Instance of DatabricksClient()}
+
+\item{id}{Required. The ID of the SQL warehouse.}
+}
+\description{
+Gets the information for a single SQL warehouse.
+}
diff --git a/man/get_warehouse_permission_levels.Rd b/man/get_warehouse_permission_levels.Rd
new file mode 100644
index 00000000..84390a90
--- /dev/null
+++ b/man/get_warehouse_permission_levels.Rd
@@ -0,0 +1,19 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/warehouses.R
+\name{get_warehouse_permission_levels}
+\alias{get_warehouse_permission_levels}
+\alias{warehousesGetPermissionLevels}
+\title{Get SQL warehouse permission levels.}
+\usage{
+get_warehouse_permission_levels(client, warehouse_id)
+
+warehousesGetPermissionLevels(client, warehouse_id)
+}
+\arguments{
+\item{client}{Required. Instance of DatabricksClient()}
+
+\item{warehouse_id}{Required. The SQL warehouse for which to get or manage permissions.}
+}
+\description{
+Gets the permission levels that a user can have on an object.
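The warehouse getters above compose naturally: fetch a warehouse, then ask which permission levels are available on it. A sketch in which the warehouse ID is hypothetical:

wh <- get_warehouse(client, id = "1234567890abcdef")

# Which permission levels could a user hold on this warehouse?
levels <- get_warehouse_permission_levels(client,
  warehouse_id = "1234567890abcdef")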
+} diff --git a/man/get_warehouse_permissions.Rd b/man/get_warehouse_permissions.Rd new file mode 100644 index 00000000..53ac8049 --- /dev/null +++ b/man/get_warehouse_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{get_warehouse_permissions} +\alias{get_warehouse_permissions} +\alias{warehousesGetPermissions} +\title{Get SQL warehouse permissions.} +\usage{ +get_warehouse_permissions(client, warehouse_id) + +warehousesGetPermissions(client, warehouse_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{warehouse_id}{Required. The SQL warehouse for which to get or manage permissions.} +} +\description{ +Gets the permissions of a SQL warehouse. SQL warehouses can inherit +permissions from their root object. +} diff --git a/man/get_warehouse_workspace_config.Rd b/man/get_warehouse_workspace_config.Rd new file mode 100644 index 00000000..101d34ee --- /dev/null +++ b/man/get_warehouse_workspace_config.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{get_warehouse_workspace_config} +\alias{get_warehouse_workspace_config} +\alias{warehousesGetWorkspaceWarehouseConfig} +\title{Get the workspace configuration.} +\usage{ +get_warehouse_workspace_config(client) + +warehousesGetWorkspaceWarehouseConfig(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +Gets the workspace level configuration that is shared by all SQL warehouses +in a workspace. +} diff --git a/man/get_workspace_binding.Rd b/man/get_workspace_binding.Rd new file mode 100644 index 00000000..9a7610e2 --- /dev/null +++ b/man/get_workspace_binding.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace_bindings.R +\name{get_workspace_binding} +\alias{get_workspace_binding} +\alias{workspaceBindingsGet} +\title{Get catalog workspace bindings.} +\usage{ +get_workspace_binding(client, name) + +workspaceBindingsGet(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the catalog.} +} +\description{ +Gets workspace bindings of the catalog. The caller must be a metastore admin +or an owner of the catalog. +} diff --git a/man/get_workspace_binding_bindings.Rd b/man/get_workspace_binding_bindings.Rd new file mode 100644 index 00000000..f46f2ac2 --- /dev/null +++ b/man/get_workspace_binding_bindings.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace_bindings.R +\name{get_workspace_binding_bindings} +\alias{get_workspace_binding_bindings} +\alias{workspaceBindingsGetBindings} +\title{Get securable workspace bindings.} +\usage{ +get_workspace_binding_bindings(client, securable_type, securable_name) + +workspaceBindingsGetBindings(client, securable_type, securable_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{securable_type}{Required. The type of the securable.} + +\item{securable_name}{Required. The name of the securable.} +} +\description{ +Gets workspace bindings of the securable. The caller must be a metastore +admin or an owner of the securable. 
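Of the two binding getters above, get_workspace_binding is the catalog-specific form and get_workspace_binding_bindings the generic one over any securable. A sketch with a hypothetical catalog name, assuming "catalog" is an accepted securable_type value:

b1 <- get_workspace_binding(client, name = "my_catalog")

# Equivalent lookup through the generic securable-based call.
b2 <- get_workspace_binding_bindings(client,
  securable_type = "catalog", securable_name = "my_catalog")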
+} diff --git a/man/get_workspace_conf_status.Rd b/man/get_workspace_conf_status.Rd new file mode 100644 index 00000000..974bda60 --- /dev/null +++ b/man/get_workspace_conf_status.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace_conf.R +\name{get_workspace_conf_status} +\alias{get_workspace_conf_status} +\alias{workspaceConfGetStatus} +\title{Check configuration status.} +\usage{ +get_workspace_conf_status(client, keys) + +workspaceConfGetStatus(client, keys) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{keys}{Required. This field has no description yet.} +} +\description{ +Gets the configuration status for a workspace. +} diff --git a/man/gitCredentialsCreate.Rd b/man/gitCredentialsCreate.Rd deleted file mode 100644 index 8c1af941..00000000 --- a/man/gitCredentialsCreate.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/git_credentials.R -\name{gitCredentialsCreate} -\alias{gitCredentialsCreate} -\title{Create a credential entry.} -\usage{ -gitCredentialsCreate( - client, - git_provider, - git_username = NULL, - personal_access_token = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{git_provider}{Required. Git provider.} - -\item{git_username}{Git username.} - -\item{personal_access_token}{The personal access token used to authenticate to the corresponding Git provider.} -} -\description{ -Creates a Git credential entry for the user. Only one Git credential per user -is supported, so any attempts to create credentials if an entry already -exists will fail. Use the PATCH endpoint to update existing credentials, or -the DELETE endpoint to delete existing credentials. -} diff --git a/man/gitCredentialsDelete.Rd b/man/gitCredentialsDelete.Rd deleted file mode 100644 index 24449890..00000000 --- a/man/gitCredentialsDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/git_credentials.R -\name{gitCredentialsDelete} -\alias{gitCredentialsDelete} -\title{Delete a credential.} -\usage{ -gitCredentialsDelete(client, credential_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{credential_id}{Required. The ID for the corresponding credential to access.} -} -\description{ -Deletes the specified Git credential. -} diff --git a/man/gitCredentialsGet.Rd b/man/gitCredentialsGet.Rd deleted file mode 100644 index 9eefe546..00000000 --- a/man/gitCredentialsGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/git_credentials.R -\name{gitCredentialsGet} -\alias{gitCredentialsGet} -\title{Get a credential entry.} -\usage{ -gitCredentialsGet(client, credential_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{credential_id}{Required. The ID for the corresponding credential to access.} -} -\description{ -Gets the Git credential with the specified credential ID. -} diff --git a/man/gitCredentialsList.Rd b/man/gitCredentialsList.Rd deleted file mode 100644 index 70e54119..00000000 --- a/man/gitCredentialsList.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/git_credentials.R -\name{gitCredentialsList} -\alias{gitCredentialsList} -\title{Get Git credentials.} -\usage{ -gitCredentialsList(client) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Lists the calling user's Git credentials. One credential per user is -supported. -} diff --git a/man/gitCredentialsUpdate.Rd b/man/gitCredentialsUpdate.Rd deleted file mode 100644 index 0b0c75a7..00000000 --- a/man/gitCredentialsUpdate.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/git_credentials.R -\name{gitCredentialsUpdate} -\alias{gitCredentialsUpdate} -\title{Update a credential.} -\usage{ -gitCredentialsUpdate( - client, - credential_id, - git_provider = NULL, - git_username = NULL, - personal_access_token = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{credential_id}{Required. The ID for the corresponding credential to access.} - -\item{git_provider}{Git provider.} - -\item{git_username}{Git username.} - -\item{personal_access_token}{The personal access token used to authenticate to the corresponding Git provider.} -} -\description{ -Updates the specified Git credential. -} diff --git a/man/globalInitScriptsCreate.Rd b/man/globalInitScriptsCreate.Rd deleted file mode 100644 index 471617b5..00000000 --- a/man/globalInitScriptsCreate.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/global_init_scripts.R -\name{globalInitScriptsCreate} -\alias{globalInitScriptsCreate} -\title{Create init script.} -\usage{ -globalInitScriptsCreate(client, name, script, enabled = NULL, position = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the script.} - -\item{script}{Required. The Base64-encoded content of the script.} - -\item{enabled}{Specifies whether the script is enabled.} - -\item{position}{The position of a global init script, where 0 represents the first script to run, 1 is the second script to run, in ascending order.} -} -\description{ -Creates a new global init script in this workspace. -} diff --git a/man/globalInitScriptsDelete.Rd b/man/globalInitScriptsDelete.Rd deleted file mode 100644 index e6e7fc4b..00000000 --- a/man/globalInitScriptsDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/global_init_scripts.R -\name{globalInitScriptsDelete} -\alias{globalInitScriptsDelete} -\title{Delete init script.} -\usage{ -globalInitScriptsDelete(client, script_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{script_id}{Required. The ID of the global init script.} -} -\description{ -Deletes a global init script. -} diff --git a/man/globalInitScriptsGet.Rd b/man/globalInitScriptsGet.Rd deleted file mode 100644 index 22714ca8..00000000 --- a/man/globalInitScriptsGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/global_init_scripts.R -\name{globalInitScriptsGet} -\alias{globalInitScriptsGet} -\title{Get an init script.} -\usage{ -globalInitScriptsGet(client, script_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{script_id}{Required. The ID of the global init script.} -} -\description{ -Gets all the details of a script, including its Base64-encoded contents. 
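The script argument of globalInitScriptsCreate (one of the camelCase entry points this change set removes in favour of snake_case twins) must be Base64-encoded, a step that is easy to miss. A sketch using the base64enc package, which is an assumption; any Base64 encoder would do, and the script name is made up:

library(base64enc)

body <- "#!/bin/bash\necho hello > /tmp/hello.txt"
globalInitScriptsCreate(client,
  name     = "hello-script",
  script   = base64encode(charToRaw(body)),
  enabled  = TRUE,
  position = 0)  # run before any other global init script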
-} diff --git a/man/globalInitScriptsList.Rd b/man/globalInitScriptsList.Rd deleted file mode 100644 index a828e74b..00000000 --- a/man/globalInitScriptsList.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/global_init_scripts.R -\name{globalInitScriptsList} -\alias{globalInitScriptsList} -\title{Get init scripts.} -\usage{ -globalInitScriptsList(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Get a list of all global init scripts for this workspace. This returns all -properties for each script but \strong{not} the script contents. To retrieve the -contents of a script, use the \href{:method:globalinitscripts/get}{get a global init script} operation. -} diff --git a/man/globalInitScriptsUpdate.Rd b/man/globalInitScriptsUpdate.Rd deleted file mode 100644 index bf5a3922..00000000 --- a/man/globalInitScriptsUpdate.Rd +++ /dev/null @@ -1,32 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/global_init_scripts.R -\name{globalInitScriptsUpdate} -\alias{globalInitScriptsUpdate} -\title{Update init script.} -\usage{ -globalInitScriptsUpdate( - client, - script_id, - name, - script, - enabled = NULL, - position = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{script_id}{Required. The ID of the global init script.} - -\item{name}{Required. The name of the script.} - -\item{script}{Required. The Base64-encoded content of the script.} - -\item{enabled}{Specifies whether the script is enabled.} - -\item{position}{The position of a script, where 0 represents the first script to run, 1 is the second script to run, in ascending order.} -} -\description{ -Updates a global init script, specifying only the fields to change. All -fields are optional. Unspecified fields retain their current value. -} diff --git a/man/grantsGet.Rd b/man/grantsGet.Rd deleted file mode 100644 index f03c5f6f..00000000 --- a/man/grantsGet.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/grants.R -\name{grantsGet} -\alias{grantsGet} -\title{Get permissions.} -\usage{ -grantsGet(client, securable_type, full_name, principal = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{securable_type}{Required. Type of securable.} - -\item{full_name}{Required. Full name of securable.} - -\item{principal}{If provided, only the permissions for the specified principal (user or group) are returned.} -} -\description{ -Gets the permissions for a securable. -} diff --git a/man/grantsGetEffective.Rd b/man/grantsGetEffective.Rd deleted file mode 100644 index fb2007a5..00000000 --- a/man/grantsGetEffective.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/grants.R -\name{grantsGetEffective} -\alias{grantsGetEffective} -\title{Get effective permissions.} -\usage{ -grantsGetEffective(client, securable_type, full_name, principal = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{securable_type}{Required. Type of securable.} - -\item{full_name}{Required. Full name of securable.} - -\item{principal}{If provided, only the effective permissions for the specified principal (user or group) are returned.} -} -\description{ -Gets the effective permissions for a securable. 
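The two grants getters above differ in direct versus effective permissions; reading "effective" as also reflecting grants inherited from parent objects is an assumption, since the descriptions do not spell it out. A sketch with hypothetical names:

# Grants written directly on the table.
direct <- grantsGet(client, securable_type = "table",
  full_name = "main.sales.orders")

# Effective permissions, optionally filtered to one principal.
effective <- grantsGetEffective(client, securable_type = "table",
  full_name = "main.sales.orders", principal = "data-engineers")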
-} diff --git a/man/grantsUpdate.Rd b/man/grantsUpdate.Rd deleted file mode 100644 index d39b9f5e..00000000 --- a/man/grantsUpdate.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/grants.R -\name{grantsUpdate} -\alias{grantsUpdate} -\title{Update permissions.} -\usage{ -grantsUpdate(client, securable_type, full_name, changes = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{securable_type}{Required. Type of securable.} - -\item{full_name}{Required. Full name of securable.} - -\item{changes}{Array of permissions change objects.} -} -\description{ -Updates the permissions for a securable. -} diff --git a/man/groupsCreate.Rd b/man/groupsCreate.Rd deleted file mode 100644 index fd2b1df9..00000000 --- a/man/groupsCreate.Rd +++ /dev/null @@ -1,44 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/groups.R -\name{groupsCreate} -\alias{groupsCreate} -\title{Create a new group.} -\usage{ -groupsCreate( - client, - display_name = NULL, - entitlements = NULL, - external_id = NULL, - groups = NULL, - id = NULL, - members = NULL, - meta = NULL, - roles = NULL, - schemas = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{display_name}{String that represents a human-readable group name.} - -\item{entitlements}{Entitlements assigned to the group.} - -\item{external_id}{This field has no description yet.} - -\item{groups}{This field has no description yet.} - -\item{id}{Databricks group ID.} - -\item{members}{This field has no description yet.} - -\item{meta}{Container for the group identifier.} - -\item{roles}{Corresponds to AWS instance profile/arn role.} - -\item{schemas}{The schema of the group.} -} -\description{ -Creates a group in the Databricks workspace with a unique name, using the -supplied group details. -} diff --git a/man/groupsDelete.Rd b/man/groupsDelete.Rd deleted file mode 100644 index 93985aec..00000000 --- a/man/groupsDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/groups.R -\name{groupsDelete} -\alias{groupsDelete} -\title{Delete a group.} -\usage{ -groupsDelete(client, id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Unique ID for a group in the Databricks workspace.} -} -\description{ -Deletes a group from the Databricks workspace. -} diff --git a/man/groupsGet.Rd b/man/groupsGet.Rd deleted file mode 100644 index 2b80633e..00000000 --- a/man/groupsGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/groups.R -\name{groupsGet} -\alias{groupsGet} -\title{Get group details.} -\usage{ -groupsGet(client, id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Unique ID for a group in the Databricks workspace.} -} -\description{ -Gets the information for a specific group in the Databricks workspace. 
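A round-trip sketch for the group calls above; the display name is made up, and reading the new group's ID back from the response (g$id) assumes the SCIM entity is returned as a list carrying its id:

g <- groupsCreate(client, display_name = "ml-platform-team")
info <- groupsGet(client, id = g$id)
groupsDelete(client, id = g$id)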
-} diff --git a/man/groupsList.Rd b/man/groupsList.Rd deleted file mode 100644 index ecd9b891..00000000 --- a/man/groupsList.Rd +++ /dev/null @@ -1,40 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/groups.R -\name{groupsList} -\alias{groupsList} -\title{List group details.} -\usage{ -groupsList( - client, - attributes = NULL, - count = NULL, - excluded_attributes = NULL, - filter = NULL, - sort_by = NULL, - sort_order = NULL, - start_index = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{attributes}{Comma-separated list of attributes to return in response.} - -\item{count}{Desired number of results per page.} - -\item{excluded_attributes}{Comma-separated list of attributes to exclude in response.} - -\item{filter}{Query by which the results have to be filtered.} - -\item{sort_by}{Attribute to sort the results.} - -\item{sort_order}{The order to sort the results.} - -\item{start_index}{Specifies the index of the first result.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets all details of the groups associated with the Databricks workspace. -} diff --git a/man/groupsPatch.Rd b/man/groupsPatch.Rd deleted file mode 100644 index 88c184fd..00000000 --- a/man/groupsPatch.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/groups.R -\name{groupsPatch} -\alias{groupsPatch} -\title{Update group details.} -\usage{ -groupsPatch(client, id, operations = NULL, schemas = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Unique ID for a group in the Databricks workspace.} - -\item{operations}{This field has no description yet.} - -\item{schemas}{The schema of the patch request.} -} -\description{ -Partially updates the details of a group. -} diff --git a/man/groupsUpdate.Rd b/man/groupsUpdate.Rd deleted file mode 100644 index b8dd4035..00000000 --- a/man/groupsUpdate.Rd +++ /dev/null @@ -1,43 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/groups.R -\name{groupsUpdate} -\alias{groupsUpdate} -\title{Replace a group.} -\usage{ -groupsUpdate( - client, - id, - display_name = NULL, - entitlements = NULL, - external_id = NULL, - groups = NULL, - members = NULL, - meta = NULL, - roles = NULL, - schemas = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Databricks group ID.} - -\item{display_name}{String that represents a human-readable group name.} - -\item{entitlements}{Entitlements assigned to the group.} - -\item{external_id}{This field has no description yet.} - -\item{groups}{This field has no description yet.} - -\item{members}{This field has no description yet.} - -\item{meta}{Container for the group identifier.} - -\item{roles}{Corresponds to AWS instance profile/arn role.} - -\item{schemas}{The schema of the group.} -} -\description{ -Updates the details of a group by replacing the entire group entity. 
-} diff --git a/man/import_notebook.Rd b/man/import_notebook.Rd new file mode 100644 index 00000000..abd0fc03 --- /dev/null +++ b/man/import_notebook.Rd @@ -0,0 +1,46 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{import_notebook} +\alias{import_notebook} +\alias{workspaceImport} +\title{Import a workspace object.} +\usage{ +import_notebook( + client, + path, + content = NULL, + format = NULL, + language = NULL, + overwrite = NULL +) + +workspaceImport( + client, + path, + content = NULL, + format = NULL, + language = NULL, + overwrite = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{path}{Required. The absolute path of the object or directory.} + +\item{content}{The base64-encoded content.} + +\item{format}{This specifies the format of the file to be imported.} + +\item{language}{The language of the object.} + +\item{overwrite}{The flag that specifies whether to overwrite existing object.} +} +\description{ +Imports a workspace object (for example, a notebook or file) or the contents +of an entire directory. If \code{path} already exists and \code{overwrite} is set to +\code{false}, this call returns an error \code{RESOURCE_ALREADY_EXISTS}. To import a +directory, you can use either the \code{DBC} format or the \code{SOURCE} format with +the \code{language} field unset. To import a single file as \code{SOURCE}, you must set +the \code{language} field. +} diff --git a/man/install_cluster_library.Rd b/man/install_cluster_library.Rd new file mode 100644 index 00000000..0111a051 --- /dev/null +++ b/man/install_cluster_library.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/libraries.R +\name{install_cluster_library} +\alias{install_cluster_library} +\alias{librariesInstall} +\title{Add a library.} +\usage{ +install_cluster_library(client, cluster_id, libraries) + +librariesInstall(client, cluster_id, libraries) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. Unique identifier for the cluster on which to install these libraries.} + +\item{libraries}{Required. The libraries to install.} +} +\description{ +Add libraries to be installed on a cluster. The installation is asynchronous; +it happens in the background after the completion of this request. +} +\details{ +\strong{Note}: The actual set of libraries to be installed on a cluster is the +union of the libraries specified via this method and the libraries set to be +installed on all clusters via the libraries UI. +} diff --git a/man/instancePoolsCreate.Rd b/man/instancePoolsCreate.Rd deleted file mode 100644 index 52a80dc6..00000000 --- a/man/instancePoolsCreate.Rd +++ /dev/null @@ -1,55 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/instance_pools.R -\name{instancePoolsCreate} -\alias{instancePoolsCreate} -\title{Create a new instance pool.} -\usage{ -instancePoolsCreate( - client, - instance_pool_name, - node_type_id, - aws_attributes = NULL, - azure_attributes = NULL, - custom_tags = NULL, - disk_spec = NULL, - enable_elastic_disk = NULL, - gcp_attributes = NULL, - idle_instance_autotermination_minutes = NULL, - max_capacity = NULL, - min_idle_instances = NULL, - preloaded_docker_images = NULL, - preloaded_spark_versions = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{instance_pool_name}{Required. 
Pool name requested by the user.} - -\item{node_type_id}{Required. This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.} - -\item{aws_attributes}{Attributes related to instance pools running on Amazon Web Services.} - -\item{azure_attributes}{Attributes related to instance pools running on Azure.} - -\item{custom_tags}{Additional tags for pool resources.} - -\item{disk_spec}{Defines the specification of the disks that will be attached to all spark containers.} - -\item{enable_elastic_disk}{Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space.} - -\item{gcp_attributes}{Attributes related to instance pools running on Google Cloud Platform.} - -\item{idle_instance_autotermination_minutes}{Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.} - -\item{max_capacity}{Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.} - -\item{min_idle_instances}{Minimum number of idle instances to keep in the instance pool.} - -\item{preloaded_docker_images}{Custom Docker Image BYOC.} - -\item{preloaded_spark_versions}{A list containing at most one preloaded Spark image version for the pool.} -} -\description{ -Creates a new instance pool using idle and ready-to-use cloud instances. -} diff --git a/man/instancePoolsDelete.Rd b/man/instancePoolsDelete.Rd deleted file mode 100644 index 1c3f4a84..00000000 --- a/man/instancePoolsDelete.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/instance_pools.R -\name{instancePoolsDelete} -\alias{instancePoolsDelete} -\title{Delete an instance pool.} -\usage{ -instancePoolsDelete(client, instance_pool_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{instance_pool_id}{Required. The instance pool to be terminated.} -} -\description{ -Deletes the instance pool permanently. The idle instances in the pool are -terminated asynchronously. -} diff --git a/man/instancePoolsEdit.Rd b/man/instancePoolsEdit.Rd deleted file mode 100644 index be2eff38..00000000 --- a/man/instancePoolsEdit.Rd +++ /dev/null @@ -1,37 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/instance_pools.R -\name{instancePoolsEdit} -\alias{instancePoolsEdit} -\title{Edit an existing instance pool.} -\usage{ -instancePoolsEdit( - client, - instance_pool_id, - instance_pool_name, - node_type_id, - custom_tags = NULL, - idle_instance_autotermination_minutes = NULL, - max_capacity = NULL, - min_idle_instances = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{instance_pool_id}{Required. Instance pool ID.} - -\item{instance_pool_name}{Required. Pool name requested by the user.} - -\item{node_type_id}{Required. 
This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.} - -\item{custom_tags}{Additional tags for pool resources.} - -\item{idle_instance_autotermination_minutes}{Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.} - -\item{max_capacity}{Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.} - -\item{min_idle_instances}{Minimum number of idle instances to keep in the instance pool.} -} -\description{ -Modifies the configuration of an existing instance pool. -} diff --git a/man/instancePoolsGet.Rd b/man/instancePoolsGet.Rd deleted file mode 100644 index c948b23a..00000000 --- a/man/instancePoolsGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/instance_pools.R -\name{instancePoolsGet} -\alias{instancePoolsGet} -\title{Get instance pool information.} -\usage{ -instancePoolsGet(client, instance_pool_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{instance_pool_id}{Required. The canonical unique identifier for the instance pool.} -} -\description{ -Retrieve the information for an instance pool based on its identifier. -} diff --git a/man/instancePoolsGetPermissionLevels.Rd b/man/instancePoolsGetPermissionLevels.Rd deleted file mode 100644 index 98243eaa..00000000 --- a/man/instancePoolsGetPermissionLevels.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/instance_pools.R -\name{instancePoolsGetPermissionLevels} -\alias{instancePoolsGetPermissionLevels} -\title{Get instance pool permission levels.} -\usage{ -instancePoolsGetPermissionLevels(client, instance_pool_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{instance_pool_id}{Required. The instance pool for which to get or manage permissions.} -} -\description{ -Gets the permission levels that a user can have on an object. -} diff --git a/man/instancePoolsGetPermissions.Rd b/man/instancePoolsGetPermissions.Rd deleted file mode 100644 index 6d88e05e..00000000 --- a/man/instancePoolsGetPermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/instance_pools.R -\name{instancePoolsGetPermissions} -\alias{instancePoolsGetPermissions} -\title{Get instance pool permissions.} -\usage{ -instancePoolsGetPermissions(client, instance_pool_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{instance_pool_id}{Required. The instance pool for which to get or manage permissions.} -} -\description{ -Gets the permissions of an instance pool. Instance pools can inherit -permissions from their root object. -} diff --git a/man/instancePoolsList.Rd b/man/instancePoolsList.Rd deleted file mode 100644 index 672a19a3..00000000 --- a/man/instancePoolsList.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/instance_pools.R -\name{instancePoolsList} -\alias{instancePoolsList} -\title{List instance pool info.} -\usage{ -instancePoolsList(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets a list of instance pools with their statistics. 
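Returning to import_notebook, added earlier in this change set: importing a single SOURCE file requires both the Base64-encoded content and an explicit language, per its description. A sketch, again assuming base64enc for the encoding, with a hypothetical workspace path; PYTHON as the language value is an assumption about the accepted enum:

library(base64enc)

nb <- "print('hello from an imported notebook')"
import_notebook(client,
  path      = "/Users/someone@example.com/hello",
  content   = base64encode(charToRaw(nb)),
  format    = "SOURCE",
  language  = "PYTHON",  # required when importing a single SOURCE file
  overwrite = TRUE)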
-} diff --git a/man/instancePoolsSetPermissions.Rd b/man/instancePoolsSetPermissions.Rd deleted file mode 100644 index 0485180f..00000000 --- a/man/instancePoolsSetPermissions.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/instance_pools.R -\name{instancePoolsSetPermissions} -\alias{instancePoolsSetPermissions} -\title{Set instance pool permissions.} -\usage{ -instancePoolsSetPermissions( - client, - instance_pool_id, - access_control_list = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{instance_pool_id}{Required. The instance pool for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets permissions on an instance pool. Instance pools can inherit permissions -from their root object. -} diff --git a/man/instancePoolsUpdatePermissions.Rd b/man/instancePoolsUpdatePermissions.Rd deleted file mode 100644 index 99354aa1..00000000 --- a/man/instancePoolsUpdatePermissions.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/instance_pools.R -\name{instancePoolsUpdatePermissions} -\alias{instancePoolsUpdatePermissions} -\title{Update instance pool permissions.} -\usage{ -instancePoolsUpdatePermissions( - client, - instance_pool_id, - access_control_list = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{instance_pool_id}{Required. The instance pool for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Updates the permissions on an instance pool. Instance pools can inherit -permissions from their root object. -} diff --git a/man/instanceProfilesAdd.Rd b/man/instanceProfilesAdd.Rd deleted file mode 100644 index ff93816d..00000000 --- a/man/instanceProfilesAdd.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/instance_profiles.R -\name{instanceProfilesAdd} -\alias{instanceProfilesAdd} -\title{Register an instance profile.} -\usage{ -instanceProfilesAdd( - client, - instance_profile_arn, - iam_role_arn = NULL, - is_meta_instance_profile = NULL, - skip_validation = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{instance_profile_arn}{Required. The AWS ARN of the instance profile to register with Databricks.} - -\item{iam_role_arn}{The AWS IAM role ARN of the role associated with the instance profile.} - -\item{is_meta_instance_profile}{Boolean flag indicating whether the instance profile should only be used in credential passthrough scenarios.} - -\item{skip_validation}{By default, Databricks validates that it has sufficient permissions to launch instances with the instance profile.} -} -\description{ -In the UI, you can select the instance profile when launching clusters. This -API is only available to admin users. 
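Similarly for install_cluster_library, added earlier in this change set: libraries takes a list of library specifications. The spec shape below mirrors the REST API's JSON form and is an assumption, as is the cluster ID:

install_cluster_library(client,
  cluster_id = "0123-456789-abcdefgh",
  libraries = list(
    list(pypi = list(package = "simplejson")),
    list(jar = "dbfs:/FileStore/jars/custom.jar")))
# Installation is asynchronous; poll the cluster's library status afterwards.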
-} diff --git a/man/instanceProfilesEdit.Rd b/man/instanceProfilesEdit.Rd deleted file mode 100644 index 274df87f..00000000 --- a/man/instanceProfilesEdit.Rd +++ /dev/null @@ -1,38 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/instance_profiles.R -\name{instanceProfilesEdit} -\alias{instanceProfilesEdit} -\title{Edit an instance profile.} -\usage{ -instanceProfilesEdit( - client, - instance_profile_arn, - iam_role_arn = NULL, - is_meta_instance_profile = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{instance_profile_arn}{Required. The AWS ARN of the instance profile to register with Databricks.} - -\item{iam_role_arn}{The AWS IAM role ARN of the role associated with the instance profile.} - -\item{is_meta_instance_profile}{Boolean flag indicating whether the instance profile should only be used in credential passthrough scenarios.} -} -\description{ -The only supported field to change is the optional IAM role ARN associated -with the instance profile. It is required to specify the IAM role ARN if both -of the following are true: -} -\details{ -\itemize{ -\item Your role name and instance profile name do not match. The name is the part -after the last slash in each ARN. * You want to use the instance profile with -\href{https://docs.databricks.com/sql/admin/serverless.html}{Databricks SQL Serverless}. -} - -To understand where these fields are in the AWS console, see \href{https://docs.databricks.com/sql/admin/serverless.html}{Enable serverless SQL warehouses}. - -This API is only available to admin users. -} diff --git a/man/instanceProfilesList.Rd b/man/instanceProfilesList.Rd deleted file mode 100644 index 11afd5b4..00000000 --- a/man/instanceProfilesList.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/instance_profiles.R -\name{instanceProfilesList} -\alias{instanceProfilesList} -\title{List available instance profiles.} -\usage{ -instanceProfilesList(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -List the instance profiles that the calling user can use to launch a cluster. -} -\details{ -This API is available to all users. -} diff --git a/man/instanceProfilesRemove.Rd b/man/instanceProfilesRemove.Rd deleted file mode 100644 index 97f67661..00000000 --- a/man/instanceProfilesRemove.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/instance_profiles.R -\name{instanceProfilesRemove} -\alias{instanceProfilesRemove} -\title{Remove the instance profile.} -\usage{ -instanceProfilesRemove(client, instance_profile_arn) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{instance_profile_arn}{Required. The ARN of the instance profile to remove.} -} -\description{ -Remove the instance profile with the provided ARN. Existing clusters with -this instance profile will continue to function. -} -\details{ -This API is only accessible to admin users. 
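Per the conditions listed above, instanceProfilesEdit is mostly used to attach an IAM role ARN when the role and profile names differ; both ARNs below are hypothetical:

instanceProfilesEdit(client,
  instance_profile_arn = "arn:aws:iam::123456789012:instance-profile/dbx-ec2",
  iam_role_arn = "arn:aws:iam::123456789012:role/dbx-ec2-role")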
-} diff --git a/man/ipAccessListsCreate.Rd b/man/ipAccessListsCreate.Rd deleted file mode 100644 index 4ea9e40e..00000000 --- a/man/ipAccessListsCreate.Rd +++ /dev/null @@ -1,37 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/ip_access_lists.R -\name{ipAccessListsCreate} -\alias{ipAccessListsCreate} -\title{Create access list.} -\usage{ -ipAccessListsCreate(client, label, list_type, ip_addresses = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{label}{Required. Label for the IP access list.} - -\item{list_type}{Required. Type of IP access list.} - -\item{ip_addresses}{This field has no description yet.} -} -\description{ -Creates an IP access list for this workspace. -} -\details{ -A list can be an allow list or a block list. See the top of this file for a -description of how the server treats allow lists and block lists at runtime. - -When creating or updating an IP access list: -\itemize{ -\item For all allow lists and block lists combined, the API supports a maximum of -1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to -exceed that number return error 400 with \code{error_code} value \code{QUOTA_EXCEEDED}. -\item If the new list would block the calling user's current IP, error 400 is -returned with \code{error_code} value \code{INVALID_STATE}. -} - -It can take a few minutes for the changes to take effect. \strong{Note}: Your new -IP access list has no effect until you enable the feature. See -:method:workspaceconf/setStatus -} diff --git a/man/ipAccessListsDelete.Rd b/man/ipAccessListsDelete.Rd deleted file mode 100644 index 7c9c6fb2..00000000 --- a/man/ipAccessListsDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/ip_access_lists.R -\name{ipAccessListsDelete} -\alias{ipAccessListsDelete} -\title{Delete access list.} -\usage{ -ipAccessListsDelete(client, ip_access_list_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{ip_access_list_id}{Required. The ID for the corresponding IP access list.} -} -\description{ -Deletes an IP access list, specified by its list ID. -} diff --git a/man/ipAccessListsGet.Rd b/man/ipAccessListsGet.Rd deleted file mode 100644 index 8328989e..00000000 --- a/man/ipAccessListsGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/ip_access_lists.R -\name{ipAccessListsGet} -\alias{ipAccessListsGet} -\title{Get access list.} -\usage{ -ipAccessListsGet(client, ip_access_list_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{ip_access_list_id}{Required. The ID for the corresponding IP access list.} -} -\description{ -Gets an IP access list, specified by its list ID. -} diff --git a/man/ipAccessListsList.Rd b/man/ipAccessListsList.Rd deleted file mode 100644 index 0e27cf2d..00000000 --- a/man/ipAccessListsList.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/ip_access_lists.R -\name{ipAccessListsList} -\alias{ipAccessListsList} -\title{Get access lists.} -\usage{ -ipAccessListsList(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets all IP access lists for the specified workspace. 
-} diff --git a/man/ipAccessListsReplace.Rd b/man/ipAccessListsReplace.Rd deleted file mode 100644 index c1a6bad0..00000000 --- a/man/ipAccessListsReplace.Rd +++ /dev/null @@ -1,43 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/ip_access_lists.R -\name{ipAccessListsReplace} -\alias{ipAccessListsReplace} -\title{Replace access list.} -\usage{ -ipAccessListsReplace( - client, - ip_access_list_id, - label, - list_type, - enabled, - ip_addresses = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{ip_access_list_id}{Required. The ID for the corresponding IP access list.} - -\item{label}{Required. Label for the IP access list.} - -\item{list_type}{Required. Type of IP access list.} - -\item{enabled}{Required. Specifies whether this IP access list is enabled.} - -\item{ip_addresses}{This field has no description yet.} -} -\description{ -Replaces an IP access list, specified by its ID. -} -\details{ -A list can include allow lists and block lists. See the top of this file for -a description of how the server treats allow lists and block lists at run -time. When replacing an IP access list: * For all allow lists and block lists -combined, the API supports a maximum of 1000 IP/CIDR values, where one CIDR -counts as a single value. Attempts to exceed that number return error 400 -with \code{error_code} value \code{QUOTA_EXCEEDED}. * If the resulting list would block -the calling user's current IP, error 400 is returned with \code{error_code} value -\code{INVALID_STATE}. It can take a few minutes for the changes to take effect. -Note that your resulting IP access list has no effect until you enable the -feature. See :method:workspaceconf/setStatus. -} diff --git a/man/ipAccessListsUpdate.Rd b/man/ipAccessListsUpdate.Rd deleted file mode 100644 index aef947f7..00000000 --- a/man/ipAccessListsUpdate.Rd +++ /dev/null @@ -1,49 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/ip_access_lists.R -\name{ipAccessListsUpdate} -\alias{ipAccessListsUpdate} -\title{Update access list.} -\usage{ -ipAccessListsUpdate( - client, - ip_access_list_id, - enabled = NULL, - ip_addresses = NULL, - label = NULL, - list_type = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{ip_access_list_id}{Required. The ID for the corresponding IP access list.} - -\item{enabled}{Specifies whether this IP access list is enabled.} - -\item{ip_addresses}{This field has no description yet.} - -\item{label}{Label for the IP access list.} - -\item{list_type}{Type of IP access list.} -} -\description{ -Updates an existing IP access list, specified by its ID. -} -\details{ -A list can include allow lists and block lists. See the top of this file for -a description of how the server treats allow lists and block lists at run -time. - -When updating an IP access list: -\itemize{ -\item For all allow lists and block lists combined, the API supports a maximum of -1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to -exceed that number return error 400 with \code{error_code} value \code{QUOTA_EXCEEDED}. -\item If the updated list would block the calling user's current IP, error 400 is -returned with \code{error_code} value \code{INVALID_STATE}. -} - -It can take a few minutes for the changes to take effect. Note that your -resulting IP access list has no effect until you enable the feature. See -:method:workspaceconf/setStatus. 
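All three mutating IP-access-list calls documented above share the 1000-value quota and the INVALID_STATE self-lockout check. A creation sketch; ALLOW as the list_type is an assumption about the accepted enum values:

ipAccessListsCreate(client,
  label = "office-egress",
  list_type = "ALLOW",
  ip_addresses = list("203.0.113.0/24"))  # one CIDR counts as one value
# As noted above, the list has no effect until the feature itself is enabled.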
-} diff --git a/man/jobsCancelAllRuns.Rd b/man/jobsCancelAllRuns.Rd deleted file mode 100644 index 0faaaf39..00000000 --- a/man/jobsCancelAllRuns.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsCancelAllRuns} -\alias{jobsCancelAllRuns} -\title{Cancel all runs of a job.} -\usage{ -jobsCancelAllRuns(client, all_queued_runs = NULL, job_id = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{all_queued_runs}{Optional boolean parameter to cancel all queued runs.} - -\item{job_id}{The canonical identifier of the job to cancel all runs of.} -} -\description{ -Cancels all active runs of a job. The runs are canceled asynchronously, so it -doesn't prevent new runs from being started. -} diff --git a/man/jobsCancelRun.Rd b/man/jobsCancelRun.Rd deleted file mode 100644 index 08ceb5eb..00000000 --- a/man/jobsCancelRun.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsCancelRun} -\alias{jobsCancelRun} -\title{Cancel a run.} -\usage{ -jobsCancelRun(client, run_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{run_id}{Required. This field is required.} -} -\description{ -Cancels a job run or a task run. The run is canceled asynchronously, so it -may still be running when this request completes. -} diff --git a/man/jobsCancelRunAndWait.Rd b/man/jobsCancelRunAndWait.Rd deleted file mode 100644 index 8b926eeb..00000000 --- a/man/jobsCancelRunAndWait.Rd +++ /dev/null @@ -1,27 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsCancelRunAndWait} -\alias{jobsCancelRunAndWait} -\title{Cancel a run.} -\usage{ -jobsCancelRunAndWait(client, run_id, timeout = 20, callback = cli_reporter) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{run_id}{Required. This field is required.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Jobs on Databricks reach -TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Jobs is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Cancels a job run or a task run. The run is canceled asynchronously, so it -may still be running when this request completes. -} diff --git a/man/jobsCreate.Rd b/man/jobsCreate.Rd deleted file mode 100644 index 90fbebd4..00000000 --- a/man/jobsCreate.Rd +++ /dev/null @@ -1,85 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsCreate} -\alias{jobsCreate} -\title{Create a new job.} -\usage{ -jobsCreate( - client, - access_control_list = NULL, - compute = NULL, - continuous = NULL, - deployment = NULL, - description = NULL, - edit_mode = NULL, - email_notifications = NULL, - format = NULL, - git_source = NULL, - health = NULL, - job_clusters = NULL, - max_concurrent_runs = NULL, - name = NULL, - notification_settings = NULL, - parameters = NULL, - queue = NULL, - run_as = NULL, - schedule = NULL, - tags = NULL, - tasks = NULL, - timeout_seconds = NULL, - trigger = NULL, - webhook_notifications = NULL -) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{access_control_list}{List of permissions to set on the job.} - -\item{compute}{A list of compute requirements that can be referenced by tasks of this job.} - -\item{continuous}{An optional continuous property for this job.} - -\item{deployment}{Deployment information for jobs managed by external sources.} - -\item{description}{An optional description for the job.} - -\item{edit_mode}{Edit mode of the job.} - -\item{email_notifications}{An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.} - -\item{format}{Used to tell what is the format of the job.} - -\item{git_source}{An optional specification for a remote Git repository containing the source code used by tasks.} - -\item{health}{An optional set of health rules that can be defined for this job.} - -\item{job_clusters}{A list of job cluster specifications that can be shared and reused by tasks of this job.} - -\item{max_concurrent_runs}{An optional maximum allowed number of concurrent runs of the job.} - -\item{name}{An optional name for the job.} - -\item{notification_settings}{Optional notification settings that are used when sending notifications to each of the \code{email_notifications} and \code{webhook_notifications} for this job.} - -\item{parameters}{Job-level parameter definitions.} - -\item{queue}{The queue settings of the job.} - -\item{run_as}{Write-only setting, available only in Create/Update/Reset and Submit calls.} - -\item{schedule}{An optional periodic schedule for this job.} - -\item{tags}{A map of tags associated with the job.} - -\item{tasks}{A list of task specifications to be executed by this job.} - -\item{timeout_seconds}{An optional timeout applied to each run of this job.} - -\item{trigger}{A configuration to trigger a run when certain conditions are met.} - -\item{webhook_notifications}{A collection of system notification IDs to notify when runs of this job begin or complete.} -} -\description{ -Create a new job. -} diff --git a/man/jobsDelete.Rd b/man/jobsDelete.Rd deleted file mode 100644 index f17a436a..00000000 --- a/man/jobsDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsDelete} -\alias{jobsDelete} -\title{Delete a job.} -\usage{ -jobsDelete(client, job_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{job_id}{Required. The canonical identifier of the job to delete.} -} -\description{ -Deletes a job. -} diff --git a/man/jobsDeleteRun.Rd b/man/jobsDeleteRun.Rd deleted file mode 100644 index ff183728..00000000 --- a/man/jobsDeleteRun.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsDeleteRun} -\alias{jobsDeleteRun} -\title{Delete a job run.} -\usage{ -jobsDeleteRun(client, run_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{run_id}{Required. The canonical identifier of the run for which to retrieve the metadata.} -} -\description{ -Deletes a non-active run. Returns an error if the run is active. 
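jobsCancelRun and jobsCancelRunAndWait, documented above, differ only in blocking behaviour; a sketch with a made-up run ID:

# Returns immediately; the run may still be shutting down.
jobsCancelRun(client, run_id = 123456)

# Blocks (default 20 minutes) until the run reaches TERMINATED or SKIPPED.
jobsCancelRunAndWait(client, run_id = 123456, timeout = 20)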
-} diff --git a/man/jobsExportRun.Rd b/man/jobsExportRun.Rd deleted file mode 100644 index d66eef7e..00000000 --- a/man/jobsExportRun.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsExportRun} -\alias{jobsExportRun} -\title{Export and retrieve a job run.} -\usage{ -jobsExportRun(client, run_id, views_to_export = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{run_id}{Required. The canonical identifier for the run.} - -\item{views_to_export}{Which views to export (CODE, DASHBOARDS, or ALL).} -} -\description{ -Export and retrieve the job run task. -} diff --git a/man/jobsGet.Rd b/man/jobsGet.Rd deleted file mode 100644 index 168be343..00000000 --- a/man/jobsGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsGet} -\alias{jobsGet} -\title{Get a single job.} -\usage{ -jobsGet(client, job_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{job_id}{Required. The canonical identifier of the job to retrieve information about.} -} -\description{ -Retrieves the details for a single job. -} diff --git a/man/jobsGetPermissionLevels.Rd b/man/jobsGetPermissionLevels.Rd deleted file mode 100644 index 26f922cf..00000000 --- a/man/jobsGetPermissionLevels.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsGetPermissionLevels} -\alias{jobsGetPermissionLevels} -\title{Get job permission levels.} -\usage{ -jobsGetPermissionLevels(client, job_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{job_id}{Required. The job for which to get or manage permissions.} -} -\description{ -Gets the permission levels that a user can have on an object. -} diff --git a/man/jobsGetPermissions.Rd b/man/jobsGetPermissions.Rd deleted file mode 100644 index ea202f21..00000000 --- a/man/jobsGetPermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsGetPermissions} -\alias{jobsGetPermissions} -\title{Get job permissions.} -\usage{ -jobsGetPermissions(client, job_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{job_id}{Required. The job for which to get or manage permissions.} -} -\description{ -Gets the permissions of a job. Jobs can inherit permissions from their root -object. -} diff --git a/man/jobsGetRun.Rd b/man/jobsGetRun.Rd deleted file mode 100644 index 02c13718..00000000 --- a/man/jobsGetRun.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsGetRun} -\alias{jobsGetRun} -\title{Get a single job run.} -\usage{ -jobsGetRun( - client, - run_id, - include_history = NULL, - include_resolved_values = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{run_id}{Required. The canonical identifier of the run for which to retrieve the metadata.} - -\item{include_history}{Whether to include the repair history in the response.} - -\item{include_resolved_values}{Whether to include resolved parameter values in the response.} -} -\description{ -Retrieve the metadata of a run. 
-} diff --git a/man/jobsGetRunAndWait.Rd b/man/jobsGetRunAndWait.Rd deleted file mode 100644 index 8513d563..00000000 --- a/man/jobsGetRunAndWait.Rd +++ /dev/null @@ -1,37 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsGetRunAndWait} -\alias{jobsGetRunAndWait} -\title{Get a single job run.} -\usage{ -jobsGetRunAndWait( - client, - run_id, - include_history = NULL, - include_resolved_values = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{run_id}{Required. The canonical identifier of the run for which to retrieve the metadata.} - -\item{include_history}{Whether to include the repair history in the response.} - -\item{include_resolved_values}{Whether to include resolved parameter values in the response.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Jobs on Databricks reach -TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Jobs is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Retrieve the metadata of a run. -} diff --git a/man/jobsGetRunOutput.Rd b/man/jobsGetRunOutput.Rd deleted file mode 100644 index 1ff9be6d..00000000 --- a/man/jobsGetRunOutput.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsGetRunOutput} -\alias{jobsGetRunOutput} -\title{Get the output for a single run.} -\usage{ -jobsGetRunOutput(client, run_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{run_id}{Required. The canonical identifier for the run.} -} -\description{ -Retrieve the output and metadata of a single task run. When a notebook task -returns a value through the \code{dbutils.notebook.exit()} call, you can use this -endpoint to retrieve that value. Databricks restricts this API to returning -the first 5 MB of the output. To return a larger result, you can store job -results in a cloud storage service. -} -\details{ -This endpoint validates that the \strong{run_id} parameter is valid and returns an -HTTP status code 400 if the \strong{run_id} parameter is invalid. Runs are -automatically removed after 60 days. If you to want to reference them beyond -60 days, you must save old run results before they expire. -} diff --git a/man/jobsList.Rd b/man/jobsList.Rd deleted file mode 100644 index 2a923c78..00000000 --- a/man/jobsList.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsList} -\alias{jobsList} -\title{List jobs.} -\usage{ -jobsList( - client, - expand_tasks = NULL, - limit = NULL, - name = NULL, - offset = NULL, - page_token = NULL -) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{expand_tasks}{Whether to include task and cluster details in the response.} - -\item{limit}{The number of jobs to return.} - -\item{name}{A filter on the list based on the exact (case insensitive) job name.} - -\item{offset}{The offset of the first job to return, relative to the most recently created job.} - -\item{page_token}{Use \code{next_page_token} or \code{prev_page_token} returned from the previous request to list the next or previous page of jobs respectively.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Retrieves a list of jobs. -} diff --git a/man/jobsListRuns.Rd b/man/jobsListRuns.Rd deleted file mode 100644 index 4c5dd1a7..00000000 --- a/man/jobsListRuns.Rd +++ /dev/null @@ -1,49 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsListRuns} -\alias{jobsListRuns} -\title{List job runs.} -\usage{ -jobsListRuns( - client, - active_only = NULL, - completed_only = NULL, - expand_tasks = NULL, - job_id = NULL, - limit = NULL, - offset = NULL, - page_token = NULL, - run_type = NULL, - start_time_from = NULL, - start_time_to = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{active_only}{If active_only is \code{true}, only active runs are included in the results; otherwise, lists both active and completed runs.} - -\item{completed_only}{If completed_only is \code{true}, only completed runs are included in the results; otherwise, lists both active and completed runs.} - -\item{expand_tasks}{Whether to include task and cluster details in the response.} - -\item{job_id}{The job for which to list runs.} - -\item{limit}{The number of runs to return.} - -\item{offset}{The offset of the first run to return, relative to the most recent run.} - -\item{page_token}{Use \code{next_page_token} or \code{prev_page_token} returned from the previous request to list the next or previous page of runs respectively.} - -\item{run_type}{The type of runs to return.} - -\item{start_time_from}{Show runs that started \emph{at or after} this value.} - -\item{start_time_to}{Show runs that started \emph{at or before} this value.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -List runs in descending order by start time. -} diff --git a/man/jobsRepairRun.Rd b/man/jobsRepairRun.Rd deleted file mode 100644 index fbcb164c..00000000 --- a/man/jobsRepairRun.Rd +++ /dev/null @@ -1,60 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsRepairRun} -\alias{jobsRepairRun} -\title{Repair a job run.} -\usage{ -jobsRepairRun( - client, - run_id, - dbt_commands = NULL, - jar_params = NULL, - job_parameters = NULL, - latest_repair_id = NULL, - notebook_params = NULL, - pipeline_params = NULL, - python_named_params = NULL, - python_params = NULL, - rerun_all_failed_tasks = NULL, - rerun_dependent_tasks = NULL, - rerun_tasks = NULL, - spark_submit_params = NULL, - sql_params = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{run_id}{Required. 
The job run ID of the run to repair.} - -\item{dbt_commands}{An array of commands to execute for jobs with the dbt task, for example \verb{'dbt_commands': ['dbt deps', 'dbt seed', 'dbt run']}.} - -\item{jar_params}{A list of parameters for jobs with Spark JAR tasks, for example \verb{'jar_params': ['john doe', '35']}.} - -\item{job_parameters}{Job-level parameters used in the run.} - -\item{latest_repair_id}{The ID of the latest repair.} - -\item{notebook_params}{A map from keys to values for jobs with notebook task, for example \verb{'notebook_params': \{'name': 'john doe', 'age': '35'\}}.} - -\item{pipeline_params}{This field has no description yet.} - -\item{python_named_params}{A map from keys to values for jobs with Python wheel task, for example \verb{'python_named_params': \{'name': 'task', 'data': 'dbfs:/path/to/data.json'\}}.} - -\item{python_params}{A list of parameters for jobs with Python tasks, for example \verb{'python_params': ['john doe', '35']}.} - -\item{rerun_all_failed_tasks}{If true, repair all failed tasks.} - -\item{rerun_dependent_tasks}{If true, repair all tasks that depend on the tasks in \code{rerun_tasks}, even if they were previously successful.} - -\item{rerun_tasks}{The task keys of the task runs to repair.} - -\item{spark_submit_params}{A list of parameters for jobs with spark submit task, for example \verb{'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']}.} - -\item{sql_params}{A map from keys to values for jobs with SQL task, for example \verb{'sql_params': \{'name': 'john doe', 'age': '35'\}}.} -} -\description{ -Re-run one or more tasks. Tasks are re-run as part of the original job run. -They use the current job and task settings, and can be viewed in the history -for the original job run. -} diff --git a/man/jobsRepairRunAndWait.Rd b/man/jobsRepairRunAndWait.Rd deleted file mode 100644 index 7708b997..00000000 --- a/man/jobsRepairRunAndWait.Rd +++ /dev/null @@ -1,72 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsRepairRunAndWait} -\alias{jobsRepairRunAndWait} -\title{Repair a job run.} -\usage{ -jobsRepairRunAndWait( - client, - run_id, - dbt_commands = NULL, - jar_params = NULL, - job_parameters = NULL, - latest_repair_id = NULL, - notebook_params = NULL, - pipeline_params = NULL, - python_named_params = NULL, - python_params = NULL, - rerun_all_failed_tasks = NULL, - rerun_dependent_tasks = NULL, - rerun_tasks = NULL, - spark_submit_params = NULL, - sql_params = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{run_id}{Required. 
The job run ID of the run to repair.} - -\item{dbt_commands}{An array of commands to execute for jobs with the dbt task, for example \verb{'dbt_commands': ['dbt deps', 'dbt seed', 'dbt run']}.} - -\item{jar_params}{A list of parameters for jobs with Spark JAR tasks, for example \verb{'jar_params': ['john doe', '35']}.} - -\item{job_parameters}{Job-level parameters used in the run.} - -\item{latest_repair_id}{The ID of the latest repair.} - -\item{notebook_params}{A map from keys to values for jobs with notebook task, for example \verb{'notebook_params': \{'name': 'john doe', 'age': '35'\}}.} - -\item{pipeline_params}{This field has no description yet.} - -\item{python_named_params}{A map from keys to values for jobs with Python wheel task, for example \verb{'python_named_params': \{'name': 'task', 'data': 'dbfs:/path/to/data.json'\}}.} - -\item{python_params}{A list of parameters for jobs with Python tasks, for example \verb{'python_params': ['john doe', '35']}.} - -\item{rerun_all_failed_tasks}{If true, repair all failed tasks.} - -\item{rerun_dependent_tasks}{If true, repair all tasks that depend on the tasks in \code{rerun_tasks}, even if they were previously successful.} - -\item{rerun_tasks}{The task keys of the task runs to repair.} - -\item{spark_submit_params}{A list of parameters for jobs with spark submit task, for example \verb{'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']}.} - -\item{sql_params}{A map from keys to values for jobs with SQL task, for example \verb{'sql_params': \{'name': 'john doe', 'age': '35'\}}.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Jobs on Databricks reach -TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Jobs is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Re-run one or more tasks. Tasks are re-run as part of the original job run. -They use the current job and task settings, and can be viewed in the history -for the original job run. -} diff --git a/man/jobsReset.Rd b/man/jobsReset.Rd deleted file mode 100644 index 4f9c763d..00000000 --- a/man/jobsReset.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsReset} -\alias{jobsReset} -\title{Update all job settings (reset).} -\usage{ -jobsReset(client, job_id, new_settings) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{job_id}{Required. The canonical identifier of the job to reset.} - -\item{new_settings}{Required. The new settings of the job.} -} -\description{ -Overwrite all settings for the given job. Use the \href{:method:jobs/update}{\emph{Update} endpoint} to update job settings partially. 
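The reset/update pair documented here is easy to confuse, so a short hedged sketch of the difference (the job ID and settings values are placeholders, and passing new_settings as a plain R list is an assumption):

# jobsReset overwrites every setting of the job:
jobsReset(client, job_id = 123,
          new_settings = list(name = "nightly-etl", max_concurrent_runs = 1))
# jobsUpdate (documented below) patches only the fields you pass:
jobsUpdate(client, job_id = 123, new_settings = list(name = "nightly-etl-v2"))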
-} diff --git a/man/jobsRunNow.Rd b/man/jobsRunNow.Rd deleted file mode 100644 index dfbe9df2..00000000 --- a/man/jobsRunNow.Rd +++ /dev/null @@ -1,52 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsRunNow} -\alias{jobsRunNow} -\title{Trigger a new job run.} -\usage{ -jobsRunNow( - client, - job_id, - dbt_commands = NULL, - idempotency_token = NULL, - jar_params = NULL, - job_parameters = NULL, - notebook_params = NULL, - pipeline_params = NULL, - python_named_params = NULL, - python_params = NULL, - queue = NULL, - spark_submit_params = NULL, - sql_params = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{job_id}{Required. The ID of the job to be executed.} - -\item{dbt_commands}{An array of commands to execute for jobs with the dbt task, for example \verb{'dbt_commands': ['dbt deps', 'dbt seed', 'dbt run']}.} - -\item{idempotency_token}{An optional token to guarantee the idempotency of job run requests.} - -\item{jar_params}{A list of parameters for jobs with Spark JAR tasks, for example \verb{'jar_params': ['john doe', '35']}.} - -\item{job_parameters}{Job-level parameters used in the run.} - -\item{notebook_params}{A map from keys to values for jobs with notebook task, for example \verb{'notebook_params': \{'name': 'john doe', 'age': '35'\}}.} - -\item{pipeline_params}{This field has no description yet.} - -\item{python_named_params}{A map from keys to values for jobs with Python wheel task, for example \verb{'python_named_params': \{'name': 'task', 'data': 'dbfs:/path/to/data.json'\}}.} - -\item{python_params}{A list of parameters for jobs with Python tasks, for example \verb{'python_params': ['john doe', '35']}.} - -\item{queue}{The queue settings of the run.} - -\item{spark_submit_params}{A list of parameters for jobs with spark submit task, for example \verb{'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']}.} - -\item{sql_params}{A map from keys to values for jobs with SQL task, for example \verb{'sql_params': \{'name': 'john doe', 'age': '35'\}}.} -} -\description{ -Run a job and return the \code{run_id} of the triggered run. -} diff --git a/man/jobsRunNowAndWait.Rd b/man/jobsRunNowAndWait.Rd deleted file mode 100644 index 3a4131af..00000000 --- a/man/jobsRunNowAndWait.Rd +++ /dev/null @@ -1,64 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsRunNowAndWait} -\alias{jobsRunNowAndWait} -\title{Trigger a new job run.} -\usage{ -jobsRunNowAndWait( - client, - job_id, - dbt_commands = NULL, - idempotency_token = NULL, - jar_params = NULL, - job_parameters = NULL, - notebook_params = NULL, - pipeline_params = NULL, - python_named_params = NULL, - python_params = NULL, - queue = NULL, - spark_submit_params = NULL, - sql_params = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{job_id}{Required. 
The ID of the job to be executed.} - -\item{dbt_commands}{An array of commands to execute for jobs with the dbt task, for example \verb{'dbt_commands': ['dbt deps', 'dbt seed', 'dbt run']}.} - -\item{idempotency_token}{An optional token to guarantee the idempotency of job run requests.} - -\item{jar_params}{A list of parameters for jobs with Spark JAR tasks, for example \verb{'jar_params': ['john doe', '35']}.} - -\item{job_parameters}{Job-level parameters used in the run.} - -\item{notebook_params}{A map from keys to values for jobs with notebook task, for example \verb{'notebook_params': \{'name': 'john doe', 'age': '35'\}}.} - -\item{pipeline_params}{This field has no description yet.} - -\item{python_named_params}{A map from keys to values for jobs with Python wheel task, for example \verb{'python_named_params': \{'name': 'task', 'data': 'dbfs:/path/to/data.json'\}}.} - -\item{python_params}{A list of parameters for jobs with Python tasks, for example \verb{'python_params': ['john doe', '35']}.} - -\item{queue}{The queue settings of the run.} - -\item{spark_submit_params}{A list of parameters for jobs with spark submit task, for example \verb{'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']}.} - -\item{sql_params}{A map from keys to values for jobs with SQL task, for example \verb{'sql_params': \{'name': 'john doe', 'age': '35'\}}.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Jobs on Databricks reach -TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Jobs is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Run a job and return the \code{run_id} of the triggered run. -} diff --git a/man/jobsSetPermissions.Rd b/man/jobsSetPermissions.Rd deleted file mode 100644 index 01062527..00000000 --- a/man/jobsSetPermissions.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsSetPermissions} -\alias{jobsSetPermissions} -\title{Set job permissions.} -\usage{ -jobsSetPermissions(client, job_id, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{job_id}{Required. The job for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets permissions on a job. Jobs can inherit permissions from their root -object. -} diff --git a/man/jobsSubmit.Rd b/man/jobsSubmit.Rd deleted file mode 100644 index 58fa92d8..00000000 --- a/man/jobsSubmit.Rd +++ /dev/null @@ -1,52 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsSubmit} -\alias{jobsSubmit} -\title{Create and trigger a one-time run.} -\usage{ -jobsSubmit( - client, - access_control_list = NULL, - email_notifications = NULL, - git_source = NULL, - health = NULL, - idempotency_token = NULL, - notification_settings = NULL, - queue = NULL, - run_name = NULL, - tasks = NULL, - timeout_seconds = NULL, - webhook_notifications = NULL -) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{access_control_list}{List of permissions to set on the job.} - -\item{email_notifications}{An optional set of email addresses notified when the run begins or completes.} - -\item{git_source}{An optional specification for a remote Git repository containing the source code used by tasks.} - -\item{health}{An optional set of health rules that can be defined for this job.} - -\item{idempotency_token}{An optional token that can be used to guarantee the idempotency of job run requests.} - -\item{notification_settings}{Optional notification settings that are used when sending notifications to each of the \code{email_notifications} and \code{webhook_notifications} for this run.} - -\item{queue}{The queue settings of the one-time run.} - -\item{run_name}{An optional name for the run.} - -\item{tasks}{This field has no description yet.} - -\item{timeout_seconds}{An optional timeout applied to each run of this job.} - -\item{webhook_notifications}{A collection of system notification IDs to notify when the run begins or completes.} -} -\description{ -Submit a one-time run. This endpoint allows you to submit a workload directly -without creating a job. Runs submitted using this endpoint don’t display in -the UI. Use the \code{jobs/runs/get} API to check the run state after the job is -submitted. -} diff --git a/man/jobsSubmitAndWait.Rd b/man/jobsSubmitAndWait.Rd deleted file mode 100644 index 244c3fd0..00000000 --- a/man/jobsSubmitAndWait.Rd +++ /dev/null @@ -1,64 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsSubmitAndWait} -\alias{jobsSubmitAndWait} -\title{Create and trigger a one-time run.} -\usage{ -jobsSubmitAndWait( - client, - access_control_list = NULL, - email_notifications = NULL, - git_source = NULL, - health = NULL, - idempotency_token = NULL, - notification_settings = NULL, - queue = NULL, - run_name = NULL, - tasks = NULL, - timeout_seconds = NULL, - webhook_notifications = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{access_control_list}{List of permissions to set on the job.} - -\item{email_notifications}{An optional set of email addresses notified when the run begins or completes.} - -\item{git_source}{An optional specification for a remote Git repository containing the source code used by tasks.} - -\item{health}{An optional set of health rules that can be defined for this job.} - -\item{idempotency_token}{An optional token that can be used to guarantee the idempotency of job run requests.} - -\item{notification_settings}{Optional notification settings that are used when sending notifications to each of the \code{email_notifications} and \code{webhook_notifications} for this run.} - -\item{queue}{The queue settings of the one-time run.} - -\item{run_name}{An optional name for the run.} - -\item{tasks}{This field has no description yet.} - -\item{timeout_seconds}{An optional timeout applied to each run of this job.} - -\item{webhook_notifications}{A collection of system notification IDs to notify when the run begins or completes.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. 
By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Jobs on Databricks reach -TERMINATED or SKIPPED state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Jobs is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Submit a one-time run. This endpoint allows you to submit a workload directly -without creating a job. Runs submitted using this endpoint don’t display in -the UI. Use the \code{jobs/runs/get} API to check the run state after the job is -submitted. -} diff --git a/man/jobsUpdate.Rd b/man/jobsUpdate.Rd deleted file mode 100644 index d300d3a5..00000000 --- a/man/jobsUpdate.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsUpdate} -\alias{jobsUpdate} -\title{Update job settings partially.} -\usage{ -jobsUpdate(client, job_id, fields_to_remove = NULL, new_settings = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{job_id}{Required. The canonical identifier of the job to update.} - -\item{fields_to_remove}{Remove top-level fields in the job settings.} - -\item{new_settings}{The new settings for the job.} -} -\description{ -Add, update, or remove specific settings of an existing job. Use the \href{:method:jobs/reset}{\emph{Reset} endpoint} to overwrite all job settings. -} diff --git a/man/jobsUpdatePermissions.Rd b/man/jobsUpdatePermissions.Rd deleted file mode 100644 index 194c4ece..00000000 --- a/man/jobsUpdatePermissions.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/jobs.R -\name{jobsUpdatePermissions} -\alias{jobsUpdatePermissions} -\title{Update job permissions.} -\usage{ -jobsUpdatePermissions(client, job_id, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{job_id}{Required. The job for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Updates the permissions on a job. Jobs can inherit permissions from their -root object. -} diff --git a/man/lakehouseMonitorsCancelRefresh.Rd b/man/lakehouseMonitorsCancelRefresh.Rd deleted file mode 100644 index db5a98ac..00000000 --- a/man/lakehouseMonitorsCancelRefresh.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/lakehouse_monitors.R -\name{lakehouseMonitorsCancelRefresh} -\alias{lakehouseMonitorsCancelRefresh} -\title{Cancel refresh.} -\usage{ -lakehouseMonitorsCancelRefresh(client, full_name, refresh_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the table.} - -\item{refresh_id}{Required. ID of the refresh.} -} -\description{ -Cancel an active monitor refresh for the given refresh ID. -} -\details{ -The caller must either: 1. be an owner of the table's parent catalog 2. have -\strong{USE_CATALOG} on the table's parent catalog and be an owner of the table's -parent schema 3. have the following permissions: - \strong{USE_CATALOG} on the -table's parent catalog - \strong{USE_SCHEMA} on the table's parent schema - be an -owner of the table - -Additionally, the call must be made from the workspace where the monitor was -created. 
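A sketch of the monitor-refresh lifecycle that the cancel-refresh page above describes (the table name is a placeholder; lakehouseMonitorsRunRefresh is documented further below, and reading the ID from a refresh_id field on its response is an assumption):

# Queue a background metric refresh, then cancel it by ID:
ref <- lakehouseMonitorsRunRefresh(client, full_name = "main.sales.orders")
lakehouseMonitorsCancelRefresh(client, full_name = "main.sales.orders",
                               refresh_id = ref$refresh_id)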
-} diff --git a/man/lakehouseMonitorsCreate.Rd b/man/lakehouseMonitorsCreate.Rd deleted file mode 100644 index 5019964a..00000000 --- a/man/lakehouseMonitorsCreate.Rd +++ /dev/null @@ -1,70 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/lakehouse_monitors.R -\name{lakehouseMonitorsCreate} -\alias{lakehouseMonitorsCreate} -\title{Create a table monitor.} -\usage{ -lakehouseMonitorsCreate( - client, - full_name, - assets_dir, - output_schema_name, - baseline_table_name = NULL, - custom_metrics = NULL, - data_classification_config = NULL, - inference_log = NULL, - notifications = NULL, - schedule = NULL, - skip_builtin_dashboard = NULL, - slicing_exprs = NULL, - snapshot = NULL, - time_series = NULL, - warehouse_id = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the table.} - -\item{assets_dir}{Required. The directory to store monitoring assets (e.g.} - -\item{output_schema_name}{Required. Schema where output metric tables are created.} - -\item{baseline_table_name}{Name of the baseline table from which drift metrics are computed from.} - -\item{custom_metrics}{Custom metrics to compute on the monitored table.} - -\item{data_classification_config}{The data classification config for the monitor.} - -\item{inference_log}{Configuration for monitoring inference logs.} - -\item{notifications}{The notification settings for the monitor.} - -\item{schedule}{The schedule for automatically updating and refreshing metric tables.} - -\item{skip_builtin_dashboard}{Whether to skip creating a default dashboard summarizing data quality metrics.} - -\item{slicing_exprs}{List of column expressions to slice data with for targeted analysis.} - -\item{snapshot}{Configuration for monitoring snapshot tables.} - -\item{time_series}{Configuration for monitoring time series tables.} - -\item{warehouse_id}{Optional argument to specify the warehouse for dashboard creation.} -} -\description{ -Creates a new monitor for the specified table. -} -\details{ -The caller must either: 1. be an owner of the table's parent catalog, have -\strong{USE_SCHEMA} on the table's parent schema, and have \strong{SELECT} access on -the table 2. have \strong{USE_CATALOG} on the table's parent catalog, be an owner -of the table's parent schema, and have \strong{SELECT} access on the table. 3. -have the following permissions: - \strong{USE_CATALOG} on the table's parent -catalog - \strong{USE_SCHEMA} on the table's parent schema - be an owner of the -table. - -Workspace assets, such as the dashboard, will be created in the workspace -where this call was made. -} diff --git a/man/lakehouseMonitorsDelete.Rd b/man/lakehouseMonitorsDelete.Rd deleted file mode 100644 index c5f5d1dd..00000000 --- a/man/lakehouseMonitorsDelete.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/lakehouse_monitors.R -\name{lakehouseMonitorsDelete} -\alias{lakehouseMonitorsDelete} -\title{Delete a table monitor.} -\usage{ -lakehouseMonitorsDelete(client, full_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the table.} -} -\description{ -Deletes a monitor for the specified table. -} -\details{ -The caller must either: 1. be an owner of the table's parent catalog 2. have -\strong{USE_CATALOG} on the table's parent catalog and be an owner of the table's -parent schema 3. 
have the following permissions: - \strong{USE_CATALOG} on the -table's parent catalog - \strong{USE_SCHEMA} on the table's parent schema - be an -owner of the table. - -Additionally, the call must be made from the workspace where the monitor was -created. - -Note that the metric tables and dashboard will not be deleted as part of this -call; those assets must be manually cleaned up (if desired). -} diff --git a/man/lakehouseMonitorsGet.Rd b/man/lakehouseMonitorsGet.Rd deleted file mode 100644 index 593a7c4d..00000000 --- a/man/lakehouseMonitorsGet.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/lakehouse_monitors.R -\name{lakehouseMonitorsGet} -\alias{lakehouseMonitorsGet} -\title{Get a table monitor.} -\usage{ -lakehouseMonitorsGet(client, full_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the table.} -} -\description{ -Gets a monitor for the specified table. -} -\details{ -The caller must either: 1. be an owner of the table's parent catalog 2. have -\strong{USE_CATALOG} on the table's parent catalog and be an owner of the table's -parent schema. 3. have the following permissions: - \strong{USE_CATALOG} on the -table's parent catalog - \strong{USE_SCHEMA} on the table's parent schema - -\strong{SELECT} privilege on the table. - -The returned information includes configuration values, as well as -information on assets created by the monitor. Some information (e.g., -dashboard) may be filtered out if the caller is in a different workspace than -where the monitor was created. -} diff --git a/man/lakehouseMonitorsGetRefresh.Rd b/man/lakehouseMonitorsGetRefresh.Rd deleted file mode 100644 index dd8b570d..00000000 --- a/man/lakehouseMonitorsGetRefresh.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/lakehouse_monitors.R -\name{lakehouseMonitorsGetRefresh} -\alias{lakehouseMonitorsGetRefresh} -\title{Get refresh.} -\usage{ -lakehouseMonitorsGetRefresh(client, full_name, refresh_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the table.} - -\item{refresh_id}{Required. ID of the refresh.} -} -\description{ -Gets info about a specific monitor refresh using the given refresh ID. -} -\details{ -The caller must either: 1. be an owner of the table's parent catalog 2. have -\strong{USE_CATALOG} on the table's parent catalog and be an owner of the table's -parent schema 3. have the following permissions: - \strong{USE_CATALOG} on the -table's parent catalog - \strong{USE_SCHEMA} on the table's parent schema - -\strong{SELECT} privilege on the table. - -Additionally, the call must be made from the workspace where the monitor was -created. -} diff --git a/man/lakehouseMonitorsListRefreshes.Rd b/man/lakehouseMonitorsListRefreshes.Rd deleted file mode 100644 index 11faeccf..00000000 --- a/man/lakehouseMonitorsListRefreshes.Rd +++ /dev/null @@ -1,27 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/lakehouse_monitors.R -\name{lakehouseMonitorsListRefreshes} -\alias{lakehouseMonitorsListRefreshes} -\title{List refreshes.} -\usage{ -lakehouseMonitorsListRefreshes(client, full_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. 
Full name of the table.} -} -\description{ -Gets an array containing the history of the most recent refreshes (up to 25) -for this table. -} -\details{ -The caller must either: 1. be an owner of the table's parent catalog 2. have -\strong{USE_CATALOG} on the table's parent catalog and be an owner of the table's -parent schema 3. have the following permissions: - \strong{USE_CATALOG} on the -table's parent catalog - \strong{USE_SCHEMA} on the table's parent schema - -\strong{SELECT} privilege on the table. - -Additionally, the call must be made from the workspace where the monitor was -created. -} diff --git a/man/lakehouseMonitorsRunRefresh.Rd b/man/lakehouseMonitorsRunRefresh.Rd deleted file mode 100644 index bf26f453..00000000 --- a/man/lakehouseMonitorsRunRefresh.Rd +++ /dev/null @@ -1,27 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/lakehouse_monitors.R -\name{lakehouseMonitorsRunRefresh} -\alias{lakehouseMonitorsRunRefresh} -\title{Queue a metric refresh for a monitor.} -\usage{ -lakehouseMonitorsRunRefresh(client, full_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the table.} -} -\description{ -Queues a metric refresh on the monitor for the specified table. The refresh -will execute in the background. -} -\details{ -The caller must either: 1. be an owner of the table's parent catalog 2. have -\strong{USE_CATALOG} on the table's parent catalog and be an owner of the table's -parent schema 3. have the following permissions: - \strong{USE_CATALOG} on the -table's parent catalog - \strong{USE_SCHEMA} on the table's parent schema - be an -owner of the table - -Additionally, the call must be made from the workspace where the monitor was -created. -} diff --git a/man/lakehouseMonitorsUpdate.Rd b/man/lakehouseMonitorsUpdate.Rd deleted file mode 100644 index 9eb3b924..00000000 --- a/man/lakehouseMonitorsUpdate.Rd +++ /dev/null @@ -1,62 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/lakehouse_monitors.R -\name{lakehouseMonitorsUpdate} -\alias{lakehouseMonitorsUpdate} -\title{Update a table monitor.} -\usage{ -lakehouseMonitorsUpdate( - client, - full_name, - output_schema_name, - baseline_table_name = NULL, - custom_metrics = NULL, - data_classification_config = NULL, - inference_log = NULL, - notifications = NULL, - schedule = NULL, - slicing_exprs = NULL, - snapshot = NULL, - time_series = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the table.} - -\item{output_schema_name}{Required. Schema where output metric tables are created.} - -\item{baseline_table_name}{Name of the baseline table from which drift metrics are computed from.} - -\item{custom_metrics}{Custom metrics to compute on the monitored table.} - -\item{data_classification_config}{The data classification config for the monitor.} - -\item{inference_log}{Configuration for monitoring inference logs.} - -\item{notifications}{The notification settings for the monitor.} - -\item{schedule}{The schedule for automatically updating and refreshing metric tables.} - -\item{slicing_exprs}{List of column expressions to slice data with for targeted analysis.} - -\item{snapshot}{Configuration for monitoring snapshot tables.} - -\item{time_series}{Configuration for monitoring time series tables.} -} -\description{ -Updates a monitor for the specified table. -} -\details{ -The caller must either: 1. 
be an owner of the table's parent catalog 2. have -\strong{USE_CATALOG} on the table's parent catalog and be an owner of the table's -parent schema 3. have the following permissions: - \strong{USE_CATALOG} on the -table's parent catalog - \strong{USE_SCHEMA} on the table's parent schema - be an -owner of the table. - -Additionally, the call must be made from the workspace where the monitor was -created, and the caller must be the original creator of the monitor. - -Certain configuration fields, such as output asset identifiers, cannot be -updated. -} diff --git a/man/lakeviewCreate.Rd b/man/lakeviewCreate.Rd deleted file mode 100644 index 908fc392..00000000 --- a/man/lakeviewCreate.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/lakeview.R -\name{lakeviewCreate} -\alias{lakeviewCreate} -\title{Create dashboard.} -\usage{ -lakeviewCreate( - client, - display_name, - parent_path = NULL, - serialized_dashboard = NULL, - warehouse_id = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{display_name}{Required. The display name of the dashboard.} - -\item{parent_path}{The workspace path of the folder containing the dashboard.} - -\item{serialized_dashboard}{The contents of the dashboard in serialized string form.} - -\item{warehouse_id}{The warehouse ID used to run the dashboard.} -} -\description{ -Create a draft dashboard. -} diff --git a/man/lakeviewGet.Rd b/man/lakeviewGet.Rd deleted file mode 100644 index 4695edcf..00000000 --- a/man/lakeviewGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/lakeview.R -\name{lakeviewGet} -\alias{lakeviewGet} -\title{Get dashboard.} -\usage{ -lakeviewGet(client, dashboard_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{dashboard_id}{Required. UUID identifying the dashboard.} -} -\description{ -Get a draft dashboard. -} diff --git a/man/lakeviewGetPublished.Rd b/man/lakeviewGetPublished.Rd deleted file mode 100644 index d35d27ec..00000000 --- a/man/lakeviewGetPublished.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/lakeview.R -\name{lakeviewGetPublished} -\alias{lakeviewGetPublished} -\title{Get published dashboard.} -\usage{ -lakeviewGetPublished(client, dashboard_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{dashboard_id}{Required. UUID identifying the dashboard to be published.} -} -\description{ -Get the current published dashboard. -} diff --git a/man/lakeviewPublish.Rd b/man/lakeviewPublish.Rd deleted file mode 100644 index 2a371a12..00000000 --- a/man/lakeviewPublish.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/lakeview.R -\name{lakeviewPublish} -\alias{lakeviewPublish} -\title{Publish dashboard.} -\usage{ -lakeviewPublish( - client, - dashboard_id, - embed_credentials = NULL, - warehouse_id = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{dashboard_id}{Required. UUID identifying the dashboard to be published.} - -\item{embed_credentials}{Flag to indicate if the publisher's credentials should be embedded in the published dashboard.} - -\item{warehouse_id}{The ID of the warehouse that can be used to override the warehouse which was set in the draft.} -} -\description{ -Publish the current draft dashboard. 
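A minimal sketch of the draft-then-publish flow covered by the Lakeview pages above (reading dashboard_id off the create response is an assumption):

d <- lakeviewCreate(client, display_name = "Sales overview")
lakeviewPublish(client, dashboard_id = d$dashboard_id, embed_credentials = TRUE)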
-} diff --git a/man/lakeviewTrash.Rd b/man/lakeviewTrash.Rd deleted file mode 100644 index 5c1565fd..00000000 --- a/man/lakeviewTrash.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/lakeview.R -\name{lakeviewTrash} -\alias{lakeviewTrash} -\title{Trash dashboard.} -\usage{ -lakeviewTrash(client, dashboard_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{dashboard_id}{Required. UUID identifying the dashboard.} -} -\description{ -Trash a dashboard. -} diff --git a/man/lakeviewUpdate.Rd b/man/lakeviewUpdate.Rd deleted file mode 100644 index aee32765..00000000 --- a/man/lakeviewUpdate.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/lakeview.R -\name{lakeviewUpdate} -\alias{lakeviewUpdate} -\title{Update dashboard.} -\usage{ -lakeviewUpdate( - client, - dashboard_id, - display_name = NULL, - etag = NULL, - serialized_dashboard = NULL, - warehouse_id = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{dashboard_id}{Required. UUID identifying the dashboard.} - -\item{display_name}{The display name of the dashboard.} - -\item{etag}{The etag for the dashboard.} - -\item{serialized_dashboard}{The contents of the dashboard in serialized string form.} - -\item{warehouse_id}{The warehouse ID used to run the dashboard.} -} -\description{ -Update a draft dashboard. -} diff --git a/man/librariesAllClusterStatuses.Rd b/man/librariesAllClusterStatuses.Rd deleted file mode 100644 index 79ac5240..00000000 --- a/man/librariesAllClusterStatuses.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/libraries.R -\name{librariesAllClusterStatuses} -\alias{librariesAllClusterStatuses} -\title{Get all statuses.} -\usage{ -librariesAllClusterStatuses(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Get the status of all libraries on all clusters. A status will be available -for all libraries installed on this cluster via the API or the libraries UI -as well as libraries set to be installed on all clusters via the libraries -UI. -} diff --git a/man/librariesClusterStatus.Rd b/man/librariesClusterStatus.Rd deleted file mode 100644 index 2e2b10ae..00000000 --- a/man/librariesClusterStatus.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/libraries.R -\name{librariesClusterStatus} -\alias{librariesClusterStatus} -\title{Get status.} -\usage{ -librariesClusterStatus(client, cluster_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. Unique identifier of the cluster whose status should be retrieved.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Get the status of libraries on a cluster. A status will be available for all -libraries installed on this cluster via the API or the libraries UI as well -as libraries set to be installed on all clusters via the libraries UI. The -order of returned libraries will be as follows. -} -\details{ -\enumerate{ -\item Libraries set to be installed on this cluster will be returned first. -Within this group, the final order will be order in which the libraries were -added to the cluster. -\item Libraries set to be installed on all clusters are returned next. Within -this group there is no order guarantee. 
-\item Libraries that were previously requested on this cluster or on all -clusters, but now marked for removal. Within this group there is no order -guarantee. -} -} diff --git a/man/librariesInstall.Rd b/man/librariesInstall.Rd deleted file mode 100644 index 274ccd54..00000000 --- a/man/librariesInstall.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/libraries.R -\name{librariesInstall} -\alias{librariesInstall} -\title{Add a library.} -\usage{ -librariesInstall(client, cluster_id, libraries) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. Unique identifier for the cluster on which to install these libraries.} - -\item{libraries}{Required. The libraries to install.} -} -\description{ -Add libraries to be installed on a cluster. The installation is asynchronous; -it happens in the background after the completion of this request. -} -\details{ -\strong{Note}: The actual set of libraries to be installed on a cluster is the -union of the libraries specified via this method and the libraries set to be -installed on all clusters via the libraries UI. -} diff --git a/man/librariesUninstall.Rd b/man/librariesUninstall.Rd deleted file mode 100644 index c3babbbb..00000000 --- a/man/librariesUninstall.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/libraries.R -\name{librariesUninstall} -\alias{librariesUninstall} -\title{Uninstall libraries.} -\usage{ -librariesUninstall(client, cluster_id, libraries) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{cluster_id}{Required. Unique identifier for the cluster on which to uninstall these libraries.} - -\item{libraries}{Required. The libraries to uninstall.} -} -\description{ -Set libraries to be uninstalled on a cluster. The libraries won't be -uninstalled until the cluster is restarted. Uninstalling libraries that are -not installed on the cluster will have no impact but is not an error. -} diff --git a/man/list_alerts.Rd b/man/list_alerts.Rd new file mode 100644 index 00000000..ab41246a --- /dev/null +++ b/man/list_alerts.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/alerts.R +\name{list_alerts} +\alias{list_alerts} +\alias{alertsList} +\title{Get alerts.} +\usage{ +list_alerts(client) + +alertsList(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +Gets a list of alerts. +} diff --git a/man/list_catalogs.Rd b/man/list_catalogs.Rd new file mode 100644 index 00000000..54b1ae06 --- /dev/null +++ b/man/list_catalogs.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/catalogs.R +\name{list_catalogs} +\alias{list_catalogs} +\alias{catalogsList} +\title{List catalogs.} +\usage{ +list_catalogs(client, include_browse = NULL) + +catalogsList(client, include_browse = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{include_browse}{Whether to include catalogs in the response for which the principal can only access selective metadata.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets an array of catalogs in the metastore. If the caller is the metastore +admin, all catalogs will be retrieved. 
Otherwise, only catalogs owned by the +caller (or for which the caller has the \strong{USE_CATALOG} privilege) will be +retrieved. There is no guarantee of a specific ordering of the elements in +the array. +} diff --git a/man/list_clean_rooms.Rd b/man/list_clean_rooms.Rd new file mode 100644 index 00000000..d753f819 --- /dev/null +++ b/man/list_clean_rooms.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clean_rooms.R +\name{list_clean_rooms} +\alias{list_clean_rooms} +\alias{cleanRoomsList} +\title{List clean rooms.} +\usage{ +list_clean_rooms(client, max_results = NULL, page_token = NULL) + +cleanRoomsList(client, max_results = NULL, page_token = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{max_results}{Maximum number of clean rooms to return.} + +\item{page_token}{Opaque pagination token to go to next page based on previous query.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets an array of data object clean rooms from the metastore. The caller must +be a metastore admin or the owner of the clean room. There is no guarantee of +a specific ordering of the elements in the array. +} diff --git a/man/list_cluster_node_types.Rd b/man/list_cluster_node_types.Rd new file mode 100644 index 00000000..4f8ffac8 --- /dev/null +++ b/man/list_cluster_node_types.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{list_cluster_node_types} +\alias{list_cluster_node_types} +\alias{clustersListNodeTypes} +\title{List node types.} +\usage{ +list_cluster_node_types(client) + +clustersListNodeTypes(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +Returns a list of supported Spark node types. These node types can be used to +launch a cluster. +} diff --git a/man/list_cluster_policies.Rd b/man/list_cluster_policies.Rd new file mode 100644 index 00000000..52671066 --- /dev/null +++ b/man/list_cluster_policies.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/cluster_policies.R +\name{list_cluster_policies} +\alias{list_cluster_policies} +\alias{clusterPoliciesList} +\title{List cluster policies.} +\usage{ +list_cluster_policies(client, sort_column = NULL, sort_order = NULL) + +clusterPoliciesList(client, sort_column = NULL, sort_order = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{sort_column}{The cluster policy attribute to sort by.} + +\item{sort_order}{The order in which the policies get listed.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Returns a list of policies accessible by the requesting user. +} diff --git a/man/list_cluster_policy_families.Rd b/man/list_cluster_policy_families.Rd new file mode 100644 index 00000000..e118431a --- /dev/null +++ b/man/list_cluster_policy_families.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/policy_families.R +\name{list_cluster_policy_families} +\alias{list_cluster_policy_families} +\alias{policyFamiliesList} +\title{List policy families.} +\usage{ +list_cluster_policy_families(client, max_results = NULL, page_token = NULL) + +policyFamiliesList(client, max_results = NULL, page_token = NULL) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{max_results}{The max number of policy families to return.} + +\item{page_token}{A token that can be used to get the next page of results.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Retrieve a list of policy families. This API is paginated. +} diff --git a/man/list_cluster_zones.Rd b/man/list_cluster_zones.Rd new file mode 100644 index 00000000..5d5ef81f --- /dev/null +++ b/man/list_cluster_zones.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{list_cluster_zones} +\alias{list_cluster_zones} +\alias{clustersListZones} +\title{List availability zones.} +\usage{ +list_cluster_zones(client) + +clustersListZones(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +Returns a list of availability zones where clusters can be created (for +example, us-west-2a). These zones can be used to launch a cluster. +} diff --git a/man/list_clusters.Rd b/man/list_clusters.Rd new file mode 100644 index 00000000..176658ec --- /dev/null +++ b/man/list_clusters.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{list_clusters} +\alias{list_clusters} +\alias{clustersList} +\title{List all clusters.} +\usage{ +list_clusters(client, can_use_client = NULL) + +clustersList(client, can_use_client = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{can_use_client}{Filter clusters based on what type of client it can be used for.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Return information about all pinned clusters, active clusters, up to 200 of +the most recently terminated all-purpose clusters in the past 30 days, and up +to 30 of the most recently terminated job clusters in the past 30 days. +} +\details{ +For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated +all-purpose clusters in the past 30 days, and 50 terminated job clusters in +the past 30 days, then this API returns the 1 pinned cluster, 4 active +clusters, all 45 terminated all-purpose clusters, and the 30 most recently +terminated job clusters. +} diff --git a/man/list_connections.Rd b/man/list_connections.Rd new file mode 100644 index 00000000..0f75b6fc --- /dev/null +++ b/man/list_connections.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/connections.R +\name{list_connections} +\alias{list_connections} +\alias{connectionsList} +\title{List connections.} +\usage{ +list_connections(client) + +connectionsList(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +List all connections. +} diff --git a/man/list_dashboards.Rd b/man/list_dashboards.Rd new file mode 100644 index 00000000..71ccc074 --- /dev/null +++ b/man/list_dashboards.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dashboards.R +\name{list_dashboards} +\alias{list_dashboards} +\alias{dashboardsList} +\title{Get dashboard objects.} +\usage{ +list_dashboards(client, order = NULL, page = NULL, page_size = NULL, q = NULL) + +dashboardsList(client, order = NULL, page = NULL, page_size = NULL, q = NULL) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{order}{Name of dashboard attribute to order by.} + +\item{page}{Page number to retrieve.} + +\item{page_size}{Number of dashboards to return per page.} + +\item{q}{Full text search term.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Fetch a paginated list of dashboard objects. +} +\details{ +\subsection{Warning}{ + +Calling this API concurrently 10 or more times could result in throttling, service degradation, or a temporary ban. +} +} diff --git a/man/list_data_sources.Rd b/man/list_data_sources.Rd new file mode 100644 index 00000000..b8f6b4b8 --- /dev/null +++ b/man/list_data_sources.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/data_sources.R +\name{list_data_sources} +\alias{list_data_sources} +\alias{dataSourcesList} +\title{Get a list of SQL warehouses.} +\usage{ +list_data_sources(client) + +dataSourcesList(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +Retrieves a full list of SQL warehouses available in this workspace. All +fields that appear in this API response are enumerated for clarity. However, +you need only a SQL warehouse's \code{id} to create new queries against it. +} diff --git a/man/list_dbfs.Rd b/man/list_dbfs.Rd new file mode 100644 index 00000000..82f3ed44 --- /dev/null +++ b/man/list_dbfs.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dbfs.R +\name{list_dbfs} +\alias{list_dbfs} +\alias{dbfsList} +\title{List directory contents or file details.} +\usage{ +list_dbfs(client, path) + +dbfsList(client, path) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{path}{Required. The path of the file or directory.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +List the contents of a directory, or details of the file. If the file or +directory does not exist, this call throws an exception with +\code{RESOURCE_DOES_NOT_EXIST}. +} +\details{ +When calling list on a large directory, the list operation will time out +after approximately 60 seconds. We strongly recommend using list only on +directories containing less than 10K files and discourage using the DBFS REST +API for operations that list more than 10K files. Instead, we recommend that +you perform such operations in the context of a cluster, using the \href{/dev-tools/databricks-utils.html#dbutils-fs}{File system utility (dbutils.fs)}, +which provides the same functionality without timing out. +} diff --git a/man/list_experiment_artifacts.Rd b/man/list_experiment_artifacts.Rd new file mode 100644 index 00000000..a692fa6c --- /dev/null +++ b/man/list_experiment_artifacts.Rd @@ -0,0 +1,41 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{list_experiment_artifacts} +\alias{list_experiment_artifacts} +\alias{experimentsListArtifacts} +\title{Get all artifacts.} +\usage{ +list_experiment_artifacts( + client, + page_token = NULL, + path = NULL, + run_id = NULL, + run_uuid = NULL +) + +experimentsListArtifacts( + client, + page_token = NULL, + path = NULL, + run_id = NULL, + run_uuid = NULL +) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{page_token}{Token indicating the page of artifact results to fetch.} + +\item{path}{Filter artifacts matching this path (a relative path from the root artifact directory).} + +\item{run_id}{ID of the run whose artifacts to list.} + +\item{run_uuid}{Deprecated, use run_id instead. ID of the run whose artifacts to list.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +List artifacts for a run. Takes an optional \code{artifact_path} prefix. If it is +specified, the response contains only artifacts with the specified prefix. +} diff --git a/man/list_experiment_experiments.Rd b/man/list_experiment_experiments.Rd new file mode 100644 index 00000000..61f729d2 --- /dev/null +++ b/man/list_experiment_experiments.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{list_experiment_experiments} +\alias{list_experiment_experiments} +\alias{experimentsListExperiments} +\title{List experiments.} +\usage{ +list_experiment_experiments( + client, + max_results = NULL, + page_token = NULL, + view_type = NULL +) + +experimentsListExperiments( + client, + max_results = NULL, + page_token = NULL, + view_type = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{max_results}{Maximum number of experiments desired.} + +\item{page_token}{Token indicating the page of experiments to fetch.} + +\item{view_type}{Qualifier for type of experiments to be returned.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets a list of all experiments. +} diff --git a/man/list_external_locations.Rd b/man/list_external_locations.Rd new file mode 100644 index 00000000..69aae37d --- /dev/null +++ b/man/list_external_locations.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/external_locations.R +\name{list_external_locations} +\alias{list_external_locations} +\alias{externalLocationsList} +\title{List external locations.} +\usage{ +list_external_locations( + client, + include_browse = NULL, + max_results = NULL, + page_token = NULL +) + +externalLocationsList( + client, + include_browse = NULL, + max_results = NULL, + page_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{include_browse}{Whether to include external locations in the response for which the principal can only access selective metadata.} + +\item{max_results}{Maximum number of external locations to return.} + +\item{page_token}{Opaque pagination token to go to next page based on previous query.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets an array of external locations (\strong{ExternalLocationInfo} objects) from +the metastore. The caller must be a metastore admin, the owner of the +external location, or a user that has some privilege on the external +location. There is no guarantee of a specific ordering of the elements in the +array.
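Each of the generated help pages above documents a new snake_case function together with its legacy camelCase alias, and the \usage block shows both signatures taking identical arguments. A minimal R sketch of how the two names interchange, assuming credentials are already configured and that the package attaches under the name used below:

# "databricks" as the package name is an assumption for this sketch.
library(databricks)

client <- DatabricksClient()

# The snake_case name and the camelCase alias are documented on the same
# help page and accept the same arguments, so these two calls match:
locations <- list_external_locations(client)
locations <- externalLocationsList(client)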
+} diff --git a/man/list_file_directory_contents.Rd b/man/list_file_directory_contents.Rd new file mode 100644 index 00000000..65da03ff --- /dev/null +++ b/man/list_file_directory_contents.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/files.R +\name{list_file_directory_contents} +\alias{list_file_directory_contents} +\alias{filesListDirectoryContents} +\title{List directory contents.} +\usage{ +list_file_directory_contents( + client, + directory_path, + page_size = NULL, + page_token = NULL +) + +filesListDirectoryContents( + client, + directory_path, + page_size = NULL, + page_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{directory_path}{Required. The absolute path of a directory.} + +\item{page_size}{The maximum number of directory entries to return.} + +\item{page_token}{An opaque page token which was the \code{next_page_token} in the response of the previous request to list the contents of this directory.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Returns the contents of a directory. If there is no directory at the +specified path, the API returns an HTTP 404 error. +} diff --git a/man/list_functions.Rd b/man/list_functions.Rd new file mode 100644 index 00000000..9e9022e1 --- /dev/null +++ b/man/list_functions.Rd @@ -0,0 +1,50 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/functions.R +\name{list_functions} +\alias{list_functions} +\alias{functionsList} +\title{List functions.} +\usage{ +list_functions( + client, + catalog_name, + schema_name, + include_browse = NULL, + max_results = NULL, + page_token = NULL +) + +functionsList( + client, + catalog_name, + schema_name, + include_browse = NULL, + max_results = NULL, + page_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{catalog_name}{Required. Name of parent catalog for functions of interest.} + +\item{schema_name}{Required. Parent schema of functions.} + +\item{include_browse}{Whether to include functions in the response for which the principal can only access selective metadata.} + +\item{max_results}{Maximum number of functions to return.} + +\item{page_token}{Opaque pagination token to go to next page based on previous query.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +List functions within the specified parent catalog and schema. If the user is +a metastore admin, all functions are returned in the output list. Otherwise, +the user must have the \strong{USE_CATALOG} privilege on the catalog and the +\strong{USE_SCHEMA} privilege on the schema, and the output list contains only +functions for which either the user has the \strong{EXECUTE} privilege or the user +is the owner. There is no guarantee of a specific ordering of the elements in +the array. +} diff --git a/man/list_git_credentials.Rd b/man/list_git_credentials.Rd new file mode 100644 index 00000000..72daa7c7 --- /dev/null +++ b/man/list_git_credentials.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/git_credentials.R +\name{list_git_credentials} +\alias{list_git_credentials} +\alias{gitCredentialsList} +\title{Get Git credentials.} +\usage{ +list_git_credentials(client) + +gitCredentialsList(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\value{ +\code{data.frame} with all of the response pages.
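As the \usage block above shows, list_functions() requires both a parent catalog and a parent schema. A minimal sketch, assuming a configured client; the catalog and schema names are hypothetical:

client <- DatabricksClient()

# "main" and "default" are hypothetical names used only for illustration.
fns <- list_functions(client, catalog_name = "main", schema_name = "default")

# Per the \value block, all response pages arrive merged into one data.frame.
head(fns)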
+} +\description{ +Lists the calling user's Git credentials. One credential per user is +supported. +} diff --git a/man/list_global_init_scripts.Rd b/man/list_global_init_scripts.Rd new file mode 100644 index 00000000..915ce468 --- /dev/null +++ b/man/list_global_init_scripts.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/global_init_scripts.R +\name{list_global_init_scripts} +\alias{list_global_init_scripts} +\alias{globalInitScriptsList} +\title{Get init scripts.} +\usage{ +list_global_init_scripts(client) + +globalInitScriptsList(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Get a list of all global init scripts for this workspace. This returns all +properties for each script but \strong{not} the script contents. To retrieve the +contents of a script, use the \href{:method:globalinitscripts/get}{get a global init script} operation. +} diff --git a/man/list_groups.Rd b/man/list_groups.Rd new file mode 100644 index 00000000..2f6d0a13 --- /dev/null +++ b/man/list_groups.Rd @@ -0,0 +1,52 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/groups.R +\name{list_groups} +\alias{list_groups} +\alias{groupsList} +\title{List group details.} +\usage{ +list_groups( + client, + attributes = NULL, + count = NULL, + excluded_attributes = NULL, + filter = NULL, + sort_by = NULL, + sort_order = NULL, + start_index = NULL +) + +groupsList( + client, + attributes = NULL, + count = NULL, + excluded_attributes = NULL, + filter = NULL, + sort_by = NULL, + sort_order = NULL, + start_index = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{attributes}{Comma-separated list of attributes to return in response.} + +\item{count}{Desired number of results per page.} + +\item{excluded_attributes}{Comma-separated list of attributes to exclude in response.} + +\item{filter}{Query by which the results have to be filtered.} + +\item{sort_by}{Attribute to sort the results.} + +\item{sort_order}{The order to sort the results.} + +\item{start_index}{Specifies the index of the first result.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets all details of the groups associated with the Databricks workspace. +} diff --git a/man/list_instance_pools.Rd b/man/list_instance_pools.Rd new file mode 100644 index 00000000..3e7c589e --- /dev/null +++ b/man/list_instance_pools.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/instance_pools.R +\name{list_instance_pools} +\alias{list_instance_pools} +\alias{instancePoolsList} +\title{List instance pool info.} +\usage{ +list_instance_pools(client) + +instancePoolsList(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets a list of instance pools with their statistics. 
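The group-listing parameters documented above (filter, count, start_index, and so on) compose in a single call. A hedged sketch, assuming the filter string follows the usual SCIM query grammar:

client <- DatabricksClient()

# SCIM-style filter syntax is an assumption here; the group name is
# hypothetical. count caps the page size, not the merged result.
admins <- list_groups(client, filter = 'displayName eq "admins"', count = 10)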
+} diff --git a/man/list_instance_profiles.Rd b/man/list_instance_profiles.Rd new file mode 100644 index 00000000..54976d7e --- /dev/null +++ b/man/list_instance_profiles.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/instance_profiles.R +\name{list_instance_profiles} +\alias{list_instance_profiles} +\alias{instanceProfilesList} +\title{List available instance profiles.} +\usage{ +list_instance_profiles(client) + +instanceProfilesList(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +List the instance profiles that the calling user can use to launch a cluster. +} +\details{ +This API is available to all users. +} diff --git a/man/list_ip_access_lists.Rd b/man/list_ip_access_lists.Rd new file mode 100644 index 00000000..5f033046 --- /dev/null +++ b/man/list_ip_access_lists.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ip_access_lists.R +\name{list_ip_access_lists} +\alias{list_ip_access_lists} +\alias{ipAccessListsList} +\title{Get access lists.} +\usage{ +list_ip_access_lists(client) + +ipAccessListsList(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets all IP access lists for the specified workspace. +} diff --git a/man/list_job_runs.Rd b/man/list_job_runs.Rd new file mode 100644 index 00000000..94741b01 --- /dev/null +++ b/man/list_job_runs.Rd @@ -0,0 +1,64 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{list_job_runs} +\alias{list_job_runs} +\alias{jobsListRuns} +\title{List job runs.} +\usage{ +list_job_runs( + client, + active_only = NULL, + completed_only = NULL, + expand_tasks = NULL, + job_id = NULL, + limit = NULL, + offset = NULL, + page_token = NULL, + run_type = NULL, + start_time_from = NULL, + start_time_to = NULL +) + +jobsListRuns( + client, + active_only = NULL, + completed_only = NULL, + expand_tasks = NULL, + job_id = NULL, + limit = NULL, + offset = NULL, + page_token = NULL, + run_type = NULL, + start_time_from = NULL, + start_time_to = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{active_only}{If active_only is \code{true}, only active runs are included in the results; otherwise, lists both active and completed runs.} + +\item{completed_only}{If completed_only is \code{true}, only completed runs are included in the results; otherwise, lists both active and completed runs.} + +\item{expand_tasks}{Whether to include task and cluster details in the response.} + +\item{job_id}{The job for which to list runs.} + +\item{limit}{The number of runs to return.} + +\item{offset}{The offset of the first run to return, relative to the most recent run.} + +\item{page_token}{Use \code{next_page_token} or \code{prev_page_token} returned from the previous request to list the next or previous page of runs respectively.} + +\item{run_type}{The type of runs to return.} + +\item{start_time_from}{Show runs that started \emph{at or after} this value.} + +\item{start_time_to}{Show runs that started \emph{at or before} this value.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +List runs in descending order by start time. 
+} diff --git a/man/list_jobs.Rd b/man/list_jobs.Rd new file mode 100644 index 00000000..05c80c82 --- /dev/null +++ b/man/list_jobs.Rd @@ -0,0 +1,44 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{list_jobs} +\alias{list_jobs} +\alias{jobsList} +\title{List jobs.} +\usage{ +list_jobs( + client, + expand_tasks = NULL, + limit = NULL, + name = NULL, + offset = NULL, + page_token = NULL +) + +jobsList( + client, + expand_tasks = NULL, + limit = NULL, + name = NULL, + offset = NULL, + page_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{expand_tasks}{Whether to include task and cluster details in the response.} + +\item{limit}{The number of jobs to return.} + +\item{name}{A filter on the list based on the exact (case insensitive) job name.} + +\item{offset}{The offset of the first job to return, relative to the most recently created job.} + +\item{page_token}{Use \code{next_page_token} or \code{prev_page_token} returned from the previous request to list the next or previous page of jobs respectively.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Retrieves a list of jobs. +} diff --git a/man/list_lakehouse_monitor_refreshes.Rd b/man/list_lakehouse_monitor_refreshes.Rd new file mode 100644 index 00000000..be3cab41 --- /dev/null +++ b/man/list_lakehouse_monitor_refreshes.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/lakehouse_monitors.R +\name{list_lakehouse_monitor_refreshes} +\alias{list_lakehouse_monitor_refreshes} +\alias{lakehouseMonitorsListRefreshes} +\title{List refreshes.} +\usage{ +list_lakehouse_monitor_refreshes(client, full_name) + +lakehouseMonitorsListRefreshes(client, full_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the table.} +} +\description{ +Gets an array containing the history of the most recent refreshes (up to 25) +for this table. +} +\details{ +The caller must either: 1. be an owner of the table's parent catalog 2. have +\strong{USE_CATALOG} on the table's parent catalog and be an owner of the table's +parent schema 3. have the following permissions: - \strong{USE_CATALOG} on the +table's parent catalog - \strong{USE_SCHEMA} on the table's parent schema - +\strong{SELECT} privilege on the table. + +Additionally, the call must be made from the workspace where the monitor was +created. +} diff --git a/man/list_metastores.Rd b/man/list_metastores.Rd new file mode 100644 index 00000000..d21d106a --- /dev/null +++ b/man/list_metastores.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/metastores.R +\name{list_metastores} +\alias{list_metastores} +\alias{metastoresList} +\title{List metastores.} +\usage{ +list_metastores(client) + +metastoresList(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets an array of the available metastores (as \strong{MetastoreInfo} objects). The +caller must be an admin to retrieve this info. There is no guarantee of a +specific ordering of the elements in the array. 
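A short sketch of the run-listing parameters documented above, assuming a configured client; the job ID is hypothetical:

client <- DatabricksClient()

# Restrict the listing to in-progress runs of one job, 25 per page;
# pagination is still collected into one data.frame per the \value block.
runs <- list_job_runs(client, job_id = 1234, active_only = TRUE, limit = 25)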
+} diff --git a/man/list_model_models.Rd b/man/list_model_models.Rd new file mode 100644 index 00000000..549907f2 --- /dev/null +++ b/man/list_model_models.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{list_model_models} +\alias{list_model_models} +\alias{modelRegistryListModels} +\title{List models.} +\usage{ +list_model_models(client, max_results = NULL, page_token = NULL) + +modelRegistryListModels(client, max_results = NULL, page_token = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{max_results}{Maximum number of registered models desired.} + +\item{page_token}{Pagination token to go to the next page based on a previous query.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Lists all available registered models, up to the limit specified in +\strong{max_results}. +} diff --git a/man/list_model_transition_requests.Rd b/man/list_model_transition_requests.Rd new file mode 100644 index 00000000..25a40323 --- /dev/null +++ b/man/list_model_transition_requests.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{list_model_transition_requests} +\alias{list_model_transition_requests} +\alias{modelRegistryListTransitionRequests} +\title{List transition requests.} +\usage{ +list_model_transition_requests(client, name, version) + +modelRegistryListTransitionRequests(client, name, version) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the model.} + +\item{version}{Required. Version of the model.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets a list of all open stage transition requests for the model version. +} diff --git a/man/list_model_versions.Rd b/man/list_model_versions.Rd new file mode 100644 index 00000000..5a846d28 --- /dev/null +++ b/man/list_model_versions.Rd @@ -0,0 +1,52 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_versions.R +\name{list_model_versions} +\alias{list_model_versions} +\alias{modelVersionsList} +\title{List Model Versions.} +\usage{ +list_model_versions( + client, + full_name, + include_browse = NULL, + max_results = NULL, + page_token = NULL +) + +modelVersionsList( + client, + full_name, + include_browse = NULL, + max_results = NULL, + page_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. The full three-level name of the registered model under which to list model versions.} + +\item{include_browse}{Whether to include model versions in the response for which the principal can only access selective metadata.} + +\item{max_results}{Maximum number of model versions to return.} + +\item{page_token}{Opaque pagination token to go to next page based on previous query.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +List model versions. You can list model versions under a particular schema, +or list all model versions in the current metastore. +} +\details{ +The returned models are filtered based on the privileges of the calling user. +For example, the metastore admin is able to list all the model versions. A +regular user needs to be the owner or have the \strong{EXECUTE} privilege on the +parent registered model to receive the model versions in the response.
For +the latter case, the caller must also be the owner or have the +\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} +privilege on the parent schema. + +There is no guarantee of a specific ordering of the elements in the response. +} diff --git a/man/list_model_webhooks.Rd b/man/list_model_webhooks.Rd new file mode 100644 index 00000000..4c7c871f --- /dev/null +++ b/man/list_model_webhooks.Rd @@ -0,0 +1,39 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{list_model_webhooks} +\alias{list_model_webhooks} +\alias{modelRegistryListWebhooks} +\title{List registry webhooks.} +\usage{ +list_model_webhooks( + client, + events = NULL, + model_name = NULL, + page_token = NULL +) + +modelRegistryListWebhooks( + client, + events = NULL, + model_name = NULL, + page_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{events}{If \code{events} is specified, any webhook with one or more of the specified trigger events is included in the output.} + +\item{model_name}{If not specified, all webhooks associated with the specified events are listed, regardless of their associated model.} + +\item{page_token}{Token indicating the page of artifact results to fetch.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +\strong{NOTE:} This endpoint is in Public Preview. +} +\details{ +Lists all registry webhooks. +} diff --git a/man/list_notebooks.Rd b/man/list_notebooks.Rd new file mode 100644 index 00000000..4a23b10f --- /dev/null +++ b/man/list_notebooks.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{list_notebooks} +\alias{list_notebooks} +\alias{workspaceList} +\title{List contents.} +\usage{ +list_notebooks(client, path, notebooks_modified_after = NULL) + +workspaceList(client, path, notebooks_modified_after = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{path}{Required. The absolute path of the notebook or directory.} + +\item{notebooks_modified_after}{UTC timestamp in milliseconds.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Lists the contents of a directory, or the object if it is not a directory. If +the input path does not exist, this call returns an error +\code{RESOURCE_DOES_NOT_EXIST}. +} diff --git a/man/list_pipeline_events.Rd b/man/list_pipeline_events.Rd new file mode 100644 index 00000000..518fb67b --- /dev/null +++ b/man/list_pipeline_events.Rd @@ -0,0 +1,44 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{list_pipeline_events} +\alias{list_pipeline_events} +\alias{pipelinesListPipelineEvents} +\title{List pipeline events.} +\usage{ +list_pipeline_events( + client, + pipeline_id, + filter = NULL, + max_results = NULL, + order_by = NULL, + page_token = NULL +) + +pipelinesListPipelineEvents( + client, + pipeline_id, + filter = NULL, + max_results = NULL, + order_by = NULL, + page_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{pipeline_id}{Required. 
This field has no description yet.} + +\item{filter}{Criteria to select a subset of results, expressed using a SQL-like syntax.} + +\item{max_results}{Max number of entries to return in a single page.} + +\item{order_by}{A string indicating a sort order by timestamp for the results, for example, timestamp asc.} + +\item{page_token}{Page token returned by previous call.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Retrieves events for a pipeline. +} diff --git a/man/list_pipeline_pipelines.Rd b/man/list_pipeline_pipelines.Rd new file mode 100644 index 00000000..ae5c5eb7 --- /dev/null +++ b/man/list_pipeline_pipelines.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{list_pipeline_pipelines} +\alias{list_pipeline_pipelines} +\alias{pipelinesListPipelines} +\title{List pipelines.} +\usage{ +list_pipeline_pipelines( + client, + filter = NULL, + max_results = NULL, + order_by = NULL, + page_token = NULL +) + +pipelinesListPipelines( + client, + filter = NULL, + max_results = NULL, + order_by = NULL, + page_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{filter}{Select a subset of results based on the specified criteria.} + +\item{max_results}{The maximum number of entries to return in a single page.} + +\item{order_by}{A list of strings specifying the order of results.} + +\item{page_token}{Page token returned by previous call.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Lists pipelines defined in the Delta Live Tables system. +} diff --git a/man/list_pipeline_updates.Rd b/man/list_pipeline_updates.Rd new file mode 100644 index 00000000..10048e33 --- /dev/null +++ b/man/list_pipeline_updates.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{list_pipeline_updates} +\alias{list_pipeline_updates} +\alias{pipelinesListUpdates} +\title{List pipeline updates.} +\usage{ +list_pipeline_updates( + client, + pipeline_id, + max_results = NULL, + page_token = NULL, + until_update_id = NULL +) + +pipelinesListUpdates( + client, + pipeline_id, + max_results = NULL, + page_token = NULL, + until_update_id = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{pipeline_id}{Required. The pipeline to return updates for.} + +\item{max_results}{Max number of entries to return in a single page.} + +\item{page_token}{Page token returned by previous call.} + +\item{until_update_id}{If present, returns updates until and including this update_id.} +} +\description{ +List updates for an active pipeline. +} diff --git a/man/list_provider_shares.Rd b/man/list_provider_shares.Rd new file mode 100644 index 00000000..365533ec --- /dev/null +++ b/man/list_provider_shares.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/providers.R +\name{list_provider_shares} +\alias{list_provider_shares} +\alias{providersListShares} +\title{List shares by Provider.} +\usage{ +list_provider_shares(client, name) + +providersListShares(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the provider in which to list shares.} +} +\value{ +\code{data.frame} with all of the response pages. 
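A hedged sketch of the event-listing call documented above; the pipeline ID is hypothetical, and the filter string assumes the SQL-like grammar the parameter description mentions:

client <- DatabricksClient()

# The pipeline ID is a placeholder; the filter grammar is an assumption.
events <- list_pipeline_events(client,
  pipeline_id = "11111111-2222-3333-4444-555555555555",
  filter = "level = 'ERROR'", max_results = 50)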
+} +\description{ +Gets an array of a specified provider's shares within the metastore where: +} +\details{ +\itemize{ +\item the caller is a metastore admin, or +\item the caller is the owner. +} +} diff --git a/man/list_providers.Rd b/man/list_providers.Rd new file mode 100644 index 00000000..f4469b63 --- /dev/null +++ b/man/list_providers.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/providers.R +\name{list_providers} +\alias{list_providers} +\alias{providersList} +\title{List providers.} +\usage{ +list_providers(client, data_provider_global_metastore_id = NULL) + +providersList(client, data_provider_global_metastore_id = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{data_provider_global_metastore_id}{If not provided, all providers will be returned.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets an array of available authentication providers. The caller must either +be a metastore admin or the owner of the providers. Providers not owned by +the caller are not included in the response. There is no guarantee of a +specific ordering of the elements in the array. +} diff --git a/man/list_queries.Rd b/man/list_queries.Rd new file mode 100644 index 00000000..1ac9e517 --- /dev/null +++ b/man/list_queries.Rd @@ -0,0 +1,35 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/queries.R +\name{list_queries} +\alias{list_queries} +\alias{queriesList} +\title{Get a list of queries.} +\usage{ +list_queries(client, order = NULL, page = NULL, page_size = NULL, q = NULL) + +queriesList(client, order = NULL, page = NULL, page_size = NULL, q = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{order}{Name of query attribute to order by.} + +\item{page}{Page number to retrieve.} + +\item{page_size}{Number of queries to return per page.} + +\item{q}{Full text search term.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets a list of queries. Optionally, this list can be filtered by a search +term. +} +\details{ +\strong{Warning}: Calling this API concurrently 10 or more times could result +in throttling, service degradation, or a temporary ban. +} diff --git a/man/list_query_history.Rd b/man/list_query_history.Rd new file mode 100644 index 00000000..68d458de --- /dev/null +++ b/man/list_query_history.Rd @@ -0,0 +1,43 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/query_history.R +\name{list_query_history} +\alias{list_query_history} +\alias{queryHistoryList} +\title{List Queries.} +\usage{ +list_query_history( + client, + filter_by = NULL, + include_metrics = NULL, + max_results = NULL, + page_token = NULL +) + +queryHistoryList( + client, + filter_by = NULL, + include_metrics = NULL, + max_results = NULL, + page_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{filter_by}{A filter to limit query history results.} + +\item{include_metrics}{Whether to include metrics about query.} + +\item{max_results}{Limit the number of results returned in one page.} + +\item{page_token}{A token that can be used to get the next page of results.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +List the history of queries through SQL warehouses. +} +\details{ +You can filter by user ID, warehouse ID, status, and time range.
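A minimal sketch of the history call documented above, assuming a configured client; max_results caps the page size while the function still merges every page into one data.frame per its \value block:

client <- DatabricksClient()

# Include per-query metrics and fetch 100 results per page.
history <- list_query_history(client, include_metrics = TRUE, max_results = 100)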
+} diff --git a/man/list_recipients.Rd b/man/list_recipients.Rd new file mode 100644 index 00000000..8c69995e --- /dev/null +++ b/man/list_recipients.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/recipients.R +\name{list_recipients} +\alias{list_recipients} +\alias{recipientsList} +\title{List share recipients.} +\usage{ +list_recipients(client, data_recipient_global_metastore_id = NULL) + +recipientsList(client, data_recipient_global_metastore_id = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{data_recipient_global_metastore_id}{If not provided, all recipients will be returned.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets an array of all share recipients within the current metastore where: +} +\details{ +\itemize{ +\item the caller is a metastore admin, or +\item the caller is the owner. +} + +There is no guarantee of a specific ordering of the elements in the array. +} diff --git a/man/list_registered_models.Rd b/man/list_registered_models.Rd new file mode 100644 index 00000000..405d571d --- /dev/null +++ b/man/list_registered_models.Rd @@ -0,0 +1,56 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/registered_models.R +\name{list_registered_models} +\alias{list_registered_models} +\alias{registeredModelsList} +\title{List Registered Models.} +\usage{ +list_registered_models( + client, + catalog_name = NULL, + include_browse = NULL, + max_results = NULL, + page_token = NULL, + schema_name = NULL +) + +registeredModelsList( + client, + catalog_name = NULL, + include_browse = NULL, + max_results = NULL, + page_token = NULL, + schema_name = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{catalog_name}{The identifier of the catalog under which to list registered models.} + +\item{include_browse}{Whether to include registered models in the response for which the principal can only access selective metadata.} + +\item{max_results}{Max number of registered models to return.} + +\item{page_token}{Opaque token to send for the next page of results (pagination).} + +\item{schema_name}{The identifier of the schema under which to list registered models.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +List registered models. You can list registered models under a particular +schema, or list all registered models in the current metastore. +} +\details{ +The returned models are filtered based on the privileges of the calling user. +For example, the metastore admin is able to list all the registered models. A +regular user needs to be the owner or have the \strong{EXECUTE} privilege on the +registered model to receive the registered models in the response. For the +latter case, the caller must also be the owner or have the \strong{USE_CATALOG} +privilege on the parent catalog and the \strong{USE_SCHEMA} privilege on the +parent schema. + +There is no guarantee of a specific ordering of the elements in the response.
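A short sketch of the registered-model listing documented above; both scope filters are optional, and the catalog and schema names are hypothetical:

client <- DatabricksClient()

# Narrow the listing to one hypothetical catalog and schema.
models <- list_registered_models(client, catalog_name = "main",
  schema_name = "default")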
+} diff --git a/man/list_repos.Rd b/man/list_repos.Rd new file mode 100644 index 00000000..b036b01c --- /dev/null +++ b/man/list_repos.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/repos.R +\name{list_repos} +\alias{list_repos} +\alias{reposList} +\title{Get repos.} +\usage{ +list_repos(client, next_page_token = NULL, path_prefix = NULL) + +reposList(client, next_page_token = NULL, path_prefix = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{next_page_token}{Token used to get the next page of results.} + +\item{path_prefix}{Filters repos that have paths starting with the given path prefix.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Returns repos that the calling user has Manage permissions on. Results are +paginated with each page containing twenty repos. +} diff --git a/man/list_schemas.Rd b/man/list_schemas.Rd new file mode 100644 index 00000000..d7926e9b --- /dev/null +++ b/man/list_schemas.Rd @@ -0,0 +1,44 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/schemas.R +\name{list_schemas} +\alias{list_schemas} +\alias{schemasList} +\title{List schemas.} +\usage{ +list_schemas( + client, + catalog_name, + include_browse = NULL, + max_results = NULL, + page_token = NULL +) + +schemasList( + client, + catalog_name, + include_browse = NULL, + max_results = NULL, + page_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{catalog_name}{Required. Parent catalog for schemas of interest.} + +\item{include_browse}{Whether to include schemas in the response for which the principal can only access selective metadata for.} + +\item{max_results}{Maximum number of schemas to return.} + +\item{page_token}{Opaque pagination token to go to next page based on previous query.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets an array of schemas for a catalog in the metastore. If the caller is the +metastore admin or the owner of the parent catalog, all schemas for the +catalog will be retrieved. Otherwise, only schemas owned by the caller (or +for which the caller has the \strong{USE_SCHEMA} privilege) will be retrieved. +There is no guarantee of a specific ordering of the elements in the array. +} diff --git a/man/list_secret_acls.Rd b/man/list_secret_acls.Rd new file mode 100644 index 00000000..0b2f0599 --- /dev/null +++ b/man/list_secret_acls.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/secrets.R +\name{list_secret_acls} +\alias{list_secret_acls} +\alias{secretsListAcls} +\title{Lists ACLs.} +\usage{ +list_secret_acls(client, scope) + +secretsListAcls(client, scope) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{scope}{Required. The name of the scope to fetch ACL information from.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +List the ACLs for a given secret scope. Users must have the \code{MANAGE} +permission to invoke this API. +} +\details{ +Throws \code{RESOURCE_DOES_NOT_EXIST} if no such secret scope exists. Throws +\code{PERMISSION_DENIED} if the user does not have permission to make this API +call. 
+} diff --git a/man/list_secret_scopes.Rd b/man/list_secret_scopes.Rd new file mode 100644 index 00000000..968b807c --- /dev/null +++ b/man/list_secret_scopes.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/secrets.R +\name{list_secret_scopes} +\alias{list_secret_scopes} +\alias{secretsListScopes} +\title{List all scopes.} +\usage{ +list_secret_scopes(client) + +secretsListScopes(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Lists all secret scopes available in the workspace. +} +\details{ +Throws \code{PERMISSION_DENIED} if the user does not have permission to make this +API call. +} diff --git a/man/list_secret_secrets.Rd b/man/list_secret_secrets.Rd new file mode 100644 index 00000000..4ef76f7d --- /dev/null +++ b/man/list_secret_secrets.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/secrets.R +\name{list_secret_secrets} +\alias{list_secret_secrets} +\alias{secretsListSecrets} +\title{List secret keys.} +\usage{ +list_secret_secrets(client, scope) + +secretsListSecrets(client, scope) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{scope}{Required. The name of the scope to list secrets within.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Lists the secret keys that are stored at this scope. This is a metadata-only +operation; secret data cannot be retrieved using this API. Users need the +READ permission to make this call. +} +\details{ +The lastUpdatedTimestamp returned is in milliseconds since epoch. Throws +\code{RESOURCE_DOES_NOT_EXIST} if no such secret scope exists. Throws +\code{PERMISSION_DENIED} if the user does not have permission to make this API +call. +} diff --git a/man/list_service_principals.Rd b/man/list_service_principals.Rd new file mode 100644 index 00000000..671d924b --- /dev/null +++ b/man/list_service_principals.Rd @@ -0,0 +1,52 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/service_principals.R +\name{list_service_principals} +\alias{list_service_principals} +\alias{servicePrincipalsList} +\title{List service principals.} +\usage{ +list_service_principals( + client, + attributes = NULL, + count = NULL, + excluded_attributes = NULL, + filter = NULL, + sort_by = NULL, + sort_order = NULL, + start_index = NULL +) + +servicePrincipalsList( + client, + attributes = NULL, + count = NULL, + excluded_attributes = NULL, + filter = NULL, + sort_by = NULL, + sort_order = NULL, + start_index = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{attributes}{Comma-separated list of attributes to return in response.} + +\item{count}{Desired number of results per page.} + +\item{excluded_attributes}{Comma-separated list of attributes to exclude in response.} + +\item{filter}{Query by which the results have to be filtered.} + +\item{sort_by}{Attribute to sort the results.} + +\item{sort_order}{The order to sort the results.} + +\item{start_index}{Specifies the index of the first result.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets the set of service principals associated with a Databricks workspace. 
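The three secret-listing helpers documented above compose as in this sketch; the scope name is hypothetical, and per the docs the listing is metadata-only, so no secret values are returned:

client <- DatabricksClient()

scopes <- list_secret_scopes(client)                  # all visible scopes
keys   <- list_secret_secrets(client, scope = "my-scope")  # key names only
acls   <- list_secret_acls(client, scope = "my-scope")     # requires MANAGE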
+} diff --git a/man/list_serving_endpoints.Rd b/man/list_serving_endpoints.Rd new file mode 100644 index 00000000..c784f7fe --- /dev/null +++ b/man/list_serving_endpoints.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{list_serving_endpoints} +\alias{list_serving_endpoints} +\alias{servingEndpointsList} +\title{Get all serving endpoints.} +\usage{ +list_serving_endpoints(client) + +servingEndpointsList(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Get all serving endpoints. +} diff --git a/man/list_shares.Rd b/man/list_shares.Rd new file mode 100644 index 00000000..22989d24 --- /dev/null +++ b/man/list_shares.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/shares.R +\name{list_shares} +\alias{list_shares} +\alias{sharesList} +\title{List shares.} +\usage{ +list_shares(client) + +sharesList(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets an array of data object shares from the metastore. The caller must be a +metastore admin or the owner of the share. There is no guarantee of a +specific ordering of the elements in the array. +} diff --git a/man/list_storage_credentials.Rd b/man/list_storage_credentials.Rd new file mode 100644 index 00000000..1cb8b48a --- /dev/null +++ b/man/list_storage_credentials.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/storage_credentials.R +\name{list_storage_credentials} +\alias{list_storage_credentials} +\alias{storageCredentialsList} +\title{List credentials.} +\usage{ +list_storage_credentials(client, max_results = NULL, page_token = NULL) + +storageCredentialsList(client, max_results = NULL, page_token = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{max_results}{Maximum number of storage credentials to return.} + +\item{page_token}{Opaque pagination token to go to next page based on previous query.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets an array of storage credentials (as \strong{StorageCredentialInfo} objects). +The array is limited to only those storage credentials the caller has +permission to access. If the caller is a metastore admin, retrieval of +credentials is unrestricted. There is no guarantee of a specific ordering of +the elements in the array. +} diff --git a/man/list_system_schemas.Rd b/man/list_system_schemas.Rd new file mode 100644 index 00000000..2a415d97 --- /dev/null +++ b/man/list_system_schemas.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/system_schemas.R +\name{list_system_schemas} +\alias{list_system_schemas} +\alias{systemSchemasList} +\title{List system schemas.} +\usage{ +list_system_schemas(client, metastore_id) + +systemSchemasList(client, metastore_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{metastore_id}{Required. The ID for the metastore in which the system schema resides.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets an array of system schemas for a metastore. The caller must be an +account admin or a metastore admin. 
+} diff --git a/man/list_table_summaries.Rd b/man/list_table_summaries.Rd new file mode 100644 index 00000000..378a3ab6 --- /dev/null +++ b/man/list_table_summaries.Rd @@ -0,0 +1,57 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tables.R +\name{list_table_summaries} +\alias{list_table_summaries} +\alias{tablesListSummaries} +\title{List table summaries.} +\usage{ +list_table_summaries( + client, + catalog_name, + max_results = NULL, + page_token = NULL, + schema_name_pattern = NULL, + table_name_pattern = NULL +) + +tablesListSummaries( + client, + catalog_name, + max_results = NULL, + page_token = NULL, + schema_name_pattern = NULL, + table_name_pattern = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{catalog_name}{Required. Name of parent catalog for tables of interest.} + +\item{max_results}{Maximum number of summaries for tables to return.} + +\item{page_token}{Opaque pagination token to go to next page based on previous query.} + +\item{schema_name_pattern}{A SQL LIKE pattern (\% and _) for schema names.} + +\item{table_name_pattern}{A SQL LIKE pattern (\% and _) for table names.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets an array of summaries for tables for a schema and catalog within the +metastore. The table summaries returned are either: +} +\details{ +\itemize{ +\item summaries for tables (within the current metastore and parent catalog and +schema), when the user is a metastore admin, or +\item summaries for tables and +schemas (within the current metastore and parent catalog) for which the user +has ownership or the \strong{SELECT} privilege on the table and ownership or +\strong{USE_SCHEMA} privilege on the schema, provided that the user also has +ownership or the \strong{USE_CATALOG} privilege on the parent catalog. +} + +There is no guarantee of a specific ordering of the elements in the array. +} diff --git a/man/list_tables.Rd b/man/list_tables.Rd new file mode 100644 index 00000000..93cc08b7 --- /dev/null +++ b/man/list_tables.Rd @@ -0,0 +1,61 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tables.R +\name{list_tables} +\alias{list_tables} +\alias{tablesList} +\title{List tables.} +\usage{ +list_tables( + client, + catalog_name, + schema_name, + include_browse = NULL, + include_delta_metadata = NULL, + max_results = NULL, + omit_columns = NULL, + omit_properties = NULL, + page_token = NULL +) + +tablesList( + client, + catalog_name, + schema_name, + include_browse = NULL, + include_delta_metadata = NULL, + max_results = NULL, + omit_columns = NULL, + omit_properties = NULL, + page_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{catalog_name}{Required. Name of parent catalog for tables of interest.} + +\item{schema_name}{Required. Parent schema of tables.} + +\item{include_browse}{Whether to include tables in the response for which the principal can only access selective metadata.} + +\item{include_delta_metadata}{Whether delta metadata should be included in the response.} + +\item{max_results}{Maximum number of tables to return.} + +\item{omit_columns}{Whether to omit the columns of the table from the response or not.} + +\item{omit_properties}{Whether to omit the properties of the table from the response or not.} + +\item{page_token}{Opaque token to send for the next page of results (pagination).} +} +\value{ +\code{data.frame} with all of the response pages.
+} +\description{ +Gets an array of all tables for the current metastore under the parent +catalog and schema. The caller must be a metastore admin or an owner of (or +have the \strong{SELECT} privilege on) the table. For the latter case, the caller +must also be the owner or have the \strong{USE_CATALOG} privilege on the parent +catalog and the \strong{USE_SCHEMA} privilege on the parent schema. There is no +guarantee of a specific ordering of the elements in the array. +} diff --git a/man/list_token_management.Rd b/man/list_token_management.Rd new file mode 100644 index 00000000..32ffb91f --- /dev/null +++ b/man/list_token_management.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token_management.R +\name{list_token_management} +\alias{list_token_management} +\alias{tokenManagementList} +\title{List all tokens.} +\usage{ +list_token_management(client, created_by_id = NULL, created_by_username = NULL) + +tokenManagementList(client, created_by_id = NULL, created_by_username = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{created_by_id}{User ID of the user that created the token.} + +\item{created_by_username}{Username of the user that created the token.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Lists all tokens associated with the specified workspace or user. +} diff --git a/man/list_tokens.Rd b/man/list_tokens.Rd new file mode 100644 index 00000000..4c0320d5 --- /dev/null +++ b/man/list_tokens.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tokens.R +\name{list_tokens} +\alias{list_tokens} +\alias{tokensList} +\title{List tokens.} +\usage{ +list_tokens(client) + +tokensList(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Lists all the valid tokens for a user-workspace pair. +} diff --git a/man/list_users.Rd b/man/list_users.Rd new file mode 100644 index 00000000..63e5e1ca --- /dev/null +++ b/man/list_users.Rd @@ -0,0 +1,52 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/users.R +\name{list_users} +\alias{list_users} +\alias{usersList} +\title{List users.} +\usage{ +list_users( + client, + attributes = NULL, + count = NULL, + excluded_attributes = NULL, + filter = NULL, + sort_by = NULL, + sort_order = NULL, + start_index = NULL +) + +usersList( + client, + attributes = NULL, + count = NULL, + excluded_attributes = NULL, + filter = NULL, + sort_by = NULL, + sort_order = NULL, + start_index = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{attributes}{Comma-separated list of attributes to return in response.} + +\item{count}{Desired number of results per page.} + +\item{excluded_attributes}{Comma-separated list of attributes to exclude in response.} + +\item{filter}{Query by which the results have to be filtered.} + +\item{sort_by}{Attribute to sort the results.} + +\item{sort_order}{The order to sort the results.} + +\item{start_index}{Specifies the index of the first result.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets details for all the users associated with a Databricks workspace. 
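A hedged sketch of the table listing documented further above, trimming the response when only table names are needed; the catalog and schema names are hypothetical:

client <- DatabricksClient()

# Drop column and property payloads to keep the merged data.frame small.
tbls <- list_tables(client, catalog_name = "main", schema_name = "default",
  omit_columns = TRUE, omit_properties = TRUE)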
+} diff --git a/man/list_vector_search_endpoint_endpoints.Rd b/man/list_vector_search_endpoint_endpoints.Rd new file mode 100644 index 00000000..ddfb8584 --- /dev/null +++ b/man/list_vector_search_endpoint_endpoints.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/vector_search_endpoints.R +\name{list_vector_search_endpoint_endpoints} +\alias{list_vector_search_endpoint_endpoints} +\alias{vectorSearchEndpointsListEndpoints} +\title{List all endpoints.} +\usage{ +list_vector_search_endpoint_endpoints(client, page_token = NULL) + +vectorSearchEndpointsListEndpoints(client, page_token = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{page_token}{Token for pagination.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +List all endpoints. +} diff --git a/man/list_vector_search_index_indexes.Rd b/man/list_vector_search_index_indexes.Rd new file mode 100644 index 00000000..beacf4b2 --- /dev/null +++ b/man/list_vector_search_index_indexes.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/vector_search_indexes.R +\name{list_vector_search_index_indexes} +\alias{list_vector_search_index_indexes} +\alias{vectorSearchIndexesListIndexes} +\title{List indexes.} +\usage{ +list_vector_search_index_indexes(client, endpoint_name, page_token = NULL) + +vectorSearchIndexesListIndexes(client, endpoint_name, page_token = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{endpoint_name}{Required. Name of the endpoint.} + +\item{page_token}{Token for pagination.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +List all indexes in the given endpoint. +} diff --git a/man/list_volumes.Rd b/man/list_volumes.Rd new file mode 100644 index 00000000..e617d76f --- /dev/null +++ b/man/list_volumes.Rd @@ -0,0 +1,55 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/volumes.R +\name{list_volumes} +\alias{list_volumes} +\alias{volumesList} +\title{List Volumes.} +\usage{ +list_volumes( + client, + catalog_name, + schema_name, + include_browse = NULL, + max_results = NULL, + page_token = NULL +) + +volumesList( + client, + catalog_name, + schema_name, + include_browse = NULL, + max_results = NULL, + page_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{catalog_name}{Required. The identifier of the catalog.} + +\item{schema_name}{Required. The identifier of the schema.} + +\item{include_browse}{Whether to include volumes in the response for which the principal can only access selective metadata.} + +\item{max_results}{Maximum number of volumes to return (page length).} + +\item{page_token}{Opaque token returned by a previous request.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Gets an array of volumes for the current metastore under the parent catalog +and schema. +} +\details{ +The returned volumes are filtered based on the privileges of the calling +user. For example, the metastore admin is able to list all the volumes. A +regular user needs to be the owner or have the \strong{READ VOLUME} privilege on +the volume to receive the volumes in the response. For the latter case, the +caller must also be the owner or have the \strong{USE_CATALOG} privilege on the +parent catalog and the \strong{USE_SCHEMA} privilege on the parent schema.
+ +There is no guarantee of a specific ordering of the elements in the array. +} diff --git a/man/list_warehouses.Rd b/man/list_warehouses.Rd new file mode 100644 index 00000000..d057a4a2 --- /dev/null +++ b/man/list_warehouses.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{list_warehouses} +\alias{list_warehouses} +\alias{warehousesList} +\title{List warehouses.} +\usage{ +list_warehouses(client, run_as_user_id = NULL) + +warehousesList(client, run_as_user_id = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_as_user_id}{Service Principal which will be used to fetch the list of warehouses.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Lists all SQL warehouses that a user has manager permissions on. +} diff --git a/man/log_experiment_batch.Rd b/man/log_experiment_batch.Rd new file mode 100644 index 00000000..2c3309ed --- /dev/null +++ b/man/log_experiment_batch.Rd @@ -0,0 +1,75 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{log_experiment_batch} +\alias{log_experiment_batch} +\alias{experimentsLogBatch} +\title{Log a batch.} +\usage{ +log_experiment_batch( + client, + metrics = NULL, + params = NULL, + run_id = NULL, + tags = NULL +) + +experimentsLogBatch( + client, + metrics = NULL, + params = NULL, + run_id = NULL, + tags = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{metrics}{Metrics to log.} + +\item{params}{Params to log.} + +\item{run_id}{ID of the run to log under.} + +\item{tags}{Tags to log.} +} +\description{ +Logs a batch of metrics, params, and tags for a run. If any data failed to be +persisted, the server will respond with an error (non-200 status code). +} +\details{ +In case of error (due to internal server error or an invalid request), +partial data may be written. + +You can write metrics, params, and tags in interleaving fashion, but within a +given entity type they are guaranteed to follow the order specified in the +request body. + +The overwrite behavior for metrics, params, and tags is as follows: +\itemize{ +\item Metrics: metric values are never overwritten. Logging a metric (key, value, +timestamp) appends to the set of values for the metric with the provided key. +\item Tags: tag values can be overwritten by successive writes to the same tag +key. That is, if multiple tag values with the same key are provided in the +same API request, the last-provided tag value is written. Logging the same +tag (key, value) is permitted. Specifically, logging a tag is idempotent. +\item Parameters: once written, param values cannot be changed (attempting to +overwrite a param value will result in an error). However, logging the same +param (key, value) is permitted. Specifically, logging a param is idempotent. +} + +\strong{Request Limits}: A single JSON-serialized API request may be up to 1 +MB in size and contain: +\itemize{ +\item No more than 1000 metrics, params, and tags in total +\item Up to 1000 metrics +\item Up to 100 params +\item Up to 100 tags +} + +For example, a valid request might contain 900 metrics, 50 params, and 50 +tags, but logging 900 metrics, 50 params, and 51 tags is invalid.
+ +The following limits also apply to metric, param, and tag keys and values: +\itemize{ +\item Metric keys, param keys, and tag keys can be up to 250 characters in length +\item Parameter and tag values can be up to 250 characters in length +} +} diff --git a/man/log_experiment_inputs.Rd b/man/log_experiment_inputs.Rd new file mode 100644 index 00000000..608bd354 --- /dev/null +++ b/man/log_experiment_inputs.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{log_experiment_inputs} +\alias{log_experiment_inputs} +\alias{experimentsLogInputs} +\title{Log inputs to a run.} +\usage{ +log_experiment_inputs(client, datasets = NULL, run_id = NULL) + +experimentsLogInputs(client, datasets = NULL, run_id = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{datasets}{Dataset inputs.} + +\item{run_id}{ID of the run to log under.} +} +\description{ +\strong{NOTE:} Experimental: This API may change or be removed in a future release +without warning. +} diff --git a/man/log_experiment_metric.Rd b/man/log_experiment_metric.Rd new file mode 100644 index 00000000..d66ae822 --- /dev/null +++ b/man/log_experiment_metric.Rd @@ -0,0 +1,47 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{log_experiment_metric} +\alias{log_experiment_metric} +\alias{experimentsLogMetric} +\title{Log a metric.} +\usage{ +log_experiment_metric( + client, + key, + value, + timestamp, + run_id = NULL, + run_uuid = NULL, + step = NULL +) + +experimentsLogMetric( + client, + key, + value, + timestamp, + run_id = NULL, + run_uuid = NULL, + step = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{key}{Required. Name of the metric.} + +\item{value}{Required. Double value of the metric being logged.} + +\item{timestamp}{Required. Unix timestamp in milliseconds at the time metric was logged.} + +\item{run_id}{ID of the run under which to log the metric.} + +\item{run_uuid}{Deprecated, use run_id instead. ID of the run under which to log the metric.} + +\item{step}{Step at which to log the metric.} +} +\description{ +Logs a metric for a run. A metric is a key-value pair (string key, float +value) with an associated timestamp. Examples include the various metrics +that represent ML model accuracy. A metric can be logged multiple times. +} diff --git a/man/log_experiment_model.Rd b/man/log_experiment_model.Rd new file mode 100644 index 00000000..d6cdf3f1 --- /dev/null +++ b/man/log_experiment_model.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{log_experiment_model} +\alias{log_experiment_model} +\alias{experimentsLogModel} +\title{Log a model.} +\usage{ +log_experiment_model(client, model_json = NULL, run_id = NULL) + +experimentsLogModel(client, model_json = NULL, run_id = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{model_json}{MLmodel file in json format.} + +\item{run_id}{ID of the run to log under.} +} +\description{ +\strong{NOTE:} Experimental: This API may change or be removed in a future release +without warning. 
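Taken together, the experiment-logging helpers documented above map directly onto an MLflow run. A minimal sketch, assuming a configured workspace, an attached SDK, and an existing run ID (placeholder below); the list structure passed to log_experiment_batch() is an illustrative guess at the serialized request body:

```r
client <- DatabricksClient()
run <- "<run-id>"                               # placeholder: an existing MLflow run
now_ms <- round(as.numeric(Sys.time()) * 1000)  # Unix timestamp in milliseconds

# Log a single metric point for the run.
log_experiment_metric(client, key = "accuracy", value = 0.91,
                      timestamp = now_ms, run_id = run, step = 1)

# Or batch several entities in one request (subject to the limits above).
log_experiment_batch(client,
  run_id  = run,
  metrics = list(list(key = "loss", value = 0.42, timestamp = now_ms, step = 1)),
  params  = list(list(key = "optimizer", value = "adam")),
  tags    = list(list(key = "stage", value = "dev"))
)
```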
+} diff --git a/man/log_experiment_param.Rd b/man/log_experiment_param.Rd new file mode 100644 index 00000000..4644e0f2 --- /dev/null +++ b/man/log_experiment_param.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{log_experiment_param} +\alias{log_experiment_param} +\alias{experimentsLogParam} +\title{Log a param.} +\usage{ +log_experiment_param(client, key, value, run_id = NULL, run_uuid = NULL) + +experimentsLogParam(client, key, value, run_id = NULL, run_uuid = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{key}{Required. Name of the param.} + +\item{value}{Required. String value of the param being logged.} + +\item{run_id}{ID of the run under which to log the param.} + +\item{run_uuid}{Deprecated, use run_id instead. ID of the run under which to log the param.} +} +\description{ +Logs a param used for a run. A param is a key-value pair (string key, string +value). Examples include hyperparameters used for ML model training and +constant dates and values used in an ETL pipeline. A param can be logged only +once for a run. +} diff --git a/man/logs_serving_endpoint.Rd b/man/logs_serving_endpoint.Rd new file mode 100644 index 00000000..c6826889 --- /dev/null +++ b/man/logs_serving_endpoint.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{logs_serving_endpoint} +\alias{logs_serving_endpoint} +\alias{servingEndpointsLogs} +\title{Get the latest logs for a served model.} +\usage{ +logs_serving_endpoint(client, name, served_model_name) + +servingEndpointsLogs(client, name, served_model_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the serving endpoint that the served model belongs to.} + +\item{served_model_name}{Required. The name of the served model that logs will be retrieved for.} +} +\description{ +Retrieves the service logs associated with the provided served model. +} diff --git a/man/me.Rd b/man/me.Rd new file mode 100644 index 00000000..2d44cd8f --- /dev/null +++ b/man/me.Rd @@ -0,0 +1,17 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/current_user.R +\name{me} +\alias{me} +\alias{currentUserMe} +\title{Get current user info.} +\usage{ +me(client) + +currentUserMe(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +Get details about the current method caller's identity. +} diff --git a/man/metastoresAssign.Rd b/man/metastoresAssign.Rd deleted file mode 100644 index 549b0fce..00000000 --- a/man/metastoresAssign.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/metastores.R -\name{metastoresAssign} -\alias{metastoresAssign} -\title{Create an assignment.} -\usage{ -metastoresAssign(client, workspace_id, metastore_id, default_catalog_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{workspace_id}{Required. A workspace ID.} - -\item{metastore_id}{Required. The unique ID of the metastore.} - -\item{default_catalog_name}{Required. The name of the default catalog in the metastore.} -} -\description{ -Creates a new metastore assignment. If an assignment for the same -\strong{workspace_id} exists, it will be overwritten by the new \strong{metastore_id} -and \strong{default_catalog_name}. The caller must be an account admin. 
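As the new man pages above illustrate (me.Rd, for instance), each endpoint now documents a flat snake_case function alongside its legacy camelCase alias. A minimal sketch, assuming a configured client; treating the alias as the identical function object is an assumption based on the aliasing pattern, not verified here:

```r
client <- DatabricksClient()

# Both names hit the same endpoint; the camelCase form is kept for compatibility.
me(client)
currentUserMe(client)

# Assumption: the alias is assigned directly from the flat name,
# so the two bindings should be the same function object.
identical(me, currentUserMe)
```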
-} diff --git a/man/metastoresCreate.Rd b/man/metastoresCreate.Rd deleted file mode 100644 index 50a52d3e..00000000 --- a/man/metastoresCreate.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/metastores.R -\name{metastoresCreate} -\alias{metastoresCreate} -\title{Create a metastore.} -\usage{ -metastoresCreate(client, name, region = NULL, storage_root = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The user-specified name of the metastore.} - -\item{region}{Cloud region which the metastore serves (e.g., \code{us-west-2}, \code{westus}).} - -\item{storage_root}{The storage root URL for metastore.} -} -\description{ -Creates a new metastore based on a provided name and optional storage root -path. By default (if the \strong{owner} field is not set), the owner of the new -metastore is the user calling the \strong{createMetastore} API. If the \strong{owner} -field is set to the empty string (\strong{''}), the ownership is assigned to the -System User instead. -} diff --git a/man/metastoresCurrent.Rd b/man/metastoresCurrent.Rd deleted file mode 100644 index 0d0e2087..00000000 --- a/man/metastoresCurrent.Rd +++ /dev/null @@ -1,14 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/metastores.R -\name{metastoresCurrent} -\alias{metastoresCurrent} -\title{Get metastore assignment for workspace.} -\usage{ -metastoresCurrent(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Gets the metastore assignment for the workspace being accessed. -} diff --git a/man/metastoresDelete.Rd b/man/metastoresDelete.Rd deleted file mode 100644 index 43bbb1b7..00000000 --- a/man/metastoresDelete.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/metastores.R -\name{metastoresDelete} -\alias{metastoresDelete} -\title{Delete a metastore.} -\usage{ -metastoresDelete(client, id, force = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Unique ID of the metastore.} - -\item{force}{Force deletion even if the metastore is not empty.} -} -\description{ -Deletes a metastore. The caller must be a metastore admin. -} diff --git a/man/metastoresGet.Rd b/man/metastoresGet.Rd deleted file mode 100644 index ea1c7acf..00000000 --- a/man/metastoresGet.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/metastores.R -\name{metastoresGet} -\alias{metastoresGet} -\title{Get a metastore.} -\usage{ -metastoresGet(client, id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Unique ID of the metastore.} -} -\description{ -Gets a metastore that matches the supplied ID. The caller must be a metastore -admin to retrieve this info. -} diff --git a/man/metastoresList.Rd b/man/metastoresList.Rd deleted file mode 100644 index 7e9a471a..00000000 --- a/man/metastoresList.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/metastores.R -\name{metastoresList} -\alias{metastoresList} -\title{List metastores.} -\usage{ -metastoresList(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\value{ -\code{data.frame} with all of the response pages. 
-} -\description{ -Gets an array of the available metastores (as \strong{MetastoreInfo} objects). The -caller must be an admin to retrieve this info. There is no guarantee of a -specific ordering of the elements in the array. -} diff --git a/man/metastoresSummary.Rd b/man/metastoresSummary.Rd deleted file mode 100644 index 4f354d35..00000000 --- a/man/metastoresSummary.Rd +++ /dev/null @@ -1,15 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/metastores.R -\name{metastoresSummary} -\alias{metastoresSummary} -\title{Get a metastore summary.} -\usage{ -metastoresSummary(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Gets information about a metastore. This summary includes the storage -credential, the cloud vendor, the cloud region, and the global metastore ID. -} diff --git a/man/metastoresUnassign.Rd b/man/metastoresUnassign.Rd deleted file mode 100644 index 0a8964d0..00000000 --- a/man/metastoresUnassign.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/metastores.R -\name{metastoresUnassign} -\alias{metastoresUnassign} -\title{Delete an assignment.} -\usage{ -metastoresUnassign(client, workspace_id, metastore_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{workspace_id}{Required. A workspace ID.} - -\item{metastore_id}{Required. Query for the ID of the metastore to delete.} -} -\description{ -Deletes a metastore assignment. The caller must be an account administrator. -} diff --git a/man/metastoresUpdate.Rd b/man/metastoresUpdate.Rd deleted file mode 100644 index f3f9a9e0..00000000 --- a/man/metastoresUpdate.Rd +++ /dev/null @@ -1,42 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/metastores.R -\name{metastoresUpdate} -\alias{metastoresUpdate} -\title{Update a metastore.} -\usage{ -metastoresUpdate( - client, - id, - delta_sharing_organization_name = NULL, - delta_sharing_recipient_token_lifetime_in_seconds = NULL, - delta_sharing_scope = NULL, - new_name = NULL, - owner = NULL, - privilege_model_version = NULL, - storage_root_credential_id = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Unique ID of the metastore.} - -\item{delta_sharing_organization_name}{The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name.} - -\item{delta_sharing_recipient_token_lifetime_in_seconds}{The lifetime of delta sharing recipient token in seconds.} - -\item{delta_sharing_scope}{The scope of Delta Sharing enabled for the metastore.} - -\item{new_name}{New name for the metastore.} - -\item{owner}{The owner of the metastore.} - -\item{privilege_model_version}{Privilege model version of the metastore, of the form \code{major.minor} (e.g., \code{1.0}).} - -\item{storage_root_credential_id}{UUID of storage credential to access the metastore storage_root.} -} -\description{ -Updates information for a specific metastore. The caller must be a metastore -admin. If the \strong{owner} field is set to the empty string (\strong{''}), the -ownership is updated to the System User. 
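The deleted camelCase metastore helpers above presumably survive under the flat snake_case scheme used elsewhere in this diff (e.g., metastoresCreate() becoming create_metastore()); that rename is inferred from the pattern, not confirmed here. A hypothetical sketch of the same admin workflow under the assumed new names:

```r
client <- DatabricksClient()  # must authenticate as an account/metastore admin

# Hypothetical flat renames of the deleted helpers shown above.
ms <- create_metastore(client, name = "primary", region = "us-west-2")

# Assumption: the returned MetastoreInfo carries the new metastore's ID.
update_metastore(client, id = ms$metastore_id, new_name = "primary-metastore")
```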
-} diff --git a/man/metastoresUpdateAssignment.Rd b/man/metastoresUpdateAssignment.Rd deleted file mode 100644 index 427015cd..00000000 --- a/man/metastoresUpdateAssignment.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/metastores.R -\name{metastoresUpdateAssignment} -\alias{metastoresUpdateAssignment} -\title{Update an assignment.} -\usage{ -metastoresUpdateAssignment( - client, - workspace_id, - default_catalog_name = NULL, - metastore_id = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{workspace_id}{Required. A workspace ID.} - -\item{default_catalog_name}{The name of the default catalog for the metastore.} - -\item{metastore_id}{The unique ID of the metastore.} -} -\description{ -Updates a metastore assignment. This operation can be used to update -\strong{metastore_id} or \strong{default_catalog_name} for a specified Workspace, if -the Workspace is already assigned a metastore. The caller must be an account -admin to update \strong{metastore_id}; otherwise, the caller can be a Workspace -admin. -} diff --git a/man/migrate_permission_migration_permissions.Rd b/man/migrate_permission_migration_permissions.Rd new file mode 100644 index 00000000..9c246c4b --- /dev/null +++ b/man/migrate_permission_migration_permissions.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/permission_migration.R +\name{migrate_permission_migration_permissions} +\alias{migrate_permission_migration_permissions} +\alias{permissionMigrationMigratePermissions} +\title{Migrate Permissions.} +\usage{ +migrate_permission_migration_permissions( + client, + workspace_id, + from_workspace_group_name, + to_account_group_name, + size = NULL +) + +permissionMigrationMigratePermissions( + client, + workspace_id, + from_workspace_group_name, + to_account_group_name, + size = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{workspace_id}{Required. WorkspaceId of the associated workspace where the permission migration will occur.} + +\item{from_workspace_group_name}{Required. The name of the workspace group that permissions will be migrated from.} + +\item{to_account_group_name}{Required. The name of the account group that permissions will be migrated to.} + +\item{size}{The maximum number of permissions that will be migrated.} +} +\description{ +Migrate a batch of permissions from a workspace local group to an account +group. +} diff --git a/man/mkdirs_dbfs.Rd b/man/mkdirs_dbfs.Rd new file mode 100644 index 00000000..abbd0973 --- /dev/null +++ b/man/mkdirs_dbfs.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dbfs.R +\name{mkdirs_dbfs} +\alias{mkdirs_dbfs} +\alias{dbfsMkdirs} +\title{Create a directory.} +\usage{ +mkdirs_dbfs(client, path) + +dbfsMkdirs(client, path) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{path}{Required. The path of the new directory.} +} +\description{ +Creates the given directory and necessary parent directories if they do not +exist. If a file (not a directory) exists at any prefix of the input path, +this call throws an exception with \code{RESOURCE_ALREADY_EXISTS}. \strong{Note}: If +this operation fails, it might have succeeded in creating some of the +necessary parent directories. 
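A minimal sketch of mkdirs_dbfs(), assuming a configured workspace client; per the caveat above, a failed call may still have created some of the parent directories:

```r
client <- DatabricksClient()

# Creates the directory and any missing parents; succeeds if it already exists.
mkdirs_dbfs(client, path = "/tmp/examples/run-artifacts")
```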
+} diff --git a/man/mkdirs_notebook.Rd b/man/mkdirs_notebook.Rd new file mode 100644 index 00000000..35cdda32 --- /dev/null +++ b/man/mkdirs_notebook.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{mkdirs_notebook} +\alias{mkdirs_notebook} +\alias{workspaceMkdirs} +\title{Create a directory.} +\usage{ +mkdirs_notebook(client, path) + +workspaceMkdirs(client, path) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{path}{Required. The absolute path of the directory.} +} +\description{ +Creates the specified directory (and necessary parent directories if they do +not exist). If there is an object (not a directory) at any prefix of the +input path, this call returns an error \code{RESOURCE_ALREADY_EXISTS}. +} +\details{ +Note that if this operation fails it may have succeeded in creating some of +the necessary parent directories. +} diff --git a/man/modelRegistryApproveTransitionRequest.Rd b/man/modelRegistryApproveTransitionRequest.Rd deleted file mode 100644 index c7990d74..00000000 --- a/man/modelRegistryApproveTransitionRequest.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryApproveTransitionRequest} -\alias{modelRegistryApproveTransitionRequest} -\title{Approve transition request.} -\usage{ -modelRegistryApproveTransitionRequest( - client, - name, - version, - stage, - archive_existing_versions, - comment = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the model.} - -\item{version}{Required. Version of the model.} - -\item{stage}{Required. Target stage of the transition.} - -\item{archive_existing_versions}{Required. Specifies whether to archive all current model versions in the target stage.} - -\item{comment}{User-provided comment on the action.} -} -\description{ -Approves a model version stage transition request. -} diff --git a/man/modelRegistryCreateComment.Rd b/man/modelRegistryCreateComment.Rd deleted file mode 100644 index a32ad0e2..00000000 --- a/man/modelRegistryCreateComment.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryCreateComment} -\alias{modelRegistryCreateComment} -\title{Post a comment.} -\usage{ -modelRegistryCreateComment(client, name, version, comment) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the model.} - -\item{version}{Required. Version of the model.} - -\item{comment}{Required. User-provided comment on the action.} -} -\description{ -Posts a comment on a model version. A comment can be submitted either by a -user or programmatically to display relevant information about the model. For -example, test results or deployment errors. -} diff --git a/man/modelRegistryCreateModel.Rd b/man/modelRegistryCreateModel.Rd deleted file mode 100644 index 587f1cef..00000000 --- a/man/modelRegistryCreateModel.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryCreateModel} -\alias{modelRegistryCreateModel} -\title{Create a model.} -\usage{ -modelRegistryCreateModel(client, name, description = NULL, tags = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. 
Register models under this name.} - -\item{description}{Optional description for registered model.} - -\item{tags}{Additional metadata for registered model.} -} -\description{ -Creates a new registered model with the name specified in the request body. -} -\details{ -Throws \code{RESOURCE_ALREADY_EXISTS} if a registered model with the given name -exists. -} diff --git a/man/modelRegistryCreateModelVersion.Rd b/man/modelRegistryCreateModelVersion.Rd deleted file mode 100644 index 42694601..00000000 --- a/man/modelRegistryCreateModelVersion.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryCreateModelVersion} -\alias{modelRegistryCreateModelVersion} -\title{Create a model version.} -\usage{ -modelRegistryCreateModelVersion( - client, - name, - source, - description = NULL, - run_id = NULL, - run_link = NULL, - tags = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Register model under this name.} - -\item{source}{Required. URI indicating the location of the model artifacts.} - -\item{description}{Optional description for model version.} - -\item{run_id}{MLflow run ID for correlation, if \code{source} was generated by an experiment run in MLflow tracking server.} - -\item{run_link}{MLflow run link - this is the exact link of the run that generated this model version, potentially hosted at another instance of MLflow.} - -\item{tags}{Additional metadata for model version.} -} -\description{ -Creates a model version. -} diff --git a/man/modelRegistryCreateTransitionRequest.Rd b/man/modelRegistryCreateTransitionRequest.Rd deleted file mode 100644 index d7ec87b5..00000000 --- a/man/modelRegistryCreateTransitionRequest.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryCreateTransitionRequest} -\alias{modelRegistryCreateTransitionRequest} -\title{Make a transition request.} -\usage{ -modelRegistryCreateTransitionRequest( - client, - name, - version, - stage, - comment = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the model.} - -\item{version}{Required. Version of the model.} - -\item{stage}{Required. Target stage of the transition.} - -\item{comment}{User-provided comment on the action.} -} -\description{ -Creates a model version stage transition request. -} diff --git a/man/modelRegistryCreateWebhook.Rd b/man/modelRegistryCreateWebhook.Rd deleted file mode 100644 index 7e84a7c3..00000000 --- a/man/modelRegistryCreateWebhook.Rd +++ /dev/null @@ -1,37 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryCreateWebhook} -\alias{modelRegistryCreateWebhook} -\title{Create a webhook.} -\usage{ -modelRegistryCreateWebhook( - client, - events, - description = NULL, - http_url_spec = NULL, - job_spec = NULL, - model_name = NULL, - status = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{events}{Required. 
Events that can trigger a registry webhook: * \code{MODEL_VERSION_CREATED}: A new model version was created for the associated model.} - -\item{description}{User-specified description for the webhook.} - -\item{http_url_spec}{This field has no description yet.} - -\item{job_spec}{This field has no description yet.} - -\item{model_name}{Name of the model whose events would trigger this webhook.} - -\item{status}{Enable or disable triggering the webhook, or put the webhook into test mode.} -} -\description{ -\strong{NOTE}: This endpoint is in Public Preview. -} -\details{ -Creates a registry webhook. -} diff --git a/man/modelRegistryDeleteComment.Rd b/man/modelRegistryDeleteComment.Rd deleted file mode 100644 index 363779c3..00000000 --- a/man/modelRegistryDeleteComment.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryDeleteComment} -\alias{modelRegistryDeleteComment} -\title{Delete a comment.} -\usage{ -modelRegistryDeleteComment(client, id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. This field has no description yet.} -} -\description{ -Deletes a comment on a model version. -} diff --git a/man/modelRegistryDeleteModel.Rd b/man/modelRegistryDeleteModel.Rd deleted file mode 100644 index 2873f0bf..00000000 --- a/man/modelRegistryDeleteModel.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryDeleteModel} -\alias{modelRegistryDeleteModel} -\title{Delete a model.} -\usage{ -modelRegistryDeleteModel(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Registered model unique name identifier.} -} -\description{ -Deletes a registered model. -} diff --git a/man/modelRegistryDeleteModelTag.Rd b/man/modelRegistryDeleteModelTag.Rd deleted file mode 100644 index faab16cb..00000000 --- a/man/modelRegistryDeleteModelTag.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryDeleteModelTag} -\alias{modelRegistryDeleteModelTag} -\title{Delete a model tag.} -\usage{ -modelRegistryDeleteModelTag(client, name, key) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the registered model that the tag was logged under.} - -\item{key}{Required. Name of the tag.} -} -\description{ -Deletes the tag for a registered model. -} diff --git a/man/modelRegistryDeleteModelVersion.Rd b/man/modelRegistryDeleteModelVersion.Rd deleted file mode 100644 index fb201a04..00000000 --- a/man/modelRegistryDeleteModelVersion.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryDeleteModelVersion} -\alias{modelRegistryDeleteModelVersion} -\title{Delete a model version.} -\usage{ -modelRegistryDeleteModelVersion(client, name, version) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the registered model.} - -\item{version}{Required. Model version number.} -} -\description{ -Deletes a model version. 
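For the registry webhooks removed above, a hypothetical sketch under the assumed flat rename create_model_webhook() (inferred from the naming pattern in this diff); the http_url_spec structure is an illustrative guess:

```r
client <- DatabricksClient()

# Hypothetical flat rename of the deleted modelRegistryCreateWebhook() above.
create_model_webhook(client,
  events        = list("MODEL_VERSION_CREATED"),
  model_name    = "my-model",
  http_url_spec = list(url = "https://example.com/registry-hook"),
  description   = "Notify on new model versions"
)
```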
-} diff --git a/man/modelRegistryDeleteModelVersionTag.Rd b/man/modelRegistryDeleteModelVersionTag.Rd deleted file mode 100644 index 62c1a9ff..00000000 --- a/man/modelRegistryDeleteModelVersionTag.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryDeleteModelVersionTag} -\alias{modelRegistryDeleteModelVersionTag} -\title{Delete a model version tag.} -\usage{ -modelRegistryDeleteModelVersionTag(client, name, version, key) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the registered model that the tag was logged under.} - -\item{version}{Required. Model version number that the tag was logged under.} - -\item{key}{Required. Name of the tag.} -} -\description{ -Deletes a model version tag. -} diff --git a/man/modelRegistryDeleteTransitionRequest.Rd b/man/modelRegistryDeleteTransitionRequest.Rd deleted file mode 100644 index 7da07a28..00000000 --- a/man/modelRegistryDeleteTransitionRequest.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryDeleteTransitionRequest} -\alias{modelRegistryDeleteTransitionRequest} -\title{Delete a transition request.} -\usage{ -modelRegistryDeleteTransitionRequest( - client, - name, - version, - stage, - creator, - comment = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the model.} - -\item{version}{Required. Version of the model.} - -\item{stage}{Required. Target stage of the transition request.} - -\item{creator}{Required. Username of the user who created this request.} - -\item{comment}{User-provided comment on the action.} -} -\description{ -Cancels a model version stage transition request. -} diff --git a/man/modelRegistryDeleteWebhook.Rd b/man/modelRegistryDeleteWebhook.Rd deleted file mode 100644 index 7245e52f..00000000 --- a/man/modelRegistryDeleteWebhook.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryDeleteWebhook} -\alias{modelRegistryDeleteWebhook} -\title{Delete a webhook.} -\usage{ -modelRegistryDeleteWebhook(client, id = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Webhook ID required to delete a registry webhook.} -} -\description{ -\strong{NOTE:} This endpoint is in Public Preview. -} -\details{ -Deletes a registry webhook. -} diff --git a/man/modelRegistryGetLatestVersions.Rd b/man/modelRegistryGetLatestVersions.Rd deleted file mode 100644 index 76f8bec1..00000000 --- a/man/modelRegistryGetLatestVersions.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryGetLatestVersions} -\alias{modelRegistryGetLatestVersions} -\title{Get the latest version.} -\usage{ -modelRegistryGetLatestVersions(client, name, stages = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Registered model unique name identifier.} - -\item{stages}{List of stages.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets the latest version of a registered model. 
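The transition-request helpers deleted above compose into a simple review workflow. A hypothetical sketch under the assumed flat renames create_model_transition_request() and approve_model_transition_request(), mirroring the argument lists shown in the deleted docs:

```r
client <- DatabricksClient()

# Request a stage transition for a model version, then approve it.
create_model_transition_request(client,
  name = "my-model", version = "3", stage = "Production",
  comment = "Validated on holdout set"
)
approve_model_transition_request(client,
  name = "my-model", version = "3", stage = "Production",
  archive_existing_versions = TRUE  # archive versions already in Production
)
```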
-} diff --git a/man/modelRegistryGetModel.Rd b/man/modelRegistryGetModel.Rd deleted file mode 100644 index dd6cc5bb..00000000 --- a/man/modelRegistryGetModel.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryGetModel} -\alias{modelRegistryGetModel} -\title{Get model.} -\usage{ -modelRegistryGetModel(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Registered model unique name identifier.} -} -\description{ -Get the details of a model. This is a Databricks workspace version of the -MLflow endpoint that also returns the model's Databricks workspace ID and -the permission level of the requesting user on the model. -} -\details{ -MLflow endpoint: https://www.mlflow.org/docs/latest/rest-api.html#get-registeredmodel -} diff --git a/man/modelRegistryGetModelVersion.Rd b/man/modelRegistryGetModelVersion.Rd deleted file mode 100644 index a02bdd10..00000000 --- a/man/modelRegistryGetModelVersion.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryGetModelVersion} -\alias{modelRegistryGetModelVersion} -\title{Get a model version.} -\usage{ -modelRegistryGetModelVersion(client, name, version) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the registered model.} - -\item{version}{Required. Model version number.} -} -\description{ -Get a model version. -} diff --git a/man/modelRegistryGetModelVersionDownloadUri.Rd b/man/modelRegistryGetModelVersionDownloadUri.Rd deleted file mode 100644 index affc5729..00000000 --- a/man/modelRegistryGetModelVersionDownloadUri.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryGetModelVersionDownloadUri} -\alias{modelRegistryGetModelVersionDownloadUri} -\title{Get a model version URI.} -\usage{ -modelRegistryGetModelVersionDownloadUri(client, name, version) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the registered model.} - -\item{version}{Required. Model version number.} -} -\description{ -Gets a URI to download the model version. -} diff --git a/man/modelRegistryGetPermissionLevels.Rd b/man/modelRegistryGetPermissionLevels.Rd deleted file mode 100644 index f01c1cab..00000000 --- a/man/modelRegistryGetPermissionLevels.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryGetPermissionLevels} -\alias{modelRegistryGetPermissionLevels} -\title{Get registered model permission levels.} -\usage{ -modelRegistryGetPermissionLevels(client, registered_model_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{registered_model_id}{Required. The registered model for which to get or manage permissions.} -} -\description{ -Gets the permission levels that a user can have on an object. 
-} diff --git a/man/modelRegistryGetPermissions.Rd b/man/modelRegistryGetPermissions.Rd deleted file mode 100644 index 3c767874..00000000 --- a/man/modelRegistryGetPermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryGetPermissions} -\alias{modelRegistryGetPermissions} -\title{Get registered model permissions.} -\usage{ -modelRegistryGetPermissions(client, registered_model_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{registered_model_id}{Required. The registered model for which to get or manage permissions.} -} -\description{ -Gets the permissions of a registered model. Registered models can inherit -permissions from their root object. -} diff --git a/man/modelRegistryListModels.Rd b/man/modelRegistryListModels.Rd deleted file mode 100644 index d1eaee65..00000000 --- a/man/modelRegistryListModels.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryListModels} -\alias{modelRegistryListModels} -\title{List models.} -\usage{ -modelRegistryListModels(client, max_results = NULL, page_token = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{max_results}{Maximum number of registered models desired.} - -\item{page_token}{Pagination token to go to the next page based on a previous query.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Lists all available registered models, up to the limit specified in -\strong{max_results}. -} diff --git a/man/modelRegistryListTransitionRequests.Rd b/man/modelRegistryListTransitionRequests.Rd deleted file mode 100644 index 67ba2b42..00000000 --- a/man/modelRegistryListTransitionRequests.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryListTransitionRequests} -\alias{modelRegistryListTransitionRequests} -\title{List transition requests.} -\usage{ -modelRegistryListTransitionRequests(client, name, version) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the model.} - -\item{version}{Required. Version of the model.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets a list of all open stage transition requests for the model version. -} diff --git a/man/modelRegistryListWebhooks.Rd b/man/modelRegistryListWebhooks.Rd deleted file mode 100644 index 2b16da2b..00000000 --- a/man/modelRegistryListWebhooks.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryListWebhooks} -\alias{modelRegistryListWebhooks} -\title{List registry webhooks.} -\usage{ -modelRegistryListWebhooks( - client, - events = NULL, - model_name = NULL, - page_token = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{events}{If \code{events} is specified, any webhook with one or more of the specified trigger events is included in the output.} - -\item{model_name}{If not specified, all webhooks associated with the specified events are listed, regardless of their associated model.} - -\item{page_token}{Token indicating the page of artifact results to fetch.} -} -\value{ -\code{data.frame} with all of the response pages. 
-} -\description{ -\strong{NOTE:} This endpoint is in Public Preview. -} -\details{ -Lists all registry webhooks. -} diff --git a/man/modelRegistryRejectTransitionRequest.Rd b/man/modelRegistryRejectTransitionRequest.Rd deleted file mode 100644 index 494bbd52..00000000 --- a/man/modelRegistryRejectTransitionRequest.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryRejectTransitionRequest} -\alias{modelRegistryRejectTransitionRequest} -\title{Reject a transition request.} -\usage{ -modelRegistryRejectTransitionRequest( - client, - name, - version, - stage, - comment = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the model.} - -\item{version}{Required. Version of the model.} - -\item{stage}{Required. Target stage of the transition.} - -\item{comment}{User-provided comment on the action.} -} -\description{ -Rejects a model version stage transition request. -} diff --git a/man/modelRegistryRenameModel.Rd b/man/modelRegistryRenameModel.Rd deleted file mode 100644 index 74e3b6d0..00000000 --- a/man/modelRegistryRenameModel.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryRenameModel} -\alias{modelRegistryRenameModel} -\title{Rename a model.} -\usage{ -modelRegistryRenameModel(client, name, new_name = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Registered model unique name identifier.} - -\item{new_name}{If provided, updates the name for this \code{registered_model}.} -} -\description{ -Renames a registered model. -} diff --git a/man/modelRegistrySearchModelVersions.Rd b/man/modelRegistrySearchModelVersions.Rd deleted file mode 100644 index 481157d4..00000000 --- a/man/modelRegistrySearchModelVersions.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistrySearchModelVersions} -\alias{modelRegistrySearchModelVersions} -\title{Searches model versions.} -\usage{ -modelRegistrySearchModelVersions( - client, - filter = NULL, - max_results = NULL, - order_by = NULL, - page_token = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{filter}{String filter condition, like 'name='my-model-name''.} - -\item{max_results}{Maximum number of models desired.} - -\item{order_by}{List of columns to be ordered by including model name, version, stage with an optional 'DESC' or 'ASC' annotation, where 'ASC' is the default.} - -\item{page_token}{Pagination token to go to next page based on previous search query.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Searches for specific model versions based on the supplied \strong{filter}. -} diff --git a/man/modelRegistrySearchModels.Rd b/man/modelRegistrySearchModels.Rd deleted file mode 100644 index c4a1501a..00000000 --- a/man/modelRegistrySearchModels.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistrySearchModels} -\alias{modelRegistrySearchModels} -\title{Search models.} -\usage{ -modelRegistrySearchModels( - client, - filter = NULL, - max_results = NULL, - order_by = NULL, - page_token = NULL -) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{filter}{String filter condition, like 'name LIKE 'my-model-name''.} - -\item{max_results}{Maximum number of models desired.} - -\item{order_by}{List of columns for ordering search results, which can include model name and last updated timestamp with an optional 'DESC' or 'ASC' annotation, where 'ASC' is the default.} - -\item{page_token}{Pagination token to go to the next page based on a previous search query.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Search for registered models based on the specified \strong{filter}. -} diff --git a/man/modelRegistrySetModelTag.Rd b/man/modelRegistrySetModelTag.Rd deleted file mode 100644 index 714f7e13..00000000 --- a/man/modelRegistrySetModelTag.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistrySetModelTag} -\alias{modelRegistrySetModelTag} -\title{Set a tag.} -\usage{ -modelRegistrySetModelTag(client, name, key, value) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Unique name of the model.} - -\item{key}{Required. Name of the tag.} - -\item{value}{Required. String value of the tag being logged.} -} -\description{ -Sets a tag on a registered model. -} diff --git a/man/modelRegistrySetModelVersionTag.Rd b/man/modelRegistrySetModelVersionTag.Rd deleted file mode 100644 index 83a3637e..00000000 --- a/man/modelRegistrySetModelVersionTag.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistrySetModelVersionTag} -\alias{modelRegistrySetModelVersionTag} -\title{Set a version tag.} -\usage{ -modelRegistrySetModelVersionTag(client, name, version, key, value) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Unique name of the model.} - -\item{version}{Required. Model version number.} - -\item{key}{Required. Name of the tag.} - -\item{value}{Required. String value of the tag being logged.} -} -\description{ -Sets a model version tag. -} diff --git a/man/modelRegistrySetPermissions.Rd b/man/modelRegistrySetPermissions.Rd deleted file mode 100644 index b2d8b329..00000000 --- a/man/modelRegistrySetPermissions.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistrySetPermissions} -\alias{modelRegistrySetPermissions} -\title{Set registered model permissions.} -\usage{ -modelRegistrySetPermissions( - client, - registered_model_id, - access_control_list = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{registered_model_id}{Required. The registered model for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets permissions on a registered model. Registered models can inherit -permissions from their root object. 
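A hypothetical sketch of model search under the assumed flat rename search_models() (the deleted modelRegistrySearchModels() above); the filter string follows the syntax quoted in its docs:

```r
client <- DatabricksClient()

# Returns a data.frame accumulated across response pages.
hits <- search_models(client,
  filter      = "name LIKE 'my-model%'",
  order_by    = list("name ASC"),
  max_results = 10
)
```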
-} diff --git a/man/modelRegistryTestRegistryWebhook.Rd b/man/modelRegistryTestRegistryWebhook.Rd deleted file mode 100644 index 5814f6aa..00000000 --- a/man/modelRegistryTestRegistryWebhook.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryTestRegistryWebhook} -\alias{modelRegistryTestRegistryWebhook} -\title{Test a webhook.} -\usage{ -modelRegistryTestRegistryWebhook(client, id, event = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Webhook ID.} - -\item{event}{If \code{event} is specified, the test trigger uses the specified event.} -} -\description{ -\strong{NOTE:} This endpoint is in Public Preview. -} -\details{ -Tests a registry webhook. -} diff --git a/man/modelRegistryTransitionStage.Rd b/man/modelRegistryTransitionStage.Rd deleted file mode 100644 index 962b3fc1..00000000 --- a/man/modelRegistryTransitionStage.Rd +++ /dev/null @@ -1,36 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryTransitionStage} -\alias{modelRegistryTransitionStage} -\title{Transition a stage.} -\usage{ -modelRegistryTransitionStage( - client, - name, - version, - stage, - archive_existing_versions, - comment = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the model.} - -\item{version}{Required. Version of the model.} - -\item{stage}{Required. Target stage of the transition.} - -\item{archive_existing_versions}{Required. Specifies whether to archive all current model versions in the target stage.} - -\item{comment}{User-provided comment on the action.} -} -\description{ -Transition a model version's stage. This is a Databricks workspace version of -the MLflow endpoint that also accepts a comment associated with the -transition to be recorded.', -} -\details{ -MLflow endpoint: https://www.mlflow.org/docs/latest/rest-api.html#transition-modelversion-stage -} diff --git a/man/modelRegistryUpdateComment.Rd b/man/modelRegistryUpdateComment.Rd deleted file mode 100644 index 1ff5299d..00000000 --- a/man/modelRegistryUpdateComment.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryUpdateComment} -\alias{modelRegistryUpdateComment} -\title{Update a comment.} -\usage{ -modelRegistryUpdateComment(client, id, comment) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Unique identifier of an activity.} - -\item{comment}{Required. User-provided comment on the action.} -} -\description{ -Post an edit to a comment on a model version. -} diff --git a/man/modelRegistryUpdateModel.Rd b/man/modelRegistryUpdateModel.Rd deleted file mode 100644 index d4684df0..00000000 --- a/man/modelRegistryUpdateModel.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryUpdateModel} -\alias{modelRegistryUpdateModel} -\title{Update model.} -\usage{ -modelRegistryUpdateModel(client, name, description = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Registered model unique name identifier.} - -\item{description}{If provided, updates the description for this \code{registered_model}.} -} -\description{ -Updates a registered model. 
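A hypothetical sketch of editing a registered model's metadata under the assumed flat rename update_model() (the deleted modelRegistryUpdateModel() above):

```r
client <- DatabricksClient()

# Only the description is updated; the model is addressed by its unique name.
update_model(client,
  name = "my-model",
  description = "Gradient-boosted churn model, retrained weekly"
)
```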
-} diff --git a/man/modelRegistryUpdateModelVersion.Rd b/man/modelRegistryUpdateModelVersion.Rd deleted file mode 100644 index 8456c2dd..00000000 --- a/man/modelRegistryUpdateModelVersion.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryUpdateModelVersion} -\alias{modelRegistryUpdateModelVersion} -\title{Update model version.} -\usage{ -modelRegistryUpdateModelVersion(client, name, version, description = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the registered model.} - -\item{version}{Required. Model version number.} - -\item{description}{If provided, updates the description for this \code{registered_model}.} -} -\description{ -Updates the model version. -} diff --git a/man/modelRegistryUpdatePermissions.Rd b/man/modelRegistryUpdatePermissions.Rd deleted file mode 100644 index e732ab36..00000000 --- a/man/modelRegistryUpdatePermissions.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryUpdatePermissions} -\alias{modelRegistryUpdatePermissions} -\title{Update registered model permissions.} -\usage{ -modelRegistryUpdatePermissions( - client, - registered_model_id, - access_control_list = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{registered_model_id}{Required. The registered model for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Updates the permissions on a registered model. Registered models can inherit -permissions from their root object. -} diff --git a/man/modelRegistryUpdateWebhook.Rd b/man/modelRegistryUpdateWebhook.Rd deleted file mode 100644 index 36c5bca0..00000000 --- a/man/modelRegistryUpdateWebhook.Rd +++ /dev/null @@ -1,37 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_registry.R -\name{modelRegistryUpdateWebhook} -\alias{modelRegistryUpdateWebhook} -\title{Update a webhook.} -\usage{ -modelRegistryUpdateWebhook( - client, - id, - description = NULL, - events = NULL, - http_url_spec = NULL, - job_spec = NULL, - status = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Webhook ID.} - -\item{description}{User-specified description for the webhook.} - -\item{events}{Events that can trigger a registry webhook: * \code{MODEL_VERSION_CREATED}: A new model version was created for the associated model.} - -\item{http_url_spec}{This field has no description yet.} - -\item{job_spec}{This field has no description yet.} - -\item{status}{Enable or disable triggering the webhook, or put the webhook into test mode.} -} -\description{ -\strong{NOTE:} This endpoint is in Public Preview. -} -\details{ -Updates a registry webhook. -} diff --git a/man/modelVersionsDelete.Rd b/man/modelVersionsDelete.Rd deleted file mode 100644 index 220bb51d..00000000 --- a/man/modelVersionsDelete.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_versions.R -\name{modelVersionsDelete} -\alias{modelVersionsDelete} -\title{Delete a Model Version.} -\usage{ -modelVersionsDelete(client, full_name, version) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. 
The three-level (fully qualified) name of the model version.} - -\item{version}{Required. The integer version number of the model version.} -} -\description{ -Deletes a model version from the specified registered model. Any aliases -assigned to the model version will also be deleted. -} -\details{ -The caller must be a metastore admin or an owner of the parent registered -model. For the latter case, the caller must also be the owner or have the -\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} -privilege on the parent schema. -} diff --git a/man/modelVersionsGet.Rd b/man/modelVersionsGet.Rd deleted file mode 100644 index 8ccf6afe..00000000 --- a/man/modelVersionsGet.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_versions.R -\name{modelVersionsGet} -\alias{modelVersionsGet} -\title{Get a Model Version.} -\usage{ -modelVersionsGet(client, full_name, version, include_browse = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. The three-level (fully qualified) name of the model version.} - -\item{version}{Required. The integer version number of the model version.} - -\item{include_browse}{Whether to include model versions in the response for which the principal can only access selective metadata for.} -} -\description{ -Get a model version. -} -\details{ -The caller must be a metastore admin or an owner of (or have the \strong{EXECUTE} -privilege on) the parent registered model. For the latter case, the caller -must also be the owner or have the \strong{USE_CATALOG} privilege on the parent -catalog and the \strong{USE_SCHEMA} privilege on the parent schema. -} diff --git a/man/modelVersionsGetByAlias.Rd b/man/modelVersionsGetByAlias.Rd deleted file mode 100644 index b255047c..00000000 --- a/man/modelVersionsGetByAlias.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_versions.R -\name{modelVersionsGetByAlias} -\alias{modelVersionsGetByAlias} -\title{Get Model Version By Alias.} -\usage{ -modelVersionsGetByAlias(client, full_name, alias) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. The three-level (fully qualified) name of the registered model.} - -\item{alias}{Required. The name of the alias.} -} -\description{ -Get a model version by alias. -} -\details{ -The caller must be a metastore admin or an owner of (or have the \strong{EXECUTE} -privilege on) the registered model. For the latter case, the caller must also -be the owner or have the \strong{USE_CATALOG} privilege on the parent catalog and -the \strong{USE_SCHEMA} privilege on the parent schema. -} diff --git a/man/modelVersionsList.Rd b/man/modelVersionsList.Rd deleted file mode 100644 index 9f5ab17f..00000000 --- a/man/modelVersionsList.Rd +++ /dev/null @@ -1,43 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_versions.R -\name{modelVersionsList} -\alias{modelVersionsList} -\title{List Model Versions.} -\usage{ -modelVersionsList( - client, - full_name, - include_browse = NULL, - max_results = NULL, - page_token = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. 
The full three-level name of the registered model under which to list model versions.} - -\item{include_browse}{Whether to include model versions in the response for which the principal can only access selective metadata for.} - -\item{max_results}{Maximum number of model versions to return.} - -\item{page_token}{Opaque pagination token to go to next page based on previous query.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -List model versions. You can list model versions under a particular schema, -or list all model versions in the current metastore. -} -\details{ -The returned models are filtered based on the privileges of the calling user. -For example, the metastore admin is able to list all the model versions. A -regular user needs to be the owner or have the \strong{EXECUTE} privilege on the -parent registered model to recieve the model versions in the response. For -the latter case, the caller must also be the owner or have the -\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} -privilege on the parent schema. - -There is no guarantee of a specific ordering of the elements in the response. -} diff --git a/man/modelVersionsUpdate.Rd b/man/modelVersionsUpdate.Rd deleted file mode 100644 index ca5994e8..00000000 --- a/man/modelVersionsUpdate.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/model_versions.R -\name{modelVersionsUpdate} -\alias{modelVersionsUpdate} -\title{Update a Model Version.} -\usage{ -modelVersionsUpdate(client, full_name, version, comment = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. The three-level (fully qualified) name of the model version.} - -\item{version}{Required. The integer version number of the model version.} - -\item{comment}{The comment attached to the model version.} -} -\description{ -Updates the specified model version. -} -\details{ -The caller must be a metastore admin or an owner of the parent registered -model. For the latter case, the caller must also be the owner or have the -\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} -privilege on the parent schema. - -Currently only the comment of the model version can be updated. -} diff --git a/man/move_dbfs.Rd b/man/move_dbfs.Rd new file mode 100644 index 00000000..b8db9811 --- /dev/null +++ b/man/move_dbfs.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dbfs.R +\name{move_dbfs} +\alias{move_dbfs} +\alias{dbfsMove} +\title{Move a file.} +\usage{ +move_dbfs(client, source_path, destination_path) + +dbfsMove(client, source_path, destination_path) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{source_path}{Required. The source path of the file or directory.} + +\item{destination_path}{Required. The destination path of the file or directory.} +} +\description{ +Moves a file from one location to another location within DBFS. If the source +file does not exist, this call throws an exception with +\code{RESOURCE_DOES_NOT_EXIST}. If a file already exists in the destination path, +this call throws an exception with \code{RESOURCE_ALREADY_EXISTS}. If the given +source path is a directory, this call always recursively moves all files. 
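A minimal sketch of move_dbfs(), assuming a configured client; the error semantics (RESOURCE_DOES_NOT_EXIST, RESOURCE_ALREADY_EXISTS) and recursive directory moves are as described above:

```r
client <- DatabricksClient()

# Directories are moved recursively; fails if the destination already exists.
move_dbfs(client,
  source_path      = "/tmp/examples/run-artifacts",
  destination_path = "/tmp/examples/archive"
)
```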
+} diff --git a/man/onlineTablesCreate.Rd b/man/onlineTablesCreate.Rd deleted file mode 100644 index 45d08432..00000000 --- a/man/onlineTablesCreate.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/online_tables.R -\name{onlineTablesCreate} -\alias{onlineTablesCreate} -\title{Create an Online Table.} -\usage{ -onlineTablesCreate(client, name = NULL, spec = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Full three-part (catalog, schema, table) name of the table.} - -\item{spec}{Specification of the online table.} -} -\description{ -Create a new Online Table. -} diff --git a/man/onlineTablesDelete.Rd b/man/onlineTablesDelete.Rd deleted file mode 100644 index 1f94278b..00000000 --- a/man/onlineTablesDelete.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/online_tables.R -\name{onlineTablesDelete} -\alias{onlineTablesDelete} -\title{Delete an Online Table.} -\usage{ -onlineTablesDelete(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Full three-part (catalog, schema, table) name of the table.} -} -\description{ -Delete an online table. Warning: This will delete all the data in the online -table. If the source Delta table was deleted or modified since this Online -Table was created, this will lose the data forever! -} diff --git a/man/onlineTablesGet.Rd b/man/onlineTablesGet.Rd deleted file mode 100644 index d77afce8..00000000 --- a/man/onlineTablesGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/online_tables.R -\name{onlineTablesGet} -\alias{onlineTablesGet} -\title{Get an Online Table.} -\usage{ -onlineTablesGet(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Full three-part (catalog, schema, table) name of the table.} -} -\description{ -Get information about an existing online table and its status. -} diff --git a/man/patch_group.Rd b/man/patch_group.Rd new file mode 100644 index 00000000..f7bb129d --- /dev/null +++ b/man/patch_group.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/groups.R +\name{patch_group} +\alias{patch_group} +\alias{groupsPatch} +\title{Update group details.} +\usage{ +patch_group(client, id, operations = NULL, schemas = NULL) + +groupsPatch(client, id, operations = NULL, schemas = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Unique ID for a group in the Databricks workspace.} + +\item{operations}{This field has no description yet.} + +\item{schemas}{The schema of the patch request.} +} +\description{ +Partially updates the details of a group. +} diff --git a/man/patch_service_principal.Rd b/man/patch_service_principal.Rd new file mode 100644 index 00000000..b659bf43 --- /dev/null +++ b/man/patch_service_principal.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/service_principals.R +\name{patch_service_principal} +\alias{patch_service_principal} +\alias{servicePrincipalsPatch} +\title{Update service principal details.} +\usage{ +patch_service_principal(client, id, operations = NULL, schemas = NULL) + +servicePrincipalsPatch(client, id, operations = NULL, schemas = NULL) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{id}{Required. Unique ID for a service principal in the Databricks workspace.} + +\item{operations}{This field has no description yet.} + +\item{schemas}{The schema of the patch request.} +} +\description{ +Partially updates the details of a single service principal in the Databricks +workspace. +} diff --git a/man/patch_serving_endpoint.Rd b/man/patch_serving_endpoint.Rd new file mode 100644 index 00000000..1f506adc --- /dev/null +++ b/man/patch_serving_endpoint.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{patch_serving_endpoint} +\alias{patch_serving_endpoint} +\alias{servingEndpointsPatch} +\title{Update tags of a serving endpoint.} +\usage{ +patch_serving_endpoint(client, name, add_tags = NULL, delete_tags = NULL) + +servingEndpointsPatch(client, name, add_tags = NULL, delete_tags = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the serving endpoint whose tags to patch.} + +\item{add_tags}{List of endpoint tags to add.} + +\item{delete_tags}{List of tag keys to delete.} +} +\description{ +Used to batch add and delete tags from a serving endpoint with a single API +call. +} diff --git a/man/patch_user.Rd b/man/patch_user.Rd new file mode 100644 index 00000000..367b87a8 --- /dev/null +++ b/man/patch_user.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/users.R +\name{patch_user} +\alias{patch_user} +\alias{usersPatch} +\title{Update user details.} +\usage{ +patch_user(client, id, operations = NULL, schemas = NULL) + +usersPatch(client, id, operations = NULL, schemas = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Unique ID for a user in the Databricks workspace.} + +\item{operations}{This field has no description yet.} + +\item{schemas}{The schema of the patch request.} +} +\description{ +Partially updates a user resource by applying the supplied operations on +specific user attributes. +} diff --git a/man/permanent_cluster_delete.Rd b/man/permanent_cluster_delete.Rd new file mode 100644 index 00000000..573a77d7 --- /dev/null +++ b/man/permanent_cluster_delete.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{permanent_cluster_delete} +\alias{permanent_cluster_delete} +\alias{clustersPermanentDelete} +\title{Permanently delete cluster.} +\usage{ +permanent_cluster_delete(client, cluster_id) + +clustersPermanentDelete(client, cluster_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster to be deleted.} +} +\description{ +Permanently deletes a Spark cluster. This cluster is terminated and resources +are asynchronously removed. +} +\details{ +In addition, users will no longer see permanently deleted clusters in the +cluster list, and API users can no longer perform any action on permanently +deleted clusters.
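+} +\examples{ +# Hedged usage sketch, not generator output: permanently removes a cluster; +# the cluster ID below is a hypothetical placeholder. +\dontrun{ +client <- DatabricksClient() +permanent_cluster_delete(client, cluster_id = "0123-456789-abcdefgh") +}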
+} diff --git a/man/permissionMigrationMigratePermissions.Rd b/man/permissionMigrationMigratePermissions.Rd deleted file mode 100644 index c15eb9a4..00000000 --- a/man/permissionMigrationMigratePermissions.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/permission_migration.R -\name{permissionMigrationMigratePermissions} -\alias{permissionMigrationMigratePermissions} -\title{Migrate Permissions.} -\usage{ -permissionMigrationMigratePermissions( - client, - workspace_id, - from_workspace_group_name, - to_account_group_name, - size = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{workspace_id}{Required. WorkspaceId of the associated workspace where the permission migration will occur.} - -\item{from_workspace_group_name}{Required. The name of the workspace group that permissions will be migrated from.} - -\item{to_account_group_name}{Required. The name of the account group that permissions will be migrated to.} - -\item{size}{The maximum number of permissions that will be migrated.} -} -\description{ -Migrate a batch of permissions from a workspace local group to an account -group. -} diff --git a/man/permissionsGet.Rd b/man/permissionsGet.Rd deleted file mode 100644 index 6d03e186..00000000 --- a/man/permissionsGet.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/permissions.R -\name{permissionsGet} -\alias{permissionsGet} -\title{Get object permissions.} -\usage{ -permissionsGet(client, request_object_type, request_object_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{request_object_type}{Required. The type of the request object.} - -\item{request_object_id}{Required. The id of the request object.} -} -\description{ -Gets the permissions of an object. Objects can inherit permissions from their -parent objects or root object. -} diff --git a/man/permissionsGetPermissionLevels.Rd b/man/permissionsGetPermissionLevels.Rd deleted file mode 100644 index 36719300..00000000 --- a/man/permissionsGetPermissionLevels.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/permissions.R -\name{permissionsGetPermissionLevels} -\alias{permissionsGetPermissionLevels} -\title{Get object permission levels.} -\usage{ -permissionsGetPermissionLevels(client, request_object_type, request_object_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{request_object_type}{Required. \if{html}{\out{}}.} - -\item{request_object_id}{Required. \if{html}{\out{}}.} -} -\description{ -Gets the permission levels that a user can have on an object. -} diff --git a/man/permissionsSet.Rd b/man/permissionsSet.Rd deleted file mode 100644 index ca9023d5..00000000 --- a/man/permissionsSet.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/permissions.R -\name{permissionsSet} -\alias{permissionsSet} -\title{Set object permissions.} -\usage{ -permissionsSet( - client, - request_object_type, - request_object_id, - access_control_list = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{request_object_type}{Required. The type of the request object.} - -\item{request_object_id}{Required. The id of the request object.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets permissions on an object. 
Objects can inherit permissions from their -parent objects or root object. -} diff --git a/man/permissionsUpdate.Rd b/man/permissionsUpdate.Rd deleted file mode 100644 index 99fc0062..00000000 --- a/man/permissionsUpdate.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/permissions.R -\name{permissionsUpdate} -\alias{permissionsUpdate} -\title{Update object permissions.} -\usage{ -permissionsUpdate( - client, - request_object_type, - request_object_id, - access_control_list = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{request_object_type}{Required. The type of the request object.} - -\item{request_object_id}{Required. The id of the request object.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Updates the permissions on an object. Objects can inherit permissions from -their parent objects or root object. -} diff --git a/man/pin_cluster.Rd b/man/pin_cluster.Rd new file mode 100644 index 00000000..19c397eb --- /dev/null +++ b/man/pin_cluster.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{pin_cluster} +\alias{pin_cluster} +\alias{clustersPin} +\title{Pin cluster.} +\usage{ +pin_cluster(client, cluster_id) + +clustersPin(client, cluster_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. \if{html}{\out{}}.} +} +\description{ +Pinning a cluster ensures that the cluster will always be returned by the +ListClusters API. Pinning a cluster that is already pinned will have no +effect. This API can only be called by workspace admins. +} diff --git a/man/pipelinesCreate.Rd b/man/pipelinesCreate.Rd deleted file mode 100644 index 5923aadb..00000000 --- a/man/pipelinesCreate.Rd +++ /dev/null @@ -1,74 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesCreate} -\alias{pipelinesCreate} -\title{Create a pipeline.} -\usage{ -pipelinesCreate( - client, - allow_duplicate_names = NULL, - catalog = NULL, - channel = NULL, - clusters = NULL, - configuration = NULL, - continuous = NULL, - development = NULL, - dry_run = NULL, - edition = NULL, - filters = NULL, - id = NULL, - libraries = NULL, - name = NULL, - notifications = NULL, - photon = NULL, - serverless = NULL, - storage = NULL, - target = NULL, - trigger = NULL -) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{allow_duplicate_names}{If false, deployment will fail if name conflicts with that of another pipeline.} - -\item{catalog}{A catalog in Unity Catalog to publish data from this pipeline to.} - -\item{channel}{DLT Release Channel that specifies which version to use.} - -\item{clusters}{Cluster settings for this pipeline deployment.} - -\item{configuration}{String-String configuration for this pipeline execution.} - -\item{continuous}{Whether the pipeline is continuous or triggered.} - -\item{development}{Whether the pipeline is in Development mode.} - -\item{dry_run}{This field has no description yet.} - -\item{edition}{Pipeline product edition.} - -\item{filters}{Filters on which Pipeline packages to include in the deployed graph.} - -\item{id}{Unique identifier for this pipeline.} - -\item{libraries}{Libraries or code needed by this deployment.} - -\item{name}{Friendly identifier for this pipeline.} - -\item{notifications}{List of notification settings for this pipeline.} - -\item{photon}{Whether Photon is enabled for this pipeline.} - -\item{serverless}{Whether serverless compute is enabled for this pipeline.} - -\item{storage}{DBFS root directory for storing checkpoints and tables.} - -\item{target}{Target schema (database) to add tables in this pipeline to.} - -\item{trigger}{Which pipeline trigger to use.} -} -\description{ -Creates a new data processing pipeline based on the requested configuration. -If successful, this method returns the ID of the new pipeline. -} diff --git a/man/pipelinesDelete.Rd b/man/pipelinesDelete.Rd deleted file mode 100644 index d65d11b3..00000000 --- a/man/pipelinesDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesDelete} -\alias{pipelinesDelete} -\title{Delete a pipeline.} -\usage{ -pipelinesDelete(client, pipeline_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{pipeline_id}{Required. This field has no description yet.} -} -\description{ -Deletes a pipeline. -} diff --git a/man/pipelinesGet.Rd b/man/pipelinesGet.Rd deleted file mode 100644 index 1919e34d..00000000 --- a/man/pipelinesGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesGet} -\alias{pipelinesGet} -\title{Get a pipeline.} -\usage{ -pipelinesGet(client, pipeline_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{pipeline_id}{Required. This field has no description yet.} -} -\description{ -Get a pipeline. -} diff --git a/man/pipelinesGetPermissionLevels.Rd b/man/pipelinesGetPermissionLevels.Rd deleted file mode 100644 index a472ff6f..00000000 --- a/man/pipelinesGetPermissionLevels.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesGetPermissionLevels} -\alias{pipelinesGetPermissionLevels} -\title{Get pipeline permission levels.} -\usage{ -pipelinesGetPermissionLevels(client, pipeline_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{pipeline_id}{Required. The pipeline for which to get or manage permissions.} -} -\description{ -Gets the permission levels that a user can have on an object. 
-} diff --git a/man/pipelinesGetPermissions.Rd b/man/pipelinesGetPermissions.Rd deleted file mode 100644 index 66045157..00000000 --- a/man/pipelinesGetPermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesGetPermissions} -\alias{pipelinesGetPermissions} -\title{Get pipeline permissions.} -\usage{ -pipelinesGetPermissions(client, pipeline_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{pipeline_id}{Required. The pipeline for which to get or manage permissions.} -} -\description{ -Gets the permissions of a pipeline. Pipelines can inherit permissions from -their root object. -} diff --git a/man/pipelinesGetUpdate.Rd b/man/pipelinesGetUpdate.Rd deleted file mode 100644 index da63b379..00000000 --- a/man/pipelinesGetUpdate.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesGetUpdate} -\alias{pipelinesGetUpdate} -\title{Get a pipeline update.} -\usage{ -pipelinesGetUpdate(client, pipeline_id, update_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{pipeline_id}{Required. The ID of the pipeline.} - -\item{update_id}{Required. The ID of the update.} -} -\description{ -Gets an update from an active pipeline. -} diff --git a/man/pipelinesListPipelineEvents.Rd b/man/pipelinesListPipelineEvents.Rd deleted file mode 100644 index 057d027f..00000000 --- a/man/pipelinesListPipelineEvents.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesListPipelineEvents} -\alias{pipelinesListPipelineEvents} -\title{List pipeline events.} -\usage{ -pipelinesListPipelineEvents( - client, - pipeline_id, - filter = NULL, - max_results = NULL, - order_by = NULL, - page_token = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{pipeline_id}{Required. This field has no description yet.} - -\item{filter}{Criteria to select a subset of results, expressed using a SQL-like syntax.} - -\item{max_results}{Max number of entries to return in a single page.} - -\item{order_by}{A string indicating a sort order by timestamp for the results, for example, timestamp asc.} - -\item{page_token}{Page token returned by previous call.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Retrieves events for a pipeline. -} diff --git a/man/pipelinesListPipelines.Rd b/man/pipelinesListPipelines.Rd deleted file mode 100644 index 417f3fc5..00000000 --- a/man/pipelinesListPipelines.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesListPipelines} -\alias{pipelinesListPipelines} -\title{List pipelines.} -\usage{ -pipelinesListPipelines( - client, - filter = NULL, - max_results = NULL, - order_by = NULL, - page_token = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{filter}{Select a subset of results based on the specified criteria.} - -\item{max_results}{The maximum number of entries to return in a single page.} - -\item{order_by}{A list of strings specifying the order of results.} - -\item{page_token}{Page token returned by previous call.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Lists pipelines defined in the Delta Live Tables system. 
-} diff --git a/man/pipelinesListUpdates.Rd b/man/pipelinesListUpdates.Rd deleted file mode 100644 index 9e4fb443..00000000 --- a/man/pipelinesListUpdates.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesListUpdates} -\alias{pipelinesListUpdates} -\title{List pipeline updates.} -\usage{ -pipelinesListUpdates( - client, - pipeline_id, - max_results = NULL, - page_token = NULL, - until_update_id = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{pipeline_id}{Required. The pipeline to return updates for.} - -\item{max_results}{Max number of entries to return in a single page.} - -\item{page_token}{Page token returned by previous call.} - -\item{until_update_id}{If present, returns updates until and including this update_id.} -} -\description{ -List updates for an active pipeline. -} diff --git a/man/pipelinesSetPermissions.Rd b/man/pipelinesSetPermissions.Rd deleted file mode 100644 index 4fd9ee3b..00000000 --- a/man/pipelinesSetPermissions.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesSetPermissions} -\alias{pipelinesSetPermissions} -\title{Set pipeline permissions.} -\usage{ -pipelinesSetPermissions(client, pipeline_id, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{pipeline_id}{Required. The pipeline for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets permissions on a pipeline. Pipelines can inherit permissions from their -root object. -} diff --git a/man/pipelinesStartUpdate.Rd b/man/pipelinesStartUpdate.Rd deleted file mode 100644 index 13090e27..00000000 --- a/man/pipelinesStartUpdate.Rd +++ /dev/null @@ -1,36 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesStartUpdate} -\alias{pipelinesStartUpdate} -\title{Start a pipeline.} -\usage{ -pipelinesStartUpdate( - client, - pipeline_id, - cause = NULL, - full_refresh = NULL, - full_refresh_selection = NULL, - refresh_selection = NULL, - validate_only = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{pipeline_id}{Required. This field has no description yet.} - -\item{cause}{This field has no description yet.} - -\item{full_refresh}{If true, this update will reset all tables before running.} - -\item{full_refresh_selection}{A list of tables to update with fullRefresh.} - -\item{refresh_selection}{A list of tables to update without fullRefresh.} - -\item{validate_only}{If true, this update only validates the correctness of pipeline source code but does not materialize or publish any datasets.} -} -\description{ -Starts a new update for the pipeline. If there is already an active update -for the pipeline, the request will fail and the active update will remain -running. -} diff --git a/man/pipelinesStop.Rd b/man/pipelinesStop.Rd deleted file mode 100644 index a1595b35..00000000 --- a/man/pipelinesStop.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesStop} -\alias{pipelinesStop} -\title{Stop a pipeline.} -\usage{ -pipelinesStop(client, pipeline_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{pipeline_id}{Required. 
This field has no description yet.} -} -\description{ -Stops the pipeline by canceling the active update. If there is no active -update for the pipeline, this request is a no-op. -} diff --git a/man/pipelinesStopAndWait.Rd b/man/pipelinesStopAndWait.Rd deleted file mode 100644 index 440d7c5a..00000000 --- a/man/pipelinesStopAndWait.Rd +++ /dev/null @@ -1,32 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesStopAndWait} -\alias{pipelinesStopAndWait} -\title{Stop a pipeline.} -\usage{ -pipelinesStopAndWait( - client, - pipeline_id, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{pipeline_id}{Required. This field has no description yet.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Pipelines on Databricks reach -IDLE state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Pipelines is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Stops the pipeline by canceling the active update. If there is no active -update for the pipeline, this request is a no-op. -} diff --git a/man/pipelinesUpdate.Rd b/man/pipelinesUpdate.Rd deleted file mode 100644 index 14912909..00000000 --- a/man/pipelinesUpdate.Rd +++ /dev/null @@ -1,76 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesUpdate} -\alias{pipelinesUpdate} -\title{Edit a pipeline.} -\usage{ -pipelinesUpdate( - client, - pipeline_id, - allow_duplicate_names = NULL, - catalog = NULL, - channel = NULL, - clusters = NULL, - configuration = NULL, - continuous = NULL, - development = NULL, - edition = NULL, - expected_last_modified = NULL, - filters = NULL, - id = NULL, - libraries = NULL, - name = NULL, - notifications = NULL, - photon = NULL, - serverless = NULL, - storage = NULL, - target = NULL, - trigger = NULL -) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{pipeline_id}{Unique identifier for this pipeline.} - -\item{allow_duplicate_names}{If false, deployment will fail if name has changed and conflicts with the name of another pipeline.} - -\item{catalog}{A catalog in Unity Catalog to publish data from this pipeline to.} - -\item{channel}{DLT Release Channel that specifies which version to use.} - -\item{clusters}{Cluster settings for this pipeline deployment.} - -\item{configuration}{String-String configuration for this pipeline execution.} - -\item{continuous}{Whether the pipeline is continuous or triggered.} - -\item{development}{Whether the pipeline is in Development mode.} - -\item{edition}{Pipeline product edition.} - -\item{expected_last_modified}{If present, the last-modified time of the pipeline settings before the edit.} - -\item{filters}{Filters on which Pipeline packages to include in the deployed graph.} - -\item{id}{Unique identifier for this pipeline.} - -\item{libraries}{Libraries or code needed by this deployment.} - -\item{name}{Friendly identifier for this pipeline.} - -\item{notifications}{List of notification settings for this pipeline.} - -\item{photon}{Whether Photon is enabled for this pipeline.} - -\item{serverless}{Whether serverless compute is enabled for this pipeline.} - -\item{storage}{DBFS root directory for storing checkpoints and tables.} - -\item{target}{Target schema (database) to add tables in this pipeline to.} - -\item{trigger}{Which pipeline trigger to use.} -} -\description{ -Updates a pipeline with the supplied configuration. -} diff --git a/man/pipelinesUpdatePermissions.Rd b/man/pipelinesUpdatePermissions.Rd deleted file mode 100644 index 26ab8a0c..00000000 --- a/man/pipelinesUpdatePermissions.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/pipelines.R -\name{pipelinesUpdatePermissions} -\alias{pipelinesUpdatePermissions} -\title{Update pipeline permissions.} -\usage{ -pipelinesUpdatePermissions(client, pipeline_id, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{pipeline_id}{Required. The pipeline for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Updates the permissions on a pipeline. Pipelines can inherit permissions from -their root object. -} diff --git a/man/policyFamiliesGet.Rd b/man/policyFamiliesGet.Rd deleted file mode 100644 index 30a09b43..00000000 --- a/man/policyFamiliesGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/policy_families.R -\name{policyFamiliesGet} -\alias{policyFamiliesGet} -\title{Get policy family information.} -\usage{ -policyFamiliesGet(client, policy_family_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{policy_family_id}{Required. This field has no description yet.} -} -\description{ -Retrieve the information for a policy family based on its identifier.
-} diff --git a/man/policyFamiliesList.Rd b/man/policyFamiliesList.Rd deleted file mode 100644 index 7e2d771d..00000000 --- a/man/policyFamiliesList.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/policy_families.R -\name{policyFamiliesList} -\alias{policyFamiliesList} -\title{List policy families.} -\usage{ -policyFamiliesList(client, max_results = NULL, page_token = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{max_results}{The max number of policy families to return.} - -\item{page_token}{A token that can be used to get the next page of results.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Retrieve a list of policy families. This API is paginated. -} diff --git a/man/providersCreate.Rd b/man/providersCreate.Rd deleted file mode 100644 index d897cc7c..00000000 --- a/man/providersCreate.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/providers.R -\name{providersCreate} -\alias{providersCreate} -\title{Create an auth provider.} -\usage{ -providersCreate( - client, - name, - authentication_type, - comment = NULL, - recipient_profile_str = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the Provider.} - -\item{authentication_type}{Required. The delta sharing authentication type.} - -\item{comment}{Description about the provider.} - -\item{recipient_profile_str}{This field is required when the \strong{authentication_type} is \strong{TOKEN} or not provided.} -} -\description{ -Creates a new authentication provider minimally based on a name and -authentication type. The caller must be an admin on the metastore. -} diff --git a/man/providersDelete.Rd b/man/providersDelete.Rd deleted file mode 100644 index f31984ff..00000000 --- a/man/providersDelete.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/providers.R -\name{providersDelete} -\alias{providersDelete} -\title{Delete a provider.} -\usage{ -providersDelete(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the provider.} -} -\description{ -Deletes an authentication provider, if the caller is a metastore admin or is -the owner of the provider. -} diff --git a/man/providersGet.Rd b/man/providersGet.Rd deleted file mode 100644 index ffb69a1a..00000000 --- a/man/providersGet.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/providers.R -\name{providersGet} -\alias{providersGet} -\title{Get a provider.} -\usage{ -providersGet(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the provider.} -} -\description{ -Gets a specific authentication provider. The caller must supply the name of -the provider, and must either be a metastore admin or the owner of the -provider. -} diff --git a/man/providersList.Rd b/man/providersList.Rd deleted file mode 100644 index e80e22c2..00000000 --- a/man/providersList.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/providers.R -\name{providersList} -\alias{providersList} -\title{List providers.} -\usage{ -providersList(client, data_provider_global_metastore_id = NULL) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{data_provider_global_metastore_id}{If not provided, all providers will be returned.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets an array of available authentication providers. The caller must either -be a metastore admin or the owner of the providers. Providers not owned by -the caller are not included in the response. There is no guarantee of a -specific ordering of the elements in the array. -} diff --git a/man/providersListShares.Rd b/man/providersListShares.Rd deleted file mode 100644 index ffb9a6df..00000000 --- a/man/providersListShares.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/providers.R -\name{providersListShares} -\alias{providersListShares} -\title{List shares by Provider.} -\usage{ -providersListShares(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the provider in which to list shares.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets an array of a specified provider's shares within the metastore where: -} -\details{ -\itemize{ -\item the caller is a metastore admin, or * the caller is the owner. -} -} diff --git a/man/providersUpdate.Rd b/man/providersUpdate.Rd deleted file mode 100644 index 041a2fb0..00000000 --- a/man/providersUpdate.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/providers.R -\name{providersUpdate} -\alias{providersUpdate} -\title{Update a provider.} -\usage{ -providersUpdate( - client, - name, - comment = NULL, - new_name = NULL, - owner = NULL, - recipient_profile_str = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the provider.} - -\item{comment}{Description about the provider.} - -\item{new_name}{New name for the provider.} - -\item{owner}{Username of Provider owner.} - -\item{recipient_profile_str}{This field is required when the \strong{authentication_type} is \strong{TOKEN} or not provided.} -} -\description{ -Updates the information for an authentication provider, if the caller is a -metastore admin or is the owner of the provider. If the update changes the -provider name, the caller must be both a metastore admin and the owner of the -provider. -} diff --git a/man/publish_lakeview.Rd b/man/publish_lakeview.Rd new file mode 100644 index 00000000..b78be7f0 --- /dev/null +++ b/man/publish_lakeview.Rd @@ -0,0 +1,33 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/lakeview.R +\name{publish_lakeview} +\alias{publish_lakeview} +\alias{lakeviewPublish} +\title{Publish dashboard.} +\usage{ +publish_lakeview( + client, + dashboard_id, + embed_credentials = NULL, + warehouse_id = NULL +) + +lakeviewPublish( + client, + dashboard_id, + embed_credentials = NULL, + warehouse_id = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{dashboard_id}{Required. UUID identifying the dashboard to be published.} + +\item{embed_credentials}{Flag to indicate if the publisher's credentials should be embedded in the published dashboard.} + +\item{warehouse_id}{The ID of the warehouse that can be used to override the warehouse which was set in the draft.} +} +\description{ +Publish the current draft dashboard. 
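+} +\examples{ +# Hedged usage sketch, not generator output: publishes a draft Lakeview +# dashboard; the dashboard UUID is a hypothetical placeholder. +\dontrun{ +client <- DatabricksClient() +publish_lakeview(client, dashboard_id = "01ee-placeholder-uuid", embed_credentials = TRUE) +}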
+} diff --git a/man/put_dbfs.Rd b/man/put_dbfs.Rd new file mode 100644 index 00000000..bb89cadf --- /dev/null +++ b/man/put_dbfs.Rd @@ -0,0 +1,35 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dbfs.R +\name{put_dbfs} +\alias{put_dbfs} +\alias{dbfsPut} +\title{Upload a file.} +\usage{ +put_dbfs(client, path, contents = NULL, overwrite = NULL) + +dbfsPut(client, path, contents = NULL, overwrite = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{path}{Required. The path of the new file.} + +\item{contents}{This parameter might be absent, and instead a posted file will be used.} + +\item{overwrite}{The flag that specifies whether to overwrite existing file/files.} +} +\description{ +Uploads a file through the use of multipart form post. It is mainly used for +streaming uploads, but can also be used as a convenient single call for data +upload. +} +\details{ +Alternatively, you can pass contents as a base64-encoded string. + +The amount of data that can be passed (when not streaming) using the +\strong{contents} parameter is limited to 1 MB. \code{MAX_BLOCK_SIZE_EXCEEDED} will be +thrown if this limit is exceeded. + +If you want to upload large files, use the streaming upload. For details, see +:method:dbfs/create, :method:dbfs/addBlock, :method:dbfs/close. +} diff --git a/man/put_secret.Rd b/man/put_secret.Rd new file mode 100644 index 00000000..8103e7a9 --- /dev/null +++ b/man/put_secret.Rd @@ -0,0 +1,44 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/secrets.R +\name{put_secret} +\alias{put_secret} +\alias{secretsPutSecret} +\title{Add a secret.} +\usage{ +put_secret(client, scope, key, bytes_value = NULL, string_value = NULL) + +secretsPutSecret(client, scope, key, bytes_value = NULL, string_value = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{scope}{Required. The name of the scope with which the secret will be associated.} + +\item{key}{Required. A unique name to identify the secret.} + +\item{bytes_value}{If specified, value will be stored as bytes.} + +\item{string_value}{If specified, note that the value will be stored in UTF-8 (MB4) form.} +} +\description{ +Inserts a secret under the provided scope with the given name. If a secret +already exists with the same name, this command overwrites the existing +secret's value. The server encrypts the secret using the secret scope's +encryption settings before storing it. +} +\details{ +You must have \code{WRITE} or \code{MANAGE} permission on the secret scope. The secret +key must consist of alphanumeric characters, dashes, underscores, and +periods, and cannot exceed 128 characters. The maximum allowed secret value +size is 128 KB. The maximum number of secrets in a given scope is 1000. + +The input fields 'string_value' or 'bytes_value' specify the type of the +secret, which will determine the value returned when the secret value is +requested. Exactly one must be specified. + +Throws \code{RESOURCE_DOES_NOT_EXIST} if no such secret scope exists. Throws +\code{RESOURCE_LIMIT_EXCEEDED} if maximum number of secrets in scope is exceeded. +Throws \code{INVALID_PARAMETER_VALUE} if the key name or value length is invalid. +Throws \code{PERMISSION_DENIED} if the user does not have permission to make this +API call.
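+} +\examples{ +# Hedged usage sketch, not generator output: stores a string secret; the +# scope and key names are hypothetical and the scope must already exist. +\dontrun{ +client <- DatabricksClient() +put_secret(client, scope = "my-scope", key = "db-password", string_value = Sys.getenv("DB_PASSWORD")) +}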
+} diff --git a/man/put_secret_acl.Rd b/man/put_secret_acl.Rd new file mode 100644 index 00000000..079b36b2 --- /dev/null +++ b/man/put_secret_acl.Rd @@ -0,0 +1,50 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/secrets.R +\name{put_secret_acl} +\alias{put_secret_acl} +\alias{secretsPutAcl} +\title{Create/update an ACL.} +\usage{ +put_secret_acl(client, scope, principal, permission) + +secretsPutAcl(client, scope, principal, permission) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{scope}{Required. The name of the scope to apply permissions to.} + +\item{principal}{Required. The principal to which the permission is applied.} + +\item{permission}{Required. The permission level applied to the principal.} +} +\description{ +Creates or overwrites the Access Control List (ACL) associated with the given +principal (user or group) on the specified scope point. +} +\details{ +In general, a user or group will use the most powerful permission available +to them, and permissions are ordered as follows: +\itemize{ +\item \code{MANAGE} - Allowed to change ACLs, and read and write to this secret scope. +\item \code{WRITE} - Allowed to read and write to this secret scope. +\item \code{READ} - Allowed to read this secret scope and list what secrets are available. +} + +Note that in general, secret values can only be read from within a command on +a cluster (for example, through a notebook). There is no API to read the +actual secret value material outside of a cluster. However, the user's +permission will be applied based on who is executing the command, and they +must have at least READ permission. + +Users must have the \code{MANAGE} permission to invoke this API. + +The principal is a user or group name corresponding to an existing Databricks +principal to be granted or revoked access. + +Throws \code{RESOURCE_DOES_NOT_EXIST} if no such secret scope exists. Throws +\code{RESOURCE_ALREADY_EXISTS} if a permission for the principal already exists. +Throws \code{INVALID_PARAMETER_VALUE} if the permission or principal is invalid. +Throws \code{PERMISSION_DENIED} if the user does not have permission to make this +API call. +} diff --git a/man/put_serving_endpoint.Rd b/man/put_serving_endpoint.Rd new file mode 100644 index 00000000..dff3ca28 --- /dev/null +++ b/man/put_serving_endpoint.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{put_serving_endpoint} +\alias{put_serving_endpoint} +\alias{servingEndpointsPut} +\title{Update rate limits of a serving endpoint.} +\usage{ +put_serving_endpoint(client, name, rate_limits = NULL) + +servingEndpointsPut(client, name, rate_limits = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the serving endpoint whose rate limits are being updated.} + +\item{rate_limits}{The list of endpoint rate limits.} +} +\description{ +Used to update the rate limits of a serving endpoint. NOTE: only external and +foundation model endpoints are currently supported.
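+} +\examples{ +# Hedged usage sketch, not generator output: the endpoint name is hypothetical, +# and the rate-limit fields (calls, key, renewal_period) are an assumption +# about the expected list structure, not confirmed by this documentation. +\dontrun{ +client <- DatabricksClient() +put_serving_endpoint(client, name = "my-external-endpoint", +  rate_limits = list(list(calls = 100, key = "user", renewal_period = "minute"))) +}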
+} diff --git a/man/queriesCreate.Rd b/man/queriesCreate.Rd deleted file mode 100644 index b43a468b..00000000 --- a/man/queriesCreate.Rd +++ /dev/null @@ -1,46 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/queries.R -\name{queriesCreate} -\alias{queriesCreate} -\title{Create a new query definition.} -\usage{ -queriesCreate( - client, - data_source_id = NULL, - description = NULL, - name = NULL, - options = NULL, - parent = NULL, - query = NULL, - run_as_role = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{data_source_id}{Data source ID maps to the ID of the data source used by the resource and is distinct from the warehouse ID.} - -\item{description}{General description that conveys additional information about this query such as usage notes.} - -\item{name}{The title of this query that appears in list views, widget headings, and on the query page.} - -\item{options}{Exclusively used for storing a list of parameter definitions.} - -\item{parent}{The identifier of the workspace folder containing the object.} - -\item{query}{The text of the query to be run.} - -\item{run_as_role}{Sets the \strong{Run as} role for the object.} -} -\description{ -Creates a new query definition. Queries created with this endpoint belong to -the authenticated user making the request. -} -\details{ -The \code{data_source_id} field specifies the ID of the SQL warehouse to run this -query against. You can use the Data Sources API to see a complete list of -available SQL warehouses. Or you can copy the \code{data_source_id} from an -existing query. - -\strong{Note}: You cannot add a visualization until you create the query. -} diff --git a/man/queriesDelete.Rd b/man/queriesDelete.Rd deleted file mode 100644 index af12d62f..00000000 --- a/man/queriesDelete.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/queries.R -\name{queriesDelete} -\alias{queriesDelete} -\title{Delete a query.} -\usage{ -queriesDelete(client, query_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{query_id}{Required. This field has no description yet.} -} -\description{ -Moves a query to the trash. Trashed queries immediately disappear from -searches and list views, and they cannot be used for alerts. The trash is -deleted after 30 days. -} diff --git a/man/queriesGet.Rd b/man/queriesGet.Rd deleted file mode 100644 index 511cd3c6..00000000 --- a/man/queriesGet.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/queries.R -\name{queriesGet} -\alias{queriesGet} -\title{Get a query definition.} -\usage{ -queriesGet(client, query_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{query_id}{Required. This field has no description yet.} -} -\description{ -Retrieve a query object definition along with contextual permissions -information about the currently authenticated user. -} diff --git a/man/queriesList.Rd b/man/queriesList.Rd deleted file mode 100644 index 1ee61763..00000000 --- a/man/queriesList.Rd +++ /dev/null @@ -1,32 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/queries.R -\name{queriesList} -\alias{queriesList} -\title{Get a list of queries.} -\usage{ -queriesList(client, order = NULL, page = NULL, page_size = NULL, q = NULL) -} -\arguments{ -\item{client}{Required.
Instance of DatabricksClient()} - -\item{order}{Name of query attribute to order by.} - -\item{page}{Page number to retrieve.} - -\item{page_size}{Number of queries to return per page.} - -\item{q}{Full text search term.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets a list of queries. Optionally, this list can be filtered by a search -term. -} -\details{ -\strong{Warning: Calling this API concurrently 10 or more times could result in -throttling, service degradation, or a temporary ban.} -} diff --git a/man/queriesRestore.Rd b/man/queriesRestore.Rd deleted file mode 100644 index 748b9e4d..00000000 --- a/man/queriesRestore.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/queries.R -\name{queriesRestore} -\alias{queriesRestore} -\title{Restore a query.} -\usage{ -queriesRestore(client, query_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{query_id}{Required. This field has no description yet.} -} -\description{ -Restore a query that has been moved to the trash. A restored query appears in -list views and searches. You can use restored queries for alerts. -} diff --git a/man/queriesUpdate.Rd b/man/queriesUpdate.Rd deleted file mode 100644 index 4fad2113..00000000 --- a/man/queriesUpdate.Rd +++ /dev/null @@ -1,40 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/queries.R -\name{queriesUpdate} -\alias{queriesUpdate} -\title{Change a query definition.} -\usage{ -queriesUpdate( - client, - query_id, - data_source_id = NULL, - description = NULL, - name = NULL, - options = NULL, - query = NULL, - run_as_role = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{query_id}{Required. This field has no description yet.} - -\item{data_source_id}{Data source ID maps to the ID of the data source used by the resource and is distinct from the warehouse ID.} - -\item{description}{General description that conveys additional information about this query such as usage notes.} - -\item{name}{The title of this query that appears in list views, widget headings, and on the query page.} - -\item{options}{Exclusively used for storing a list of parameter definitions.} - -\item{query}{The text of the query to be run.} - -\item{run_as_role}{Sets the \strong{Run as} role for the object.} -} -\description{ -Modify this query definition. -} -\details{ -\strong{Note}: You cannot undo this operation. -} diff --git a/man/queryHistoryList.Rd b/man/queryHistoryList.Rd deleted file mode 100644 index 31605b76..00000000 --- a/man/queryHistoryList.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/query_history.R -\name{queryHistoryList} -\alias{queryHistoryList} -\title{List Queries.} -\usage{ -queryHistoryList( - client, - filter_by = NULL, - include_metrics = NULL, - max_results = NULL, - page_token = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{filter_by}{A filter to limit query history results.} - -\item{include_metrics}{Whether to include metrics about query.} - -\item{max_results}{Limit the number of results returned in one page.} - -\item{page_token}{A token that can be used to get the next page of results.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -List the history of queries through SQL warehouses.
-} -\details{ -You can filter by user ID, warehouse ID, status, and time range. -} diff --git a/man/queryVisualizationsCreate.Rd b/man/queryVisualizationsCreate.Rd deleted file mode 100644 index 582d1925..00000000 --- a/man/queryVisualizationsCreate.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/query_visualizations.R -\name{queryVisualizationsCreate} -\alias{queryVisualizationsCreate} -\title{Add visualization to a query.} -\usage{ -queryVisualizationsCreate( - client, - query_id, - type, - options, - description = NULL, - name = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{query_id}{Required. The identifier returned by :method:queries/create.} - -\item{type}{Required. The type of visualization: chart, table, pivot table, and so on.} - -\item{options}{Required. The options object varies widely from one visualization type to the next and is unsupported.} - -\item{description}{A short description of this visualization.} - -\item{name}{The name of the visualization that appears on dashboards and the query screen.} -} -\description{ -Add visualization to a query. -} diff --git a/man/queryVisualizationsDelete.Rd b/man/queryVisualizationsDelete.Rd deleted file mode 100644 index ca94ffc5..00000000 --- a/man/queryVisualizationsDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/query_visualizations.R -\name{queryVisualizationsDelete} -\alias{queryVisualizationsDelete} -\title{Remove visualization.} -\usage{ -queryVisualizationsDelete(client, id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Widget ID returned by :method:queryvisualizations/create.} -} -\description{ -Remove visualization. -} diff --git a/man/queryVisualizationsUpdate.Rd b/man/queryVisualizationsUpdate.Rd deleted file mode 100644 index 695df94e..00000000 --- a/man/queryVisualizationsUpdate.Rd +++ /dev/null @@ -1,37 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/query_visualizations.R -\name{queryVisualizationsUpdate} -\alias{queryVisualizationsUpdate} -\title{Edit existing visualization.} -\usage{ -queryVisualizationsUpdate( - client, - id, - created_at = NULL, - description = NULL, - name = NULL, - options = NULL, - type = NULL, - updated_at = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{The UUID for this visualization.} - -\item{created_at}{This field has no description yet.} - -\item{description}{A short description of this visualization.} - -\item{name}{The name of the visualization that appears on dashboards and the query screen.} - -\item{options}{The options object varies widely from one visualization type to the next and is unsupported.} - -\item{type}{The type of visualization: chart, table, pivot table, and so on.} - -\item{updated_at}{This field has no description yet.} -} -\description{ -Edit existing visualization.
-} diff --git a/man/query_serving_endpoint.Rd b/man/query_serving_endpoint.Rd new file mode 100644 index 00000000..d3cf4860 --- /dev/null +++ b/man/query_serving_endpoint.Rd @@ -0,0 +1,77 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{query_serving_endpoint} +\alias{query_serving_endpoint} +\alias{servingEndpointsQuery} +\title{Query a serving endpoint.} +\usage{ +query_serving_endpoint( + client, + name, + dataframe_records = NULL, + dataframe_split = NULL, + extra_params = NULL, + input = NULL, + inputs = NULL, + instances = NULL, + max_tokens = NULL, + messages = NULL, + n = NULL, + prompt = NULL, + stop = NULL, + stream = NULL, + temperature = NULL +) + +servingEndpointsQuery( + client, + name, + dataframe_records = NULL, + dataframe_split = NULL, + extra_params = NULL, + input = NULL, + inputs = NULL, + instances = NULL, + max_tokens = NULL, + messages = NULL, + n = NULL, + prompt = NULL, + stop = NULL, + stream = NULL, + temperature = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the serving endpoint.} + +\item{dataframe_records}{Pandas Dataframe input in the records orientation.} + +\item{dataframe_split}{Pandas Dataframe input in the split orientation.} + +\item{extra_params}{The extra parameters field used ONLY for \strong{completions, chat,} and \strong{embeddings external & foundation model} serving endpoints.} + +\item{input}{The input string (or array of strings) field used ONLY for \strong{embeddings external & foundation model} serving endpoints and is the only field (along with extra_params if needed) used by embeddings queries.} + +\item{inputs}{Tensor-based input in columnar format.} + +\item{instances}{Tensor-based input in row format.} + +\item{max_tokens}{The max tokens field used ONLY for \strong{completions} and \strong{chat external & foundation model} serving endpoints.} + +\item{messages}{The messages field used ONLY for \strong{chat external & foundation model} serving endpoints.} + +\item{n}{The n (number of candidates) field used ONLY for \strong{completions} and \strong{chat external & foundation model} serving endpoints.} + +\item{prompt}{The prompt string (or array of strings) field used ONLY for \strong{completions external & foundation model} serving endpoints and should only be used with other completions query fields.} + +\item{stop}{The stop sequences field used ONLY for \strong{completions} and \strong{chat external & foundation model} serving endpoints.} + +\item{stream}{The stream field used ONLY for \strong{completions} and \strong{chat external & foundation model} serving endpoints.} + +\item{temperature}{The temperature field used ONLY for \strong{completions} and \strong{chat external & foundation model} serving endpoints.} +} +\description{ +Query a serving endpoint. 
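+} +\examples{ +# Hedged usage sketch, not generator output: a chat-style query against a +# hypothetical foundation model endpoint, using the messages and max_tokens +# arguments described above. +\dontrun{ +client <- DatabricksClient() +query_serving_endpoint(client, name = "databricks-llm-endpoint", +  messages = list(list(role = "user", content = "Say hello.")), max_tokens = 64) +}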
+} diff --git a/man/query_vector_search_index.Rd b/man/query_vector_search_index.Rd new file mode 100644 index 00000000..eac714ee --- /dev/null +++ b/man/query_vector_search_index.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/vector_search_indexes.R +\name{query_vector_search_index} +\alias{query_vector_search_index} +\alias{vectorSearchIndexesQueryIndex} +\title{Query an index.} +\usage{ +query_vector_search_index( + client, + index_name, + columns, + filters_json = NULL, + num_results = NULL, + query_text = NULL, + query_vector = NULL, + score_threshold = NULL +) + +vectorSearchIndexesQueryIndex( + client, + index_name, + columns, + filters_json = NULL, + num_results = NULL, + query_text = NULL, + query_vector = NULL, + score_threshold = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{index_name}{Required. Name of the vector index to query.} + +\item{columns}{Required. List of column names to include in the response.} + +\item{filters_json}{JSON string representing query filters.} + +\item{num_results}{Number of results to return.} + +\item{query_text}{Query text.} + +\item{query_vector}{Query vector.} + +\item{score_threshold}{Threshold for the approximate nearest neighbor search.} +} +\description{ +Query the specified vector index. +} diff --git a/man/read_dbfs.Rd b/man/read_dbfs.Rd new file mode 100644 index 00000000..3c97c313 --- /dev/null +++ b/man/read_dbfs.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dbfs.R +\name{read_dbfs} +\alias{read_dbfs} +\alias{dbfsRead} +\title{Get the contents of a file.} +\usage{ +read_dbfs(client, path, length = NULL, offset = NULL) + +dbfsRead(client, path, length = NULL, offset = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{path}{Required. The path of the file to read.} + +\item{length}{The number of bytes to read starting from the offset.} + +\item{offset}{The offset to read from in bytes.} +} +\description{ +Returns the contents of a file. If the file does not exist, this call throws +an exception with \code{RESOURCE_DOES_NOT_EXIST}. If the path is a directory, the +read length is negative, or if the offset is negative, this call throws an +exception with \code{INVALID_PARAMETER_VALUE}. If the read length exceeds 1 MB, +this call throws an exception with \code{MAX_READ_SIZE_EXCEEDED}. +} +\details{ +If \code{offset + length} exceeds the number of bytes in a file, it reads the +contents until the end of file. +} diff --git a/man/read_volume.Rd b/man/read_volume.Rd new file mode 100644 index 00000000..5bd99c45 --- /dev/null +++ b/man/read_volume.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/volumes.R +\name{read_volume} +\alias{read_volume} +\alias{volumesRead} +\title{Get a Volume.} +\usage{ +read_volume(client, name, include_browse = NULL) + +volumesRead(client, name, include_browse = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The three-level (fully qualified) name of the volume.} + +\item{include_browse}{Whether to include volumes in the response for which the principal can only access selective metadata.} +} +\description{ +Gets a volume from the metastore for a specific catalog and schema. +} +\details{ +The caller must be a metastore admin or an owner of (or have the \strong{READ +VOLUME} privilege on) the volume.
For the latter case, the caller must also +be the owner or have the \strong{USE_CATALOG} privilege on the parent catalog and +the \strong{USE_SCHEMA} privilege on the parent schema. +} diff --git a/man/recipientActivationGetActivationUrlInfo.Rd b/man/recipientActivationGetActivationUrlInfo.Rd deleted file mode 100644 index 79f8a814..00000000 --- a/man/recipientActivationGetActivationUrlInfo.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/recipient_activation.R -\name{recipientActivationGetActivationUrlInfo} -\alias{recipientActivationGetActivationUrlInfo} -\title{Get a share activation URL.} -\usage{ -recipientActivationGetActivationUrlInfo(client, activation_url) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{activation_url}{Required. The one time activation url.} -} -\description{ -Gets an activation URL for a share. -} diff --git a/man/recipientActivationRetrieveToken.Rd b/man/recipientActivationRetrieveToken.Rd deleted file mode 100644 index f66539b0..00000000 --- a/man/recipientActivationRetrieveToken.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/recipient_activation.R -\name{recipientActivationRetrieveToken} -\alias{recipientActivationRetrieveToken} -\title{Get an access token.} -\usage{ -recipientActivationRetrieveToken(client, activation_url) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{activation_url}{Required. The one time activation url.} -} -\description{ -Retrieve access token with an activation url. This is a public API without -any authentication. -} diff --git a/man/recipientsCreate.Rd b/man/recipientsCreate.Rd deleted file mode 100644 index ce5360a7..00000000 --- a/man/recipientsCreate.Rd +++ /dev/null @@ -1,42 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/recipients.R -\name{recipientsCreate} -\alias{recipientsCreate} -\title{Create a share recipient.} -\usage{ -recipientsCreate( - client, - name, - authentication_type, - comment = NULL, - data_recipient_global_metastore_id = NULL, - ip_access_list = NULL, - owner = NULL, - properties_kvpairs = NULL, - sharing_code = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of Recipient.} - -\item{authentication_type}{Required. The delta sharing authentication type.} - -\item{comment}{Description about the recipient.} - -\item{data_recipient_global_metastore_id}{The global Unity Catalog metastore id provided by the data recipient.} - -\item{ip_access_list}{IP Access List.} - -\item{owner}{Username of the recipient owner.} - -\item{properties_kvpairs}{Recipient properties as map of string key-value pairs.} - -\item{sharing_code}{The one-time sharing code provided by the data recipient.} -} -\description{ -Creates a new recipient with the delta sharing authentication type in the -metastore. The caller must be a metastore admin or has the -\strong{CREATE_RECIPIENT} privilege on the metastore. -} diff --git a/man/recipientsDelete.Rd b/man/recipientsDelete.Rd deleted file mode 100644 index a8905471..00000000 --- a/man/recipientsDelete.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/recipients.R -\name{recipientsDelete} -\alias{recipientsDelete} -\title{Delete a share recipient.} -\usage{ -recipientsDelete(client, name) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{name}{Required. Name of the recipient.} -} -\description{ -Deletes the specified recipient from the metastore. The caller must be the -owner of the recipient. -} diff --git a/man/recipientsGet.Rd b/man/recipientsGet.Rd deleted file mode 100644 index 455b8681..00000000 --- a/man/recipientsGet.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/recipients.R -\name{recipientsGet} -\alias{recipientsGet} -\title{Get a share recipient.} -\usage{ -recipientsGet(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the recipient.} -} -\description{ -Gets a share recipient from the metastore if: -} -\details{ -\itemize{ -\item the caller is the owner of the share recipient, or: * is a metastore admin -} -} diff --git a/man/recipientsList.Rd b/man/recipientsList.Rd deleted file mode 100644 index 439f65a7..00000000 --- a/man/recipientsList.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/recipients.R -\name{recipientsList} -\alias{recipientsList} -\title{List share recipients.} -\usage{ -recipientsList(client, data_recipient_global_metastore_id = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{data_recipient_global_metastore_id}{If not provided, all recipients will be returned.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets an array of all share recipients within the current metastore where: -} -\details{ -\itemize{ -\item the caller is a metastore admin, or * the caller is the owner. There is no -guarantee of a specific ordering of the elements in the array. -} -} diff --git a/man/recipientsRotateToken.Rd b/man/recipientsRotateToken.Rd deleted file mode 100644 index 26a9ce64..00000000 --- a/man/recipientsRotateToken.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/recipients.R -\name{recipientsRotateToken} -\alias{recipientsRotateToken} -\title{Rotate a token.} -\usage{ -recipientsRotateToken(client, name, existing_token_expire_in_seconds) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the recipient.} - -\item{existing_token_expire_in_seconds}{Required. The expiration time of the bearer token in ISO 8601 format.} -} -\description{ -Refreshes the specified recipient's delta sharing authentication token with -the provided token info. The caller must be the owner of the recipient. -} diff --git a/man/recipientsSharePermissions.Rd b/man/recipientsSharePermissions.Rd deleted file mode 100644 index 6ffe4a20..00000000 --- a/man/recipientsSharePermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/recipients.R -\name{recipientsSharePermissions} -\alias{recipientsSharePermissions} -\title{Get recipient share permissions.} -\usage{ -recipientsSharePermissions(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the Recipient.} -} -\description{ -Gets the share permissions for the specified Recipient. The caller must be a -metastore admin or the owner of the Recipient. 
-} diff --git a/man/recipientsUpdate.Rd b/man/recipientsUpdate.Rd deleted file mode 100644 index a7be223d..00000000 --- a/man/recipientsUpdate.Rd +++ /dev/null @@ -1,37 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/recipients.R -\name{recipientsUpdate} -\alias{recipientsUpdate} -\title{Update a share recipient.} -\usage{ -recipientsUpdate( - client, - name, - comment = NULL, - ip_access_list = NULL, - new_name = NULL, - owner = NULL, - properties_kvpairs = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the recipient.} - -\item{comment}{Description about the recipient.} - -\item{ip_access_list}{IP Access List.} - -\item{new_name}{New name for the recipient.} - -\item{owner}{Username of the recipient owner.} - -\item{properties_kvpairs}{Recipient properties as map of string key-value pairs.} -} -\description{ -Updates an existing recipient in the metastore. The caller must be a -metastore admin or the owner of the recipient. If the recipient name will be -updated, the user must be both a metastore admin and the owner of the -recipient. -} diff --git a/man/registeredModelsCreate.Rd b/man/registeredModelsCreate.Rd deleted file mode 100644 index 4a32d273..00000000 --- a/man/registeredModelsCreate.Rd +++ /dev/null @@ -1,43 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/registered_models.R -\name{registeredModelsCreate} -\alias{registeredModelsCreate} -\title{Create a Registered Model.} -\usage{ -registeredModelsCreate( - client, - catalog_name, - schema_name, - name, - comment = NULL, - storage_location = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{catalog_name}{Required. The name of the catalog where the schema and the registered model reside.} - -\item{schema_name}{Required. The name of the schema where the registered model resides.} - -\item{name}{Required. The name of the registered model.} - -\item{comment}{The comment attached to the registered model.} - -\item{storage_location}{The storage location on the cloud under which model version data files are stored.} -} -\description{ -Creates a new registered model in Unity Catalog. -} -\details{ -File storage for model versions in the registered model will be located in -the default location which is specified by the parent schema, or the parent -catalog, or the Metastore. - -For registered model creation to succeed, the user must satisfy the following -conditions: - The caller must be a metastore admin, or be the owner of the -parent catalog and schema, or have the \strong{USE_CATALOG} privilege on the -parent catalog and the \strong{USE_SCHEMA} privilege on the parent schema. - The -caller must have the \strong{CREATE MODEL} or \strong{CREATE FUNCTION} privilege on the -parent schema. -} diff --git a/man/registeredModelsDelete.Rd b/man/registeredModelsDelete.Rd deleted file mode 100644 index 1c8e1d25..00000000 --- a/man/registeredModelsDelete.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/registered_models.R -\name{registeredModelsDelete} -\alias{registeredModelsDelete} -\title{Delete a Registered Model.} -\usage{ -registeredModelsDelete(client, full_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. 
The three-level (fully qualified) name of the registered model.} -} -\description{ -Deletes a registered model and all its model versions from the specified -parent catalog and schema. -} -\details{ -The caller must be a metastore admin or an owner of the registered model. For -the latter case, the caller must also be the owner or have the -\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} -privilege on the parent schema. -} diff --git a/man/registeredModelsDeleteAlias.Rd b/man/registeredModelsDeleteAlias.Rd deleted file mode 100644 index 162d5d35..00000000 --- a/man/registeredModelsDeleteAlias.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/registered_models.R -\name{registeredModelsDeleteAlias} -\alias{registeredModelsDeleteAlias} -\title{Delete a Registered Model Alias.} -\usage{ -registeredModelsDeleteAlias(client, full_name, alias) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. The three-level (fully qualified) name of the registered model.} - -\item{alias}{Required. The name of the alias.} -} -\description{ -Deletes a registered model alias. -} -\details{ -The caller must be a metastore admin or an owner of the registered model. For -the latter case, the caller must also be the owner or have the -\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} -privilege on the parent schema. -} diff --git a/man/registeredModelsGet.Rd b/man/registeredModelsGet.Rd deleted file mode 100644 index 15e07593..00000000 --- a/man/registeredModelsGet.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/registered_models.R -\name{registeredModelsGet} -\alias{registeredModelsGet} -\title{Get a Registered Model.} -\usage{ -registeredModelsGet(client, full_name, include_browse = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. The three-level (fully qualified) name of the registered model.} - -\item{include_browse}{Whether to include registered models in the response for which the principal can only access selective metadata for.} -} -\description{ -Get a registered model. -} -\details{ -The caller must be a metastore admin or an owner of (or have the \strong{EXECUTE} -privilege on) the registered model. For the latter case, the caller must also -be the owner or have the \strong{USE_CATALOG} privilege on the parent catalog and -the \strong{USE_SCHEMA} privilege on the parent schema. -} diff --git a/man/registeredModelsList.Rd b/man/registeredModelsList.Rd deleted file mode 100644 index 87a6a683..00000000 --- a/man/registeredModelsList.Rd +++ /dev/null @@ -1,46 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/registered_models.R -\name{registeredModelsList} -\alias{registeredModelsList} -\title{List Registered Models.} -\usage{ -registeredModelsList( - client, - catalog_name = NULL, - include_browse = NULL, - max_results = NULL, - page_token = NULL, - schema_name = NULL -) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{catalog_name}{The identifier of the catalog under which to list registered models.} - -\item{include_browse}{Whether to include registered models in the response for which the principal can only access selective metadata for.} - -\item{max_results}{Max number of registered models to return.} - -\item{page_token}{Opaque token to send for the next page of results (pagination).} - -\item{schema_name}{The identifier of the schema under which to list registered models.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -List registered models. You can list registered models under a particular -schema, or list all registered models in the current metastore. -} -\details{ -The returned models are filtered based on the privileges of the calling user. -For example, the metastore admin is able to list all the registered models. A -regular user needs to be the owner or have the \strong{EXECUTE} privilege on the -registered model to recieve the registered models in the response. For the -latter case, the caller must also be the owner or have the \strong{USE_CATALOG} -privilege on the parent catalog and the \strong{USE_SCHEMA} privilege on the -parent schema. - -There is no guarantee of a specific ordering of the elements in the response. -} diff --git a/man/registeredModelsSetAlias.Rd b/man/registeredModelsSetAlias.Rd deleted file mode 100644 index e1cb5732..00000000 --- a/man/registeredModelsSetAlias.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/registered_models.R -\name{registeredModelsSetAlias} -\alias{registeredModelsSetAlias} -\title{Set a Registered Model Alias.} -\usage{ -registeredModelsSetAlias(client, full_name, alias, version_num) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the registered model.} - -\item{alias}{Required. The name of the alias.} - -\item{version_num}{Required. The version number of the model version to which the alias points.} -} -\description{ -Set an alias on the specified registered model. -} -\details{ -The caller must be a metastore admin or an owner of the registered model. For -the latter case, the caller must also be the owner or have the -\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} -privilege on the parent schema. -} diff --git a/man/registeredModelsUpdate.Rd b/man/registeredModelsUpdate.Rd deleted file mode 100644 index 5e44af0c..00000000 --- a/man/registeredModelsUpdate.Rd +++ /dev/null @@ -1,37 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/registered_models.R -\name{registeredModelsUpdate} -\alias{registeredModelsUpdate} -\title{Update a Registered Model.} -\usage{ -registeredModelsUpdate( - client, - full_name, - comment = NULL, - new_name = NULL, - owner = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. The three-level (fully qualified) name of the registered model.} - -\item{comment}{The comment attached to the registered model.} - -\item{new_name}{New name for the registered model.} - -\item{owner}{The identifier of the user who owns the registered model.} -} -\description{ -Updates the specified registered model. -} -\details{ -The caller must be a metastore admin or an owner of the registered model. 
For -the latter case, the caller must also be the owner or have the -\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} -privilege on the parent schema. - -Currently only the name, the owner or the comment of the registered model can -be updated. -} diff --git a/man/reject_model_transition_request.Rd b/man/reject_model_transition_request.Rd new file mode 100644 index 00000000..1a38b3a8 --- /dev/null +++ b/man/reject_model_transition_request.Rd @@ -0,0 +1,31 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{reject_model_transition_request} +\alias{reject_model_transition_request} +\alias{modelRegistryRejectTransitionRequest} +\title{Reject a transition request.} +\usage{ +reject_model_transition_request(client, name, version, stage, comment = NULL) + +modelRegistryRejectTransitionRequest( + client, + name, + version, + stage, + comment = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the model.} + +\item{version}{Required. Version of the model.} + +\item{stage}{Required. Target stage of the transition.} + +\item{comment}{User-provided comment on the action.} +} +\description{ +Rejects a model version stage transition request. +} diff --git a/man/remove_instance_profile.Rd b/man/remove_instance_profile.Rd new file mode 100644 index 00000000..2cf52b54 --- /dev/null +++ b/man/remove_instance_profile.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/instance_profiles.R +\name{remove_instance_profile} +\alias{remove_instance_profile} +\alias{instanceProfilesRemove} +\title{Remove the instance profile.} +\usage{ +remove_instance_profile(client, instance_profile_arn) + +instanceProfilesRemove(client, instance_profile_arn) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{instance_profile_arn}{Required. The ARN of the instance profile to remove.} +} +\description{ +Remove the instance profile with the provided ARN. Existing clusters with +this instance profile will continue to function. +} +\details{ +This API is only accessible to admin users. +} diff --git a/man/rename_model.Rd b/man/rename_model.Rd new file mode 100644 index 00000000..b4972565 --- /dev/null +++ b/man/rename_model.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{rename_model} +\alias{rename_model} +\alias{modelRegistryRenameModel} +\title{Rename a model.} +\usage{ +rename_model(client, name, new_name = NULL) + +modelRegistryRenameModel(client, name, new_name = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Registered model unique name identifier.} + +\item{new_name}{If provided, updates the name for this \code{registered_model}.} +} +\description{ +Renames a registered model. 
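+% A minimal sketch (editor's addition; the model names are hypothetical):
+%   client <- DatabricksClient()
+%   rename_model(client, name = "churn_model", new_name = "churn_model_v2")
+% The legacy spelling modelRegistryRenameModel() remains available as an alias.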
+} diff --git a/man/repair_job_run.Rd b/man/repair_job_run.Rd new file mode 100644 index 00000000..7cb916cc --- /dev/null +++ b/man/repair_job_run.Rd @@ -0,0 +1,79 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{repair_job_run} +\alias{repair_job_run} +\alias{jobsRepairRun} +\title{Repair a job run.} +\usage{ +repair_job_run( + client, + run_id, + dbt_commands = NULL, + jar_params = NULL, + job_parameters = NULL, + latest_repair_id = NULL, + notebook_params = NULL, + pipeline_params = NULL, + python_named_params = NULL, + python_params = NULL, + rerun_all_failed_tasks = NULL, + rerun_dependent_tasks = NULL, + rerun_tasks = NULL, + spark_submit_params = NULL, + sql_params = NULL +) + +jobsRepairRun( + client, + run_id, + dbt_commands = NULL, + jar_params = NULL, + job_parameters = NULL, + latest_repair_id = NULL, + notebook_params = NULL, + pipeline_params = NULL, + python_named_params = NULL, + python_params = NULL, + rerun_all_failed_tasks = NULL, + rerun_dependent_tasks = NULL, + rerun_tasks = NULL, + spark_submit_params = NULL, + sql_params = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. The job run ID of the run to repair.} + +\item{dbt_commands}{An array of commands to execute for jobs with the dbt task, for example \verb{'dbt_commands': ['dbt deps', 'dbt seed', 'dbt run']}.} + +\item{jar_params}{A list of parameters for jobs with Spark JAR tasks, for example \verb{'jar_params': ['john doe', '35']}.} + +\item{job_parameters}{Job-level parameters used in the run.} + +\item{latest_repair_id}{The ID of the latest repair.} + +\item{notebook_params}{A map from keys to values for jobs with notebook task, for example \verb{'notebook_params': \{'name': 'john doe', 'age': '35'\}}.} + +\item{pipeline_params}{This field has no description yet.} + +\item{python_named_params}{A map from keys to values for jobs with Python wheel task, for example \verb{'python_named_params': \{'name': 'task', 'data': 'dbfs:/path/to/data.json'\}}.} + +\item{python_params}{A list of parameters for jobs with Python tasks, for example \verb{'python_params': ['john doe', '35']}.} + +\item{rerun_all_failed_tasks}{If true, repair all failed tasks.} + +\item{rerun_dependent_tasks}{If true, repair all tasks that depend on the tasks in \code{rerun_tasks}, even if they were previously successful.} + +\item{rerun_tasks}{The task keys of the task runs to repair.} + +\item{spark_submit_params}{A list of parameters for jobs with spark submit task, for example \verb{'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']}.} + +\item{sql_params}{A map from keys to values for jobs with SQL task, for example \verb{'sql_params': \{'name': 'john doe', 'age': '35'\}}.} +} +\description{ +Re-run one or more tasks. Tasks are re-run as part of the original job run. +They use the current job and task settings, and can be viewed in the history +for the original job run. 
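+% A hedged sketch (editor's addition; the run ID is hypothetical):
+%   client <- DatabricksClient()
+%   # Re-run only the failed tasks of an existing job run.
+%   repair_job_run(client, run_id = 754, rerun_all_failed_tasks = TRUE)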
+} diff --git a/man/repair_job_run_and_wait.Rd b/man/repair_job_run_and_wait.Rd new file mode 100644 index 00000000..12cb891a --- /dev/null +++ b/man/repair_job_run_and_wait.Rd @@ -0,0 +1,72 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{repair_job_run_and_wait} +\alias{repair_job_run_and_wait} +\title{Repair a job run.} +\usage{ +repair_job_run_and_wait( + client, + run_id, + dbt_commands = NULL, + jar_params = NULL, + job_parameters = NULL, + latest_repair_id = NULL, + notebook_params = NULL, + pipeline_params = NULL, + python_named_params = NULL, + python_params = NULL, + rerun_all_failed_tasks = NULL, + rerun_dependent_tasks = NULL, + rerun_tasks = NULL, + spark_submit_params = NULL, + sql_params = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. The job run ID of the run to repair.} + +\item{dbt_commands}{An array of commands to execute for jobs with the dbt task, for example \verb{'dbt_commands': ['dbt deps', 'dbt seed', 'dbt run']}.} + +\item{jar_params}{A list of parameters for jobs with Spark JAR tasks, for example \verb{'jar_params': ['john doe', '35']}.} + +\item{job_parameters}{Job-level parameters used in the run.} + +\item{latest_repair_id}{The ID of the latest repair.} + +\item{notebook_params}{A map from keys to values for jobs with notebook task, for example \verb{'notebook_params': \{'name': 'john doe', 'age': '35'\}}.} + +\item{pipeline_params}{This field has no description yet.} + +\item{python_named_params}{A map from keys to values for jobs with Python wheel task, for example \verb{'python_named_params': \{'name': 'task', 'data': 'dbfs:/path/to/data.json'\}}.} + +\item{python_params}{A list of parameters for jobs with Python tasks, for example \verb{'python_params': ['john doe', '35']}.} + +\item{rerun_all_failed_tasks}{If true, repair all failed tasks.} + +\item{rerun_dependent_tasks}{If true, repair all tasks that depend on the tasks in \code{rerun_tasks}, even if they were previously successful.} + +\item{rerun_tasks}{The task keys of the task runs to repair.} + +\item{spark_submit_params}{A list of parameters for jobs with spark submit task, for example \verb{'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']}.} + +\item{sql_params}{A map from keys to values for jobs with SQL task, for example \verb{'sql_params': \{'name': 'john doe', 'age': '35'\}}.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to the console.} +} +\description{ +This is a long-running operation that blocks until Jobs on Databricks reach +a TERMINATED or SKIPPED state, with a default timeout of 20 minutes that you can change via the \code{timeout} parameter. +By default, the state of Databricks Jobs is reported to the console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Re-run one or more tasks. Tasks are re-run as part of the original job run. +They use the current job and task settings, and can be viewed in the history +for the original job run.
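+% A hedged sketch (editor's addition; the run ID is hypothetical): repair a
+% run and block until it reaches TERMINATED or SKIPPED, allowing 30 minutes
+% instead of the default 20.
+%   client <- DatabricksClient()
+%   repair_job_run_and_wait(client, run_id = 754,
+%     rerun_all_failed_tasks = TRUE, timeout = 30)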
+} diff --git a/man/replace_ip_access_list.Rd b/man/replace_ip_access_list.Rd new file mode 100644 index 00000000..b3c3fc09 --- /dev/null +++ b/man/replace_ip_access_list.Rd @@ -0,0 +1,53 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ip_access_lists.R +\name{replace_ip_access_list} +\alias{replace_ip_access_list} +\alias{ipAccessListsReplace} +\title{Replace access list.} +\usage{ +replace_ip_access_list( + client, + ip_access_list_id, + label, + list_type, + enabled, + ip_addresses = NULL +) + +ipAccessListsReplace( + client, + ip_access_list_id, + label, + list_type, + enabled, + ip_addresses = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{ip_access_list_id}{Required. The ID for the corresponding IP access list.} + +\item{label}{Required. Label for the IP access list.} + +\item{list_type}{Required. Type of IP access list.} + +\item{enabled}{Required. Specifies whether this IP access list is enabled.} + +\item{ip_addresses}{This field has no description yet.} +} +\description{ +Replaces an IP access list, specified by its ID. +} +\details{ +A list can include allow lists and block lists. See the top of this file for +a description of how the server treats allow lists and block lists at run +time. When replacing an IP access list: * For all allow lists and block lists +combined, the API supports a maximum of 1000 IP/CIDR values, where one CIDR +counts as a single value. Attempts to exceed that number return error 400 +with \code{error_code} value \code{QUOTA_EXCEEDED}. * If the resulting list would block +the calling user's current IP, error 400 is returned with \code{error_code} value +\code{INVALID_STATE}. It can take a few minutes for the changes to take effect. +Note that your resulting IP access list has no effect until you enable the +feature. See :method:workspaceconf/setStatus. +} diff --git a/man/reposCreate.Rd b/man/reposCreate.Rd deleted file mode 100644 index a3daced7..00000000 --- a/man/reposCreate.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/repos.R -\name{reposCreate} -\alias{reposCreate} -\title{Create a repo.} -\usage{ -reposCreate(client, url, provider, path = NULL, sparse_checkout = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{url}{Required. URL of the Git repository to be linked.} - -\item{provider}{Required. Git provider.} - -\item{path}{Desired path for the repo in the workspace.} - -\item{sparse_checkout}{If specified, the repo will be created with sparse checkout enabled.} -} -\description{ -Creates a repo in the workspace and links it to the remote Git repo -specified. Note that repos created programmatically must be linked to a -remote Git repo, unlike repos created in the browser. -} diff --git a/man/reposDelete.Rd b/man/reposDelete.Rd deleted file mode 100644 index 0a0f93a3..00000000 --- a/man/reposDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/repos.R -\name{reposDelete} -\alias{reposDelete} -\title{Delete a repo.} -\usage{ -reposDelete(client, repo_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{repo_id}{Required. The ID for the corresponding repo to access.} -} -\description{ -Deletes the specified repo. 
-} diff --git a/man/reposGet.Rd b/man/reposGet.Rd deleted file mode 100644 index 13801356..00000000 --- a/man/reposGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/repos.R -\name{reposGet} -\alias{reposGet} -\title{Get a repo.} -\usage{ -reposGet(client, repo_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{repo_id}{Required. The ID for the corresponding repo to access.} -} -\description{ -Returns the repo with the given repo ID. -} diff --git a/man/reposGetPermissionLevels.Rd b/man/reposGetPermissionLevels.Rd deleted file mode 100644 index 76d7bbf8..00000000 --- a/man/reposGetPermissionLevels.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/repos.R -\name{reposGetPermissionLevels} -\alias{reposGetPermissionLevels} -\title{Get repo permission levels.} -\usage{ -reposGetPermissionLevels(client, repo_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{repo_id}{Required. The repo for which to get or manage permissions.} -} -\description{ -Gets the permission levels that a user can have on an object. -} diff --git a/man/reposGetPermissions.Rd b/man/reposGetPermissions.Rd deleted file mode 100644 index d27c4ae4..00000000 --- a/man/reposGetPermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/repos.R -\name{reposGetPermissions} -\alias{reposGetPermissions} -\title{Get repo permissions.} -\usage{ -reposGetPermissions(client, repo_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{repo_id}{Required. The repo for which to get or manage permissions.} -} -\description{ -Gets the permissions of a repo. Repos can inherit permissions from their root -object. -} diff --git a/man/reposList.Rd b/man/reposList.Rd deleted file mode 100644 index 165bdf2f..00000000 --- a/man/reposList.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/repos.R -\name{reposList} -\alias{reposList} -\title{Get repos.} -\usage{ -reposList(client, next_page_token = NULL, path_prefix = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{next_page_token}{Token used to get the next page of results.} - -\item{path_prefix}{Filters repos that have paths starting with the given path prefix.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Returns repos that the calling user has Manage permissions on. Results are -paginated with each page containing twenty repos. -} diff --git a/man/reposSetPermissions.Rd b/man/reposSetPermissions.Rd deleted file mode 100644 index 8b6bbf58..00000000 --- a/man/reposSetPermissions.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/repos.R -\name{reposSetPermissions} -\alias{reposSetPermissions} -\title{Set repo permissions.} -\usage{ -reposSetPermissions(client, repo_id, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{repo_id}{Required. The repo for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets permissions on a repo. Repos can inherit permissions from their root -object. 
-} diff --git a/man/reposUpdate.Rd b/man/reposUpdate.Rd deleted file mode 100644 index 5e2bd512..00000000 --- a/man/reposUpdate.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/repos.R -\name{reposUpdate} -\alias{reposUpdate} -\title{Update a repo.} -\usage{ -reposUpdate(client, repo_id, branch = NULL, sparse_checkout = NULL, tag = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{repo_id}{Required. The ID for the corresponding repo to access.} - -\item{branch}{Branch that the local version of the repo is checked out to.} - -\item{sparse_checkout}{If specified, update the sparse checkout settings.} - -\item{tag}{Tag that the local version of the repo is checked out to.} -} -\description{ -Updates the repo to a different branch or tag, or updates the repo to the -latest commit on the same branch. -} diff --git a/man/reposUpdatePermissions.Rd b/man/reposUpdatePermissions.Rd deleted file mode 100644 index a02961f9..00000000 --- a/man/reposUpdatePermissions.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/repos.R -\name{reposUpdatePermissions} -\alias{reposUpdatePermissions} -\title{Update repo permissions.} -\usage{ -reposUpdatePermissions(client, repo_id, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{repo_id}{Required. The repo for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Updates the permissions on a repo. Repos can inherit permissions from their -root object. -} diff --git a/man/reset_job.Rd b/man/reset_job.Rd new file mode 100644 index 00000000..18a85c9f --- /dev/null +++ b/man/reset_job.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{reset_job} +\alias{reset_job} +\alias{jobsReset} +\title{Update all job settings (reset).} +\usage{ +reset_job(client, job_id, new_settings) + +jobsReset(client, job_id, new_settings) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{job_id}{Required. The canonical identifier of the job to reset.} + +\item{new_settings}{Required. The new settings of the job.} +} +\description{ +Overwrite all settings for the given job. Use the \href{:method:jobs/update}{\emph{Update} endpoint} to update job settings partially. +} diff --git a/man/resize_cluster.Rd b/man/resize_cluster.Rd new file mode 100644 index 00000000..2cbb2aa4 --- /dev/null +++ b/man/resize_cluster.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{resize_cluster} +\alias{resize_cluster} +\alias{clustersResize} +\title{Resize cluster.} +\usage{ +resize_cluster(client, cluster_id, autoscale = NULL, num_workers = NULL) + +clustersResize(client, cluster_id, autoscale = NULL, num_workers = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster to be resized.} + +\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} + +\item{num_workers}{Number of worker nodes that this cluster should have.} +} +\description{ +Resizes a cluster to have a desired number of workers. This will fail unless +the cluster is in a \code{RUNNING} state. 
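+% A hedged sketch (editor's addition; the cluster ID is hypothetical):
+%   client <- DatabricksClient()
+%   # Grow a RUNNING cluster to eight workers; returns without waiting.
+%   resize_cluster(client, cluster_id = "0123-456789-abcde000", num_workers = 8)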
+} diff --git a/man/resize_cluster_and_wait.Rd b/man/resize_cluster_and_wait.Rd new file mode 100644 index 00000000..cd890f9f --- /dev/null +++ b/man/resize_cluster_and_wait.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{resize_cluster_and_wait} +\alias{resize_cluster_and_wait} +\title{Resize cluster.} +\usage{ +resize_cluster_and_wait( + client, + cluster_id, + autoscale = NULL, + num_workers = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster to be resized.} + +\item{autoscale}{Parameters needed in order to automatically scale clusters up and down based on load.} + +\item{num_workers}{Number of worker nodes that this cluster should have.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to the console.} +} +\description{ +This is a long-running operation that blocks until Clusters on Databricks reach +a RUNNING state, with a default timeout of 20 minutes that you can change via the \code{timeout} parameter. +By default, the state of Databricks Clusters is reported to the console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Resizes a cluster to have a desired number of workers. This will fail unless +the cluster is in a \code{RUNNING} state. +} diff --git a/man/restart_cluster.Rd b/man/restart_cluster.Rd new file mode 100644 index 00000000..8cd09c6a --- /dev/null +++ b/man/restart_cluster.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{restart_cluster} +\alias{restart_cluster} +\alias{clustersRestart} +\title{Restart cluster.} +\usage{ +restart_cluster(client, cluster_id, restart_user = NULL) + +clustersRestart(client, cluster_id, restart_user = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster to be started.} + +\item{restart_user}{\if{html}{\out{}}.} +} +\description{ +Restarts a Spark cluster with the supplied ID. If the cluster is not +currently in a \code{RUNNING} state, nothing will happen. +} diff --git a/man/restart_cluster_and_wait.Rd b/man/restart_cluster_and_wait.Rd new file mode 100644 index 00000000..27d3a3b2 --- /dev/null +++ b/man/restart_cluster_and_wait.Rd @@ -0,0 +1,35 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{restart_cluster_and_wait} +\alias{restart_cluster_and_wait} +\title{Restart cluster.} +\usage{ +restart_cluster_and_wait( + client, + cluster_id, + restart_user = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster to be started.} + +\item{restart_user}{\if{html}{\out{}}.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to the console.} +} +\description{ +This is a long-running operation that blocks until Clusters on Databricks reach +a RUNNING state, with a default timeout of 20 minutes that you can change via the \code{timeout} parameter. +By default, the state of Databricks Clusters is reported to the console. You can change this behavior +by changing the \code{callback} parameter.
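+% A hedged sketch (editor's addition; the cluster ID is hypothetical):
+% restart and block until the cluster reports RUNNING again, using the
+% default 20-minute timeout and console reporter.
+%   client <- DatabricksClient()
+%   restart_cluster_and_wait(client, cluster_id = "0123-456789-abcde000")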
+} +\details{ +Restarts a Spark cluster with the supplied ID. If the cluster is not +currently in a \code{RUNNING} state, nothing will happen. +} diff --git a/man/restore_dashboard.Rd b/man/restore_dashboard.Rd new file mode 100644 index 00000000..fbafb5c7 --- /dev/null +++ b/man/restore_dashboard.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dashboards.R +\name{restore_dashboard} +\alias{restore_dashboard} +\alias{dashboardsRestore} +\title{Restore a dashboard.} +\usage{ +restore_dashboard(client, dashboard_id) + +dashboardsRestore(client, dashboard_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{dashboard_id}{Required. This field has no description yet.} +} +\description{ +A restored dashboard appears in list views and searches and can be shared. +} diff --git a/man/restore_experiment.Rd b/man/restore_experiment.Rd new file mode 100644 index 00000000..1922d034 --- /dev/null +++ b/man/restore_experiment.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{restore_experiment} +\alias{restore_experiment} +\alias{experimentsRestoreExperiment} +\title{Restores an experiment.} +\usage{ +restore_experiment(client, experiment_id) + +experimentsRestoreExperiment(client, experiment_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{experiment_id}{Required. ID of the associated experiment.} +} +\description{ +Restore an experiment marked for deletion. This also restores associated +metadata, runs, metrics, params, and tags. If experiment uses FileStore, +underlying artifacts associated with experiment are also restored. +} +\details{ +Throws \code{RESOURCE_DOES_NOT_EXIST} if experiment was never created or was +permanently deleted. +} diff --git a/man/restore_experiment_run.Rd b/man/restore_experiment_run.Rd new file mode 100644 index 00000000..f4313126 --- /dev/null +++ b/man/restore_experiment_run.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{restore_experiment_run} +\alias{restore_experiment_run} +\alias{experimentsRestoreRun} +\title{Restore a run.} +\usage{ +restore_experiment_run(client, run_id) + +experimentsRestoreRun(client, run_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{run_id}{Required. ID of the run to restore.} +} +\description{ +Restores a deleted run. +} diff --git a/man/restore_experiment_runs.Rd b/man/restore_experiment_runs.Rd new file mode 100644 index 00000000..9584002a --- /dev/null +++ b/man/restore_experiment_runs.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{restore_experiment_runs} +\alias{restore_experiment_runs} +\alias{experimentsRestoreRuns} +\title{Restore runs by deletion time.} +\usage{ +restore_experiment_runs( + client, + experiment_id, + min_timestamp_millis, + max_runs = NULL +) + +experimentsRestoreRuns( + client, + experiment_id, + min_timestamp_millis, + max_runs = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{experiment_id}{Required. The ID of the experiment containing the runs to restore.} + +\item{min_timestamp_millis}{Required. 
The minimum deletion timestamp in milliseconds since the UNIX epoch for restoring runs.} + +\item{max_runs}{An optional positive integer indicating the maximum number of runs to restore.} +} +\description{ +Bulk restore runs in an experiment that were deleted no earlier than the +specified timestamp. Restores at most max_runs per request. To call this API +from a Databricks Notebook in Python, you can use the client code snippet on +https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-restore. +} diff --git a/man/restore_query.Rd b/man/restore_query.Rd new file mode 100644 index 00000000..8a1e8d4f --- /dev/null +++ b/man/restore_query.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/queries.R +\name{restore_query} +\alias{restore_query} +\alias{queriesRestore} +\title{Restore a query.} +\usage{ +restore_query(client, query_id) + +queriesRestore(client, query_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{query_id}{Required. This field has no description yet.} +} +\description{ +Restore a query that has been moved to the trash. A restored query appears in +list views and searches. You can use restored queries for alerts. +} diff --git a/man/restrictWorkspaceAdminsDelete.Rd b/man/restrictWorkspaceAdminsDelete.Rd deleted file mode 100644 index 71a13cf5..00000000 --- a/man/restrictWorkspaceAdminsDelete.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/restrict_workspace_admins.R -\name{restrictWorkspaceAdminsDelete} -\alias{restrictWorkspaceAdminsDelete} -\title{Delete the restrict workspace admins setting.} -\usage{ -restrictWorkspaceAdminsDelete(client, etag = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{etag}{etag used for versioning.} -} -\description{ -Reverts the restrict workspace admins setting status for the workspace. A -fresh etag needs to be provided in \code{DELETE} requests (as a query parameter). -The etag can be retrieved by making a \code{GET} request before the DELETE -request. If the setting is updated/deleted concurrently, \code{DELETE} fails with -409 and the request must be retried by using the fresh etag in the 409 -response. -} diff --git a/man/restrictWorkspaceAdminsGet.Rd b/man/restrictWorkspaceAdminsGet.Rd deleted file mode 100644 index 004f634f..00000000 --- a/man/restrictWorkspaceAdminsGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/restrict_workspace_admins.R -\name{restrictWorkspaceAdminsGet} -\alias{restrictWorkspaceAdminsGet} -\title{Get the restrict workspace admins setting.} -\usage{ -restrictWorkspaceAdminsGet(client, etag = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{etag}{etag used for versioning.} -} -\description{ -Gets the restrict workspace admins setting. -} diff --git a/man/restrictWorkspaceAdminsUpdate.Rd b/man/restrictWorkspaceAdminsUpdate.Rd deleted file mode 100644 index 5d2afd21..00000000 --- a/man/restrictWorkspaceAdminsUpdate.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/restrict_workspace_admins.R -\name{restrictWorkspaceAdminsUpdate} -\alias{restrictWorkspaceAdminsUpdate} -\title{Update the restrict workspace admins setting.} -\usage{ -restrictWorkspaceAdminsUpdate(client, allow_missing, setting, field_mask) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{allow_missing}{Required. This should always be set to true for Settings API.} - -\item{setting}{Required. This field has no description yet.} - -\item{field_mask}{Required. Field mask is required to be passed into the PATCH request.} -} -\description{ -Updates the restrict workspace admins setting for the workspace. A fresh etag -needs to be provided in \code{PATCH} requests (as part of the setting field). The -etag can be retrieved by making a GET request before the \code{PATCH} request. If -the setting is updated concurrently, \code{PATCH} fails with 409 and the request -must be retried by using the fresh etag in the 409 response. -} diff --git a/man/retrieve_recipient_activation_token.Rd b/man/retrieve_recipient_activation_token.Rd new file mode 100644 index 00000000..60f68398 --- /dev/null +++ b/man/retrieve_recipient_activation_token.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/recipient_activation.R +\name{retrieve_recipient_activation_token} +\alias{retrieve_recipient_activation_token} +\alias{recipientActivationRetrieveToken} +\title{Get an access token.} +\usage{ +retrieve_recipient_activation_token(client, activation_url) + +recipientActivationRetrieveToken(client, activation_url) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{activation_url}{Required. The one time activation url.} +} +\description{ +Retrieve access token with an activation url. This is a public API without +any authentication. +} diff --git a/man/rotate_recipient_token.Rd b/man/rotate_recipient_token.Rd new file mode 100644 index 00000000..0070ffac --- /dev/null +++ b/man/rotate_recipient_token.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/recipients.R +\name{rotate_recipient_token} +\alias{rotate_recipient_token} +\alias{recipientsRotateToken} +\title{Rotate a token.} +\usage{ +rotate_recipient_token(client, name, existing_token_expire_in_seconds) + +recipientsRotateToken(client, name, existing_token_expire_in_seconds) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the recipient.} + +\item{existing_token_expire_in_seconds}{Required. The expiration time of the bearer token in ISO 8601 format.} +} +\description{ +Refreshes the specified recipient's delta sharing authentication token with +the provided token info. The caller must be the owner of the recipient. +} diff --git a/man/run_job_now.Rd b/man/run_job_now.Rd new file mode 100644 index 00000000..8d9ca5f4 --- /dev/null +++ b/man/run_job_now.Rd @@ -0,0 +1,69 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{run_job_now} +\alias{run_job_now} +\alias{jobsRunNow} +\title{Trigger a new job run.} +\usage{ +run_job_now( + client, + job_id, + dbt_commands = NULL, + idempotency_token = NULL, + jar_params = NULL, + job_parameters = NULL, + notebook_params = NULL, + pipeline_params = NULL, + python_named_params = NULL, + python_params = NULL, + queue = NULL, + spark_submit_params = NULL, + sql_params = NULL +) + +jobsRunNow( + client, + job_id, + dbt_commands = NULL, + idempotency_token = NULL, + jar_params = NULL, + job_parameters = NULL, + notebook_params = NULL, + pipeline_params = NULL, + python_named_params = NULL, + python_params = NULL, + queue = NULL, + spark_submit_params = NULL, + sql_params = NULL +) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{job_id}{Required. The ID of the job to be executed.} + +\item{dbt_commands}{An array of commands to execute for jobs with the dbt task, for example \verb{'dbt_commands': ['dbt deps', 'dbt seed', 'dbt run']}.} + +\item{idempotency_token}{An optional token to guarantee the idempotency of job run requests.} + +\item{jar_params}{A list of parameters for jobs with Spark JAR tasks, for example \verb{'jar_params': ['john doe', '35']}.} + +\item{job_parameters}{Job-level parameters used in the run.} + +\item{notebook_params}{A map from keys to values for jobs with notebook task, for example \verb{'notebook_params': \{'name': 'john doe', 'age': '35'\}}.} + +\item{pipeline_params}{This field has no description yet.} + +\item{python_named_params}{A map from keys to values for jobs with Python wheel task, for example \verb{'python_named_params': \{'name': 'task', 'data': 'dbfs:/path/to/data.json'\}}.} + +\item{python_params}{A list of parameters for jobs with Python tasks, for example \verb{'python_params': ['john doe', '35']}.} + +\item{queue}{The queue settings of the run.} + +\item{spark_submit_params}{A list of parameters for jobs with spark submit task, for example \verb{'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']}.} + +\item{sql_params}{A map from keys to values for jobs with SQL task, for example \verb{'sql_params': \{'name': 'john doe', 'age': '35'\}}.} +} +\description{ +Run a job and return the \code{run_id} of the triggered run. +} diff --git a/man/run_job_now_and_wait.Rd b/man/run_job_now_and_wait.Rd new file mode 100644 index 00000000..42373ac6 --- /dev/null +++ b/man/run_job_now_and_wait.Rd @@ -0,0 +1,64 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{run_job_now_and_wait} +\alias{run_job_now_and_wait} +\title{Trigger a new job run.} +\usage{ +run_job_now_and_wait( + client, + job_id, + dbt_commands = NULL, + idempotency_token = NULL, + jar_params = NULL, + job_parameters = NULL, + notebook_params = NULL, + pipeline_params = NULL, + python_named_params = NULL, + python_params = NULL, + queue = NULL, + spark_submit_params = NULL, + sql_params = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{job_id}{Required. 
The ID of the job to be executed.} + +\item{dbt_commands}{An array of commands to execute for jobs with the dbt task, for example \verb{'dbt_commands': ['dbt deps', 'dbt seed', 'dbt run']}.} + +\item{idempotency_token}{An optional token to guarantee the idempotency of job run requests.} + +\item{jar_params}{A list of parameters for jobs with Spark JAR tasks, for example \verb{'jar_params': ['john doe', '35']}.} + +\item{job_parameters}{Job-level parameters used in the run.} + +\item{notebook_params}{A map from keys to values for jobs with notebook task, for example \verb{'notebook_params': \{'name': 'john doe', 'age': '35'\}}.} + +\item{pipeline_params}{This field has no description yet.} + +\item{python_named_params}{A map from keys to values for jobs with Python wheel task, for example \verb{'python_named_params': \{'name': 'task', 'data': 'dbfs:/path/to/data.json'\}}.} + +\item{python_params}{A list of parameters for jobs with Python tasks, for example \verb{'python_params': ['john doe', '35']}.} + +\item{queue}{The queue settings of the run.} + +\item{spark_submit_params}{A list of parameters for jobs with spark submit task, for example \verb{'spark_submit_params': ['--class', 'org.apache.spark.examples.SparkPi']}.} + +\item{sql_params}{A map from keys to values for jobs with SQL task, for example \verb{'sql_params': \{'name': 'john doe', 'age': '35'\}}.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to the console.} +} +\description{ +This is a long-running operation that blocks until Jobs on Databricks reach +a TERMINATED or SKIPPED state, with a default timeout of 20 minutes that you can change via the \code{timeout} parameter. +By default, the state of Databricks Jobs is reported to the console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Run a job and return the \code{run_id} of the triggered run. +} diff --git a/man/run_lakehouse_monitor_refresh.Rd b/man/run_lakehouse_monitor_refresh.Rd new file mode 100644 index 00000000..66a938ca --- /dev/null +++ b/man/run_lakehouse_monitor_refresh.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/lakehouse_monitors.R +\name{run_lakehouse_monitor_refresh} +\alias{run_lakehouse_monitor_refresh} +\alias{lakehouseMonitorsRunRefresh} +\title{Queue a metric refresh for a monitor.} +\usage{ +run_lakehouse_monitor_refresh(client, full_name) + +lakehouseMonitorsRunRefresh(client, full_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the table.} +} +\description{ +Queues a metric refresh on the monitor for the specified table. The refresh +will execute in the background. +} +\details{ +The caller must either: 1. be an owner of the table's parent catalog 2. have +\strong{USE_CATALOG} on the table's parent catalog and be an owner of the table's +parent schema 3. have the following permissions: - \strong{USE_CATALOG} on the +table's parent catalog - \strong{USE_SCHEMA} on the table's parent schema - be an +owner of the table + +Additionally, the call must be made from the workspace where the monitor was +created.
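+% A hedged sketch (editor's addition; the three-level table name is
+% hypothetical):
+%   client <- DatabricksClient()
+%   # Queue a background metric refresh for the table's monitor.
+%   run_lakehouse_monitor_refresh(client, full_name = "main.sales.orders")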
+} diff --git a/man/schemasCreate.Rd b/man/schemasCreate.Rd deleted file mode 100644 index 8eea3626..00000000 --- a/man/schemasCreate.Rd +++ /dev/null @@ -1,33 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/schemas.R -\name{schemasCreate} -\alias{schemasCreate} -\title{Create a schema.} -\usage{ -schemasCreate( - client, - name, - catalog_name, - comment = NULL, - properties = NULL, - storage_root = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of schema, relative to parent catalog.} - -\item{catalog_name}{Required. Name of parent catalog.} - -\item{comment}{User-provided free-form text description.} - -\item{properties}{A map of key-value properties attached to the securable.} - -\item{storage_root}{Storage root URL for managed tables within schema.} -} -\description{ -Creates a new schema for catalog in the Metatastore. The caller must be a -metastore admin, or have the \strong{CREATE_SCHEMA} privilege in the parent -catalog. -} diff --git a/man/schemasDelete.Rd b/man/schemasDelete.Rd deleted file mode 100644 index 20862465..00000000 --- a/man/schemasDelete.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/schemas.R -\name{schemasDelete} -\alias{schemasDelete} -\title{Delete a schema.} -\usage{ -schemasDelete(client, full_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the schema.} -} -\description{ -Deletes the specified schema from the parent catalog. The caller must be the -owner of the schema or an owner of the parent catalog. -} diff --git a/man/schemasGet.Rd b/man/schemasGet.Rd deleted file mode 100644 index 4efcbfe7..00000000 --- a/man/schemasGet.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/schemas.R -\name{schemasGet} -\alias{schemasGet} -\title{Get a schema.} -\usage{ -schemasGet(client, full_name, include_browse = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the schema.} - -\item{include_browse}{Whether to include schemas in the response for which the principal can only access selective metadata for.} -} -\description{ -Gets the specified schema within the metastore. The caller must be a -metastore admin, the owner of the schema, or a user that has the -\strong{USE_SCHEMA} privilege on the schema. -} diff --git a/man/schemasList.Rd b/man/schemasList.Rd deleted file mode 100644 index f0ecd46a..00000000 --- a/man/schemasList.Rd +++ /dev/null @@ -1,35 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/schemas.R -\name{schemasList} -\alias{schemasList} -\title{List schemas.} -\usage{ -schemasList( - client, - catalog_name, - include_browse = NULL, - max_results = NULL, - page_token = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{catalog_name}{Required. Parent catalog for schemas of interest.} - -\item{include_browse}{Whether to include schemas in the response for which the principal can only access selective metadata for.} - -\item{max_results}{Maximum number of schemas to return.} - -\item{page_token}{Opaque pagination token to go to next page based on previous query.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets an array of schemas for a catalog in the metastore. 
If the caller is the -metastore admin or the owner of the parent catalog, all schemas for the -catalog will be retrieved. Otherwise, only schemas owned by the caller (or -for which the caller has the \strong{USE_SCHEMA} privilege) will be retrieved. -There is no guarantee of a specific ordering of the elements in the array. -} diff --git a/man/schemasUpdate.Rd b/man/schemasUpdate.Rd deleted file mode 100644 index ea7ae0a5..00000000 --- a/man/schemasUpdate.Rd +++ /dev/null @@ -1,38 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/schemas.R -\name{schemasUpdate} -\alias{schemasUpdate} -\title{Update a schema.} -\usage{ -schemasUpdate( - client, - full_name, - comment = NULL, - enable_predictive_optimization = NULL, - new_name = NULL, - owner = NULL, - properties = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the schema.} - -\item{comment}{User-provided free-form text description.} - -\item{enable_predictive_optimization}{Whether predictive optimization should be enabled for this object and objects under it.} - -\item{new_name}{New name for the schema.} - -\item{owner}{Username of current owner of schema.} - -\item{properties}{A map of key-value properties attached to the securable.} -} -\description{ -Updates a schema for a catalog. The caller must be the owner of the schema or -a metastore admin. If the caller is a metastore admin, only the \strong{owner} -field can be changed in the update. If the \strong{name} field must be updated, -the caller must be a metastore admin or have the \strong{CREATE_SCHEMA} privilege -on the parent catalog. -} diff --git a/man/search_experiment_experiments.Rd b/man/search_experiment_experiments.Rd new file mode 100644 index 00000000..26355a93 --- /dev/null +++ b/man/search_experiment_experiments.Rd @@ -0,0 +1,44 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{search_experiment_experiments} +\alias{search_experiment_experiments} +\alias{experimentsSearchExperiments} +\title{Search experiments.} +\usage{ +search_experiment_experiments( + client, + filter = NULL, + max_results = NULL, + order_by = NULL, + page_token = NULL, + view_type = NULL +) + +experimentsSearchExperiments( + client, + filter = NULL, + max_results = NULL, + order_by = NULL, + page_token = NULL, + view_type = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{filter}{String representing a SQL filter condition (e.g.} + +\item{max_results}{Maximum number of experiments desired.} + +\item{order_by}{List of columns for ordering search results, which can include experiment name and last updated timestamp with an optional 'DESC' or 'ASC' annotation, where 'ASC' is the default.} + +\item{page_token}{Token indicating the page of experiments to fetch.} + +\item{view_type}{Qualifier for type of experiments to be returned.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Searches for experiments that satisfy specified search criteria. 
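For the experiment search documented above, a minimal illustrative call; the filter string is a hypothetical example of the SQL-like condition that \code{filter} expects:

client <- DatabricksClient()
# All response pages are collected into a single data.frame.
exps <- search_experiment_experiments(client,
                                      filter = "name LIKE 'churn-%'",
                                      max_results = 100)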
+} diff --git a/man/search_experiment_runs.Rd b/man/search_experiment_runs.Rd new file mode 100644 index 00000000..1ffb25f5 --- /dev/null +++ b/man/search_experiment_runs.Rd @@ -0,0 +1,51 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{search_experiment_runs} +\alias{search_experiment_runs} +\alias{experimentsSearchRuns} +\title{Search for runs.} +\usage{ +search_experiment_runs( + client, + experiment_ids = NULL, + filter = NULL, + max_results = NULL, + order_by = NULL, + page_token = NULL, + run_view_type = NULL +) + +experimentsSearchRuns( + client, + experiment_ids = NULL, + filter = NULL, + max_results = NULL, + order_by = NULL, + page_token = NULL, + run_view_type = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{experiment_ids}{List of experiment IDs to search over.} + +\item{filter}{A filter expression over params, metrics, and tags that allows returning a subset of runs.} + +\item{max_results}{Maximum number of runs desired.} + +\item{order_by}{List of columns to be ordered by, including attributes, params, metrics, and tags with an optional 'DESC' or 'ASC' annotation, where 'ASC' is the default.} + +\item{page_token}{Token for the current page of runs.} + +\item{run_view_type}{Whether to display only active, only deleted, or all runs.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Searches for runs that satisfy expressions. +} +\details{ +Search expressions can use \code{mlflowMetric} and \code{mlflowParam} keys. +} diff --git a/man/search_model_models.Rd b/man/search_model_models.Rd new file mode 100644 index 00000000..b397656e --- /dev/null +++ b/man/search_model_models.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{search_model_models} +\alias{search_model_models} +\alias{modelRegistrySearchModels} +\title{Search models.} +\usage{ +search_model_models( + client, + filter = NULL, + max_results = NULL, + order_by = NULL, + page_token = NULL +) + +modelRegistrySearchModels( + client, + filter = NULL, + max_results = NULL, + order_by = NULL, + page_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{filter}{String filter condition, like 'name LIKE 'my-model-name''.} + +\item{max_results}{Maximum number of models desired.} + +\item{order_by}{List of columns for ordering search results, which can include model name and last updated timestamp with an optional 'DESC' or 'ASC' annotation, where 'ASC' is the default.} + +\item{page_token}{Pagination token to go to the next page based on a previous search query.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Search for registered models based on the specified \strong{filter}.
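A comparable sketch for the model search above, reusing the filter example from the argument description; pagination is again handled internally:

client <- DatabricksClient()
models <- search_model_models(client, filter = "name LIKE 'my-model-name'")
nrow(models)  # one row per matching registered model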
+} diff --git a/man/search_model_versions.Rd b/man/search_model_versions.Rd new file mode 100644 index 00000000..d44316af --- /dev/null +++ b/man/search_model_versions.Rd @@ -0,0 +1,40 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{search_model_versions} +\alias{search_model_versions} +\alias{modelRegistrySearchModelVersions} +\title{Searches model versions.} +\usage{ +search_model_versions( + client, + filter = NULL, + max_results = NULL, + order_by = NULL, + page_token = NULL +) + +modelRegistrySearchModelVersions( + client, + filter = NULL, + max_results = NULL, + order_by = NULL, + page_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{filter}{String filter condition, like 'name='my-model-name''.} + +\item{max_results}{Maximum number of models desired.} + +\item{order_by}{List of columns to be ordered by including model name, version, stage with an optional 'DESC' or 'ASC' annotation, where 'ASC' is the default.} + +\item{page_token}{Pagination token to go to next page based on previous search query.} +} +\value{ +\code{data.frame} with all of the response pages. +} +\description{ +Searches for specific model versions based on the supplied \strong{filter}. +} diff --git a/man/secretsCreateScope.Rd b/man/secretsCreateScope.Rd deleted file mode 100644 index 1279f06c..00000000 --- a/man/secretsCreateScope.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/secrets.R -\name{secretsCreateScope} -\alias{secretsCreateScope} -\title{Create a new secret scope.} -\usage{ -secretsCreateScope( - client, - scope, - backend_azure_keyvault = NULL, - initial_manage_principal = NULL, - scope_backend_type = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{scope}{Required. Scope name requested by the user.} - -\item{backend_azure_keyvault}{The metadata for the secret scope if the type is \code{AZURE_KEYVAULT}.} - -\item{initial_manage_principal}{The principal that is initially granted \code{MANAGE} permission to the created scope.} - -\item{scope_backend_type}{The backend type the scope will be created with.} -} -\description{ -The scope name must consist of alphanumeric characters, dashes, underscores, -and periods, and may not exceed 128 characters. -} diff --git a/man/secretsDeleteAcl.Rd b/man/secretsDeleteAcl.Rd deleted file mode 100644 index 573d09d3..00000000 --- a/man/secretsDeleteAcl.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/secrets.R -\name{secretsDeleteAcl} -\alias{secretsDeleteAcl} -\title{Delete an ACL.} -\usage{ -secretsDeleteAcl(client, scope, principal) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{scope}{Required. The name of the scope to remove permissions from.} - -\item{principal}{Required. The principal to remove an existing ACL from.} -} -\description{ -Deletes the given ACL on the given scope. -} -\details{ -Users must have the \code{MANAGE} permission to invoke this API. Throws -\code{RESOURCE_DOES_NOT_EXIST} if no such secret scope, principal, or ACL exists. -Throws \code{PERMISSION_DENIED} if the user does not have permission to make this -API call. 
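Although this change removes the standalone camelCase pages for the secrets service, the camelCase names are kept as aliases of the new snake_case functions (see the \alias entries in the added pages above), so a call like the following should still work; scope and principal are placeholders:

client <- DatabricksClient()
# Requires MANAGE permission on the scope.
secretsDeleteAcl(client, scope = "my-scope", principal = "data-scientists")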
-} diff --git a/man/secretsDeleteScope.Rd b/man/secretsDeleteScope.Rd deleted file mode 100644 index 24c9f884..00000000 --- a/man/secretsDeleteScope.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/secrets.R -\name{secretsDeleteScope} -\alias{secretsDeleteScope} -\title{Delete a secret scope.} -\usage{ -secretsDeleteScope(client, scope) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{scope}{Required. Name of the scope to delete.} -} -\description{ -Deletes a secret scope. -} -\details{ -Throws \code{RESOURCE_DOES_NOT_EXIST} if the scope does not exist. Throws -\code{PERMISSION_DENIED} if the user does not have permission to make this API -call. -} diff --git a/man/secretsDeleteSecret.Rd b/man/secretsDeleteSecret.Rd deleted file mode 100644 index 8e3b8d6c..00000000 --- a/man/secretsDeleteSecret.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/secrets.R -\name{secretsDeleteSecret} -\alias{secretsDeleteSecret} -\title{Delete a secret.} -\usage{ -secretsDeleteSecret(client, scope, key) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{scope}{Required. The name of the scope that contains the secret to delete.} - -\item{key}{Required. Name of the secret to delete.} -} -\description{ -Deletes the secret stored in this secret scope. You must have \code{WRITE} or -\code{MANAGE} permission on the secret scope. -} -\details{ -Throws \code{RESOURCE_DOES_NOT_EXIST} if no such secret scope or secret exists. -Throws \code{PERMISSION_DENIED} if the user does not have permission to make this -API call. -} diff --git a/man/secretsGetAcl.Rd b/man/secretsGetAcl.Rd deleted file mode 100644 index d009c700..00000000 --- a/man/secretsGetAcl.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/secrets.R -\name{secretsGetAcl} -\alias{secretsGetAcl} -\title{Get secret ACL details.} -\usage{ -secretsGetAcl(client, scope, principal) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{scope}{Required. The name of the scope to fetch ACL information from.} - -\item{principal}{Required. The principal to fetch ACL information for.} -} -\description{ -Gets the details about the given ACL, such as the group and permission. Users -must have the \code{MANAGE} permission to invoke this API. -} -\details{ -Throws \code{RESOURCE_DOES_NOT_EXIST} if no such secret scope exists. Throws -\code{PERMISSION_DENIED} if the user does not have permission to make this API -call. -} diff --git a/man/secretsGetSecret.Rd b/man/secretsGetSecret.Rd deleted file mode 100644 index 2a1463cd..00000000 --- a/man/secretsGetSecret.Rd +++ /dev/null @@ -1,30 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/secrets.R -\name{secretsGetSecret} -\alias{secretsGetSecret} -\title{Get a secret.} -\usage{ -secretsGetSecret(client, scope, key) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{scope}{Required. The name of the scope to fetch secret information from.} - -\item{key}{Required. The key to fetch secret for.} -} -\description{ -Gets the bytes representation of a secret value for the specified scope and -key. -} -\details{ -Users need the READ permission to make this call. - -Note that the secret value returned is in bytes. 
The interpretation of the -bytes is determined by the caller in DBUtils and the type the data is decoded -into. - -Throws \code{PERMISSION_DENIED} if the user does not have permission to make -this API call. Throws \code{RESOURCE_DOES_NOT_EXIST} if no such secret or secret -scope exists. -} diff --git a/man/secretsListAcls.Rd b/man/secretsListAcls.Rd deleted file mode 100644 index 5b8f4e6c..00000000 --- a/man/secretsListAcls.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/secrets.R -\name{secretsListAcls} -\alias{secretsListAcls} -\title{Lists ACLs.} -\usage{ -secretsListAcls(client, scope) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{scope}{Required. The name of the scope to fetch ACL information from.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -List the ACLs for a given secret scope. Users must have the \code{MANAGE} -permission to invoke this API. -} -\details{ -Throws \code{RESOURCE_DOES_NOT_EXIST} if no such secret scope exists. Throws -\code{PERMISSION_DENIED} if the user does not have permission to make this API -call. -} diff --git a/man/secretsListScopes.Rd b/man/secretsListScopes.Rd deleted file mode 100644 index 6c4ddde2..00000000 --- a/man/secretsListScopes.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/secrets.R -\name{secretsListScopes} -\alias{secretsListScopes} -\title{List all scopes.} -\usage{ -secretsListScopes(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Lists all secret scopes available in the workspace. -} -\details{ -Throws \code{PERMISSION_DENIED} if the user does not have permission to make this -API call. -} diff --git a/man/secretsListSecrets.Rd b/man/secretsListSecrets.Rd deleted file mode 100644 index 6c303fed..00000000 --- a/man/secretsListSecrets.Rd +++ /dev/null @@ -1,27 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/secrets.R -\name{secretsListSecrets} -\alias{secretsListSecrets} -\title{List secret keys.} -\usage{ -secretsListSecrets(client, scope) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{scope}{Required. The name of the scope to list secrets within.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Lists the secret keys that are stored at this scope. This is a metadata-only -operation; secret data cannot be retrieved using this API. Users need the -READ permission to make this call. -} -\details{ -The lastUpdatedTimestamp returned is in milliseconds since epoch. Throws -\code{RESOURCE_DOES_NOT_EXIST} if no such secret scope exists. Throws -\code{PERMISSION_DENIED} if the user does not have permission to make this API -call. -} diff --git a/man/secretsPutAcl.Rd b/man/secretsPutAcl.Rd deleted file mode 100644 index e8210501..00000000 --- a/man/secretsPutAcl.Rd +++ /dev/null @@ -1,47 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/secrets.R -\name{secretsPutAcl} -\alias{secretsPutAcl} -\title{Create/update an ACL.} -\usage{ -secretsPutAcl(client, scope, principal, permission) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{scope}{Required. The name of the scope to apply permissions to.} - -\item{principal}{Required. 
The principal in which the permission is applied.} - -\item{permission}{Required. The permission level applied to the principal.} -} -\description{ -Creates or overwrites the Access Control List (ACL) associated with the given -principal (user or group) on the specified scope point. -} -\details{ -In general, a user or group will use the most powerful permission available -to them, and permissions are ordered as follows: -\itemize{ -\item \code{MANAGE} - Allowed to change ACLs, and read and write to this secret scope. -\item \code{WRITE} - Allowed to read and write to this secret scope. * \code{READ} - -Allowed to read this secret scope and list what secrets are available. -} - -Note that in general, secret values can only be read from within a command on -a cluster (for example, through a notebook). There is no API to read the -actual secret value material outside of a cluster. However, the user's -permission will be applied based on who is executing the command, and they -must have at least READ permission. - -Users must have the \code{MANAGE} permission to invoke this API. - -The principal is a user or group name corresponding to an existing Databricks -principal to be granted or revoked access. - -Throws \code{RESOURCE_DOES_NOT_EXIST} if no such secret scope exists. Throws -\code{RESOURCE_ALREADY_EXISTS} if a permission for the principal already exists. -Throws \code{INVALID_PARAMETER_VALUE} if the permission or principal is invalid. -Throws \code{PERMISSION_DENIED} if the user does not have permission to make this -API call. -} diff --git a/man/secretsPutSecret.Rd b/man/secretsPutSecret.Rd deleted file mode 100644 index 16e42ca0..00000000 --- a/man/secretsPutSecret.Rd +++ /dev/null @@ -1,41 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/secrets.R -\name{secretsPutSecret} -\alias{secretsPutSecret} -\title{Add a secret.} -\usage{ -secretsPutSecret(client, scope, key, bytes_value = NULL, string_value = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{scope}{Required. The name of the scope to which the secret will be associated with.} - -\item{key}{Required. A unique name to identify the secret.} - -\item{bytes_value}{If specified, value will be stored as bytes.} - -\item{string_value}{If specified, note that the value will be stored in UTF-8 (MB4) form.} -} -\description{ -Inserts a secret under the provided scope with the given name. If a secret -already exists with the same name, this command overwrites the existing -secret's value. The server encrypts the secret using the secret scope's -encryption settings before storing it. -} -\details{ -You must have \code{WRITE} or \code{MANAGE} permission on the secret scope. The secret -key must consist of alphanumeric characters, dashes, underscores, and -periods, and cannot exceed 128 characters. The maximum allowed secret value -size is 128 KB. The maximum number of secrets in a given scope is 1000. - -The input fields 'string_value' or 'bytes_value' specify the type of the -secret, which will determine the value returned when the secret value is -requested. Exactly one must be specified. - -Throws \code{RESOURCE_DOES_NOT_EXIST} if no such secret scope exists. Throws -\code{RESOURCE_LIMIT_EXCEEDED} if maximum number of secrets in scope is exceeded. -Throws \code{INVALID_PARAMETER_VALUE} if the key name or value length is invalid. -Throws \code{PERMISSION_DENIED} if the user does not have permission to make this -API call. 
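A sketch of granting the weakest of the three permission levels described above; the names are placeholders, and the camelCase entry point is assumed to survive as an alias of its snake_case replacement:

client <- DatabricksClient()
secretsPutAcl(client, scope = "my-scope",
              principal = "data-scientists", permission = "READ")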
-} diff --git a/man/servicePrincipalsCreate.Rd b/man/servicePrincipalsCreate.Rd deleted file mode 100644 index 79664d77..00000000 --- a/man/servicePrincipalsCreate.Rd +++ /dev/null @@ -1,43 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/service_principals.R -\name{servicePrincipalsCreate} -\alias{servicePrincipalsCreate} -\title{Create a service principal.} -\usage{ -servicePrincipalsCreate( - client, - active = NULL, - application_id = NULL, - display_name = NULL, - entitlements = NULL, - external_id = NULL, - groups = NULL, - id = NULL, - roles = NULL, - schemas = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{active}{If this user is active.} - -\item{application_id}{UUID relating to the service principal.} - -\item{display_name}{String that represents a concatenation of given and family names.} - -\item{entitlements}{Entitlements assigned to the service principal.} - -\item{external_id}{This field has no description yet.} - -\item{groups}{This field has no description yet.} - -\item{id}{Databricks service principal ID.} - -\item{roles}{Corresponds to AWS instance profile/arn role.} - -\item{schemas}{The schema of the List response.} -} -\description{ -Creates a new service principal in the Databricks workspace. -} diff --git a/man/servicePrincipalsDelete.Rd b/man/servicePrincipalsDelete.Rd deleted file mode 100644 index 7a3e9ca7..00000000 --- a/man/servicePrincipalsDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/service_principals.R -\name{servicePrincipalsDelete} -\alias{servicePrincipalsDelete} -\title{Delete a service principal.} -\usage{ -servicePrincipalsDelete(client, id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Unique ID for a service principal in the Databricks workspace.} -} -\description{ -Delete a single service principal in the Databricks workspace. -} diff --git a/man/servicePrincipalsGet.Rd b/man/servicePrincipalsGet.Rd deleted file mode 100644 index 0de29900..00000000 --- a/man/servicePrincipalsGet.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/service_principals.R -\name{servicePrincipalsGet} -\alias{servicePrincipalsGet} -\title{Get service principal details.} -\usage{ -servicePrincipalsGet(client, id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Unique ID for a service principal in the Databricks workspace.} -} -\description{ -Gets the details for a single service principal define in the Databricks -workspace. -} diff --git a/man/servicePrincipalsList.Rd b/man/servicePrincipalsList.Rd deleted file mode 100644 index cda2953d..00000000 --- a/man/servicePrincipalsList.Rd +++ /dev/null @@ -1,40 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/service_principals.R -\name{servicePrincipalsList} -\alias{servicePrincipalsList} -\title{List service principals.} -\usage{ -servicePrincipalsList( - client, - attributes = NULL, - count = NULL, - excluded_attributes = NULL, - filter = NULL, - sort_by = NULL, - sort_order = NULL, - start_index = NULL -) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{attributes}{Comma-separated list of attributes to return in response.} - -\item{count}{Desired number of results per page.} - -\item{excluded_attributes}{Comma-separated list of attributes to exclude in response.} - -\item{filter}{Query by which the results have to be filtered.} - -\item{sort_by}{Attribute to sort the results.} - -\item{sort_order}{The order to sort the results.} - -\item{start_index}{Specifies the index of the first result.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets the set of service principals associated with a Databricks workspace. -} diff --git a/man/servicePrincipalsPatch.Rd b/man/servicePrincipalsPatch.Rd deleted file mode 100644 index b035a979..00000000 --- a/man/servicePrincipalsPatch.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/service_principals.R -\name{servicePrincipalsPatch} -\alias{servicePrincipalsPatch} -\title{Update service principal details.} -\usage{ -servicePrincipalsPatch(client, id, operations = NULL, schemas = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Unique ID for a service principal in the Databricks workspace.} - -\item{operations}{This field has no description yet.} - -\item{schemas}{The schema of the patch request.} -} -\description{ -Partially updates the details of a single service principal in the Databricks -workspace. -} diff --git a/man/servicePrincipalsUpdate.Rd b/man/servicePrincipalsUpdate.Rd deleted file mode 100644 index 64d73133..00000000 --- a/man/servicePrincipalsUpdate.Rd +++ /dev/null @@ -1,46 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/service_principals.R -\name{servicePrincipalsUpdate} -\alias{servicePrincipalsUpdate} -\title{Replace service principal.} -\usage{ -servicePrincipalsUpdate( - client, - id, - active = NULL, - application_id = NULL, - display_name = NULL, - entitlements = NULL, - external_id = NULL, - groups = NULL, - roles = NULL, - schemas = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Databricks service principal ID.} - -\item{active}{If this user is active.} - -\item{application_id}{UUID relating to the service principal.} - -\item{display_name}{String that represents a concatenation of given and family names.} - -\item{entitlements}{Entitlements assigned to the service principal.} - -\item{external_id}{This field has no description yet.} - -\item{groups}{This field has no description yet.} - -\item{roles}{Corresponds to AWS instance profile/arn role.} - -\item{schemas}{The schema of the List response.} -} -\description{ -Updates the details of a single service principal. -} -\details{ -This action replaces the existing service principal with the same name. -} diff --git a/man/servingEndpointsBuildLogs.Rd b/man/servingEndpointsBuildLogs.Rd deleted file mode 100644 index 0a466479..00000000 --- a/man/servingEndpointsBuildLogs.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsBuildLogs} -\alias{servingEndpointsBuildLogs} -\title{Get build logs for a served model.} -\usage{ -servingEndpointsBuildLogs(client, name, served_model_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. 
The name of the serving endpoint that the served model belongs to.} - -\item{served_model_name}{Required. The name of the served model that build logs will be retrieved for.} -} -\description{ -Retrieves the build logs associated with the provided served model. -} diff --git a/man/servingEndpointsCreate.Rd b/man/servingEndpointsCreate.Rd deleted file mode 100644 index 243fec96..00000000 --- a/man/servingEndpointsCreate.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsCreate} -\alias{servingEndpointsCreate} -\title{Create a new serving endpoint.} -\usage{ -servingEndpointsCreate(client, name, config, rate_limits = NULL, tags = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the serving endpoint.} - -\item{config}{Required. The core config of the serving endpoint.} - -\item{rate_limits}{Rate limits to be applied to the serving endpoint.} - -\item{tags}{Tags to be attached to the serving endpoint and automatically propagated to billing logs.} -} -\description{ -Create a new serving endpoint. -} diff --git a/man/servingEndpointsCreateAndWait.Rd b/man/servingEndpointsCreateAndWait.Rd deleted file mode 100644 index c9c92574..00000000 --- a/man/servingEndpointsCreateAndWait.Rd +++ /dev/null @@ -1,37 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsCreateAndWait} -\alias{servingEndpointsCreateAndWait} -\title{Create a new serving endpoint.} -\usage{ -servingEndpointsCreateAndWait( - client, - name, - config, - rate_limits = NULL, - tags = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the serving endpoint.} - -\item{config}{Required. The core config of the serving endpoint.} - -\item{rate_limits}{Rate limits to be applied to the serving endpoint.} - -\item{tags}{Tags to be attached to the serving endpoint and automatically propagated to billing logs.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Serving Endpoints on Databricks reach -NOT_UPDATING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Serving Endpoints is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} diff --git a/man/servingEndpointsDelete.Rd b/man/servingEndpointsDelete.Rd deleted file mode 100644 index ea9d936c..00000000 --- a/man/servingEndpointsDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsDelete} -\alias{servingEndpointsDelete} -\title{Delete a serving endpoint.} -\usage{ -servingEndpointsDelete(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the serving endpoint.} -} -\description{ -Delete a serving endpoint. 
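Taken together, the build-log and delete calls above can be sketched as follows (illustrative only; endpoint and served-model names are placeholders, and the camelCase names are assumed to remain exported as aliases):

client <- DatabricksClient()
logs <- servingEndpointsBuildLogs(client, name = "my-endpoint",
                                  served_model_name = "my-model")
servingEndpointsDelete(client, name = "my-endpoint")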
-} diff --git a/man/servingEndpointsExportMetrics.Rd b/man/servingEndpointsExportMetrics.Rd deleted file mode 100644 index 013b2fd2..00000000 --- a/man/servingEndpointsExportMetrics.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsExportMetrics} -\alias{servingEndpointsExportMetrics} -\title{Get metrics of a serving endpoint.} -\usage{ -servingEndpointsExportMetrics(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the serving endpoint to retrieve metrics for.} -} -\description{ -Retrieves the metrics associated with the provided serving endpoint in either -Prometheus or OpenMetrics exposition format. -} diff --git a/man/servingEndpointsGet.Rd b/man/servingEndpointsGet.Rd deleted file mode 100644 index f0d5702f..00000000 --- a/man/servingEndpointsGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsGet} -\alias{servingEndpointsGet} -\title{Get a single serving endpoint.} -\usage{ -servingEndpointsGet(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the serving endpoint.} -} -\description{ -Retrieves the details for a single serving endpoint. -} diff --git a/man/servingEndpointsGetPermissionLevels.Rd b/man/servingEndpointsGetPermissionLevels.Rd deleted file mode 100644 index f54f0a2a..00000000 --- a/man/servingEndpointsGetPermissionLevels.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsGetPermissionLevels} -\alias{servingEndpointsGetPermissionLevels} -\title{Get serving endpoint permission levels.} -\usage{ -servingEndpointsGetPermissionLevels(client, serving_endpoint_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{serving_endpoint_id}{Required. The serving endpoint for which to get or manage permissions.} -} -\description{ -Gets the permission levels that a user can have on an object. -} diff --git a/man/servingEndpointsGetPermissions.Rd b/man/servingEndpointsGetPermissions.Rd deleted file mode 100644 index 11448db3..00000000 --- a/man/servingEndpointsGetPermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsGetPermissions} -\alias{servingEndpointsGetPermissions} -\title{Get serving endpoint permissions.} -\usage{ -servingEndpointsGetPermissions(client, serving_endpoint_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{serving_endpoint_id}{Required. The serving endpoint for which to get or manage permissions.} -} -\description{ -Gets the permissions of a serving endpoint. Serving endpoints can inherit -permissions from their root object. -} diff --git a/man/servingEndpointsList.Rd b/man/servingEndpointsList.Rd deleted file mode 100644 index 7c3d4d73..00000000 --- a/man/servingEndpointsList.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsList} -\alias{servingEndpointsList} -\title{Get all serving endpoints.} -\usage{ -servingEndpointsList(client) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Get all serving endpoints. -} diff --git a/man/servingEndpointsLogs.Rd b/man/servingEndpointsLogs.Rd deleted file mode 100644 index 7b0ba171..00000000 --- a/man/servingEndpointsLogs.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsLogs} -\alias{servingEndpointsLogs} -\title{Get the latest logs for a served model.} -\usage{ -servingEndpointsLogs(client, name, served_model_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the serving endpoint that the served model belongs to.} - -\item{served_model_name}{Required. The name of the served model that logs will be retrieved for.} -} -\description{ -Retrieves the service logs associated with the provided served model. -} diff --git a/man/servingEndpointsPatch.Rd b/man/servingEndpointsPatch.Rd deleted file mode 100644 index b5bb6752..00000000 --- a/man/servingEndpointsPatch.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsPatch} -\alias{servingEndpointsPatch} -\title{Update tags of a serving endpoint.} -\usage{ -servingEndpointsPatch(client, name, add_tags = NULL, delete_tags = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the serving endpoint who's tags to patch.} - -\item{add_tags}{List of endpoint tags to add.} - -\item{delete_tags}{List of tag keys to delete.} -} -\description{ -Used to batch add and delete tags from a serving endpoint with a single API -call. -} diff --git a/man/servingEndpointsPut.Rd b/man/servingEndpointsPut.Rd deleted file mode 100644 index 50f517ec..00000000 --- a/man/servingEndpointsPut.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsPut} -\alias{servingEndpointsPut} -\title{Update rate limits of a serving endpoint.} -\usage{ -servingEndpointsPut(client, name, rate_limits = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the serving endpoint whose rate limits are being updated.} - -\item{rate_limits}{The list of endpoint rate limits.} -} -\description{ -Used to update the rate limits of a serving endpoint. NOTE: only external and -foundation model endpoints are supported as of now. -} diff --git a/man/servingEndpointsQuery.Rd b/man/servingEndpointsQuery.Rd deleted file mode 100644 index 8080ea19..00000000 --- a/man/servingEndpointsQuery.Rd +++ /dev/null @@ -1,58 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsQuery} -\alias{servingEndpointsQuery} -\title{Query a serving endpoint.} -\usage{ -servingEndpointsQuery( - client, - name, - dataframe_records = NULL, - dataframe_split = NULL, - extra_params = NULL, - input = NULL, - inputs = NULL, - instances = NULL, - max_tokens = NULL, - messages = NULL, - n = NULL, - prompt = NULL, - stop = NULL, - stream = NULL, - temperature = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. 
The name of the serving endpoint.} - -\item{dataframe_records}{Pandas Dataframe input in the records orientation.} - -\item{dataframe_split}{Pandas Dataframe input in the split orientation.} - -\item{extra_params}{The extra parameters field used ONLY for \strong{completions, chat,} and \strong{embeddings external & foundation model} serving endpoints.} - -\item{input}{The input string (or array of strings) field used ONLY for \strong{embeddings external & foundation model} serving endpoints and is the only field (along with extra_params if needed) used by embeddings queries.} - -\item{inputs}{Tensor-based input in columnar format.} - -\item{instances}{Tensor-based input in row format.} - -\item{max_tokens}{The max tokens field used ONLY for \strong{completions} and \strong{chat external & foundation model} serving endpoints.} - -\item{messages}{The messages field used ONLY for \strong{chat external & foundation model} serving endpoints.} - -\item{n}{The n (number of candidates) field used ONLY for \strong{completions} and \strong{chat external & foundation model} serving endpoints.} - -\item{prompt}{The prompt string (or array of strings) field used ONLY for \strong{completions external & foundation model} serving endpoints and should only be used with other completions query fields.} - -\item{stop}{The stop sequences field used ONLY for \strong{completions} and \strong{chat external & foundation model} serving endpoints.} - -\item{stream}{The stream field used ONLY for \strong{completions} and \strong{chat external & foundation model} serving endpoints.} - -\item{temperature}{The temperature field used ONLY for \strong{completions} and \strong{chat external & foundation model} serving endpoints.} -} -\description{ -Query a serving endpoint. -} diff --git a/man/servingEndpointsSetPermissions.Rd b/man/servingEndpointsSetPermissions.Rd deleted file mode 100644 index 34440150..00000000 --- a/man/servingEndpointsSetPermissions.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsSetPermissions} -\alias{servingEndpointsSetPermissions} -\title{Set serving endpoint permissions.} -\usage{ -servingEndpointsSetPermissions( - client, - serving_endpoint_id, - access_control_list = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{serving_endpoint_id}{Required. The serving endpoint for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets permissions on a serving endpoint. Serving endpoints can inherit -permissions from their root object. -} diff --git a/man/servingEndpointsUpdateConfig.Rd b/man/servingEndpointsUpdateConfig.Rd deleted file mode 100644 index ace0bb6d..00000000 --- a/man/servingEndpointsUpdateConfig.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsUpdateConfig} -\alias{servingEndpointsUpdateConfig} -\title{Update config of a serving endpoint.} -\usage{ -servingEndpointsUpdateConfig( - client, - name, - auto_capture_config = NULL, - served_entities = NULL, - served_models = NULL, - traffic_config = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. 
The name of the serving endpoint to update.} - -\item{auto_capture_config}{Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.} - -\item{served_entities}{A list of served entities for the endpoint to serve.} - -\item{served_models}{(Deprecated, use served_entities instead) A list of served models for the endpoint to serve.} - -\item{traffic_config}{The traffic config defining how invocations to the serving endpoint should be routed.} -} -\description{ -Updates any combination of the serving endpoint's served entities, the -compute configuration of those served entities, and the endpoint's traffic -config. An endpoint that already has an update in progress can not be updated -until the current update completes or fails. -} diff --git a/man/servingEndpointsUpdateConfigAndWait.Rd b/man/servingEndpointsUpdateConfigAndWait.Rd deleted file mode 100644 index c65abd5e..00000000 --- a/man/servingEndpointsUpdateConfigAndWait.Rd +++ /dev/null @@ -1,46 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsUpdateConfigAndWait} -\alias{servingEndpointsUpdateConfigAndWait} -\title{Update config of a serving endpoint.} -\usage{ -servingEndpointsUpdateConfigAndWait( - client, - name, - auto_capture_config = NULL, - served_entities = NULL, - served_models = NULL, - traffic_config = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the serving endpoint to update.} - -\item{auto_capture_config}{Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.} - -\item{served_entities}{A list of served entities for the endpoint to serve.} - -\item{served_models}{(Deprecated, use served_entities instead) A list of served models for the endpoint to serve.} - -\item{traffic_config}{The traffic config defining how invocations to the serving endpoint should be routed.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Serving Endpoints on Databricks reach -NOT_UPDATING state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Serving Endpoints is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Updates any combination of the serving endpoint's served entities, the -compute configuration of those served entities, and the endpoint's traffic -config. An endpoint that already has an update in progress can not be updated -until the current update completes or fails. -} diff --git a/man/servingEndpointsUpdatePermissions.Rd b/man/servingEndpointsUpdatePermissions.Rd deleted file mode 100644 index 54f631af..00000000 --- a/man/servingEndpointsUpdatePermissions.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/serving_endpoints.R -\name{servingEndpointsUpdatePermissions} -\alias{servingEndpointsUpdatePermissions} -\title{Update serving endpoint permissions.} -\usage{ -servingEndpointsUpdatePermissions( - client, - serving_endpoint_id, - access_control_list = NULL -) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{serving_endpoint_id}{Required. The serving endpoint for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Updates the permissions on a serving endpoint. Serving endpoints can inherit -permissions from their root object. -} diff --git a/man/set_cluster_permissions.Rd b/man/set_cluster_permissions.Rd new file mode 100644 index 00000000..fdc525d6 --- /dev/null +++ b/man/set_cluster_permissions.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{set_cluster_permissions} +\alias{set_cluster_permissions} +\alias{clustersSetPermissions} +\title{Set cluster permissions.} +\usage{ +set_cluster_permissions(client, cluster_id, access_control_list = NULL) + +clustersSetPermissions(client, cluster_id, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets permissions on a cluster. Clusters can inherit permissions from their +root object. +} diff --git a/man/set_cluster_policy_permissions.Rd b/man/set_cluster_policy_permissions.Rd new file mode 100644 index 00000000..91ccf096 --- /dev/null +++ b/man/set_cluster_policy_permissions.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/cluster_policies.R +\name{set_cluster_policy_permissions} +\alias{set_cluster_policy_permissions} +\alias{clusterPoliciesSetPermissions} +\title{Set cluster policy permissions.} +\usage{ +set_cluster_policy_permissions( + client, + cluster_policy_id, + access_control_list = NULL +) + +clusterPoliciesSetPermissions( + client, + cluster_policy_id, + access_control_list = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_policy_id}{Required. The cluster policy for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets permissions on a cluster policy. Cluster policies can inherit +permissions from their root object. +} diff --git a/man/set_dbsql_permission.Rd b/man/set_dbsql_permission.Rd new file mode 100644 index 00000000..a2ff63c0 --- /dev/null +++ b/man/set_dbsql_permission.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dbsql_permissions.R +\name{set_dbsql_permission} +\alias{set_dbsql_permission} +\alias{dbsqlPermissionsSet} +\title{Set object ACL.} +\usage{ +set_dbsql_permission( + client, + object_type, + object_id, + access_control_list = NULL +) + +dbsqlPermissionsSet(client, object_type, object_id, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{object_type}{Required. The type of object permission to set.} + +\item{object_id}{Required. Object ID.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets the access control list (ACL) for a specified object. This operation +completely rewrites the ACL.
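Because the ACL is rewritten wholesale, callers should send the complete desired list on every call. A sketch with a hypothetical object type and ID; the shape of the ACL entries is an assumption, since \code{access_control_list} carries no description in this diff:

client <- DatabricksClient()
set_dbsql_permission(client,
  object_type = "queries",   # hypothetical object type
  object_id   = "12345",     # hypothetical object ID
  access_control_list = list(
    # assumed entry shape: a principal plus a permission level
    list(user_name = "someone@example.com", permission_level = "CAN_RUN")
  ))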
+} diff --git a/man/set_experiment_permissions.Rd b/man/set_experiment_permissions.Rd new file mode 100644 index 00000000..1ad8ff8b --- /dev/null +++ b/man/set_experiment_permissions.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{set_experiment_permissions} +\alias{set_experiment_permissions} +\alias{experimentsSetPermissions} +\title{Set experiment permissions.} +\usage{ +set_experiment_permissions(client, experiment_id, access_control_list = NULL) + +experimentsSetPermissions(client, experiment_id, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{experiment_id}{Required. The experiment for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets permissions on an experiment. Experiments can inherit permissions from +their root object. +} diff --git a/man/set_experiment_tag.Rd b/man/set_experiment_tag.Rd new file mode 100644 index 00000000..5c023c1d --- /dev/null +++ b/man/set_experiment_tag.Rd @@ -0,0 +1,36 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{set_experiment_tag} +\alias{set_experiment_tag} +\alias{experimentsSetExperimentTag} +\alias{experimentsSetTag} +\title{Set a tag.} +\usage{ +set_experiment_tag(client, key, value, run_id = NULL, run_uuid = NULL) + +experimentsSetExperimentTag(client, experiment_id, key, value) + +set_experiment_tag(client, key, value, run_id = NULL, run_uuid = NULL) + +experimentsSetTag(client, key, value, run_id = NULL, run_uuid = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{key}{Required. Name of the tag.} + +\item{value}{Required. String value of the tag being logged.} + +\item{run_id}{ID of the run under which to log the tag.} + +\item{run_uuid}{Deprecated, use run_id instead. ID of the run under which to log the tag.} + +\item{experiment_id}{Required. ID of the experiment under which to log the tag.} +} +\description{ +Sets a tag on an experiment. Experiment tags are metadata that can be +updated. + +Sets a tag on a run. Tags are run metadata that can be updated during a run +and after a run completes. +} diff --git a/man/set_instance_pool_permissions.Rd b/man/set_instance_pool_permissions.Rd new file mode 100644 index 00000000..38e4fc27 --- /dev/null +++ b/man/set_instance_pool_permissions.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/instance_pools.R +\name{set_instance_pool_permissions} +\alias{set_instance_pool_permissions} +\alias{instancePoolsSetPermissions} +\title{Set instance pool permissions.} +\usage{ +set_instance_pool_permissions( + client, + instance_pool_id, + access_control_list = NULL +) + +instancePoolsSetPermissions( + client, + instance_pool_id, + access_control_list = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{instance_pool_id}{Required. The instance pool for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets permissions on an instance pool. Instance pools can inherit permissions +from their root object. 
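Note that the merged \code{set_experiment_tag} page above covers two operations: tagging a run (the \code{run_id} form) and tagging an experiment (the \code{experiment_id} form, reachable via the \code{experimentsSetExperimentTag} alias). An illustrative sketch with placeholder IDs:

client <- DatabricksClient()
# Tag a run.
set_experiment_tag(client, key = "stage", value = "prod", run_id = "abc123")
# Tag an experiment via the experiment-level alias.
experimentsSetExperimentTag(client, experiment_id = "42",
                            key = "team", value = "mlops")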
+} diff --git a/man/set_job_permissions.Rd b/man/set_job_permissions.Rd new file mode 100644 index 00000000..88d73e01 --- /dev/null +++ b/man/set_job_permissions.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{set_job_permissions} +\alias{set_job_permissions} +\alias{jobsSetPermissions} +\title{Set job permissions.} +\usage{ +set_job_permissions(client, job_id, access_control_list = NULL) + +jobsSetPermissions(client, job_id, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{job_id}{Required. The job for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets permissions on a job. Jobs can inherit permissions from their root +object. +} diff --git a/man/set_model_permissions.Rd b/man/set_model_permissions.Rd new file mode 100644 index 00000000..33854f4e --- /dev/null +++ b/man/set_model_permissions.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{set_model_permissions} +\alias{set_model_permissions} +\alias{modelRegistrySetPermissions} +\title{Set registered model permissions.} +\usage{ +set_model_permissions(client, registered_model_id, access_control_list = NULL) + +modelRegistrySetPermissions( + client, + registered_model_id, + access_control_list = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{registered_model_id}{Required. The registered model for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets permissions on a registered model. Registered models can inherit +permissions from their root object. +} diff --git a/man/set_model_tag.Rd b/man/set_model_tag.Rd new file mode 100644 index 00000000..4631fbdc --- /dev/null +++ b/man/set_model_tag.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{set_model_tag} +\alias{set_model_tag} +\alias{modelRegistrySetModelTag} +\title{Set a tag.} +\usage{ +set_model_tag(client, name, key, value) + +modelRegistrySetModelTag(client, name, key, value) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Unique name of the model.} + +\item{key}{Required. Name of the tag.} + +\item{value}{Required. String value of the tag being logged.} +} +\description{ +Sets a tag on a registered model. +} diff --git a/man/set_model_version_tag.Rd b/man/set_model_version_tag.Rd new file mode 100644 index 00000000..ad139cd8 --- /dev/null +++ b/man/set_model_version_tag.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{set_model_version_tag} +\alias{set_model_version_tag} +\alias{modelRegistrySetModelVersionTag} +\title{Set a version tag.} +\usage{ +set_model_version_tag(client, name, version, key, value) + +modelRegistrySetModelVersionTag(client, name, version, key, value) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Unique name of the model.} + +\item{version}{Required. Model version number.} + +\item{key}{Required. Name of the tag.} + +\item{value}{Required. String value of the tag being logged.} +} +\description{ +Sets a model version tag. 
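A one-line sketch of the version-tag call above; the model name, version, and tag are placeholders:

client <- DatabricksClient()
set_model_version_tag(client, name = "my-model", version = "2",
                      key = "validated", value = "true")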
+} diff --git a/man/set_notebook_permissions.Rd b/man/set_notebook_permissions.Rd new file mode 100644 index 00000000..3757d33b --- /dev/null +++ b/man/set_notebook_permissions.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{set_notebook_permissions} +\alias{set_notebook_permissions} +\alias{workspaceSetPermissions} +\title{Set workspace object permissions.} +\usage{ +set_notebook_permissions( + client, + workspace_object_type, + workspace_object_id, + access_control_list = NULL +) + +workspaceSetPermissions( + client, + workspace_object_type, + workspace_object_id, + access_control_list = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{workspace_object_type}{Required. The workspace object type for which to get or manage permissions.} + +\item{workspace_object_id}{Required. The workspace object for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets permissions on a workspace object. Workspace objects can inherit +permissions from their parent objects or root object. +} diff --git a/man/set_permission.Rd b/man/set_permission.Rd new file mode 100644 index 00000000..406d37b0 --- /dev/null +++ b/man/set_permission.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/permissions.R +\name{set_permission} +\alias{set_permission} +\alias{permissionsSet} +\title{Set object permissions.} +\usage{ +set_permission( + client, + request_object_type, + request_object_id, + access_control_list = NULL +) + +permissionsSet( + client, + request_object_type, + request_object_id, + access_control_list = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{request_object_type}{Required. The type of the request object.} + +\item{request_object_id}{Required. The id of the request object.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets permissions on an object. Objects can inherit permissions from their +parent objects or root object. +} diff --git a/man/set_pipeline_permissions.Rd b/man/set_pipeline_permissions.Rd new file mode 100644 index 00000000..2981900f --- /dev/null +++ b/man/set_pipeline_permissions.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{set_pipeline_permissions} +\alias{set_pipeline_permissions} +\alias{pipelinesSetPermissions} +\title{Set pipeline permissions.} +\usage{ +set_pipeline_permissions(client, pipeline_id, access_control_list = NULL) + +pipelinesSetPermissions(client, pipeline_id, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{pipeline_id}{Required. The pipeline for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets permissions on a pipeline. Pipelines can inherit permissions from their +root object. 
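The generic \code{set_permission} documented above follows the same pattern as the typed setters; in this sketch the object type, ID, and ACL entry shape are all assumptions, as the ACL field is undocumented in this diff:

client <- DatabricksClient()
set_permission(client,
  request_object_type = "jobs",  # hypothetical object type
  request_object_id   = "123",   # hypothetical object ID
  access_control_list = list(
    # assumed entry shape
    list(group_name = "eng", permission_level = "CAN_MANAGE")
  ))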
+} diff --git a/man/set_registered_model_alias.Rd b/man/set_registered_model_alias.Rd new file mode 100644 index 00000000..9714ec02 --- /dev/null +++ b/man/set_registered_model_alias.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/registered_models.R +\name{set_registered_model_alias} +\alias{set_registered_model_alias} +\alias{registeredModelsSetAlias} +\title{Set a Registered Model Alias.} +\usage{ +set_registered_model_alias(client, full_name, alias, version_num) + +registeredModelsSetAlias(client, full_name, alias, version_num) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the registered model.} + +\item{alias}{Required. The name of the alias.} + +\item{version_num}{Required. The version number of the model version to which the alias points.} +} +\description{ +Set an alias on the specified registered model. +} +\details{ +The caller must be a metastore admin or an owner of the registered model. For +the latter case, the caller must also be the owner or have the +\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} +privilege on the parent schema. +} diff --git a/man/set_repo_permissions.Rd b/man/set_repo_permissions.Rd new file mode 100644 index 00000000..784342b3 --- /dev/null +++ b/man/set_repo_permissions.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/repos.R +\name{set_repo_permissions} +\alias{set_repo_permissions} +\alias{reposSetPermissions} +\title{Set repo permissions.} +\usage{ +set_repo_permissions(client, repo_id, access_control_list = NULL) + +reposSetPermissions(client, repo_id, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{repo_id}{Required. The repo for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets permissions on a repo. Repos can inherit permissions from their root +object. +} diff --git a/man/set_serving_endpoint_permissions.Rd b/man/set_serving_endpoint_permissions.Rd new file mode 100644 index 00000000..ee9d937c --- /dev/null +++ b/man/set_serving_endpoint_permissions.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{set_serving_endpoint_permissions} +\alias{set_serving_endpoint_permissions} +\alias{servingEndpointsSetPermissions} +\title{Set serving endpoint permissions.} +\usage{ +set_serving_endpoint_permissions( + client, + serving_endpoint_id, + access_control_list = NULL +) + +servingEndpointsSetPermissions( + client, + serving_endpoint_id, + access_control_list = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{serving_endpoint_id}{Required. The serving endpoint for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets permissions on a serving endpoint. Serving endpoints can inherit +permissions from their root object. 
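For the alias call documented above, a minimal sketch; the Unity Catalog model name and version are placeholders:

client <- DatabricksClient()
set_registered_model_alias(client, full_name = "main.ml.churn_model",
                           alias = "champion", version_num = 3)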
+} diff --git a/man/set_token_management_permissions.Rd b/man/set_token_management_permissions.Rd new file mode 100644 index 00000000..4761ba37 --- /dev/null +++ b/man/set_token_management_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token_management.R +\name{set_token_management_permissions} +\alias{set_token_management_permissions} +\alias{tokenManagementSetPermissions} +\title{Set token permissions.} +\usage{ +set_token_management_permissions(client, access_control_list = NULL) + +tokenManagementSetPermissions(client, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets permissions on all tokens. Tokens can inherit permissions from their +root object. +} diff --git a/man/set_user_permissions.Rd b/man/set_user_permissions.Rd new file mode 100644 index 00000000..583dcbe6 --- /dev/null +++ b/man/set_user_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/users.R +\name{set_user_permissions} +\alias{set_user_permissions} +\alias{usersSetPermissions} +\title{Set password permissions.} +\usage{ +set_user_permissions(client, access_control_list = NULL) + +usersSetPermissions(client, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets permissions on all passwords. Passwords can inherit permissions from +their root object. +} diff --git a/man/set_warehouse_permissions.Rd b/man/set_warehouse_permissions.Rd new file mode 100644 index 00000000..97268ca7 --- /dev/null +++ b/man/set_warehouse_permissions.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{set_warehouse_permissions} +\alias{set_warehouse_permissions} +\alias{warehousesSetPermissions} +\title{Set SQL warehouse permissions.} +\usage{ +set_warehouse_permissions(client, warehouse_id, access_control_list = NULL) + +warehousesSetPermissions(client, warehouse_id, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{warehouse_id}{Required. The SQL warehouse for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Sets permissions on a SQL warehouse. SQL warehouses can inherit permissions +from their root object. 
+} diff --git a/man/set_warehouse_workspace_config.Rd b/man/set_warehouse_workspace_config.Rd new file mode 100644 index 00000000..d6d8650c --- /dev/null +++ b/man/set_warehouse_workspace_config.Rd @@ -0,0 +1,58 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{set_warehouse_workspace_config} +\alias{set_warehouse_workspace_config} +\alias{warehousesSetWorkspaceWarehouseConfig} +\title{Set the workspace configuration.} +\usage{ +set_warehouse_workspace_config( + client, + channel = NULL, + config_param = NULL, + data_access_config = NULL, + enabled_warehouse_types = NULL, + global_param = NULL, + google_service_account = NULL, + instance_profile_arn = NULL, + security_policy = NULL, + sql_configuration_parameters = NULL ) + +warehousesSetWorkspaceWarehouseConfig( + client, + channel = NULL, + config_param = NULL, + data_access_config = NULL, + enabled_warehouse_types = NULL, + global_param = NULL, + google_service_account = NULL, + instance_profile_arn = NULL, + security_policy = NULL, + sql_configuration_parameters = NULL ) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{channel}{Optional: Channel selection details.} + +\item{config_param}{Deprecated: Use sql_configuration_parameters.} + +\item{data_access_config}{Spark confs for external hive metastore configuration; the JSON-serialized size must be at most 512K.} + +\item{enabled_warehouse_types}{List of Warehouse Types allowed in this workspace (limits the allowed values of the type field in CreateWarehouse and EditWarehouse).} + +\item{global_param}{Deprecated: Use sql_configuration_parameters.} + +\item{google_service_account}{GCP only: Google Service Account passed to the cluster to access Google Cloud Storage.} + +\item{instance_profile_arn}{AWS only: Instance profile used to pass an IAM role to the cluster.} + +\item{security_policy}{Security policy for warehouses.} + +\item{sql_configuration_parameters}{SQL configuration parameters.} +} +\description{ +Sets the workspace-level configuration that is shared by all SQL warehouses +in a workspace. +} diff --git a/man/set_workspace_conf_status.Rd b/man/set_workspace_conf_status.Rd new file mode 100644 index 00000000..2aa36bb7 --- /dev/null +++ b/man/set_workspace_conf_status.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace_conf.R +\name{set_workspace_conf_status} +\alias{set_workspace_conf_status} +\alias{workspaceConfSetStatus} +\title{Enable/disable features.} +\usage{ +set_workspace_conf_status(client) + +workspaceConfSetStatus(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +Sets the configuration status for a workspace, including enabling or +disabling it. +} diff --git a/man/share_permissions.Rd b/man/share_permissions.Rd new file mode 100644 index 00000000..56fb774e --- /dev/null +++ b/man/share_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/shares.R +\name{share_permissions} +\alias{share_permissions} +\alias{sharesSharePermissions} +\title{Get permissions.} +\usage{ +share_permissions(client, name) + +sharesSharePermissions(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the share.} +} +\description{ +Gets the permissions for a data share from the metastore. The caller must be +a metastore admin or the owner of the share.
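+
+For example (the share name is hypothetical):
+
+\preformatted{
+client <- DatabricksClient()
+# Inspect the current grants on a share.
+perms <- share_permissions(client, name = "my_share")
+}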
+} diff --git a/man/share_recipient_permissions.Rd b/man/share_recipient_permissions.Rd new file mode 100644 index 00000000..f29f90cd --- /dev/null +++ b/man/share_recipient_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/recipients.R +\name{share_recipient_permissions} +\alias{share_recipient_permissions} +\alias{recipientsSharePermissions} +\title{Get recipient share permissions.} +\usage{ +share_recipient_permissions(client, name) + +recipientsSharePermissions(client, name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the Recipient.} +} +\description{ +Gets the share permissions for the specified Recipient. The caller must be a +metastore admin or the owner of the Recipient. +} diff --git a/man/sharesCreate.Rd b/man/sharesCreate.Rd deleted file mode 100644 index 50182879..00000000 --- a/man/sharesCreate.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/shares.R -\name{sharesCreate} -\alias{sharesCreate} -\title{Create a share.} -\usage{ -sharesCreate(client, name, comment = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the share.} - -\item{comment}{User-provided free-form text description.} -} -\description{ -Creates a new share for data objects. Data objects can be added after -creation with \strong{update}. The caller must be a metastore admin or have the -\strong{CREATE_SHARE} privilege on the metastore. -} diff --git a/man/sharesDelete.Rd b/man/sharesDelete.Rd deleted file mode 100644 index d82f0ed3..00000000 --- a/man/sharesDelete.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/shares.R -\name{sharesDelete} -\alias{sharesDelete} -\title{Delete a share.} -\usage{ -sharesDelete(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the share.} -} -\description{ -Deletes a data object share from the metastore. The caller must be an owner -of the share. -} diff --git a/man/sharesGet.Rd b/man/sharesGet.Rd deleted file mode 100644 index 9883422f..00000000 --- a/man/sharesGet.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/shares.R -\name{sharesGet} -\alias{sharesGet} -\title{Get a share.} -\usage{ -sharesGet(client, name, include_shared_data = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the share.} - -\item{include_shared_data}{Query for data to include in the share.} -} -\description{ -Gets a data object share from the metastore. The caller must be a metastore -admin or the owner of the share. -} diff --git a/man/sharesList.Rd b/man/sharesList.Rd deleted file mode 100644 index bfa1dca4..00000000 --- a/man/sharesList.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/shares.R -\name{sharesList} -\alias{sharesList} -\title{List shares.} -\usage{ -sharesList(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets an array of data object shares from the metastore. The caller must be a -metastore admin or the owner of the share. 
There is no guarantee of a -specific ordering of the elements in the array. -} diff --git a/man/sharesSharePermissions.Rd b/man/sharesSharePermissions.Rd deleted file mode 100644 index f0bf3ddb..00000000 --- a/man/sharesSharePermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/shares.R -\name{sharesSharePermissions} -\alias{sharesSharePermissions} -\title{Get permissions.} -\usage{ -sharesSharePermissions(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the share.} -} -\description{ -Gets the permissions for a data share from the metastore. The caller must be -a metastore admin or the owner of the share. -} diff --git a/man/sharesUpdate.Rd b/man/sharesUpdate.Rd deleted file mode 100644 index 40e20a47..00000000 --- a/man/sharesUpdate.Rd +++ /dev/null @@ -1,46 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/shares.R -\name{sharesUpdate} -\alias{sharesUpdate} -\title{Update a share.} -\usage{ -sharesUpdate( - client, - name, - comment = NULL, - new_name = NULL, - owner = NULL, - updates = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the share.} - -\item{comment}{User-provided free-form text description.} - -\item{new_name}{New name for the share.} - -\item{owner}{Username of current owner of share.} - -\item{updates}{Array of shared data object updates.} -} -\description{ -Updates the share with the changes and data objects in the request. The -caller must be the owner of the share or a metastore admin. -} -\details{ -When the caller is a metastore admin, only the \strong{owner} field can be -updated. - -In the case that the share name is changed, \strong{updateShare} requires that the -caller is both the share owner and a metastore admin. - -For each table that is added through this method, the share owner must also -have \strong{SELECT} privilege on the table. This privilege must be maintained -indefinitely for recipients to be able to access the table. Typically, you -should use a group as the share owner. - -Table removals through \strong{update} do not require additional privileges. -} diff --git a/man/sharesUpdatePermissions.Rd b/man/sharesUpdatePermissions.Rd deleted file mode 100644 index 969b1896..00000000 --- a/man/sharesUpdatePermissions.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/shares.R -\name{sharesUpdatePermissions} -\alias{sharesUpdatePermissions} -\title{Update permissions.} -\usage{ -sharesUpdatePermissions(client, name, changes = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the share.} - -\item{changes}{Array of permission changes.} -} -\description{ -Updates the permissions for a data share in the metastore. The caller must be -a metastore admin or an owner of the share. -} -\details{ -For new recipient grants, the user must also be the owner of the recipients. -recipient revocations do not require additional privileges. 
-} diff --git a/man/spark_cluster_versions.Rd b/man/spark_cluster_versions.Rd new file mode 100644 index 00000000..87bf2409 --- /dev/null +++ b/man/spark_cluster_versions.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{spark_cluster_versions} +\alias{spark_cluster_versions} +\alias{clustersSparkVersions} +\title{List available Spark versions.} +\usage{ +spark_cluster_versions(client) + +clustersSparkVersions(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +Returns the list of available Spark versions. These versions can be used to +launch a cluster. +} diff --git a/man/start_cluster.Rd b/man/start_cluster.Rd new file mode 100644 index 00000000..7fc4fd3a --- /dev/null +++ b/man/start_cluster.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{start_cluster} +\alias{start_cluster} +\alias{clustersStart} +\title{Start terminated cluster.} +\usage{ +start_cluster(client, cluster_id) + +clustersStart(client, cluster_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster to be started.} +} +\description{ +Starts a terminated Spark cluster with the supplied ID. This works similarly to +\code{createCluster}, except: +} +\details{ +\itemize{ +\item The previous cluster id and attributes are preserved. +\item The cluster starts with the last specified cluster size. +\item If the previous cluster was an autoscaling cluster, the current cluster starts with the minimum number of nodes. +\item If the cluster is not currently in a \code{TERMINATED} state, nothing will happen. +\item Clusters launched to run a job cannot be started. +} +} diff --git a/man/start_cluster_and_wait.Rd b/man/start_cluster_and_wait.Rd new file mode 100644 index 00000000..5a363197 --- /dev/null +++ b/man/start_cluster_and_wait.Rd @@ -0,0 +1,39 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{start_cluster_and_wait} +\alias{start_cluster_and_wait} +\title{Start terminated cluster.} +\usage{ +start_cluster_and_wait( + client, + cluster_id, + timeout = 20, + callback = cli_reporter ) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster to be started.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to the console.} +} +\description{ +This is a long-running operation, which blocks until Clusters on Databricks reach +RUNNING state with a timeout of 20 minutes, which you can change via the \code{timeout} parameter. +By default, the state of Databricks Clusters is reported to the console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Starts a terminated Spark cluster with the supplied ID. This works similarly to +\code{createCluster}, except: +\itemize{ +\item The previous cluster id and attributes are preserved. +\item The cluster starts with the last specified cluster size. +\item If the previous cluster was an autoscaling cluster, the current cluster starts with the minimum number of nodes. +\item If the cluster is not currently in a \code{TERMINATED} state, nothing will happen. +\item Clusters launched to run a job cannot be started. +}
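+
+A minimal blocking-start sketch (the cluster ID below is hypothetical):
+
+\preformatted{
+client <- DatabricksClient()
+# Block until the cluster reaches RUNNING, waiting up to 30 minutes.
+info <- start_cluster_and_wait(client, cluster_id = "1234-567890-abcde123",
+  timeout = 30)
+}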
+} diff --git a/man/start_pipeline_update.Rd b/man/start_pipeline_update.Rd new file mode 100644 index 00000000..156b7472 --- /dev/null +++ b/man/start_pipeline_update.Rd @@ -0,0 +1,47 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{start_pipeline_update} +\alias{start_pipeline_update} +\alias{pipelinesStartUpdate} +\title{Start a pipeline.} +\usage{ +start_pipeline_update( + client, + pipeline_id, + cause = NULL, + full_refresh = NULL, + full_refresh_selection = NULL, + refresh_selection = NULL, + validate_only = NULL ) + +pipelinesStartUpdate( + client, + pipeline_id, + cause = NULL, + full_refresh = NULL, + full_refresh_selection = NULL, + refresh_selection = NULL, + validate_only = NULL ) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{pipeline_id}{Required. This field has no description yet.} + +\item{cause}{This field has no description yet.} + +\item{full_refresh}{If true, this update will reset all tables before running.} + +\item{full_refresh_selection}{A list of tables to update with fullRefresh.} + +\item{refresh_selection}{A list of tables to update without fullRefresh.} + +\item{validate_only}{If true, this update only validates the correctness of pipeline source code but does not materialize or publish any datasets.} +} +\description{ +Starts a new update for the pipeline. If there is already an active update +for the pipeline, the request will fail and the active update will remain +running. +} diff --git a/man/start_warehouse.Rd b/man/start_warehouse.Rd new file mode 100644 index 00000000..0081542d --- /dev/null +++ b/man/start_warehouse.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{start_warehouse} +\alias{start_warehouse} +\alias{warehousesStart} +\title{Start a warehouse.} +\usage{ +start_warehouse(client, id) + +warehousesStart(client, id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required.} +} +\description{ +Starts a SQL warehouse. +} diff --git a/man/start_warehouse_and_wait.Rd b/man/start_warehouse_and_wait.Rd new file mode 100644 index 00000000..4b36384a --- /dev/null +++ b/man/start_warehouse_and_wait.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{start_warehouse_and_wait} +\alias{start_warehouse_and_wait} +\title{Start a warehouse.} +\usage{ +start_warehouse_and_wait(client, id, timeout = 20, callback = cli_reporter) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to the console.} +} +\description{ +This is a long-running operation, which blocks until Warehouses on Databricks reach +RUNNING state with a timeout of 20 minutes, which you can change via the \code{timeout} parameter. +By default, the state of Databricks Warehouses is reported to the console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Starts a SQL warehouse.
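+
+For example (the warehouse ID is hypothetical):
+
+\preformatted{
+client <- DatabricksClient()
+# Block until the warehouse reaches RUNNING, using the default 20-minute timeout.
+start_warehouse_and_wait(client, id = "abcdef1234567890")
+}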
+} diff --git a/man/statementExecutionCancelExecution.Rd b/man/statementExecutionCancelExecution.Rd deleted file mode 100644 index ac5253e0..00000000 --- a/man/statementExecutionCancelExecution.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/statement_execution.R -\name{statementExecutionCancelExecution} -\alias{statementExecutionCancelExecution} -\title{Cancel statement execution.} -\usage{ -statementExecutionCancelExecution(client, statement_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{statement_id}{Required. The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls.} -} -\description{ -Requests that an executing statement be canceled. Callers must poll for -status to see the terminal state. -} diff --git a/man/statementExecutionExecuteStatement.Rd b/man/statementExecutionExecuteStatement.Rd deleted file mode 100644 index 8cfc6a14..00000000 --- a/man/statementExecutionExecuteStatement.Rd +++ /dev/null @@ -1,49 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/statement_execution.R -\name{statementExecutionExecuteStatement} -\alias{statementExecutionExecuteStatement} -\title{Execute a SQL statement.} -\usage{ -statementExecutionExecuteStatement( - client, - statement, - warehouse_id, - byte_limit = NULL, - catalog = NULL, - disposition = NULL, - format = NULL, - on_wait_timeout = NULL, - parameters = NULL, - row_limit = NULL, - schema = NULL, - wait_timeout = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{statement}{Required. The SQL statement to execute.} - -\item{warehouse_id}{Required. Warehouse upon which to execute a statement.} - -\item{byte_limit}{Applies the given byte limit to the statement's result size.} - -\item{catalog}{Sets default catalog for statement execution, similar to \href{https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-catalog.html}{\verb{USE CATALOG}} in SQL.} - -\item{disposition}{The fetch disposition provides two modes of fetching results: \code{INLINE} and \code{EXTERNAL_LINKS}.} - -\item{format}{Statement execution supports three result formats: \code{JSON_ARRAY} (default), \code{ARROW_STREAM}, and \code{CSV}.} - -\item{on_wait_timeout}{When \verb{wait_timeout > 0s}, the call will block up to the specified time.} - -\item{parameters}{A list of parameters to pass into a SQL statement containing parameter markers.} - -\item{row_limit}{Applies the given row limit to the statement's result set, but unlike the \code{LIMIT} clause in SQL, it also sets the \code{truncated} field in the response to indicate whether the result was trimmed due to the limit or not.} - -\item{schema}{Sets default schema for statement execution, similar to \href{https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-use-schema.html}{\verb{USE SCHEMA}} in SQL.} - -\item{wait_timeout}{The time in seconds the call will wait for the statement's result set as \code{Ns}, where \code{N} can be set to 0 or to a value between 5 and 50.} -} -\description{ -Execute a SQL statement. 
-} diff --git a/man/statementExecutionGetStatement.Rd b/man/statementExecutionGetStatement.Rd deleted file mode 100644 index 7969d25c..00000000 --- a/man/statementExecutionGetStatement.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/statement_execution.R -\name{statementExecutionGetStatement} -\alias{statementExecutionGetStatement} -\title{Get status, manifest, and result first chunk.} -\usage{ -statementExecutionGetStatement(client, statement_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{statement_id}{Required. The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls.} -} -\description{ -This request can be used to poll for the statement's status. When the -\code{status.state} field is \code{SUCCEEDED} it will also return the result manifest -and the first chunk of the result data. When the statement is in the terminal -states \code{CANCELED}, \code{CLOSED} or \code{FAILED}, it returns HTTP 200 with the state -set. After at least 12 hours in terminal state, the statement is removed from -the warehouse and further calls will receive an HTTP 404 response. -} -\details{ -\strong{NOTE} This call currently might take up to 5 seconds to get the latest -status and result. -} diff --git a/man/statementExecutionGetStatementResultChunkN.Rd b/man/statementExecutionGetStatementResultChunkN.Rd deleted file mode 100644 index 1b105f6c..00000000 --- a/man/statementExecutionGetStatementResultChunkN.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/statement_execution.R -\name{statementExecutionGetStatementResultChunkN} -\alias{statementExecutionGetStatementResultChunkN} -\title{Get result chunk by index.} -\usage{ -statementExecutionGetStatementResultChunkN(client, statement_id, chunk_index) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{statement_id}{Required. The statement ID is returned upon successfully submitting a SQL statement, and is a required reference for all subsequent calls.} - -\item{chunk_index}{Required. This field has no description yet.} -} -\description{ -After the statement execution has \code{SUCCEEDED}, this request can be used to -fetch any chunk by index. Whereas the first chunk with \code{chunk_index=0} is -typically fetched with :method:statementexecution/executeStatement or -:method:statementexecution/getStatement, this request can be used to fetch -subsequent chunks. The response structure is identical to the nested \code{result} -element described in the :method:statementexecution/getStatement request, and -similarly includes the \code{next_chunk_index} and \code{next_chunk_internal_link} -fields for simple iteration through the result set. -} diff --git a/man/stop_pipeline.Rd b/man/stop_pipeline.Rd new file mode 100644 index 00000000..0f31b96f --- /dev/null +++ b/man/stop_pipeline.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{stop_pipeline} +\alias{stop_pipeline} +\alias{pipelinesStop} +\title{Stop a pipeline.} +\usage{ +stop_pipeline(client, pipeline_id) + +pipelinesStop(client, pipeline_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{pipeline_id}{Required. This field has no description yet.} +} +\description{ +Stops the pipeline by canceling the active update. 
If there is no active +update for the pipeline, this request is a no-op. +} diff --git a/man/stop_pipeline_and_wait.Rd b/man/stop_pipeline_and_wait.Rd new file mode 100644 index 00000000..49125616 --- /dev/null +++ b/man/stop_pipeline_and_wait.Rd @@ -0,0 +1,32 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{stop_pipeline_and_wait} +\alias{stop_pipeline_and_wait} +\title{Stop a pipeline.} +\usage{ +stop_pipeline_and_wait( + client, + pipeline_id, + timeout = 20, + callback = cli_reporter ) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{pipeline_id}{Required. This field has no description yet.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to the console.} +} +\description{ +This is a long-running operation, which blocks until Pipelines on Databricks reach +IDLE state with a timeout of 20 minutes, which you can change via the \code{timeout} parameter. +By default, the state of Databricks Pipelines is reported to the console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Stops the pipeline by canceling the active update. If there is no active +update for the pipeline, this request is a no-op. +} diff --git a/man/stop_warehouse.Rd b/man/stop_warehouse.Rd new file mode 100644 index 00000000..67214ddf --- /dev/null +++ b/man/stop_warehouse.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{stop_warehouse} +\alias{stop_warehouse} +\alias{warehousesStop} +\title{Stop a warehouse.} +\usage{ +stop_warehouse(client, id) + +warehousesStop(client, id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required.} +} +\description{ +Stops a SQL warehouse. +} diff --git a/man/stop_warehouse_and_wait.Rd b/man/stop_warehouse_and_wait.Rd new file mode 100644 index 00000000..82baf52a --- /dev/null +++ b/man/stop_warehouse_and_wait.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{stop_warehouse_and_wait} +\alias{stop_warehouse_and_wait} +\title{Stop a warehouse.} +\usage{ +stop_warehouse_and_wait(client, id, timeout = 20, callback = cli_reporter) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to the console.} +} +\description{ +This is a long-running operation, which blocks until Warehouses on Databricks reach +STOPPED state with a timeout of 20 minutes, which you can change via the \code{timeout} parameter. +By default, the state of Databricks Warehouses is reported to the console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Stops a SQL warehouse.
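+
+For example (the warehouse ID is hypothetical):
+
+\preformatted{
+client <- DatabricksClient()
+# Block until the warehouse reaches STOPPED, reporting progress to the console.
+stop_warehouse_and_wait(client, id = "abcdef1234567890")
+}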
+} diff --git a/man/storageCredentialsCreate.Rd b/man/storageCredentialsCreate.Rd deleted file mode 100644 index bd0cbba9..00000000 --- a/man/storageCredentialsCreate.Rd +++ /dev/null @@ -1,43 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/storage_credentials.R -\name{storageCredentialsCreate} -\alias{storageCredentialsCreate} -\title{Create a storage credential.} -\usage{ -storageCredentialsCreate( - client, - name, - aws_iam_role = NULL, - azure_managed_identity = NULL, - azure_service_principal = NULL, - cloudflare_api_token = NULL, - comment = NULL, - databricks_gcp_service_account = NULL, - read_only = NULL, - skip_validation = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The credential name.} - -\item{aws_iam_role}{The AWS IAM role configuration.} - -\item{azure_managed_identity}{The Azure managed identity configuration.} - -\item{azure_service_principal}{The Azure service principal configuration.} - -\item{cloudflare_api_token}{The Cloudflare API token configuration.} - -\item{comment}{Comment associated with the credential.} - -\item{databricks_gcp_service_account}{The \if{html}{\out{}} managed GCP service account configuration.} - -\item{read_only}{Whether the storage credential is only usable for read operations.} - -\item{skip_validation}{Supplying true to this argument skips validation of the created credential.} -} -\description{ -Creates a new storage credential. -} diff --git a/man/storageCredentialsDelete.Rd b/man/storageCredentialsDelete.Rd deleted file mode 100644 index f0ef698a..00000000 --- a/man/storageCredentialsDelete.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/storage_credentials.R -\name{storageCredentialsDelete} -\alias{storageCredentialsDelete} -\title{Delete a credential.} -\usage{ -storageCredentialsDelete(client, name, force = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the storage credential.} - -\item{force}{Force deletion even if there are dependent external locations or external tables.} -} -\description{ -Deletes a storage credential from the metastore. The caller must be an owner -of the storage credential. -} diff --git a/man/storageCredentialsGet.Rd b/man/storageCredentialsGet.Rd deleted file mode 100644 index a30d93d2..00000000 --- a/man/storageCredentialsGet.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/storage_credentials.R -\name{storageCredentialsGet} -\alias{storageCredentialsGet} -\title{Get a credential.} -\usage{ -storageCredentialsGet(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the storage credential.} -} -\description{ -Gets a storage credential from the metastore. The caller must be a metastore -admin, the owner of the storage credential, or have some permission on the -storage credential. 
-} diff --git a/man/storageCredentialsList.Rd b/man/storageCredentialsList.Rd deleted file mode 100644 index bed4e5b6..00000000 --- a/man/storageCredentialsList.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/storage_credentials.R -\name{storageCredentialsList} -\alias{storageCredentialsList} -\title{List credentials.} -\usage{ -storageCredentialsList(client, max_results = NULL, page_token = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{max_results}{Maximum number of storage credentials to return.} - -\item{page_token}{Opaque pagination token to go to next page based on previous query.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets an array of storage credentials (as \strong{StorageCredentialInfo} objects). -The array is limited to only those storage credentials the caller has -permission to access. If the caller is a metastore admin, retrieval of -credentials is unrestricted. There is no guarantee of a specific ordering of -the elements in the array. -} diff --git a/man/storageCredentialsUpdate.Rd b/man/storageCredentialsUpdate.Rd deleted file mode 100644 index d6e718c0..00000000 --- a/man/storageCredentialsUpdate.Rd +++ /dev/null @@ -1,52 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/storage_credentials.R -\name{storageCredentialsUpdate} -\alias{storageCredentialsUpdate} -\title{Update a credential.} -\usage{ -storageCredentialsUpdate( - client, - name, - aws_iam_role = NULL, - azure_managed_identity = NULL, - azure_service_principal = NULL, - cloudflare_api_token = NULL, - comment = NULL, - databricks_gcp_service_account = NULL, - force = NULL, - new_name = NULL, - owner = NULL, - read_only = NULL, - skip_validation = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the storage credential.} - -\item{aws_iam_role}{The AWS IAM role configuration.} - -\item{azure_managed_identity}{The Azure managed identity configuration.} - -\item{azure_service_principal}{The Azure service principal configuration.} - -\item{cloudflare_api_token}{The Cloudflare API token configuration.} - -\item{comment}{Comment associated with the credential.} - -\item{databricks_gcp_service_account}{The \if{html}{\out{}} managed GCP service account configuration.} - -\item{force}{Force update even if there are dependent external locations or external tables.} - -\item{new_name}{New name for the storage credential.} - -\item{owner}{Username of current owner of credential.} - -\item{read_only}{Whether the storage credential is only usable for read operations.} - -\item{skip_validation}{Supplying true to this argument skips validation of the updated credential.} -} -\description{ -Updates a storage credential on the metastore. 
-} diff --git a/man/storageCredentialsValidate.Rd b/man/storageCredentialsValidate.Rd deleted file mode 100644 index 8f582cef..00000000 --- a/man/storageCredentialsValidate.Rd +++ /dev/null @@ -1,55 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/storage_credentials.R -\name{storageCredentialsValidate} -\alias{storageCredentialsValidate} -\title{Validate a storage credential.} -\usage{ -storageCredentialsValidate( - client, - aws_iam_role = NULL, - azure_managed_identity = NULL, - azure_service_principal = NULL, - cloudflare_api_token = NULL, - databricks_gcp_service_account = NULL, - external_location_name = NULL, - read_only = NULL, - storage_credential_name = NULL, - url = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{aws_iam_role}{The AWS IAM role configuration.} - -\item{azure_managed_identity}{The Azure managed identity configuration.} - -\item{azure_service_principal}{The Azure service principal configuration.} - -\item{cloudflare_api_token}{The Cloudflare API token configuration.} - -\item{databricks_gcp_service_account}{The Databricks created GCP service account configuration.} - -\item{external_location_name}{The name of an existing external location to validate.} - -\item{read_only}{Whether the storage credential is only usable for read operations.} - -\item{storage_credential_name}{The name of the storage credential to validate.} - -\item{url}{The external location url to validate.} -} -\description{ -Validates a storage credential. At least one of \strong{external_location_name} -and \strong{url} need to be provided. If only one of them is provided, it will be -used for validation. And if both are provided, the \strong{url} will be used for -validation, and \strong{external_location_name} will be ignored when checking -overlapping urls. -} -\details{ -Either the \strong{storage_credential_name} or the cloud-specific credential must -be provided. - -The caller must be a metastore admin or the storage credential owner or have -the \strong{CREATE_EXTERNAL_LOCATION} privilege on the metastore and the storage -credential. -} diff --git a/man/submit_job.Rd b/man/submit_job.Rd new file mode 100644 index 00000000..d826b034 --- /dev/null +++ b/man/submit_job.Rd @@ -0,0 +1,68 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{submit_job} +\alias{submit_job} +\alias{jobsSubmit} +\title{Create and trigger a one-time run.} +\usage{ +submit_job( + client, + access_control_list = NULL, + email_notifications = NULL, + git_source = NULL, + health = NULL, + idempotency_token = NULL, + notification_settings = NULL, + queue = NULL, + run_name = NULL, + tasks = NULL, + timeout_seconds = NULL, + webhook_notifications = NULL +) + +jobsSubmit( + client, + access_control_list = NULL, + email_notifications = NULL, + git_source = NULL, + health = NULL, + idempotency_token = NULL, + notification_settings = NULL, + queue = NULL, + run_name = NULL, + tasks = NULL, + timeout_seconds = NULL, + webhook_notifications = NULL +) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{access_control_list}{List of permissions to set on the job.} + +\item{email_notifications}{An optional set of email addresses notified when the run begins or completes.} + +\item{git_source}{An optional specification for a remote Git repository containing the source code used by tasks.} + +\item{health}{An optional set of health rules that can be defined for this job.} + +\item{idempotency_token}{An optional token that can be used to guarantee the idempotency of job run requests.} + +\item{notification_settings}{Optional notification settings that are used when sending notifications to each of the \code{email_notifications} and \code{webhook_notifications} for this run.} + +\item{queue}{The queue settings of the one-time run.} + +\item{run_name}{An optional name for the run.} + +\item{tasks}{This field has no description yet.} + +\item{timeout_seconds}{An optional timeout applied to each run of this job.} + +\item{webhook_notifications}{A collection of system notification IDs to notify when the run begins or completes.} +} +\description{ +Submit a one-time run. This endpoint allows you to submit a workload directly +without creating a job. Runs submitted using this endpoint don’t display in +the UI. Use the \code{jobs/runs/get} API to check the run state after the job is +submitted.
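+
+A minimal one-time-run sketch (the cluster ID, notebook path, and task key are
+hypothetical, and the shape of the \code{tasks} entries is assumed from the
+underlying Jobs API):
+
+\preformatted{
+client <- DatabricksClient()
+run <- submit_job(client, run_name = "adhoc-notebook-run",
+  tasks = list(list(
+    task_key = "main",
+    existing_cluster_id = "1234-567890-abcde123",
+    notebook_task = list(notebook_path = "/Users/me@example.com/MyNotebook"))))
+}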
+} diff --git a/man/submit_job_and_wait.Rd b/man/submit_job_and_wait.Rd new file mode 100644 index 00000000..883325a4 --- /dev/null +++ b/man/submit_job_and_wait.Rd @@ -0,0 +1,64 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{submit_job_and_wait} +\alias{submit_job_and_wait} +\title{Create and trigger a one-time run.} +\usage{ +submit_job_and_wait( + client, + access_control_list = NULL, + email_notifications = NULL, + git_source = NULL, + health = NULL, + idempotency_token = NULL, + notification_settings = NULL, + queue = NULL, + run_name = NULL, + tasks = NULL, + timeout_seconds = NULL, + webhook_notifications = NULL, + timeout = 20, + callback = cli_reporter ) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{access_control_list}{List of permissions to set on the job.} + +\item{email_notifications}{An optional set of email addresses notified when the run begins or completes.} + +\item{git_source}{An optional specification for a remote Git repository containing the source code used by tasks.} + +\item{health}{An optional set of health rules that can be defined for this job.} + +\item{idempotency_token}{An optional token that can be used to guarantee the idempotency of job run requests.} + +\item{notification_settings}{Optional notification settings that are used when sending notifications to each of the \code{email_notifications} and \code{webhook_notifications} for this run.} + +\item{queue}{The queue settings of the one-time run.} + +\item{run_name}{An optional name for the run.} + +\item{tasks}{This field has no description yet.} + +\item{timeout_seconds}{An optional timeout applied to each run of this job.} + +\item{webhook_notifications}{A collection of system notification IDs to notify when the run begins or completes.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to the console.} +} +\description{ +This is a long-running operation, which blocks until Jobs on Databricks reach +TERMINATED or SKIPPED state with a timeout of 20 minutes, which you can change via the \code{timeout} parameter. +By default, the state of Databricks Jobs is reported to the console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Submit a one-time run. This endpoint allows you to submit a workload directly +without creating a job. Runs submitted using this endpoint don’t display in +the UI. Use the \code{jobs/runs/get} API to check the run state after the job is +submitted. +} diff --git a/man/summary_metastore.Rd b/man/summary_metastore.Rd new file mode 100644 index 00000000..43dfe512 --- /dev/null +++ b/man/summary_metastore.Rd @@ -0,0 +1,18 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/metastores.R +\name{summary_metastore} +\alias{summary_metastore} +\alias{metastoresSummary} +\title{Get a metastore summary.} +\usage{ +summary_metastore(client) + +metastoresSummary(client) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} +} +\description{ +Gets information about a metastore. This summary includes the storage +credential, the cloud vendor, the cloud region, and the global metastore ID. +} diff --git a/man/sync_vector_search_index.Rd b/man/sync_vector_search_index.Rd new file mode 100644 index 00000000..96d80071 --- /dev/null +++ b/man/sync_vector_search_index.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/vector_search_indexes.R +\name{sync_vector_search_index} +\alias{sync_vector_search_index} +\alias{vectorSearchIndexesSyncIndex} +\title{Synchronize an index.} +\usage{ +sync_vector_search_index(client, index_name) + +vectorSearchIndexesSyncIndex(client, index_name) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{index_name}{Required. Name of the vector index to synchronize.} +} +\description{ +Triggers a synchronization process for a specified vector index. +} diff --git a/man/systemSchemasDisable.Rd b/man/systemSchemasDisable.Rd deleted file mode 100644 index d82ddf74..00000000 --- a/man/systemSchemasDisable.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/system_schemas.R -\name{systemSchemasDisable} -\alias{systemSchemasDisable} -\title{Disable a system schema.} -\usage{ -systemSchemasDisable(client, metastore_id, schema_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{metastore_id}{Required. The metastore ID under which the system schema lives.} - -\item{schema_name}{Required. Full name of the system schema.} -} -\description{ -Disables the system schema and removes it from the system catalog. The caller -must be an account admin or a metastore admin. -} diff --git a/man/systemSchemasEnable.Rd b/man/systemSchemasEnable.Rd deleted file mode 100644 index 29454a79..00000000 --- a/man/systemSchemasEnable.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/system_schemas.R -\name{systemSchemasEnable} -\alias{systemSchemasEnable} -\title{Enable a system schema.} -\usage{ -systemSchemasEnable(client, metastore_id, schema_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{metastore_id}{Required.
The metastore ID under which the system schema lives.} - -\item{schema_name}{Required. Full name of the system schema.} -} -\description{ -Enables the system schema and adds it to the system catalog. The caller must -be an account admin or a metastore admin. -} diff --git a/man/systemSchemasList.Rd b/man/systemSchemasList.Rd deleted file mode 100644 index e57a8f10..00000000 --- a/man/systemSchemasList.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/system_schemas.R -\name{systemSchemasList} -\alias{systemSchemasList} -\title{List system schemas.} -\usage{ -systemSchemasList(client, metastore_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{metastore_id}{Required. The ID for the metastore in which the system schema resides.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets an array of system schemas for a metastore. The caller must be an -account admin or a metastore admin. -} diff --git a/man/tableConstraintsCreate.Rd b/man/tableConstraintsCreate.Rd deleted file mode 100644 index 424303e3..00000000 --- a/man/tableConstraintsCreate.Rd +++ /dev/null @@ -1,28 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/table_constraints.R -\name{tableConstraintsCreate} -\alias{tableConstraintsCreate} -\title{Create a table constraint.} -\usage{ -tableConstraintsCreate(client, full_name_arg, constraint) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name_arg}{Required. The full name of the table referenced by the constraint.} - -\item{constraint}{Required. A table constraint, as defined by \emph{one} of the following fields being set: \strong{primary_key_constraint}, \strong{foreign_key_constraint}, \strong{named_table_constraint}.} -} -\description{ -Creates a new table constraint. -} -\details{ -For the table constraint creation to succeed, the user must satisfy both of -these conditions: - the user must have the \strong{USE_CATALOG} privilege on the -table's parent catalog, the \strong{USE_SCHEMA} privilege on the table's parent -schema, and be the owner of the table. - if the new constraint is a -\strong{ForeignKeyConstraint}, the user must have the \strong{USE_CATALOG} privilege on -the referenced parent table's catalog, the \strong{USE_SCHEMA} privilege on the -referenced parent table's schema, and be the owner of the referenced parent -table. -} diff --git a/man/tableConstraintsDelete.Rd b/man/tableConstraintsDelete.Rd deleted file mode 100644 index c868db82..00000000 --- a/man/tableConstraintsDelete.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/table_constraints.R -\name{tableConstraintsDelete} -\alias{tableConstraintsDelete} -\title{Delete a table constraint.} -\usage{ -tableConstraintsDelete(client, full_name, constraint_name, cascade) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the table referenced by the constraint.} - -\item{constraint_name}{Required. The name of the constraint to delete.} - -\item{cascade}{Required. If true, try deleting all child constraints of the current constraint.} -} -\description{ -Deletes a table constraint. 
-} -\details{ -For the table constraint deletion to succeed, the user must satisfy both of -these conditions: - the user must have the \strong{USE_CATALOG} privilege on the -table's parent catalog, the \strong{USE_SCHEMA} privilege on the table's parent -schema, and be the owner of the table. - if \strong{cascade} argument is \strong{true}, -the user must have the following permissions on all of the child tables: the -\strong{USE_CATALOG} privilege on the table's catalog, the \strong{USE_SCHEMA} -privilege on the table's schema, and be the owner of the table. -} diff --git a/man/tablesDelete.Rd b/man/tablesDelete.Rd deleted file mode 100644 index 345eb0aa..00000000 --- a/man/tablesDelete.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tables.R -\name{tablesDelete} -\alias{tablesDelete} -\title{Delete a table.} -\usage{ -tablesDelete(client, full_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the table.} -} -\description{ -Deletes a table from the specified parent catalog and schema. The caller must -be the owner of the parent catalog, have the \strong{USE_CATALOG} privilege on the -parent catalog and be the owner of the parent schema, or be the owner of the -table and have the \strong{USE_CATALOG} privilege on the parent catalog and the -\strong{USE_SCHEMA} privilege on the parent schema. -} diff --git a/man/tablesExists.Rd b/man/tablesExists.Rd deleted file mode 100644 index 5d377678..00000000 --- a/man/tablesExists.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tables.R -\name{tablesExists} -\alias{tablesExists} -\title{Get boolean reflecting if table exists.} -\usage{ -tablesExists(client, full_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the table.} -} -\description{ -Gets if a table exists in the metastore for a specific catalog and schema. -The caller must satisfy one of the following requirements: * Be a metastore -admin * Be the owner of the parent catalog * Be the owner of the parent -schema and have the USE_CATALOG privilege on the parent catalog * Have the -\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} -privilege on the parent schema, and either be the table owner or have the -SELECT privilege on the table. * Have BROWSE privilege on the parent catalog -\itemize{ -\item Have BROWSE privilege on the parent schema. -} -} diff --git a/man/tablesGet.Rd b/man/tablesGet.Rd deleted file mode 100644 index 218e4b54..00000000 --- a/man/tablesGet.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tables.R -\name{tablesGet} -\alias{tablesGet} -\title{Get a table.} -\usage{ -tablesGet( - client, - full_name, - include_browse = NULL, - include_delta_metadata = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the table.} - -\item{include_browse}{Whether to include tables in the response for which the principal can only access selective metadata for.} - -\item{include_delta_metadata}{Whether delta metadata should be included in the response.} -} -\description{ -Gets a table from the metastore for a specific catalog and schema. 
The caller -must satisfy one of the following requirements: * Be a metastore admin * Be -the owner of the parent catalog * Be the owner of the parent schema and have -the USE_CATALOG privilege on the parent catalog * Have the \strong{USE_CATALOG} -privilege on the parent catalog and the \strong{USE_SCHEMA} privilege on the -parent schema, and either be the table owner or have the SELECT privilege on -the table. -} diff --git a/man/tablesList.Rd b/man/tablesList.Rd deleted file mode 100644 index 21c77150..00000000 --- a/man/tablesList.Rd +++ /dev/null @@ -1,48 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tables.R -\name{tablesList} -\alias{tablesList} -\title{List tables.} -\usage{ -tablesList( - client, - catalog_name, - schema_name, - include_browse = NULL, - include_delta_metadata = NULL, - max_results = NULL, - omit_columns = NULL, - omit_properties = NULL, - page_token = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{catalog_name}{Required. Name of parent catalog for tables of interest.} - -\item{schema_name}{Required. Parent schema of tables.} - -\item{include_browse}{Whether to include tables in the response for which the principal can only access selective metadata for.} - -\item{include_delta_metadata}{Whether delta metadata should be included in the response.} - -\item{max_results}{Maximum number of tables to return.} - -\item{omit_columns}{Whether to omit the columns of the table from the response or not.} - -\item{omit_properties}{Whether to omit the properties of the table from the response or not.} - -\item{page_token}{Opaque token to send for the next page of results (pagination).} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets an array of all tables for the current metastore under the parent -catalog and schema. The caller must be a metastore admin or an owner of (or -have the \strong{SELECT} privilege on) the table. For the latter case, the caller -must also be the owner or have the \strong{USE_CATALOG} privilege on the parent -catalog and the \strong{USE_SCHEMA} privilege on the parent schema. There is no -guarantee of a specific ordering of the elements in the array. -} diff --git a/man/tablesListSummaries.Rd b/man/tablesListSummaries.Rd deleted file mode 100644 index 70ab2d3f..00000000 --- a/man/tablesListSummaries.Rd +++ /dev/null @@ -1,47 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tables.R -\name{tablesListSummaries} -\alias{tablesListSummaries} -\title{List table summaries.} -\usage{ -tablesListSummaries( - client, - catalog_name, - max_results = NULL, - page_token = NULL, - schema_name_pattern = NULL, - table_name_pattern = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{catalog_name}{Required. Name of parent catalog for tables of interest.} - -\item{max_results}{Maximum number of summaries for tables to return.} - -\item{page_token}{Opaque pagination token to go to next page based on previous query.} - -\item{schema_name_pattern}{A sql LIKE pattern (\% and _) for schema names.} - -\item{table_name_pattern}{A sql LIKE pattern (\% and _) for table names.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets an array of summaries for tables for a schema and catalog within the -metastore. 
The table summaries returned are either: -} -\details{ -\itemize{ -\item summaries for tables (within the current metastore and parent catalog and -schema), when the user is a metastore admin, or: * summaries for tables and -schemas (within the current metastore and parent catalog) for which the user -has ownership or the \strong{SELECT} privilege on the table and ownership or -\strong{USE_SCHEMA} privilege on the schema, provided that the user also has -ownership or the \strong{USE_CATALOG} privilege on the parent catalog. -} - -There is no guarantee of a specific ordering of the elements in the array. -} diff --git a/man/tablesUpdate.Rd b/man/tablesUpdate.Rd deleted file mode 100644 index 04a3960b..00000000 --- a/man/tablesUpdate.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tables.R -\name{tablesUpdate} -\alias{tablesUpdate} -\title{Update a table owner.} -\usage{ -tablesUpdate(client, full_name, owner = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{full_name}{Required. Full name of the table.} - -\item{owner}{This field has no description yet.} -} -\description{ -Change the owner of the table. The caller must be the owner of the parent -catalog, have the \strong{USE_CATALOG} privilege on the parent catalog and be the -owner of the parent schema, or be the owner of the table and have the -\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} -privilege on the parent schema. -} diff --git a/man/test_model_registry_webhook.Rd b/man/test_model_registry_webhook.Rd new file mode 100644 index 00000000..5dad6cc8 --- /dev/null +++ b/man/test_model_registry_webhook.Rd @@ -0,0 +1,24 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{test_model_registry_webhook} +\alias{test_model_registry_webhook} +\alias{modelRegistryTestRegistryWebhook} +\title{Test a webhook.} +\usage{ +test_model_registry_webhook(client, id, event = NULL) + +modelRegistryTestRegistryWebhook(client, id, event = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Webhook ID.} + +\item{event}{If \code{event} is specified, the test trigger uses the specified event.} +} +\description{ +\strong{NOTE:} This endpoint is in Public Preview. +} +\details{ +Tests a registry webhook. +} diff --git a/man/tokenManagementCreateOboToken.Rd b/man/tokenManagementCreateOboToken.Rd deleted file mode 100644 index 5f042710..00000000 --- a/man/tokenManagementCreateOboToken.Rd +++ /dev/null @@ -1,25 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/token_management.R -\name{tokenManagementCreateOboToken} -\alias{tokenManagementCreateOboToken} -\title{Create on-behalf token.} -\usage{ -tokenManagementCreateOboToken( - client, - application_id, - comment = NULL, - lifetime_seconds = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{application_id}{Required. Application ID of the service principal.} - -\item{comment}{Comment that describes the purpose of the token.} - -\item{lifetime_seconds}{The number of seconds before the token expires.} -} -\description{ -Creates a token on behalf of a service principal. 
-} diff --git a/man/tokenManagementDelete.Rd b/man/tokenManagementDelete.Rd deleted file mode 100644 index ea80bc2b..00000000 --- a/man/tokenManagementDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/token_management.R -\name{tokenManagementDelete} -\alias{tokenManagementDelete} -\title{Delete a token.} -\usage{ -tokenManagementDelete(client, token_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{token_id}{Required. The ID of the token to get.} -} -\description{ -Deletes a token, specified by its ID. -} diff --git a/man/tokenManagementGet.Rd b/man/tokenManagementGet.Rd deleted file mode 100644 index 45be6f2d..00000000 --- a/man/tokenManagementGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/token_management.R -\name{tokenManagementGet} -\alias{tokenManagementGet} -\title{Get token info.} -\usage{ -tokenManagementGet(client, token_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{token_id}{Required. The ID of the token to get.} -} -\description{ -Gets information about a token, specified by its ID. -} diff --git a/man/tokenManagementGetPermissionLevels.Rd b/man/tokenManagementGetPermissionLevels.Rd deleted file mode 100644 index 4ed42b6e..00000000 --- a/man/tokenManagementGetPermissionLevels.Rd +++ /dev/null @@ -1,14 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/token_management.R -\name{tokenManagementGetPermissionLevels} -\alias{tokenManagementGetPermissionLevels} -\title{Get token permission levels.} -\usage{ -tokenManagementGetPermissionLevels(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Gets the permission levels that a user can have on an object. -} diff --git a/man/tokenManagementGetPermissions.Rd b/man/tokenManagementGetPermissions.Rd deleted file mode 100644 index c2fee2d0..00000000 --- a/man/tokenManagementGetPermissions.Rd +++ /dev/null @@ -1,15 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/token_management.R -\name{tokenManagementGetPermissions} -\alias{tokenManagementGetPermissions} -\title{Get token permissions.} -\usage{ -tokenManagementGetPermissions(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Gets the permissions of all tokens. Tokens can inherit permissions from their -root object. -} diff --git a/man/tokenManagementList.Rd b/man/tokenManagementList.Rd deleted file mode 100644 index 8fc836c7..00000000 --- a/man/tokenManagementList.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/token_management.R -\name{tokenManagementList} -\alias{tokenManagementList} -\title{List all tokens.} -\usage{ -tokenManagementList(client, created_by_id = NULL, created_by_username = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{created_by_id}{User ID of the user that created the token.} - -\item{created_by_username}{Username of the user that created the token.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Lists all tokens associated with the specified workspace or user. 
-} diff --git a/man/tokenManagementSetPermissions.Rd b/man/tokenManagementSetPermissions.Rd deleted file mode 100644 index fffdecd6..00000000 --- a/man/tokenManagementSetPermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/token_management.R -\name{tokenManagementSetPermissions} -\alias{tokenManagementSetPermissions} -\title{Set token permissions.} -\usage{ -tokenManagementSetPermissions(client, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets permissions on all tokens. Tokens can inherit permissions from their -root object. -} diff --git a/man/tokenManagementUpdatePermissions.Rd b/man/tokenManagementUpdatePermissions.Rd deleted file mode 100644 index b6fd0bb0..00000000 --- a/man/tokenManagementUpdatePermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/token_management.R -\name{tokenManagementUpdatePermissions} -\alias{tokenManagementUpdatePermissions} -\title{Update token permissions.} -\usage{ -tokenManagementUpdatePermissions(client, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Updates the permissions on all tokens. Tokens can inherit permissions from -their root object. -} diff --git a/man/tokensCreate.Rd b/man/tokensCreate.Rd deleted file mode 100644 index 265d8e4b..00000000 --- a/man/tokensCreate.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tokens.R -\name{tokensCreate} -\alias{tokensCreate} -\title{Create a user token.} -\usage{ -tokensCreate(client, comment = NULL, lifetime_seconds = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{comment}{Optional description to attach to the token.} - -\item{lifetime_seconds}{The lifetime of the token, in seconds.} -} -\description{ -Creates and returns a token for a user. If this call is made through token -authentication, it creates a token with the same client ID as the -authenticated token. If the user's token quota is exceeded, this call returns -an error \strong{QUOTA_EXCEEDED}. -} diff --git a/man/tokensDelete.Rd b/man/tokensDelete.Rd deleted file mode 100644 index 6d97c796..00000000 --- a/man/tokensDelete.Rd +++ /dev/null @@ -1,20 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tokens.R -\name{tokensDelete} -\alias{tokensDelete} -\title{Revoke token.} -\usage{ -tokensDelete(client, token_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{token_id}{Required. The ID of the token to be revoked.} -} -\description{ -Revokes an access token. -} -\details{ -If a token with the specified ID is not valid, this call returns an error -\strong{RESOURCE_DOES_NOT_EXIST}. -} diff --git a/man/tokensList.Rd b/man/tokensList.Rd deleted file mode 100644 index a989b9eb..00000000 --- a/man/tokensList.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/tokens.R -\name{tokensList} -\alias{tokensList} -\title{List tokens.} -\usage{ -tokensList(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\value{ -\code{data.frame} with all of the response pages. 
-} -\description{ -Lists all the valid tokens for a user-workspace pair. -} diff --git a/man/transfer_dbsql_permission_ownership.Rd b/man/transfer_dbsql_permission_ownership.Rd new file mode 100644 index 00000000..2f944c31 --- /dev/null +++ b/man/transfer_dbsql_permission_ownership.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dbsql_permissions.R +\name{transfer_dbsql_permission_ownership} +\alias{transfer_dbsql_permission_ownership} +\alias{dbsqlPermissionsTransferOwnership} +\title{Transfer object ownership.} +\usage{ +transfer_dbsql_permission_ownership( + client, + object_type, + object_id, + new_owner = NULL +) + +dbsqlPermissionsTransferOwnership( + client, + object_type, + object_id, + new_owner = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{object_type}{Required. The type of object on which to change ownership.} + +\item{object_id}{Required. The ID of the object on which to change ownership.} + +\item{new_owner}{Email address for the new owner, who must exist in the workspace.} +} +\description{ +Transfers ownership of a dashboard, query, or alert to an active user. +Requires an admin API key. +} diff --git a/man/transition_model_stage.Rd b/man/transition_model_stage.Rd new file mode 100644 index 00000000..21733ae8 --- /dev/null +++ b/man/transition_model_stage.Rd @@ -0,0 +1,46 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{transition_model_stage} +\alias{transition_model_stage} +\alias{modelRegistryTransitionStage} +\title{Transition a stage.} +\usage{ +transition_model_stage( + client, + name, + version, + stage, + archive_existing_versions, + comment = NULL +) + +modelRegistryTransitionStage( + client, + name, + version, + stage, + archive_existing_versions, + comment = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the model.} + +\item{version}{Required. Version of the model.} + +\item{stage}{Required. Target stage of the transition.} + +\item{archive_existing_versions}{Required. Specifies whether to archive all current model versions in the target stage.} + +\item{comment}{User-provided comment on the action.} +} +\description{ +Transition a model version's stage. This is a Databricks workspace version of +the MLflow endpoint that also accepts a comment associated with the +transition to be recorded. +} +\details{ +MLflow endpoint: https://www.mlflow.org/docs/latest/rest-api.html#transition-modelversion-stage +} diff --git a/man/trash_lakeview.Rd b/man/trash_lakeview.Rd new file mode 100644 index 00000000..4895b65f --- /dev/null +++ b/man/trash_lakeview.Rd @@ -0,0 +1,19 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/lakeview.R +\name{trash_lakeview} +\alias{trash_lakeview} +\alias{lakeviewTrash} +\title{Trash dashboard.} +\usage{ +trash_lakeview(client, dashboard_id) + +lakeviewTrash(client, dashboard_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{dashboard_id}{Required. UUID identifying the dashboard.} +} +\description{ +Trash a dashboard.
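For illustration, a minimal sketch of calling one of the new snake_case wrappers documented above; the dashboard UUID is hypothetical, and `client` is assumed to come from a configured `DatabricksClient()`:

```r
client <- DatabricksClient()

# Hypothetical dashboard UUID.
trash_lakeview(client, dashboard_id = "01ef2a3b-4c5d-6e7f-8a9b-0c1d2e3f4a5b")

# The retained camelCase alias resolves to the same function:
lakeviewTrash(client, dashboard_id = "01ef2a3b-4c5d-6e7f-8a9b-0c1d2e3f4a5b")
```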
+} diff --git a/man/unassign_metastore.Rd b/man/unassign_metastore.Rd new file mode 100644 index 00000000..737e53d4 --- /dev/null +++ b/man/unassign_metastore.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/metastores.R +\name{unassign_metastore} +\alias{unassign_metastore} +\alias{metastoresUnassign} +\title{Delete an assignment.} +\usage{ +unassign_metastore(client, workspace_id, metastore_id) + +metastoresUnassign(client, workspace_id, metastore_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{workspace_id}{Required. A workspace ID.} + +\item{metastore_id}{Required. Query for the ID of the metastore to delete.} +} +\description{ +Deletes a metastore assignment. The caller must be an account administrator. +} diff --git a/man/uninstall_cluster_library.Rd b/man/uninstall_cluster_library.Rd new file mode 100644 index 00000000..75cca3ee --- /dev/null +++ b/man/uninstall_cluster_library.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/libraries.R +\name{uninstall_cluster_library} +\alias{uninstall_cluster_library} +\alias{librariesUninstall} +\title{Uninstall libraries.} +\usage{ +uninstall_cluster_library(client, cluster_id, libraries) + +librariesUninstall(client, cluster_id, libraries) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. Unique identifier for the cluster on which to uninstall these libraries.} + +\item{libraries}{Required. The libraries to uninstall.} +} +\description{ +Set libraries to be uninstalled on a cluster. The libraries won't be +uninstalled until the cluster is restarted. Uninstalling libraries that are +not installed on the cluster will have no impact but is not an error. +} diff --git a/man/unpin_cluster.Rd b/man/unpin_cluster.Rd new file mode 100644 index 00000000..7823b8e3 --- /dev/null +++ b/man/unpin_cluster.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{unpin_cluster} +\alias{unpin_cluster} +\alias{clustersUnpin} +\title{Unpin cluster.} +\usage{ +unpin_cluster(client, cluster_id) + +clustersUnpin(client, cluster_id) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required.} +} +\description{ +Unpinning a cluster will allow the cluster to eventually be removed from the +ListClusters API. Unpinning a cluster that is not pinned will have no effect. +This API can only be called by workspace admins. +} diff --git a/man/update_account_access_control_proxy_rule_set.Rd b/man/update_account_access_control_proxy_rule_set.Rd new file mode 100644 index 00000000..e33dac35 --- /dev/null +++ b/man/update_account_access_control_proxy_rule_set.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/account_access_control_proxy.R +\name{update_account_access_control_proxy_rule_set} +\alias{update_account_access_control_proxy_rule_set} +\alias{accountAccessControlProxyUpdateRuleSet} +\title{Update a rule set.} +\usage{ +update_account_access_control_proxy_rule_set(client, name, rule_set) + +accountAccessControlProxyUpdateRuleSet(client, name, rule_set) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the rule set.} + +\item{rule_set}{Required. This field has no description yet.} +} +\description{ +Replace the rules of a rule set.
First, use a GET rule set request to read +the current version of the rule set before modifying it. This pattern helps +prevent conflicts between concurrent updates. +} diff --git a/man/update_alert.Rd b/man/update_alert.Rd new file mode 100644 index 00000000..56b3fb32 --- /dev/null +++ b/man/update_alert.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/alerts.R +\name{update_alert} +\alias{update_alert} +\alias{alertsUpdate} +\title{Update an alert.} +\usage{ +update_alert(client, alert_id, name, options, query_id, rearm = NULL) + +alertsUpdate(client, alert_id, name, options, query_id, rearm = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{alert_id}{Required. This field has no description yet.} + +\item{name}{Required. Name of the alert.} + +\item{options}{Required. Alert configuration options.} + +\item{query_id}{Required. Query ID.} + +\item{rearm}{Number of seconds after being triggered before the alert rearms itself and can be triggered again.} +} +\description{ +Updates an alert. +} diff --git a/man/update_artifact_allowlist.Rd b/man/update_artifact_allowlist.Rd new file mode 100644 index 00000000..a0424177 --- /dev/null +++ b/man/update_artifact_allowlist.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/artifact_allowlists.R +\name{update_artifact_allowlist} +\alias{update_artifact_allowlist} +\alias{artifactAllowlistsUpdate} +\title{Set an artifact allowlist.} +\usage{ +update_artifact_allowlist(client, artifact_type, artifact_matchers) + +artifactAllowlistsUpdate(client, artifact_type, artifact_matchers) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{artifact_type}{Required. The artifact type of the allowlist.} + +\item{artifact_matchers}{Required. A list of allowed artifact match patterns.} +} +\description{ +Set the artifact allowlist of a certain artifact type. The whole artifact +allowlist is replaced with the new allowlist. The caller must be a metastore +admin or have the \strong{MANAGE ALLOWLIST} privilege on the metastore. +} diff --git a/man/update_automatic_cluster.Rd b/man/update_automatic_cluster.Rd new file mode 100644 index 00000000..ad28e15c --- /dev/null +++ b/man/update_automatic_cluster.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/automatic_cluster_update.R +\name{update_automatic_cluster} +\alias{update_automatic_cluster} +\alias{automaticClusterUpdateUpdate} +\title{Update the automatic cluster update setting.} +\usage{ +update_automatic_cluster(client, allow_missing, setting, field_mask) + +automaticClusterUpdateUpdate(client, allow_missing, setting, field_mask) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{allow_missing}{Required. This should always be set to true for Settings API.} + +\item{setting}{Required. This field has no description yet.} + +\item{field_mask}{Required. Field mask is required to be passed into the PATCH request.} +} +\description{ +Updates the automatic cluster update setting for the workspace. A fresh etag +needs to be provided in \code{PATCH} requests (as part of the setting field). The +etag can be retrieved by making a \code{GET} request before the \code{PATCH} request. +If the setting is updated concurrently, \code{PATCH} fails with 409 and the +request must be retried by using the fresh etag in the 409 response. 
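A minimal sketch of the GET-then-PATCH etag handshake described above; the getter call, the field names inside `setting`, and the field mask string are assumptions for illustration, not verbatim API shapes:

```r
client <- DatabricksClient()

# Assumed: the companion getter returns the current setting with its etag.
current <- automaticClusterUpdateGet(client)

setting <- list(
  etag = current$etag,  # the fresh etag rides along inside the setting field
  automatic_cluster_update_workspace = list(enabled = TRUE)
)

update_automatic_cluster(client,
  allow_missing = TRUE,
  setting = setting,
  field_mask = "automatic_cluster_update_workspace.enabled"
)
# On a 409, re-read the etag from the 409 response and retry the PATCH.
```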
+} diff --git a/man/update_catalog.Rd b/man/update_catalog.Rd new file mode 100644 index 00000000..f5a6e12e --- /dev/null +++ b/man/update_catalog.Rd @@ -0,0 +1,51 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/catalogs.R +\name{update_catalog} +\alias{update_catalog} +\alias{catalogsUpdate} +\title{Update a catalog.} +\usage{ +update_catalog( + client, + name, + comment = NULL, + enable_predictive_optimization = NULL, + isolation_mode = NULL, + new_name = NULL, + owner = NULL, + properties = NULL +) + +catalogsUpdate( + client, + name, + comment = NULL, + enable_predictive_optimization = NULL, + isolation_mode = NULL, + new_name = NULL, + owner = NULL, + properties = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the catalog.} + +\item{comment}{User-provided free-form text description.} + +\item{enable_predictive_optimization}{Whether predictive optimization should be enabled for this object and objects under it.} + +\item{isolation_mode}{Whether the current securable is accessible from all workspaces or a specific set of workspaces.} + +\item{new_name}{New name for the catalog.} + +\item{owner}{Username of current owner of catalog.} + +\item{properties}{A map of key-value properties attached to the securable.} +} +\description{ +Updates the catalog that matches the supplied name. The caller must be either +the owner of the catalog, or a metastore admin (when changing the owner field +of the catalog). +} diff --git a/man/update_clean_room.Rd b/man/update_clean_room.Rd new file mode 100644 index 00000000..2db6af75 --- /dev/null +++ b/man/update_clean_room.Rd @@ -0,0 +1,52 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clean_rooms.R +\name{update_clean_room} +\alias{update_clean_room} +\alias{cleanRoomsUpdate} +\title{Update a clean room.} +\usage{ +update_clean_room( + client, + name, + catalog_updates = NULL, + comment = NULL, + owner = NULL +) + +cleanRoomsUpdate( + client, + name, + catalog_updates = NULL, + comment = NULL, + owner = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the clean room.} + +\item{catalog_updates}{Array of shared data object updates.} + +\item{comment}{User-provided free-form text description.} + +\item{owner}{Username of current owner of clean room.} +} +\description{ +Updates the clean room with the changes and data objects in the request. The +caller must be the owner of the clean room or a metastore admin. +} +\details{ +When the caller is a metastore admin, only the \strong{owner} field can be +updated. + +In the case that the clean room name is changed \strong{updateCleanRoom} requires +that the caller is both the clean room owner and a metastore admin. + +For each table that is added through this method, the clean room owner must +also have \strong{SELECT} privilege on the table. The privilege must be maintained +indefinitely for recipients to be able to access the table. Typically, you +should use a group as the clean room owner. + +Table removals through \strong{update} do not require additional privileges. 
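As a hedged example of the ownership guidance above (the names are hypothetical; a group is used as the new owner, as the details recommend):

```r
client <- DatabricksClient()

update_clean_room(client,
  name = "shared_analytics_room",
  owner = "clean-room-admins",           # a group, per the guidance above
  comment = "ownership moved to a group"
)
```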
+} diff --git a/man/update_cluster_permissions.Rd b/man/update_cluster_permissions.Rd new file mode 100644 index 00000000..33b08c0c --- /dev/null +++ b/man/update_cluster_permissions.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/clusters.R +\name{update_cluster_permissions} +\alias{update_cluster_permissions} +\alias{clustersUpdatePermissions} +\title{Update cluster permissions.} +\usage{ +update_cluster_permissions(client, cluster_id, access_control_list = NULL) + +clustersUpdatePermissions(client, cluster_id, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_id}{Required. The cluster for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Updates the permissions on a cluster. Clusters can inherit permissions from +their root object. +} diff --git a/man/update_cluster_policy_permissions.Rd b/man/update_cluster_policy_permissions.Rd new file mode 100644 index 00000000..ca03546c --- /dev/null +++ b/man/update_cluster_policy_permissions.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/cluster_policies.R +\name{update_cluster_policy_permissions} +\alias{update_cluster_policy_permissions} +\alias{clusterPoliciesUpdatePermissions} +\title{Update cluster policy permissions.} +\usage{ +update_cluster_policy_permissions( + client, + cluster_policy_id, + access_control_list = NULL +) + +clusterPoliciesUpdatePermissions( + client, + cluster_policy_id, + access_control_list = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{cluster_policy_id}{Required. The cluster policy for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Updates the permissions on a cluster policy. Cluster policies can inherit +permissions from their root object. +} diff --git a/man/update_connection.Rd b/man/update_connection.Rd new file mode 100644 index 00000000..0f24b5cc --- /dev/null +++ b/man/update_connection.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/connections.R +\name{update_connection} +\alias{update_connection} +\alias{connectionsUpdate} +\title{Update a connection.} +\usage{ +update_connection(client, name, options, new_name = NULL, owner = NULL) + +connectionsUpdate(client, name, options, new_name = NULL, owner = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the connection.} + +\item{options}{Required. A map of key-value properties attached to the securable.} + +\item{new_name}{New name for the connection.} + +\item{owner}{Username of current owner of the connection.} +} +\description{ +Updates the connection that matches the supplied name. +} diff --git a/man/update_csp_enablement.Rd b/man/update_csp_enablement.Rd new file mode 100644 index 00000000..a868b5f8 --- /dev/null +++ b/man/update_csp_enablement.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/csp_enablement.R +\name{update_csp_enablement} +\alias{update_csp_enablement} +\alias{cspEnablementUpdate} +\title{Update the compliance security profile setting.} +\usage{ +update_csp_enablement(client, allow_missing, setting, field_mask) + +cspEnablementUpdate(client, allow_missing, setting, field_mask) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{allow_missing}{Required. This should always be set to true for Settings API.} + +\item{setting}{Required. This field has no description yet.} + +\item{field_mask}{Required. Field mask is required to be passed into the PATCH request.} +} +\description{ +Updates the compliance security profile setting for the workspace. A fresh +etag needs to be provided in \code{PATCH} requests (as part of the setting field). +The etag can be retrieved by making a \code{GET} request before the \code{PATCH} +request. If the setting is updated concurrently, \code{PATCH} fails with 409 and +the request must be retried by using the fresh etag in the 409 response. +} diff --git a/man/update_dashboard.Rd b/man/update_dashboard.Rd new file mode 100644 index 00000000..7d9e9085 --- /dev/null +++ b/man/update_dashboard.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dashboards.R +\name{update_dashboard} +\alias{update_dashboard} +\alias{dashboardsUpdate} +\title{Change a dashboard definition.} +\usage{ +update_dashboard(client, dashboard_id, name = NULL, run_as_role = NULL) + +dashboardsUpdate(client, dashboard_id, name = NULL, run_as_role = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{dashboard_id}{Required. This field has no description yet.} + +\item{name}{The title of this dashboard that appears in list views and at the top of the dashboard page.} + +\item{run_as_role}{Sets the \strong{Run as} role for the object.} +} +\description{ +Modify this dashboard definition. This operation only affects attributes of +the dashboard object. It does not add, modify, or remove widgets. +} +\details{ +\strong{Note}: You cannot undo this operation. +} diff --git a/man/update_dashboard_widget.Rd b/man/update_dashboard_widget.Rd new file mode 100644 index 00000000..5ba12a6a --- /dev/null +++ b/man/update_dashboard_widget.Rd @@ -0,0 +1,45 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/dashboard_widgets.R +\name{update_dashboard_widget} +\alias{update_dashboard_widget} +\alias{dashboardWidgetsUpdate} +\title{Update existing widget.} +\usage{ +update_dashboard_widget( + client, + id, + dashboard_id, + options, + width, + text = NULL, + visualization_id = NULL +) + +dashboardWidgetsUpdate( + client, + id, + dashboard_id, + options, + width, + text = NULL, + visualization_id = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Widget ID returned by :method:dashboardwidgets/create.} + +\item{dashboard_id}{Required. Dashboard ID returned by :method:dashboards/create.} + +\item{options}{Required. This field has no description yet.} + +\item{width}{Required. Width of a widget.} + +\item{text}{If this is a textbox widget, the application displays this text.} + +\item{visualization_id}{Query Visualization ID returned by :method:queryvisualizations/create.} +} +\description{ +Update existing widget.
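A sketch of updating a textbox widget via the wrapper above; the IDs are hypothetical and the shape of `options` is an assumption, since the field is undocumented here:

```r
client <- DatabricksClient()

update_dashboard_widget(client,
  id = "12345",                 # from a prior dashboardwidgets/create call
  dashboard_id = "abcd-ef01",   # from a prior dashboards/create call
  options = list(position = list(col = 0, row = 0, sizeX = 3, sizeY = 4)),
  width = 3,
  text = "Refreshed nightly at 02:00 UTC"
)
```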
+} diff --git a/man/update_default_namespace.Rd b/man/update_default_namespace.Rd new file mode 100644 index 00000000..45898b72 --- /dev/null +++ b/man/update_default_namespace.Rd @@ -0,0 +1,29 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/default_namespace.R +\name{update_default_namespace} +\alias{update_default_namespace} +\alias{defaultNamespaceUpdate} +\title{Update the default namespace setting.} +\usage{ +update_default_namespace(client, allow_missing, setting, field_mask) + +defaultNamespaceUpdate(client, allow_missing, setting, field_mask) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{allow_missing}{Required. This should always be set to true for Settings API.} + +\item{setting}{Required. This represents the setting configuration for the default namespace in the Databricks workspace.} + +\item{field_mask}{Required. Field mask is required to be passed into the PATCH request.} +} +\description{ +Updates the default namespace setting for the workspace. A fresh etag needs +to be provided in \code{PATCH} requests (as part of the setting field). The etag +can be retrieved by making a \code{GET} request before the \code{PATCH} request. Note +that if the setting does not exist, \code{GET} returns a NOT_FOUND error and the +etag is present in the error response, which should be set in the \code{PATCH} +request. If the setting is updated concurrently, \code{PATCH} fails with 409 and +the request must be retried by using the fresh etag in the 409 response. +} diff --git a/man/update_esm_enablement.Rd b/man/update_esm_enablement.Rd new file mode 100644 index 00000000..3159a451 --- /dev/null +++ b/man/update_esm_enablement.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/esm_enablement.R +\name{update_esm_enablement} +\alias{update_esm_enablement} +\alias{esmEnablementUpdate} +\title{Update the enhanced security monitoring setting.} +\usage{ +update_esm_enablement(client, allow_missing, setting, field_mask) + +esmEnablementUpdate(client, allow_missing, setting, field_mask) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{allow_missing}{Required. This should always be set to true for Settings API.} + +\item{setting}{Required. This field has no description yet.} + +\item{field_mask}{Required. Field mask is required to be passed into the PATCH request.} +} +\description{ +Updates the enhanced security monitoring setting for the workspace. A fresh +etag needs to be provided in \code{PATCH} requests (as part of the setting field). +The etag can be retrieved by making a \code{GET} request before the \code{PATCH} +request. If the setting is updated concurrently, \code{PATCH} fails with 409 and +the request must be retried by using the fresh etag in the 409 response. +} diff --git a/man/update_experiment.Rd b/man/update_experiment.Rd new file mode 100644 index 00000000..01ed1eab --- /dev/null +++ b/man/update_experiment.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{update_experiment} +\alias{update_experiment} +\alias{experimentsUpdateExperiment} +\title{Update an experiment.} +\usage{ +update_experiment(client, experiment_id, new_name = NULL) + +experimentsUpdateExperiment(client, experiment_id, new_name = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{experiment_id}{Required. 
ID of the associated experiment.} + +\item{new_name}{If provided, the experiment's name is changed to the new name.} +} +\description{ +Updates experiment metadata. +} diff --git a/man/update_experiment_permissions.Rd b/man/update_experiment_permissions.Rd new file mode 100644 index 00000000..c0632068 --- /dev/null +++ b/man/update_experiment_permissions.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{update_experiment_permissions} +\alias{update_experiment_permissions} +\alias{experimentsUpdatePermissions} +\title{Update experiment permissions.} +\usage{ +update_experiment_permissions( + client, + experiment_id, + access_control_list = NULL +) + +experimentsUpdatePermissions(client, experiment_id, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{experiment_id}{Required. The experiment for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Updates the permissions on an experiment. Experiments can inherit permissions +from their root object. +} diff --git a/man/update_experiment_run.Rd b/man/update_experiment_run.Rd new file mode 100644 index 00000000..0c821e05 --- /dev/null +++ b/man/update_experiment_run.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/experiments.R +\name{update_experiment_run} +\alias{update_experiment_run} +\alias{experimentsUpdateRun} +\title{Update a run.} +\usage{ +update_experiment_run( + client, + end_time = NULL, + run_id = NULL, + run_uuid = NULL, + status = NULL +) + +experimentsUpdateRun( + client, + end_time = NULL, + run_id = NULL, + run_uuid = NULL, + status = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{end_time}{Unix timestamp in milliseconds of when the run ended.} + +\item{run_id}{ID of the run to update.} + +\item{run_uuid}{Deprecated, use run_id instead. ID of the run to update.} + +\item{status}{Updated status of the run.} +} +\description{ +Updates run metadata. +} diff --git a/man/update_external_location.Rd b/man/update_external_location.Rd new file mode 100644 index 00000000..d338f380 --- /dev/null +++ b/man/update_external_location.Rd @@ -0,0 +1,67 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/external_locations.R +\name{update_external_location} +\alias{update_external_location} +\alias{externalLocationsUpdate} +\title{Update an external location.} +\usage{ +update_external_location( + client, + name, + access_point = NULL, + comment = NULL, + credential_name = NULL, + encryption_details = NULL, + force = NULL, + new_name = NULL, + owner = NULL, + read_only = NULL, + skip_validation = NULL, + url = NULL +) + +externalLocationsUpdate( + client, + name, + access_point = NULL, + comment = NULL, + credential_name = NULL, + encryption_details = NULL, + force = NULL, + new_name = NULL, + owner = NULL, + read_only = NULL, + skip_validation = NULL, + url = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. 
Name of the external location.} + +\item{access_point}{The AWS access point to use when accessing S3 for this external location.} + +\item{comment}{User-provided free-form text description.} + +\item{credential_name}{Name of the storage credential used with this location.} + +\item{encryption_details}{Encryption options that apply to clients connecting to cloud storage.} + +\item{force}{Force update even if changing url invalidates dependent external tables or mounts.} + +\item{new_name}{New name for the external location.} + +\item{owner}{The owner of the external location.} + +\item{read_only}{Indicates whether the external location is read-only.} + +\item{skip_validation}{Skips validation of the storage credential associated with the external location.} + +\item{url}{Path URL of the external location.} +} +\description{ +Updates an external location in the metastore. The caller must be the owner +of the external location, or be a metastore admin. In the second case, the +admin can only update the name of the external location. +} diff --git a/man/update_function.Rd b/man/update_function.Rd new file mode 100644 index 00000000..56db43f1 --- /dev/null +++ b/man/update_function.Rd @@ -0,0 +1,28 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/functions.R +\name{update_function} +\alias{update_function} +\alias{functionsUpdate} +\title{Update a function.} +\usage{ +update_function(client, name, owner = NULL) + +functionsUpdate(client, name, owner = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The fully-qualified name of the function (of the form \strong{catalog_name}.\strong{schema_name}.\strong{function_name}).} + +\item{owner}{Username of current owner of function.} +} +\description{ +Updates the function that matches the supplied name. Only the owner of the +function can be updated. If the user is not a metastore admin, the user must +be a member of the group that is the new function owner. The caller must also +satisfy one of the following conditions: - Is a metastore +admin - Is the owner of the function's parent catalog - Is the owner of the +function's parent schema and has the \strong{USE_CATALOG} privilege on its parent +catalog - Is the owner of the function itself and has the \strong{USE_CATALOG} +privilege on its parent catalog as well as the \strong{USE_SCHEMA} privilege on +the function's parent schema. +} diff --git a/man/update_git_credential.Rd b/man/update_git_credential.Rd new file mode 100644 index 00000000..fd426333 --- /dev/null +++ b/man/update_git_credential.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/git_credentials.R +\name{update_git_credential} +\alias{update_git_credential} +\alias{gitCredentialsUpdate} +\title{Update a credential.} +\usage{ +update_git_credential( + client, + credential_id, + git_provider = NULL, + git_username = NULL, + personal_access_token = NULL +) + +gitCredentialsUpdate( + client, + credential_id, + git_provider = NULL, + git_username = NULL, + personal_access_token = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{credential_id}{Required. The ID for the corresponding credential to access.} + +\item{git_provider}{Git provider.} + +\item{git_username}{Git username.} + +\item{personal_access_token}{The personal access token used to authenticate to the corresponding Git provider.} +} +\description{ +Updates the specified Git credential.
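For instance, rotating a stored personal access token might look like this sketch (the credential ID is hypothetical, and the `gitHub` provider string is an assumption drawn from the Git providers the service supports):

```r
client <- DatabricksClient()

update_git_credential(client,
  credential_id = "820021492394752",
  git_provider = "gitHub",
  git_username = "octocat",
  personal_access_token = Sys.getenv("GITHUB_PAT")  # avoid hard-coding secrets
)
```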
+} diff --git a/man/update_global_init_script.Rd b/man/update_global_init_script.Rd new file mode 100644 index 00000000..017206f0 --- /dev/null +++ b/man/update_global_init_script.Rd @@ -0,0 +1,42 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/global_init_scripts.R +\name{update_global_init_script} +\alias{update_global_init_script} +\alias{globalInitScriptsUpdate} +\title{Update init script.} +\usage{ +update_global_init_script( + client, + script_id, + name, + script, + enabled = NULL, + position = NULL +) + +globalInitScriptsUpdate( + client, + script_id, + name, + script, + enabled = NULL, + position = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{script_id}{Required. The ID of the global init script.} + +\item{name}{Required. The name of the script.} + +\item{script}{Required. The Base64-encoded content of the script.} + +\item{enabled}{Specifies whether the script is enabled.} + +\item{position}{The position of a script, where 0 represents the first script to run, 1 is the second script to run, in ascending order.} +} +\description{ +Updates a global init script, specifying only the fields to change. All +fields are optional. Unspecified fields retain their current value. +} diff --git a/man/update_grant.Rd b/man/update_grant.Rd new file mode 100644 index 00000000..21a25cc0 --- /dev/null +++ b/man/update_grant.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/grants.R +\name{update_grant} +\alias{update_grant} +\alias{grantsUpdate} +\title{Update permissions.} +\usage{ +update_grant(client, securable_type, full_name, changes = NULL) + +grantsUpdate(client, securable_type, full_name, changes = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{securable_type}{Required. Type of securable.} + +\item{full_name}{Required. Full name of securable.} + +\item{changes}{Array of permissions change objects.} +} +\description{ +Updates the permissions for a securable. +} diff --git a/man/update_group.Rd b/man/update_group.Rd new file mode 100644 index 00000000..66f4ef8b --- /dev/null +++ b/man/update_group.Rd @@ -0,0 +1,57 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/groups.R +\name{update_group} +\alias{update_group} +\alias{groupsUpdate} +\title{Replace a group.} +\usage{ +update_group( + client, + id, + display_name = NULL, + entitlements = NULL, + external_id = NULL, + groups = NULL, + members = NULL, + meta = NULL, + roles = NULL, + schemas = NULL +) + +groupsUpdate( + client, + id, + display_name = NULL, + entitlements = NULL, + external_id = NULL, + groups = NULL, + members = NULL, + meta = NULL, + roles = NULL, + schemas = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Databricks group ID.} + +\item{display_name}{String that represents a human-readable group name.} + +\item{entitlements}{Entitlements assigned to the group.} + +\item{external_id}{This field has no description yet.} + +\item{groups}{This field has no description yet.} + +\item{members}{This field has no description yet.} + +\item{meta}{Container for the group identifier.} + +\item{roles}{Corresponds to AWS instance profile/arn role.} + +\item{schemas}{The schema of the group.} +} +\description{ +Updates the details of a group by replacing the entire group entity. 
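Because this endpoint replaces the entire group entity (PUT semantics) rather than patching it, a call should carry the full desired state; in this sketch the IDs and the SCIM-style member entries are assumptions:

```r
client <- DatabricksClient()

update_group(client,
  id = "123456789",
  display_name = "data-engineers",
  # Omitted fields are replaced, not preserved, so list every member to keep.
  members = list(
    list(value = "111111"),
    list(value = "222222")
  )
)
```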
+} diff --git a/man/update_instance_pool_permissions.Rd b/man/update_instance_pool_permissions.Rd new file mode 100644 index 00000000..8b6d8827 --- /dev/null +++ b/man/update_instance_pool_permissions.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/instance_pools.R +\name{update_instance_pool_permissions} +\alias{update_instance_pool_permissions} +\alias{instancePoolsUpdatePermissions} +\title{Update instance pool permissions.} +\usage{ +update_instance_pool_permissions( + client, + instance_pool_id, + access_control_list = NULL +) + +instancePoolsUpdatePermissions( + client, + instance_pool_id, + access_control_list = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{instance_pool_id}{Required. The instance pool for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Updates the permissions on an instance pool. Instance pools can inherit +permissions from their root object. +} diff --git a/man/update_ip_access_list.Rd b/man/update_ip_access_list.Rd new file mode 100644 index 00000000..1bf18030 --- /dev/null +++ b/man/update_ip_access_list.Rd @@ -0,0 +1,59 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/ip_access_lists.R +\name{update_ip_access_list} +\alias{update_ip_access_list} +\alias{ipAccessListsUpdate} +\title{Update access list.} +\usage{ +update_ip_access_list( + client, + ip_access_list_id, + enabled = NULL, + ip_addresses = NULL, + label = NULL, + list_type = NULL +) + +ipAccessListsUpdate( + client, + ip_access_list_id, + enabled = NULL, + ip_addresses = NULL, + label = NULL, + list_type = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{ip_access_list_id}{Required. The ID for the corresponding IP access list.} + +\item{enabled}{Specifies whether this IP access list is enabled.} + +\item{ip_addresses}{This field has no description yet.} + +\item{label}{Label for the IP access list.} + +\item{list_type}{Type of IP access list.} +} +\description{ +Updates an existing IP access list, specified by its ID. +} +\details{ +A list can include allow lists and block lists. See the top of this file for +a description of how the server treats allow lists and block lists at run +time. + +When updating an IP access list: +\itemize{ +\item For all allow lists and block lists combined, the API supports a maximum of +1000 IP/CIDR values, where one CIDR counts as a single value. Attempts to +exceed that number return error 400 with \code{error_code} value \code{QUOTA_EXCEEDED}. +\item If the updated list would block the calling user's current IP, error 400 is +returned with \code{error_code} value \code{INVALID_STATE}. +} + +It can take a few minutes for the changes to take effect. Note that your +resulting IP access list has no effect until you enable the feature. See +:method:workspaceconf/setStatus. +} diff --git a/man/update_job.Rd b/man/update_job.Rd new file mode 100644 index 00000000..cd03ceba --- /dev/null +++ b/man/update_job.Rd @@ -0,0 +1,23 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{update_job} +\alias{update_job} +\alias{jobsUpdate} +\title{Update job settings partially.} +\usage{ +update_job(client, job_id, fields_to_remove = NULL, new_settings = NULL) + +jobsUpdate(client, job_id, fields_to_remove = NULL, new_settings = NULL) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{job_id}{Required. The canonical identifier of the job to update.} + +\item{fields_to_remove}{Remove top-level fields in the job settings.} + +\item{new_settings}{The new settings for the job.} +} +\description{ +Add, update, or remove specific settings of an existing job. Use the \href{:method:jobs/reset}{\emph{Reset} endpoint} to overwrite all job settings. +} diff --git a/man/update_job_permissions.Rd b/man/update_job_permissions.Rd new file mode 100644 index 00000000..848b6ac6 --- /dev/null +++ b/man/update_job_permissions.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/jobs.R +\name{update_job_permissions} +\alias{update_job_permissions} +\alias{jobsUpdatePermissions} +\title{Update job permissions.} +\usage{ +update_job_permissions(client, job_id, access_control_list = NULL) + +jobsUpdatePermissions(client, job_id, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{job_id}{Required. The job for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Updates the permissions on a job. Jobs can inherit permissions from their +root object. +} diff --git a/man/update_lakehouse_monitor.Rd b/man/update_lakehouse_monitor.Rd new file mode 100644 index 00000000..bc0737a5 --- /dev/null +++ b/man/update_lakehouse_monitor.Rd @@ -0,0 +1,78 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/lakehouse_monitors.R +\name{update_lakehouse_monitor} +\alias{update_lakehouse_monitor} +\alias{lakehouseMonitorsUpdate} +\title{Update a table monitor.} +\usage{ +update_lakehouse_monitor( + client, + full_name, + output_schema_name, + baseline_table_name = NULL, + custom_metrics = NULL, + data_classification_config = NULL, + inference_log = NULL, + notifications = NULL, + schedule = NULL, + slicing_exprs = NULL, + snapshot = NULL, + time_series = NULL +) + +lakehouseMonitorsUpdate( + client, + full_name, + output_schema_name, + baseline_table_name = NULL, + custom_metrics = NULL, + data_classification_config = NULL, + inference_log = NULL, + notifications = NULL, + schedule = NULL, + slicing_exprs = NULL, + snapshot = NULL, + time_series = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the table.} + +\item{output_schema_name}{Required. Schema where output metric tables are created.} + +\item{baseline_table_name}{Name of the baseline table from which drift metrics are computed.} + +\item{custom_metrics}{Custom metrics to compute on the monitored table.} + +\item{data_classification_config}{The data classification config for the monitor.} + +\item{inference_log}{Configuration for monitoring inference logs.} + +\item{notifications}{The notification settings for the monitor.} + +\item{schedule}{The schedule for automatically updating and refreshing metric tables.} + +\item{slicing_exprs}{List of column expressions to slice data with for targeted analysis.} + +\item{snapshot}{Configuration for monitoring snapshot tables.} + +\item{time_series}{Configuration for monitoring time series tables.} +} +\description{ +Updates a monitor for the specified table. +} +\details{ +The caller must either: 1. be an owner of the table's parent catalog 2. have +\strong{USE_CATALOG} on the table's parent catalog and be an owner of the table's +parent schema 3.
have the following permissions: - \strong{USE_CATALOG} on the +table's parent catalog - \strong{USE_SCHEMA} on the table's parent schema - be an +owner of the table. + +Additionally, the call must be made from the workspace where the monitor was +created, and the caller must be the original creator of the monitor. + +Certain configuration fields, such as output asset identifiers, cannot be +updated. +} diff --git a/man/update_lakeview.Rd b/man/update_lakeview.Rd new file mode 100644 index 00000000..a61704ef --- /dev/null +++ b/man/update_lakeview.Rd @@ -0,0 +1,41 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/lakeview.R +\name{update_lakeview} +\alias{update_lakeview} +\alias{lakeviewUpdate} +\title{Update dashboard.} +\usage{ +update_lakeview( + client, + dashboard_id, + display_name = NULL, + etag = NULL, + serialized_dashboard = NULL, + warehouse_id = NULL +) + +lakeviewUpdate( + client, + dashboard_id, + display_name = NULL, + etag = NULL, + serialized_dashboard = NULL, + warehouse_id = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{dashboard_id}{Required. UUID identifying the dashboard.} + +\item{display_name}{The display name of the dashboard.} + +\item{etag}{The etag for the dashboard.} + +\item{serialized_dashboard}{The contents of the dashboard in serialized string form.} + +\item{warehouse_id}{The warehouse ID used to run the dashboard.} +} +\description{ +Update a draft dashboard. +} diff --git a/man/update_metastore.Rd b/man/update_metastore.Rd new file mode 100644 index 00000000..f8aab8d0 --- /dev/null +++ b/man/update_metastore.Rd @@ -0,0 +1,55 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/metastores.R +\name{update_metastore} +\alias{update_metastore} +\alias{metastoresUpdate} +\title{Update a metastore.} +\usage{ +update_metastore( + client, + id, + delta_sharing_organization_name = NULL, + delta_sharing_recipient_token_lifetime_in_seconds = NULL, + delta_sharing_scope = NULL, + new_name = NULL, + owner = NULL, + privilege_model_version = NULL, + storage_root_credential_id = NULL +) + +metastoresUpdate( + client, + id, + delta_sharing_organization_name = NULL, + delta_sharing_recipient_token_lifetime_in_seconds = NULL, + delta_sharing_scope = NULL, + new_name = NULL, + owner = NULL, + privilege_model_version = NULL, + storage_root_credential_id = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Unique ID of the metastore.} + +\item{delta_sharing_organization_name}{The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name.} + +\item{delta_sharing_recipient_token_lifetime_in_seconds}{The lifetime of delta sharing recipient token in seconds.} + +\item{delta_sharing_scope}{The scope of Delta Sharing enabled for the metastore.} + +\item{new_name}{New name for the metastore.} + +\item{owner}{The owner of the metastore.} + +\item{privilege_model_version}{Privilege model version of the metastore, of the form \code{major.minor} (e.g., \code{1.0}).} + +\item{storage_root_credential_id}{UUID of storage credential to access the metastore storage_root.} +} +\description{ +Updates information for a specific metastore. The caller must be a metastore +admin. If the \strong{owner} field is set to the empty string (\strong{''}), the +ownership is updated to the System User. 
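A short sketch of the owner-reset behavior described above (the metastore UUID is hypothetical):

```r
client <- DatabricksClient()

# Setting owner to the empty string hands ownership to the System User.
update_metastore(client,
  id = "11111111-2222-3333-4444-555555555555",
  owner = ""
)
```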
+} diff --git a/man/update_metastore_assignment.Rd b/man/update_metastore_assignment.Rd new file mode 100644 index 00000000..a0e27205 --- /dev/null +++ b/man/update_metastore_assignment.Rd @@ -0,0 +1,37 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/metastores.R +\name{update_metastore_assignment} +\alias{update_metastore_assignment} +\alias{metastoresUpdateAssignment} +\title{Update an assignment.} +\usage{ +update_metastore_assignment( + client, + workspace_id, + default_catalog_name = NULL, + metastore_id = NULL +) + +metastoresUpdateAssignment( + client, + workspace_id, + default_catalog_name = NULL, + metastore_id = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{workspace_id}{Required. A workspace ID.} + +\item{default_catalog_name}{The name of the default catalog for the metastore.} + +\item{metastore_id}{The unique ID of the metastore.} +} +\description{ +Updates a metastore assignment. This operation can be used to update +\strong{metastore_id} or \strong{default_catalog_name} for a specified Workspace, if +the Workspace is already assigned a metastore. The caller must be an account +admin to update \strong{metastore_id}; otherwise, the caller can be a Workspace +admin. +} diff --git a/man/update_model.Rd b/man/update_model.Rd new file mode 100644 index 00000000..14398d53 --- /dev/null +++ b/man/update_model.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{update_model} +\alias{update_model} +\alias{modelRegistryUpdateModel} +\title{Update model.} +\usage{ +update_model(client, name, description = NULL) + +modelRegistryUpdateModel(client, name, description = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Registered model unique name identifier.} + +\item{description}{If provided, updates the description for this \code{registered_model}.} +} +\description{ +Updates a registered model. +} diff --git a/man/update_model_comment.Rd b/man/update_model_comment.Rd new file mode 100644 index 00000000..76f073c8 --- /dev/null +++ b/man/update_model_comment.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{update_model_comment} +\alias{update_model_comment} +\alias{modelRegistryUpdateComment} +\title{Update a comment.} +\usage{ +update_model_comment(client, id, comment) + +modelRegistryUpdateComment(client, id, comment) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Unique identifier of an activity.} + +\item{comment}{Required. User-provided comment on the action.} +} +\description{ +Post an edit to a comment on a model version. +} diff --git a/man/update_model_permissions.Rd b/man/update_model_permissions.Rd new file mode 100644 index 00000000..86ca2fa1 --- /dev/null +++ b/man/update_model_permissions.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{update_model_permissions} +\alias{update_model_permissions} +\alias{modelRegistryUpdatePermissions} +\title{Update registered model permissions.} +\usage{ +update_model_permissions( + client, + registered_model_id, + access_control_list = NULL +) + +modelRegistryUpdatePermissions( + client, + registered_model_id, + access_control_list = NULL +) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{registered_model_id}{Required. The registered model for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Updates the permissions on a registered model. Registered models can inherit +permissions from their root object. +} diff --git a/man/update_model_version.Rd b/man/update_model_version.Rd new file mode 100644 index 00000000..02d736b1 --- /dev/null +++ b/man/update_model_version.Rd @@ -0,0 +1,42 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R, R/model_versions.R +\name{update_model_version} +\alias{update_model_version} +\alias{modelRegistryUpdateModelVersion} +\alias{modelVersionsUpdate} +\title{Update model version.} +\usage{ +update_model_version(client, full_name, version, comment = NULL) + +modelRegistryUpdateModelVersion(client, name, version, description = NULL) + +modelVersionsUpdate(client, full_name, version, comment = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. The three-level (fully qualified) name of the model version.} + +\item{version}{Required. The integer version number of the model version.} + +\item{comment}{The comment attached to the model version.} + +\item{name}{Required. Name of the registered model.} + +\item{description}{If provided, updates the description for this \code{registered_model}.} +} +\description{ +Updates the model version. + +Updates the specified model version. +} +\details{ +The caller must be a metastore admin or an owner of the parent registered +model. For the latter case, the caller must also be the owner or have the +\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} +privilege on the parent schema. + +Currently only the comment of the model version can be updated. +} diff --git a/man/update_model_webhook.Rd b/man/update_model_webhook.Rd new file mode 100644 index 00000000..ca59790e --- /dev/null +++ b/man/update_model_webhook.Rd @@ -0,0 +1,48 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/model_registry.R +\name{update_model_webhook} +\alias{update_model_webhook} +\alias{modelRegistryUpdateWebhook} +\title{Update a webhook.} +\usage{ +update_model_webhook( + client, + id, + description = NULL, + events = NULL, + http_url_spec = NULL, + job_spec = NULL, + status = NULL +) + +modelRegistryUpdateWebhook( + client, + id, + description = NULL, + events = NULL, + http_url_spec = NULL, + job_spec = NULL, + status = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Required. Webhook ID.} + +\item{description}{User-specified description for the webhook.} + +\item{events}{Events that can trigger a registry webhook: * \code{MODEL_VERSION_CREATED}: A new model version was created for the associated model.} + +\item{http_url_spec}{This field has no description yet.} + +\item{job_spec}{This field has no description yet.} + +\item{status}{Enable or disable triggering the webhook, or put the webhook into test mode.} +} +\description{ +\strong{NOTE:} This endpoint is in Public Preview. +} +\details{ +Updates a registry webhook.
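As a hedged illustration (the webhook ID is hypothetical, and the `DISABLED` status string is an assumption from the underlying REST API rather than this file):

```r
client <- DatabricksClient()

update_model_webhook(client,
  id = "2c3e4f5a",
  status = "DISABLED",
  description = "paused while the downstream job is migrated"
)
```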
+} diff --git a/man/update_notebook_permissions.Rd b/man/update_notebook_permissions.Rd new file mode 100644 index 00000000..05ed7211 --- /dev/null +++ b/man/update_notebook_permissions.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace.R +\name{update_notebook_permissions} +\alias{update_notebook_permissions} +\alias{workspaceUpdatePermissions} +\title{Update workspace object permissions.} +\usage{ +update_notebook_permissions( + client, + workspace_object_type, + workspace_object_id, + access_control_list = NULL +) + +workspaceUpdatePermissions( + client, + workspace_object_type, + workspace_object_id, + access_control_list = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{workspace_object_type}{Required. The workspace object type for which to get or manage permissions.} + +\item{workspace_object_id}{Required. The workspace object for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Updates the permissions on a workspace object. Workspace objects can inherit +permissions from their parent objects or root object. +} diff --git a/man/update_permission.Rd b/man/update_permission.Rd new file mode 100644 index 00000000..42869be1 --- /dev/null +++ b/man/update_permission.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/permissions.R +\name{update_permission} +\alias{update_permission} +\alias{permissionsUpdate} +\title{Update object permissions.} +\usage{ +update_permission( + client, + request_object_type, + request_object_id, + access_control_list = NULL +) + +permissionsUpdate( + client, + request_object_type, + request_object_id, + access_control_list = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{request_object_type}{Required. The type of the request object.} + +\item{request_object_id}{Required. The id of the request object.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Updates the permissions on an object. Objects can inherit permissions from +their parent objects or root object. +} diff --git a/man/update_pipeline.Rd b/man/update_pipeline.Rd new file mode 100644 index 00000000..d56019ee --- /dev/null +++ b/man/update_pipeline.Rd @@ -0,0 +1,101 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{update_pipeline} +\alias{update_pipeline} +\alias{pipelinesUpdate} +\title{Edit a pipeline.} +\usage{ +update_pipeline( + client, + pipeline_id, + allow_duplicate_names = NULL, + catalog = NULL, + channel = NULL, + clusters = NULL, + configuration = NULL, + continuous = NULL, + development = NULL, + edition = NULL, + expected_last_modified = NULL, + filters = NULL, + id = NULL, + libraries = NULL, + name = NULL, + notifications = NULL, + photon = NULL, + serverless = NULL, + storage = NULL, + target = NULL, + trigger = NULL +) + +pipelinesUpdate( + client, + pipeline_id, + allow_duplicate_names = NULL, + catalog = NULL, + channel = NULL, + clusters = NULL, + configuration = NULL, + continuous = NULL, + development = NULL, + edition = NULL, + expected_last_modified = NULL, + filters = NULL, + id = NULL, + libraries = NULL, + name = NULL, + notifications = NULL, + photon = NULL, + serverless = NULL, + storage = NULL, + target = NULL, + trigger = NULL +) +} +\arguments{ +\item{client}{Required. 
Instance of DatabricksClient()} + +\item{pipeline_id}{Unique identifier for this pipeline.} + +\item{allow_duplicate_names}{If false, deployment will fail if the name has changed and conflicts with the name of another pipeline.} + +\item{catalog}{A catalog in Unity Catalog to publish data from this pipeline to.} + +\item{channel}{DLT Release Channel that specifies which version to use.} + +\item{clusters}{Cluster settings for this pipeline deployment.} + +\item{configuration}{String-String configuration for this pipeline execution.} + +\item{continuous}{Whether the pipeline is continuous or triggered.} + +\item{development}{Whether the pipeline is in Development mode.} + +\item{edition}{Pipeline product edition.} + +\item{expected_last_modified}{If present, the last-modified time of the pipeline settings before the edit.} + +\item{filters}{Filters on which Pipeline packages to include in the deployed graph.} + +\item{id}{Unique identifier for this pipeline.} + +\item{libraries}{Libraries or code needed by this deployment.} + +\item{name}{Friendly identifier for this pipeline.} + +\item{notifications}{List of notification settings for this pipeline.} + +\item{photon}{Whether Photon is enabled for this pipeline.} + +\item{serverless}{Whether serverless compute is enabled for this pipeline.} + +\item{storage}{DBFS root directory for storing checkpoints and tables.} + +\item{target}{Target schema (database) to add tables in this pipeline to.} + +\item{trigger}{Which pipeline trigger to use.} +} +\description{ +Updates a pipeline with the supplied configuration. +} diff --git a/man/update_pipeline_permissions.Rd b/man/update_pipeline_permissions.Rd new file mode 100644 index 00000000..c197070c --- /dev/null +++ b/man/update_pipeline_permissions.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/pipelines.R +\name{update_pipeline_permissions} +\alias{update_pipeline_permissions} +\alias{pipelinesUpdatePermissions} +\title{Update pipeline permissions.} +\usage{ +update_pipeline_permissions(client, pipeline_id, access_control_list = NULL) + +pipelinesUpdatePermissions(client, pipeline_id, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{pipeline_id}{Required. The pipeline for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Updates the permissions on a pipeline. Pipelines can inherit permissions from +their root object. +} diff --git a/man/update_provider.Rd b/man/update_provider.Rd new file mode 100644 index 00000000..649fddd2 --- /dev/null +++ b/man/update_provider.Rd @@ -0,0 +1,44 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/providers.R +\name{update_provider} +\alias{update_provider} +\alias{providersUpdate} +\title{Update a provider.} +\usage{ +update_provider( + client, + name, + comment = NULL, + new_name = NULL, + owner = NULL, + recipient_profile_str = NULL +) + +providersUpdate( + client, + name, + comment = NULL, + new_name = NULL, + owner = NULL, + recipient_profile_str = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. 
Name of the provider.} + +\item{comment}{Description about the provider.} + +\item{new_name}{New name for the provider.} + +\item{owner}{Username of Provider owner.} + +\item{recipient_profile_str}{This field is required when the \strong{authentication_type} is \strong{TOKEN} or not provided.} +} +\description{ +Updates the information for an authentication provider, if the caller is a +metastore admin or is the owner of the provider. If the update changes the +provider name, the caller must be both a metastore admin and the owner of the +provider. +} diff --git a/man/update_query.Rd b/man/update_query.Rd new file mode 100644 index 00000000..04681f90 --- /dev/null +++ b/man/update_query.Rd @@ -0,0 +1,52 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/queries.R +\name{update_query} +\alias{update_query} +\alias{queriesUpdate} +\title{Change a query definition.} +\usage{ +update_query( + client, + query_id, + data_source_id = NULL, + description = NULL, + name = NULL, + options = NULL, + query = NULL, + run_as_role = NULL +) + +queriesUpdate( + client, + query_id, + data_source_id = NULL, + description = NULL, + name = NULL, + options = NULL, + query = NULL, + run_as_role = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{query_id}{Required. This field has no description yet.} + +\item{data_source_id}{Data source ID maps to the ID of the data source used by the resource and is distinct from the warehouse ID.} + +\item{description}{General description that conveys additional information about this query such as usage notes.} + +\item{name}{The title of this query that appears in list views, widget headings, and on the query page.} + +\item{options}{Exclusively used for storing a list of parameter definitions.} + +\item{query}{The text of the query to be run.} + +\item{run_as_role}{Sets the \strong{Run as} role for the object.} +} +\description{ +Modify this query definition. +} +\details{ +\strong{Note}: You cannot undo this operation. +} diff --git a/man/update_query_visualization.Rd b/man/update_query_visualization.Rd new file mode 100644 index 00000000..0fcca0a3 --- /dev/null +++ b/man/update_query_visualization.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/query_visualizations.R +\name{update_query_visualization} +\alias{update_query_visualization} +\alias{queryVisualizationsUpdate} +\title{Edit existing visualization.} +\usage{ +update_query_visualization( + client, + id, + created_at = NULL, + description = NULL, + name = NULL, + options = NULL, + type = NULL, + updated_at = NULL +) + +queryVisualizationsUpdate( + client, + id, + created_at = NULL, + description = NULL, + name = NULL, + options = NULL, + type = NULL, + updated_at = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{The UUID for this visualization.} + +\item{created_at}{This field has no description yet.} + +\item{description}{A short description of this visualization.} + +\item{name}{The name of the visualization that appears on dashboards and the query screen.} + +\item{options}{The options object varies widely from one visualization type to the next and is unsupported.} + +\item{type}{The type of visualization: chart, table, pivot table, and so on.} + +\item{updated_at}{This field has no description yet.} +} +\description{ +Edit existing visualization. 
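As a usage sketch (the visualization UUID and the new title are hypothetical values), renaming a visualization might look like:

client <- DatabricksClient()
# Hypothetical: change only the display name of an existing visualization
update_query_visualization(client, id = "f3b2c7d4-9e1a-4c5d-8b6f-0a1b2c3d4e5f",
                           name = "Weekly revenue")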
+} diff --git a/man/update_recipient.Rd b/man/update_recipient.Rd new file mode 100644 index 00000000..7012b8e4 --- /dev/null +++ b/man/update_recipient.Rd @@ -0,0 +1,48 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/recipients.R +\name{update_recipient} +\alias{update_recipient} +\alias{recipientsUpdate} +\title{Update a share recipient.} +\usage{ +update_recipient( + client, + name, + comment = NULL, + ip_access_list = NULL, + new_name = NULL, + owner = NULL, + properties_kvpairs = NULL +) + +recipientsUpdate( + client, + name, + comment = NULL, + ip_access_list = NULL, + new_name = NULL, + owner = NULL, + properties_kvpairs = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the recipient.} + +\item{comment}{Description about the recipient.} + +\item{ip_access_list}{IP Access List.} + +\item{new_name}{New name for the recipient.} + +\item{owner}{Username of the recipient owner.} + +\item{properties_kvpairs}{Recipient properties as map of string key-value pairs.} +} +\description{ +Updates an existing recipient in the metastore. The caller must be a +metastore admin or the owner of the recipient. If the recipient name will be +updated, the user must be both a metastore admin and the owner of the +recipient. +} diff --git a/man/update_registered_model.Rd b/man/update_registered_model.Rd new file mode 100644 index 00000000..0f8d3eac --- /dev/null +++ b/man/update_registered_model.Rd @@ -0,0 +1,46 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/registered_models.R +\name{update_registered_model} +\alias{update_registered_model} +\alias{registeredModelsUpdate} +\title{Update a Registered Model.} +\usage{ +update_registered_model( + client, + full_name, + comment = NULL, + new_name = NULL, + owner = NULL +) + +registeredModelsUpdate( + client, + full_name, + comment = NULL, + new_name = NULL, + owner = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. The three-level (fully qualified) name of the registered model.} + +\item{comment}{The comment attached to the registered model.} + +\item{new_name}{New name for the registered model.} + +\item{owner}{The identifier of the user who owns the registered model.} +} +\description{ +Updates the specified registered model. +} +\details{ +The caller must be a metastore admin or an owner of the registered model. For +the latter case, the caller must also be the owner or have the +\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} +privilege on the parent schema. + +Currently only the name, the owner or the comment of the registered model can +be updated. +} diff --git a/man/update_repo.Rd b/man/update_repo.Rd new file mode 100644 index 00000000..478e7610 --- /dev/null +++ b/man/update_repo.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/repos.R +\name{update_repo} +\alias{update_repo} +\alias{reposUpdate} +\title{Update a repo.} +\usage{ +update_repo(client, repo_id, branch = NULL, sparse_checkout = NULL, tag = NULL) + +reposUpdate(client, repo_id, branch = NULL, sparse_checkout = NULL, tag = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{repo_id}{Required. 
The ID for the corresponding repo to access.} + +\item{branch}{Branch that the local version of the repo is checked out to.} + +\item{sparse_checkout}{If specified, update the sparse checkout settings.} + +\item{tag}{Tag that the local version of the repo is checked out to.} +} +\description{ +Updates the repo to a different branch or tag, or updates the repo to the +latest commit on the same branch. +} diff --git a/man/update_repo_permissions.Rd b/man/update_repo_permissions.Rd new file mode 100644 index 00000000..4ba43389 --- /dev/null +++ b/man/update_repo_permissions.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/repos.R +\name{update_repo_permissions} +\alias{update_repo_permissions} +\alias{reposUpdatePermissions} +\title{Update repo permissions.} +\usage{ +update_repo_permissions(client, repo_id, access_control_list = NULL) + +reposUpdatePermissions(client, repo_id, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{repo_id}{Required. The repo for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Updates the permissions on a repo. Repos can inherit permissions from their +root object. +} diff --git a/man/update_restrict_workspace_admin.Rd b/man/update_restrict_workspace_admin.Rd new file mode 100644 index 00000000..2fdd7064 --- /dev/null +++ b/man/update_restrict_workspace_admin.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/restrict_workspace_admins.R +\name{update_restrict_workspace_admin} +\alias{update_restrict_workspace_admin} +\alias{restrictWorkspaceAdminsUpdate} +\title{Update the restrict workspace admins setting.} +\usage{ +update_restrict_workspace_admin(client, allow_missing, setting, field_mask) + +restrictWorkspaceAdminsUpdate(client, allow_missing, setting, field_mask) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{allow_missing}{Required. This should always be set to true for Settings API.} + +\item{setting}{Required. This field has no description yet.} + +\item{field_mask}{Required. Field mask is required to be passed into the PATCH request.} +} +\description{ +Updates the restrict workspace admins setting for the workspace. A fresh etag +needs to be provided in \code{PATCH} requests (as part of the setting field). The +etag can be retrieved by making a GET request before the \code{PATCH} request. If +the setting is updated concurrently, \code{PATCH} fails with 409 and the request +must be retried by using the fresh etag in the 409 response. +} diff --git a/man/update_schema.Rd b/man/update_schema.Rd new file mode 100644 index 00000000..4e623c33 --- /dev/null +++ b/man/update_schema.Rd @@ -0,0 +1,49 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/schemas.R +\name{update_schema} +\alias{update_schema} +\alias{schemasUpdate} +\title{Update a schema.} +\usage{ +update_schema( + client, + full_name, + comment = NULL, + enable_predictive_optimization = NULL, + new_name = NULL, + owner = NULL, + properties = NULL +) + +schemasUpdate( + client, + full_name, + comment = NULL, + enable_predictive_optimization = NULL, + new_name = NULL, + owner = NULL, + properties = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. 
Full name of the schema.} + +\item{comment}{User-provided free-form text description.} + +\item{enable_predictive_optimization}{Whether predictive optimization should be enabled for this object and objects under it.} + +\item{new_name}{New name for the schema.} + +\item{owner}{Username of current owner of schema.} + +\item{properties}{A map of key-value properties attached to the securable.} +} +\description{ +Updates a schema for a catalog. The caller must be the owner of the schema or +a metastore admin. If the caller is a metastore admin, only the \strong{owner} +field can be changed in the update. If the \strong{name} field must be updated, +the caller must be a metastore admin or have the \strong{CREATE_SCHEMA} privilege +on the parent catalog. +} diff --git a/man/update_service_principal.Rd b/man/update_service_principal.Rd new file mode 100644 index 00000000..3f289a6b --- /dev/null +++ b/man/update_service_principal.Rd @@ -0,0 +1,60 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/service_principals.R +\name{update_service_principal} +\alias{update_service_principal} +\alias{servicePrincipalsUpdate} +\title{Replace service principal.} +\usage{ +update_service_principal( + client, + id, + active = NULL, + application_id = NULL, + display_name = NULL, + entitlements = NULL, + external_id = NULL, + groups = NULL, + roles = NULL, + schemas = NULL +) + +servicePrincipalsUpdate( + client, + id, + active = NULL, + application_id = NULL, + display_name = NULL, + entitlements = NULL, + external_id = NULL, + groups = NULL, + roles = NULL, + schemas = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Databricks service principal ID.} + +\item{active}{If this user is active.} + +\item{application_id}{UUID relating to the service principal.} + +\item{display_name}{String that represents a concatenation of given and family names.} + +\item{entitlements}{Entitlements assigned to the service principal.} + +\item{external_id}{This field has no description yet.} + +\item{groups}{This field has no description yet.} + +\item{roles}{Corresponds to AWS instance profile/arn role.} + +\item{schemas}{The schema of the List response.} +} +\description{ +Updates the details of a single service principal. +} +\details{ +This action replaces the existing service principal with the same name. +} diff --git a/man/update_serving_endpoint_config.Rd b/man/update_serving_endpoint_config.Rd new file mode 100644 index 00000000..fbb40e62 --- /dev/null +++ b/man/update_serving_endpoint_config.Rd @@ -0,0 +1,44 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{update_serving_endpoint_config} +\alias{update_serving_endpoint_config} +\alias{servingEndpointsUpdateConfig} +\title{Update config of a serving endpoint.} +\usage{ +update_serving_endpoint_config( + client, + name, + auto_capture_config = NULL, + served_entities = NULL, + served_models = NULL, + traffic_config = NULL +) + +servingEndpointsUpdateConfig( + client, + name, + auto_capture_config = NULL, + served_entities = NULL, + served_models = NULL, + traffic_config = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. 
The name of the serving endpoint to update.} + +\item{auto_capture_config}{Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.} + +\item{served_entities}{A list of served entities for the endpoint to serve.} + +\item{served_models}{(Deprecated, use served_entities instead) A list of served models for the endpoint to serve.} + +\item{traffic_config}{The traffic config defining how invocations to the serving endpoint should be routed.} +} +\description{ +Updates any combination of the serving endpoint's served entities, the +compute configuration of those served entities, and the endpoint's traffic +config. An endpoint that already has an update in progress cannot be updated +until the current update completes or fails. +} diff --git a/man/update_serving_endpoint_config_and_wait.Rd b/man/update_serving_endpoint_config_and_wait.Rd new file mode 100644 index 00000000..04f587f1 --- /dev/null +++ b/man/update_serving_endpoint_config_and_wait.Rd @@ -0,0 +1,46 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{update_serving_endpoint_config_and_wait} +\alias{update_serving_endpoint_config_and_wait} +\title{Update config of a serving endpoint.} +\usage{ +update_serving_endpoint_config_and_wait( + client, + name, + auto_capture_config = NULL, + served_entities = NULL, + served_models = NULL, + traffic_config = NULL, + timeout = 20, + callback = cli_reporter +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the serving endpoint to update.} + +\item{auto_capture_config}{Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.} + +\item{served_entities}{A list of served entities for the endpoint to serve.} + +\item{served_models}{(Deprecated, use served_entities instead) A list of served models for the endpoint to serve.} + +\item{traffic_config}{The traffic config defining how invocations to the serving endpoint should be routed.} + +\item{timeout}{Time to wait for the operation to complete in minutes.} + +\item{callback}{Function to report the status of the operation. By default, it reports to the console.} +} +\description{ +This is a long-running operation, which blocks until Serving Endpoints on Databricks reach +NOT_UPDATING state with a timeout of 20 minutes, which you can change via the \code{timeout} parameter. +By default, the state of Databricks Serving Endpoints is reported to the console. You can change this behavior +by changing the \code{callback} parameter. +} +\details{ +Updates any combination of the serving endpoint's served entities, the +compute configuration of those served entities, and the endpoint's traffic +config. An endpoint that already has an update in progress cannot be updated +until the current update completes or fails. 
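A hedged sketch of a blocking config update follows; the endpoint name and the field names inside served_entities are assumptions about the request shape, not confirmed by this diff.

client <- DatabricksClient()
update_serving_endpoint_config_and_wait(
  client,
  name = "my-endpoint",  # assumed endpoint name
  served_entities = list(list(
    entity_name = "main.default.my_model",  # assumed field names
    entity_version = "2",
    workload_size = "Small",
    scale_to_zero_enabled = TRUE
  )),
  timeout = 30  # minutes; overrides the 20-minute default
)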
+} diff --git a/man/update_serving_endpoint_permissions.Rd b/man/update_serving_endpoint_permissions.Rd new file mode 100644 index 00000000..c65a1b45 --- /dev/null +++ b/man/update_serving_endpoint_permissions.Rd @@ -0,0 +1,30 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/serving_endpoints.R +\name{update_serving_endpoint_permissions} +\alias{update_serving_endpoint_permissions} +\alias{servingEndpointsUpdatePermissions} +\title{Update serving endpoint permissions.} +\usage{ +update_serving_endpoint_permissions( + client, + serving_endpoint_id, + access_control_list = NULL +) + +servingEndpointsUpdatePermissions( + client, + serving_endpoint_id, + access_control_list = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{serving_endpoint_id}{Required. The serving endpoint for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Updates the permissions on a serving endpoint. Serving endpoints can inherit +permissions from their root object. +} diff --git a/man/update_share.Rd b/man/update_share.Rd new file mode 100644 index 00000000..54898922 --- /dev/null +++ b/man/update_share.Rd @@ -0,0 +1,56 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/shares.R +\name{update_share} +\alias{update_share} +\alias{sharesUpdate} +\title{Update a share.} +\usage{ +update_share( + client, + name, + comment = NULL, + new_name = NULL, + owner = NULL, + updates = NULL +) + +sharesUpdate( + client, + name, + comment = NULL, + new_name = NULL, + owner = NULL, + updates = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the share.} + +\item{comment}{User-provided free-form text description.} + +\item{new_name}{New name for the share.} + +\item{owner}{Username of current owner of share.} + +\item{updates}{Array of shared data object updates.} +} +\description{ +Updates the share with the changes and data objects in the request. The +caller must be the owner of the share or a metastore admin. +} +\details{ +When the caller is a metastore admin, only the \strong{owner} field can be +updated. + +In the case that the share name is changed, \strong{updateShare} requires that the +caller is both the share owner and a metastore admin. + +For each table that is added through this method, the share owner must also +have \strong{SELECT} privilege on the table. This privilege must be maintained +indefinitely for recipients to be able to access the table. Typically, you +should use a group as the share owner. + +Table removals through \strong{update} do not require additional privileges. +} diff --git a/man/update_share_permissions.Rd b/man/update_share_permissions.Rd new file mode 100644 index 00000000..dd4bf433 --- /dev/null +++ b/man/update_share_permissions.Rd @@ -0,0 +1,26 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/shares.R +\name{update_share_permissions} +\alias{update_share_permissions} +\alias{sharesUpdatePermissions} +\title{Update permissions.} +\usage{ +update_share_permissions(client, name, changes = NULL) + +sharesUpdatePermissions(client, name, changes = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the share.} + +\item{changes}{Array of permission changes.} +} +\description{ +Updates the permissions for a data share in the metastore. 
The caller must be +a metastore admin or an owner of the share. +} +\details{ +For new recipient grants, the user must also be the owner of the recipients. +Recipient revocations do not require additional privileges. +} diff --git a/man/update_storage_credential.Rd b/man/update_storage_credential.Rd new file mode 100644 index 00000000..43f2d610 --- /dev/null +++ b/man/update_storage_credential.Rd @@ -0,0 +1,69 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/storage_credentials.R +\name{update_storage_credential} +\alias{update_storage_credential} +\alias{storageCredentialsUpdate} +\title{Update a credential.} +\usage{ +update_storage_credential( + client, + name, + aws_iam_role = NULL, + azure_managed_identity = NULL, + azure_service_principal = NULL, + cloudflare_api_token = NULL, + comment = NULL, + databricks_gcp_service_account = NULL, + force = NULL, + new_name = NULL, + owner = NULL, + read_only = NULL, + skip_validation = NULL +) + +storageCredentialsUpdate( + client, + name, + aws_iam_role = NULL, + azure_managed_identity = NULL, + azure_service_principal = NULL, + cloudflare_api_token = NULL, + comment = NULL, + databricks_gcp_service_account = NULL, + force = NULL, + new_name = NULL, + owner = NULL, + read_only = NULL, + skip_validation = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. Name of the storage credential.} + +\item{aws_iam_role}{The AWS IAM role configuration.} + +\item{azure_managed_identity}{The Azure managed identity configuration.} + +\item{azure_service_principal}{The Azure service principal configuration.} + +\item{cloudflare_api_token}{The Cloudflare API token configuration.} + +\item{comment}{Comment associated with the credential.} + +\item{databricks_gcp_service_account}{The Databricks managed GCP service account configuration.} + +\item{force}{Force update even if there are dependent external locations or external tables.} + +\item{new_name}{New name for the storage credential.} + +\item{owner}{Username of current owner of credential.} + +\item{read_only}{Whether the storage credential is only usable for read operations.} + +\item{skip_validation}{Supplying true to this argument skips validation of the updated credential.} +} +\description{ +Updates a storage credential on the metastore. +} diff --git a/man/update_table.Rd b/man/update_table.Rd new file mode 100644 index 00000000..565a7dd9 --- /dev/null +++ b/man/update_table.Rd @@ -0,0 +1,25 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/tables.R +\name{update_table} +\alias{update_table} +\alias{tablesUpdate} +\title{Update a table owner.} +\usage{ +update_table(client, full_name, owner = NULL) + +tablesUpdate(client, full_name, owner = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{full_name}{Required. Full name of the table.} + +\item{owner}{This field has no description yet.} +} +\description{ +Change the owner of the table. The caller must be the owner of the parent +catalog, have the \strong{USE_CATALOG} privilege on the parent catalog and be the +owner of the parent schema, or be the owner of the table and have the +\strong{USE_CATALOG} privilege on the parent catalog and the \strong{USE_SCHEMA} +privilege on the parent schema. 
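For example, transferring ownership of a table (the table and principal names are hypothetical):

client <- DatabricksClient()
update_table(client, full_name = "main.sales.orders", owner = "data-eng-team")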
+} diff --git a/man/update_token_management_permissions.Rd b/man/update_token_management_permissions.Rd new file mode 100644 index 00000000..aa5844a7 --- /dev/null +++ b/man/update_token_management_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/token_management.R +\name{update_token_management_permissions} +\alias{update_token_management_permissions} +\alias{tokenManagementUpdatePermissions} +\title{Update token permissions.} +\usage{ +update_token_management_permissions(client, access_control_list = NULL) + +tokenManagementUpdatePermissions(client, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Updates the permissions on all tokens. Tokens can inherit permissions from +their root object. +} diff --git a/man/update_user.Rd b/man/update_user.Rd new file mode 100644 index 00000000..7e5ca2ae --- /dev/null +++ b/man/update_user.Rd @@ -0,0 +1,65 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/users.R +\name{update_user} +\alias{update_user} +\alias{usersUpdate} +\title{Replace a user.} +\usage{ +update_user( + client, + id, + active = NULL, + display_name = NULL, + emails = NULL, + entitlements = NULL, + external_id = NULL, + groups = NULL, + name = NULL, + roles = NULL, + schemas = NULL, + user_name = NULL +) + +usersUpdate( + client, + id, + active = NULL, + display_name = NULL, + emails = NULL, + entitlements = NULL, + external_id = NULL, + groups = NULL, + name = NULL, + roles = NULL, + schemas = NULL, + user_name = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{id}{Databricks user ID.} + +\item{active}{If this user is active.} + +\item{display_name}{String that represents a concatenation of given and family names.} + +\item{emails}{All the emails associated with the Databricks user.} + +\item{entitlements}{Entitlements assigned to the user.} + +\item{external_id}{External ID is not currently supported.} + +\item{groups}{This field has no description yet.} + +\item{name}{This field has no description yet.} + +\item{roles}{Corresponds to AWS instance profile/arn role.} + +\item{schemas}{The schema of the user.} + +\item{user_name}{Email address of the Databricks user.} +} +\description{ +Replaces a user's information with the data supplied in the request. +} diff --git a/man/update_user_permissions.Rd b/man/update_user_permissions.Rd new file mode 100644 index 00000000..6fa1c617 --- /dev/null +++ b/man/update_user_permissions.Rd @@ -0,0 +1,20 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/users.R +\name{update_user_permissions} +\alias{update_user_permissions} +\alias{usersUpdatePermissions} +\title{Update password permissions.} +\usage{ +update_user_permissions(client, access_control_list = NULL) + +usersUpdatePermissions(client, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Updates the permissions on all passwords. Passwords can inherit permissions +from their root object. 
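A hedged sketch; the shape of the access-control entries is an assumption modeled on the other permissions endpoints in this SDK, not confirmed by this diff.

client <- DatabricksClient()
update_user_permissions(client, access_control_list = list(
  list(group_name = "admins", permission_level = "CAN_USE")  # assumed entry shape
))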
+} diff --git a/man/update_volume.Rd b/man/update_volume.Rd new file mode 100644 index 00000000..e0b1fa6e --- /dev/null +++ b/man/update_volume.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/volumes.R +\name{update_volume} +\alias{update_volume} +\alias{volumesUpdate} +\title{Update a Volume.} +\usage{ +update_volume(client, name, comment = NULL, new_name = NULL, owner = NULL) + +volumesUpdate(client, name, comment = NULL, new_name = NULL, owner = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The three-level (fully qualified) name of the volume.} + +\item{comment}{The comment attached to the volume.} + +\item{new_name}{New name for the volume.} + +\item{owner}{The identifier of the user who owns the volume.} +} +\description{ +Updates the specified volume under the specified parent catalog and schema. +} +\details{ +The caller must be a metastore admin or an owner of the volume. For the +latter case, the caller must also be the owner or have the \strong{USE_CATALOG} +privilege on the parent catalog and the \strong{USE_SCHEMA} privilege on the +parent schema. + +Currently only the name, the owner or the comment of the volume can be +updated. +} diff --git a/man/update_warehouse_permissions.Rd b/man/update_warehouse_permissions.Rd new file mode 100644 index 00000000..389e4615 --- /dev/null +++ b/man/update_warehouse_permissions.Rd @@ -0,0 +1,22 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/warehouses.R +\name{update_warehouse_permissions} +\alias{update_warehouse_permissions} +\alias{warehousesUpdatePermissions} +\title{Update SQL warehouse permissions.} +\usage{ +update_warehouse_permissions(client, warehouse_id, access_control_list = NULL) + +warehousesUpdatePermissions(client, warehouse_id, access_control_list = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{warehouse_id}{Required. The SQL warehouse for which to get or manage permissions.} + +\item{access_control_list}{This field has no description yet.} +} +\description{ +Updates the permissions on a SQL warehouse. SQL warehouses can inherit +permissions from their root object. +} diff --git a/man/update_workspace_binding.Rd b/man/update_workspace_binding.Rd new file mode 100644 index 00000000..0bf4fe98 --- /dev/null +++ b/man/update_workspace_binding.Rd @@ -0,0 +1,34 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace_bindings.R +\name{update_workspace_binding} +\alias{update_workspace_binding} +\alias{workspaceBindingsUpdate} +\title{Update catalog workspace bindings.} +\usage{ +update_workspace_binding( + client, + name, + assign_workspaces = NULL, + unassign_workspaces = NULL +) + +workspaceBindingsUpdate( + client, + name, + assign_workspaces = NULL, + unassign_workspaces = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{name}{Required. The name of the catalog.} + +\item{assign_workspaces}{A list of workspace IDs.} + +\item{unassign_workspaces}{A list of workspace IDs.} +} +\description{ +Updates workspace bindings of the catalog. The caller must be a metastore +admin or an owner of the catalog. 
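For instance (the catalog name and workspace IDs are hypothetical):

client <- DatabricksClient()
update_workspace_binding(
  client,
  name = "main",
  assign_workspaces = list(1234567890123456),
  unassign_workspaces = list(2345678901234567)
)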
+} diff --git a/man/update_workspace_binding_bindings.Rd b/man/update_workspace_binding_bindings.Rd new file mode 100644 index 00000000..bfb9071c --- /dev/null +++ b/man/update_workspace_binding_bindings.Rd @@ -0,0 +1,38 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/workspace_bindings.R +\name{update_workspace_binding_bindings} +\alias{update_workspace_binding_bindings} +\alias{workspaceBindingsUpdateBindings} +\title{Update securable workspace bindings.} +\usage{ +update_workspace_binding_bindings( + client, + securable_type, + securable_name, + add = NULL, + remove = NULL +) + +workspaceBindingsUpdateBindings( + client, + securable_type, + securable_name, + add = NULL, + remove = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{securable_type}{Required. The type of the securable.} + +\item{securable_name}{Required. The name of the securable.} + +\item{add}{List of workspace bindings.} + +\item{remove}{List of workspace bindings.} +} +\description{ +Updates workspace bindings of the securable. The caller must be a metastore +admin or an owner of the securable. +} diff --git a/man/upload_file.Rd b/man/upload_file.Rd new file mode 100644 index 00000000..d285d678 --- /dev/null +++ b/man/upload_file.Rd @@ -0,0 +1,27 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/files.R +\name{upload_file} +\alias{upload_file} +\alias{filesUpload} +\title{Upload a file.} +\usage{ +upload_file(client, file_path, contents, overwrite = NULL) + +filesUpload(client, file_path, contents, overwrite = NULL) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{file_path}{Required. The absolute path of the file.} + +\item{contents}{This field has no description yet.} + +\item{overwrite}{If true, an existing file will be overwritten.} +} +\description{ +Uploads a file of up to 5 GiB. The file contents should be sent as the +request body as raw bytes (an octet stream); do not encode or otherwise +modify the bytes before sending. The contents of the resulting file will be +exactly the bytes sent in the request body. If the request is successful, +there is no response body. +} diff --git a/man/upsert_vector_search_index_data.Rd b/man/upsert_vector_search_index_data.Rd new file mode 100644 index 00000000..6fe19b82 --- /dev/null +++ b/man/upsert_vector_search_index_data.Rd @@ -0,0 +1,21 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/vector_search_indexes.R +\name{upsert_vector_search_index_data} +\alias{upsert_vector_search_index_data} +\alias{vectorSearchIndexesUpsertDataVectorIndex} +\title{Upsert data into an index.} +\usage{ +upsert_vector_search_index_data(client, index_name, inputs_json) + +vectorSearchIndexesUpsertDataVectorIndex(client, index_name, inputs_json) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{index_name}{Required. Name of the vector index where data is to be upserted.} + +\item{inputs_json}{Required. JSON string representing the data to be upserted.} +} +\description{ +Handles the upserting of data into a specified vector index. 
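A call sketch; the index name and row payload are illustrative, and the exact JSON schema the service expects is not specified in this diff.

client <- DatabricksClient()
rows <- '[{"id": 1, "text": "hello world", "text_vector": [0.1, 0.2, 0.3]}]'  # assumed row schema
upsert_vector_search_index_data(client, index_name = "main.default.docs_index",
                                inputs_json = rows)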
+} diff --git a/man/usersCreate.Rd b/man/usersCreate.Rd deleted file mode 100644 index 4669a1d2..00000000 --- a/man/usersCreate.Rd +++ /dev/null @@ -1,50 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/users.R -\name{usersCreate} -\alias{usersCreate} -\title{Create a new user.} -\usage{ -usersCreate( - client, - active = NULL, - display_name = NULL, - emails = NULL, - entitlements = NULL, - external_id = NULL, - groups = NULL, - id = NULL, - name = NULL, - roles = NULL, - schemas = NULL, - user_name = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{active}{If this user is active.} - -\item{display_name}{String that represents a concatenation of given and family names.} - -\item{emails}{All the emails associated with the Databricks user.} - -\item{entitlements}{Entitlements assigned to the user.} - -\item{external_id}{External ID is not currently supported.} - -\item{groups}{This field has no description yet.} - -\item{id}{Databricks user ID.} - -\item{name}{This field has no description yet.} - -\item{roles}{Corresponds to AWS instance profile/arn role.} - -\item{schemas}{The schema of the user.} - -\item{user_name}{Email address of the Databricks user.} -} -\description{ -Creates a new user in the Databricks workspace. This new user will also be -added to the Databricks account. -} diff --git a/man/usersDelete.Rd b/man/usersDelete.Rd deleted file mode 100644 index fb8fdad8..00000000 --- a/man/usersDelete.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/users.R -\name{usersDelete} -\alias{usersDelete} -\title{Delete a user.} -\usage{ -usersDelete(client, id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Unique ID for a user in the Databricks workspace.} -} -\description{ -Deletes a user. Deleting a user from a Databricks workspace also removes -objects associated with the user. -} diff --git a/man/usersGet.Rd b/man/usersGet.Rd deleted file mode 100644 index ab8816fe..00000000 --- a/man/usersGet.Rd +++ /dev/null @@ -1,40 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/users.R -\name{usersGet} -\alias{usersGet} -\title{Get user details.} -\usage{ -usersGet( - client, - id, - attributes = NULL, - count = NULL, - excluded_attributes = NULL, - filter = NULL, - sort_by = NULL, - sort_order = NULL, - start_index = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Unique ID for a user in the Databricks workspace.} - -\item{attributes}{Comma-separated list of attributes to return in response.} - -\item{count}{Desired number of results per page.} - -\item{excluded_attributes}{Comma-separated list of attributes to exclude in response.} - -\item{filter}{Query by which the results have to be filtered.} - -\item{sort_by}{Attribute to sort the results.} - -\item{sort_order}{The order to sort the results.} - -\item{start_index}{Specifies the index of the first result.} -} -\description{ -Gets information for a specific user in Databricks workspace. 
-} diff --git a/man/usersGetPermissionLevels.Rd b/man/usersGetPermissionLevels.Rd deleted file mode 100644 index f2cb6276..00000000 --- a/man/usersGetPermissionLevels.Rd +++ /dev/null @@ -1,14 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/users.R -\name{usersGetPermissionLevels} -\alias{usersGetPermissionLevels} -\title{Get password permission levels.} -\usage{ -usersGetPermissionLevels(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Gets the permission levels that a user can have on an object. -} diff --git a/man/usersGetPermissions.Rd b/man/usersGetPermissions.Rd deleted file mode 100644 index 8ded373f..00000000 --- a/man/usersGetPermissions.Rd +++ /dev/null @@ -1,15 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/users.R -\name{usersGetPermissions} -\alias{usersGetPermissions} -\title{Get password permissions.} -\usage{ -usersGetPermissions(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Gets the permissions of all passwords. Passwords can inherit permissions from -their root object. -} diff --git a/man/usersList.Rd b/man/usersList.Rd deleted file mode 100644 index 8899628e..00000000 --- a/man/usersList.Rd +++ /dev/null @@ -1,40 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/users.R -\name{usersList} -\alias{usersList} -\title{List users.} -\usage{ -usersList( - client, - attributes = NULL, - count = NULL, - excluded_attributes = NULL, - filter = NULL, - sort_by = NULL, - sort_order = NULL, - start_index = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{attributes}{Comma-separated list of attributes to return in response.} - -\item{count}{Desired number of results per page.} - -\item{excluded_attributes}{Comma-separated list of attributes to exclude in response.} - -\item{filter}{Query by which the results have to be filtered.} - -\item{sort_by}{Attribute to sort the results.} - -\item{sort_order}{The order to sort the results.} - -\item{start_index}{Specifies the index of the first result.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets details for all the users associated with a Databricks workspace. -} diff --git a/man/usersPatch.Rd b/man/usersPatch.Rd deleted file mode 100644 index 5b79ce0a..00000000 --- a/man/usersPatch.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/users.R -\name{usersPatch} -\alias{usersPatch} -\title{Update user details.} -\usage{ -usersPatch(client, id, operations = NULL, schemas = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required. Unique ID for a user in the Databricks workspace.} - -\item{operations}{This field has no description yet.} - -\item{schemas}{The schema of the patch request.} -} -\description{ -Partially updates a user resource by applying the supplied operations on -specific user attributes. 
-} diff --git a/man/usersSetPermissions.Rd b/man/usersSetPermissions.Rd deleted file mode 100644 index b70448f5..00000000 --- a/man/usersSetPermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/users.R -\name{usersSetPermissions} -\alias{usersSetPermissions} -\title{Set password permissions.} -\usage{ -usersSetPermissions(client, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets permissions on all passwords. Passwords can inherit permissions from -their root object. -} diff --git a/man/usersUpdate.Rd b/man/usersUpdate.Rd deleted file mode 100644 index 91b27b0d..00000000 --- a/man/usersUpdate.Rd +++ /dev/null @@ -1,49 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/users.R -\name{usersUpdate} -\alias{usersUpdate} -\title{Replace a user.} -\usage{ -usersUpdate( - client, - id, - active = NULL, - display_name = NULL, - emails = NULL, - entitlements = NULL, - external_id = NULL, - groups = NULL, - name = NULL, - roles = NULL, - schemas = NULL, - user_name = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Databricks user ID.} - -\item{active}{If this user is active.} - -\item{display_name}{String that represents a concatenation of given and family names.} - -\item{emails}{All the emails associated with the Databricks user.} - -\item{entitlements}{Entitlements assigned to the user.} - -\item{external_id}{External ID is not currently supported.} - -\item{groups}{This field has no description yet.} - -\item{name}{This field has no description yet.} - -\item{roles}{Corresponds to AWS instance profile/arn role.} - -\item{schemas}{The schema of the user.} - -\item{user_name}{Email address of the Databricks user.} -} -\description{ -Replaces a user's information with the data supplied in request. -} diff --git a/man/usersUpdatePermissions.Rd b/man/usersUpdatePermissions.Rd deleted file mode 100644 index 2e0720df..00000000 --- a/man/usersUpdatePermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/users.R -\name{usersUpdatePermissions} -\alias{usersUpdatePermissions} -\title{Update password permissions.} -\usage{ -usersUpdatePermissions(client, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Updates the permissions on all passwords. Passwords can inherit permissions -from their root object. 
-} diff --git a/man/validate_storage_credential.Rd b/man/validate_storage_credential.Rd new file mode 100644 index 00000000..f45209d6 --- /dev/null +++ b/man/validate_storage_credential.Rd @@ -0,0 +1,69 @@ +% Generated by roxygen2: do not edit by hand +% Please edit documentation in R/storage_credentials.R +\name{validate_storage_credential} +\alias{validate_storage_credential} +\alias{storageCredentialsValidate} +\title{Validate a storage credential.} +\usage{ +validate_storage_credential( + client, + aws_iam_role = NULL, + azure_managed_identity = NULL, + azure_service_principal = NULL, + cloudflare_api_token = NULL, + databricks_gcp_service_account = NULL, + external_location_name = NULL, + read_only = NULL, + storage_credential_name = NULL, + url = NULL +) + +storageCredentialsValidate( + client, + aws_iam_role = NULL, + azure_managed_identity = NULL, + azure_service_principal = NULL, + cloudflare_api_token = NULL, + databricks_gcp_service_account = NULL, + external_location_name = NULL, + read_only = NULL, + storage_credential_name = NULL, + url = NULL +) +} +\arguments{ +\item{client}{Required. Instance of DatabricksClient()} + +\item{aws_iam_role}{The AWS IAM role configuration.} + +\item{azure_managed_identity}{The Azure managed identity configuration.} + +\item{azure_service_principal}{The Azure service principal configuration.} + +\item{cloudflare_api_token}{The Cloudflare API token configuration.} + +\item{databricks_gcp_service_account}{The Databricks created GCP service account configuration.} + +\item{external_location_name}{The name of an existing external location to validate.} + +\item{read_only}{Whether the storage credential is only usable for read operations.} + +\item{storage_credential_name}{The name of the storage credential to validate.} + +\item{url}{The external location url to validate.} +} +\description{ +Validates a storage credential. At least one of \strong{external_location_name} +and \strong{url} need to be provided. If only one of them is provided, it will be +used for validation. And if both are provided, the \strong{url} will be used for +validation, and \strong{external_location_name} will be ignored when checking +overlapping urls. +} +\details{ +Either the \strong{storage_credential_name} or the cloud-specific credential must +be provided. + +The caller must be a metastore admin or the storage credential owner or have +the \strong{CREATE_EXTERNAL_LOCATION} privilege on the metastore and the storage +credential. +} diff --git a/man/vectorSearchEndpointsCreateEndpoint.Rd b/man/vectorSearchEndpointsCreateEndpoint.Rd deleted file mode 100644 index 6571a128..00000000 --- a/man/vectorSearchEndpointsCreateEndpoint.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/vector_search_endpoints.R -\name{vectorSearchEndpointsCreateEndpoint} -\alias{vectorSearchEndpointsCreateEndpoint} -\title{Create an endpoint.} -\usage{ -vectorSearchEndpointsCreateEndpoint(client, name, endpoint_type) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of endpoint.} - -\item{endpoint_type}{Required. Type of endpoint.} -} -\description{ -Create a new endpoint. 
-} diff --git a/man/vectorSearchEndpointsCreateEndpointAndWait.Rd b/man/vectorSearchEndpointsCreateEndpointAndWait.Rd deleted file mode 100644 index 18a266df..00000000 --- a/man/vectorSearchEndpointsCreateEndpointAndWait.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/vector_search_endpoints.R -\name{vectorSearchEndpointsCreateEndpointAndWait} -\alias{vectorSearchEndpointsCreateEndpointAndWait} -\title{Create an endpoint.} -\usage{ -vectorSearchEndpointsCreateEndpointAndWait( - client, - name, - endpoint_type, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of endpoint.} - -\item{endpoint_type}{Required. Type of endpoint.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to console.} -} -\description{ -This is a long-running operation, which blocks until Vector Search Endpoints on Databricks reach -ONLINE state with the timeout of 20 minutes, that you can change via \code{timeout} parameter. -By default, the state of Databricks Vector Search Endpoints is reported to console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Create a new endpoint. -} diff --git a/man/vectorSearchEndpointsDeleteEndpoint.Rd b/man/vectorSearchEndpointsDeleteEndpoint.Rd deleted file mode 100644 index a2ceee21..00000000 --- a/man/vectorSearchEndpointsDeleteEndpoint.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/vector_search_endpoints.R -\name{vectorSearchEndpointsDeleteEndpoint} -\alias{vectorSearchEndpointsDeleteEndpoint} -\title{Delete an endpoint.} -\usage{ -vectorSearchEndpointsDeleteEndpoint(client, endpoint_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{endpoint_name}{Required. Name of the endpoint.} -} -\description{ -Delete an endpoint. -} diff --git a/man/vectorSearchEndpointsGetEndpoint.Rd b/man/vectorSearchEndpointsGetEndpoint.Rd deleted file mode 100644 index 4e582753..00000000 --- a/man/vectorSearchEndpointsGetEndpoint.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/vector_search_endpoints.R -\name{vectorSearchEndpointsGetEndpoint} -\alias{vectorSearchEndpointsGetEndpoint} -\title{Get an endpoint.} -\usage{ -vectorSearchEndpointsGetEndpoint(client, endpoint_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{endpoint_name}{Required. Name of the endpoint.} -} -\description{ -Get an endpoint. -} diff --git a/man/vectorSearchEndpointsListEndpoints.Rd b/man/vectorSearchEndpointsListEndpoints.Rd deleted file mode 100644 index 16ab6cdf..00000000 --- a/man/vectorSearchEndpointsListEndpoints.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/vector_search_endpoints.R -\name{vectorSearchEndpointsListEndpoints} -\alias{vectorSearchEndpointsListEndpoints} -\title{List all endpoints.} -\usage{ -vectorSearchEndpointsListEndpoints(client, page_token = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{page_token}{Token for pagination.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -List all endpoints. 
-} diff --git a/man/vectorSearchIndexesCreateIndex.Rd b/man/vectorSearchIndexesCreateIndex.Rd deleted file mode 100644 index e3eda3e3..00000000 --- a/man/vectorSearchIndexesCreateIndex.Rd +++ /dev/null @@ -1,34 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/vector_search_indexes.R -\name{vectorSearchIndexesCreateIndex} -\alias{vectorSearchIndexesCreateIndex} -\title{Create an index.} -\usage{ -vectorSearchIndexesCreateIndex( - client, - name, - endpoint_name, - primary_key, - index_type, - delta_sync_index_spec = NULL, - direct_access_index_spec = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. Name of the index.} - -\item{endpoint_name}{Required. Name of the endpoint to be used for serving the index.} - -\item{primary_key}{Required. Primary key of the index.} - -\item{index_type}{Required. There are 2 types of Vector Search indexes: - \code{DELTA_SYNC}: An index that automatically syncs with a source Delta Table, automatically and incrementally updating the index as the underlying data in the Delta Table changes.} - -\item{delta_sync_index_spec}{Specification for Delta Sync Index.} - -\item{direct_access_index_spec}{Specification for Direct Vector Access Index.} -} -\description{ -Create a new index. -} diff --git a/man/vectorSearchIndexesDeleteDataVectorIndex.Rd b/man/vectorSearchIndexesDeleteDataVectorIndex.Rd deleted file mode 100644 index 703312bd..00000000 --- a/man/vectorSearchIndexesDeleteDataVectorIndex.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/vector_search_indexes.R -\name{vectorSearchIndexesDeleteDataVectorIndex} -\alias{vectorSearchIndexesDeleteDataVectorIndex} -\title{Delete data from index.} -\usage{ -vectorSearchIndexesDeleteDataVectorIndex(client, index_name, primary_keys) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{index_name}{Required. Name of the vector index where data is to be deleted.} - -\item{primary_keys}{Required. List of primary keys for the data to be deleted.} -} -\description{ -Handles the deletion of data from a specified vector index. -} diff --git a/man/vectorSearchIndexesDeleteIndex.Rd b/man/vectorSearchIndexesDeleteIndex.Rd deleted file mode 100644 index bce9a9aa..00000000 --- a/man/vectorSearchIndexesDeleteIndex.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/vector_search_indexes.R -\name{vectorSearchIndexesDeleteIndex} -\alias{vectorSearchIndexesDeleteIndex} -\title{Delete an index.} -\usage{ -vectorSearchIndexesDeleteIndex(client, index_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{index_name}{Required. Name of the index.} -} -\description{ -Delete an index. -} diff --git a/man/vectorSearchIndexesGetIndex.Rd b/man/vectorSearchIndexesGetIndex.Rd deleted file mode 100644 index f0576c8b..00000000 --- a/man/vectorSearchIndexesGetIndex.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/vector_search_indexes.R -\name{vectorSearchIndexesGetIndex} -\alias{vectorSearchIndexesGetIndex} -\title{Get an index.} -\usage{ -vectorSearchIndexesGetIndex(client, index_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{index_name}{Required. Name of the index.} -} -\description{ -Get an index. 
-} diff --git a/man/vectorSearchIndexesListIndexes.Rd b/man/vectorSearchIndexesListIndexes.Rd deleted file mode 100644 index d182dc3c..00000000 --- a/man/vectorSearchIndexesListIndexes.Rd +++ /dev/null @@ -1,21 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/vector_search_indexes.R -\name{vectorSearchIndexesListIndexes} -\alias{vectorSearchIndexesListIndexes} -\title{List indexes.} -\usage{ -vectorSearchIndexesListIndexes(client, endpoint_name, page_token = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{endpoint_name}{Required. Name of the endpoint.} - -\item{page_token}{Token for pagination.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -List all indexes in the given endpoint. -} diff --git a/man/vectorSearchIndexesQueryIndex.Rd b/man/vectorSearchIndexesQueryIndex.Rd deleted file mode 100644 index 9a003d1f..00000000 --- a/man/vectorSearchIndexesQueryIndex.Rd +++ /dev/null @@ -1,37 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/vector_search_indexes.R -\name{vectorSearchIndexesQueryIndex} -\alias{vectorSearchIndexesQueryIndex} -\title{Query an index.} -\usage{ -vectorSearchIndexesQueryIndex( - client, - index_name, - columns, - filters_json = NULL, - num_results = NULL, - query_text = NULL, - query_vector = NULL, - score_threshold = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{index_name}{Required. Name of the vector index to query.} - -\item{columns}{Required. List of column names to include in the response.} - -\item{filters_json}{JSON string representing query filters.} - -\item{num_results}{Number of results to return.} - -\item{query_text}{Query text.} - -\item{query_vector}{Query vector.} - -\item{score_threshold}{Threshold for the approximate nearest neighbor search.} -} -\description{ -Query the specified vector index. -} diff --git a/man/vectorSearchIndexesSyncIndex.Rd b/man/vectorSearchIndexesSyncIndex.Rd deleted file mode 100644 index 3650a00b..00000000 --- a/man/vectorSearchIndexesSyncIndex.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/vector_search_indexes.R -\name{vectorSearchIndexesSyncIndex} -\alias{vectorSearchIndexesSyncIndex} -\title{Synchronize an index.} -\usage{ -vectorSearchIndexesSyncIndex(client, index_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{index_name}{Required. Name of the vector index to synchronize.} -} -\description{ -Triggers a synchronization process for a specified vector index. -} diff --git a/man/vectorSearchIndexesUpsertDataVectorIndex.Rd b/man/vectorSearchIndexesUpsertDataVectorIndex.Rd deleted file mode 100644 index dd6ba80f..00000000 --- a/man/vectorSearchIndexesUpsertDataVectorIndex.Rd +++ /dev/null @@ -1,18 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/vector_search_indexes.R -\name{vectorSearchIndexesUpsertDataVectorIndex} -\alias{vectorSearchIndexesUpsertDataVectorIndex} -\title{Upsert data into an index.} -\usage{ -vectorSearchIndexesUpsertDataVectorIndex(client, index_name, inputs_json) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{index_name}{Required. Name of the vector index where data is to be upserted.} - -\item{inputs_json}{Required. JSON string representing the data to be upserted.} -} -\description{ -Handles the upserting of data into a specified vector index. -}
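As a usage sketch of the query and upsert calls documented above (the index name and rows are illustrative; the JSON payloads follow the documented inputs_json string parameter):

# Sketch: upsert two rows into a vector index, then query it by text.
client <- DatabricksClient()
vectorSearchIndexesUpsertDataVectorIndex(
  client,
  index_name = "main.default.docs_idx",  # illustrative index name
  inputs_json = '[{"id": 1, "text": "hello"}, {"id": 2, "text": "world"}]'
)
hits <- vectorSearchIndexesQueryIndex(
  client,
  index_name = "main.default.docs_idx",
  columns = c("id", "text"),
  query_text = "hello",
  num_results = 5
)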
diff --git a/man/volumesCreate.Rd b/man/volumesCreate.Rd deleted file mode 100644 index 73bc25ea..00000000 --- a/man/volumesCreate.Rd +++ /dev/null @@ -1,52 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/volumes.R -\name{volumesCreate} -\alias{volumesCreate} -\title{Create a Volume.} -\usage{ -volumesCreate( - client, - catalog_name, - schema_name, - name, - volume_type, - comment = NULL, - storage_location = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{catalog_name}{Required. The name of the catalog where the schema and the volume are.} - -\item{schema_name}{Required. The name of the schema where the volume is.} - -\item{name}{Required. The name of the volume.} - -\item{volume_type}{Required. This field has no description yet.} - -\item{comment}{The comment attached to the volume.} - -\item{storage_location}{The storage location on the cloud.} -} -\description{ -Creates a new volume. -} -\details{ -The user can create either an external volume or a managed volume. An -external volume will be created in the specified external location, while a -managed volume will be located in the default location specified by -the parent schema, the parent catalog, or the metastore. - -For the volume creation to succeed, the user must satisfy the following -conditions: - The caller must be a metastore admin, or be the owner of the -parent catalog and schema, or have the \strong{USE_CATALOG} privilege on the -parent catalog and the \strong{USE_SCHEMA} privilege on the parent schema. - The -caller must have the \strong{CREATE VOLUME} privilege on the parent schema. - -For an external volume, the following conditions must also be satisfied: - The -caller must have the \strong{CREATE EXTERNAL VOLUME} privilege on the external -location. - No other tables or volumes exist in the specified storage -location. - The specified storage location is not under the location -of any other table, volume, catalog, or schema. -} diff --git a/man/volumesDelete.Rd b/man/volumesDelete.Rd deleted file mode 100644 index f341960c..00000000 --- a/man/volumesDelete.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/volumes.R -\name{volumesDelete} -\alias{volumesDelete} -\title{Delete a Volume.} -\usage{ -volumesDelete(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The three-level (fully qualified) name of the volume.} -} -\description{ -Deletes a volume from the specified parent catalog and schema. -} -\details{ -The caller must be a metastore admin or an owner of the volume. For the -latter case, the caller must also be the owner or have the \strong{USE_CATALOG} -privilege on the parent catalog and the \strong{USE_SCHEMA} privilege on the -parent schema. -} diff --git a/man/volumesList.Rd b/man/volumesList.Rd deleted file mode 100644 index ff7ae589..00000000 --- a/man/volumesList.Rd +++ /dev/null @@ -1,45 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/volumes.R -\name{volumesList} -\alias{volumesList} -\title{List Volumes.} -\usage{ -volumesList( - client, - catalog_name, - schema_name, - include_browse = NULL, - max_results = NULL, - page_token = NULL -) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{catalog_name}{Required. The identifier of the catalog.} - -\item{schema_name}{Required. The identifier of the schema.} - -\item{include_browse}{Whether to include volumes in the response for which the principal can only access selective metadata.} - -\item{max_results}{Maximum number of volumes to return (page length).} - -\item{page_token}{Opaque token returned by a previous request.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Gets an array of volumes for the current metastore under the parent catalog -and schema. -} -\details{ -The returned volumes are filtered based on the privileges of the calling -user. For example, the metastore admin is able to list all the volumes. A -regular user needs to be the owner or have the \strong{READ VOLUME} privilege on -the volume to receive the volumes in the response. For the latter case, the -caller must also be the owner or have the \strong{USE_CATALOG} privilege on the -parent catalog and the \strong{USE_SCHEMA} privilege on the parent schema. - -There is no guarantee of a specific ordering of the elements in the array. -} diff --git a/man/volumesRead.Rd b/man/volumesRead.Rd deleted file mode 100644 index 766159e8..00000000 --- a/man/volumesRead.Rd +++ /dev/null @@ -1,24 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/volumes.R -\name{volumesRead} -\alias{volumesRead} -\title{Get a Volume.} -\usage{ -volumesRead(client, name, include_browse = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The three-level (fully qualified) name of the volume.} - -\item{include_browse}{Whether to include volumes in the response for which the principal can only access selective metadata.} -} -\description{ -Gets a volume from the metastore for a specific catalog and schema. -} -\details{ -The caller must be a metastore admin or an owner of (or have the \strong{READ VOLUME} privilege on) the volume. For the latter case, the caller must also -be the owner or have the \strong{USE_CATALOG} privilege on the parent catalog and -the \strong{USE_SCHEMA} privilege on the parent schema. -} diff --git a/man/volumesUpdate.Rd b/man/volumesUpdate.Rd deleted file mode 100644 index 4f191b96..00000000 --- a/man/volumesUpdate.Rd +++ /dev/null @@ -1,31 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/volumes.R -\name{volumesUpdate} -\alias{volumesUpdate} -\title{Update a Volume.} -\usage{ -volumesUpdate(client, name, comment = NULL, new_name = NULL, owner = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The three-level (fully qualified) name of the volume.} - -\item{comment}{The comment attached to the volume.} - -\item{new_name}{New name for the volume.} - -\item{owner}{The identifier of the user who owns the volume.} -} -\description{ -Updates the specified volume under the specified parent catalog and schema. -} -\details{ -The caller must be a metastore admin or an owner of the volume. For the -latter case, the caller must also be the owner or have the \strong{USE_CATALOG} -privilege on the parent catalog and the \strong{USE_SCHEMA} privilege on the -parent schema. - -Currently only the name, the owner, or the comment of the volume can be -updated. -}
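A short sketch of the volume calls documented above (catalog, schema, and volume names are illustrative; MANAGED and EXTERNAL are the volume types assumed from the underlying Unity Catalog API, since the volume_type field has no description here):

# Sketch: create a managed volume, then list the volumes in the same schema.
client <- DatabricksClient()
volumesCreate(
  client,
  catalog_name = "main",    # illustrative catalog
  schema_name = "default",  # illustrative schema
  name = "raw_files",
  volume_type = "MANAGED"   # assumed enum value; "EXTERNAL" also needs storage_location
)
vols <- volumesList(client, catalog_name = "main", schema_name = "default")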
diff --git a/man/warehousesCreate.Rd b/man/warehousesCreate.Rd deleted file mode 100644 index 48be7805..00000000 --- a/man/warehousesCreate.Rd +++ /dev/null @@ -1,55 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesCreate} -\alias{warehousesCreate} -\title{Create a warehouse.} -\usage{ -warehousesCreate( - client, - auto_stop_mins = NULL, - channel = NULL, - cluster_size = NULL, - creator_name = NULL, - enable_photon = NULL, - enable_serverless_compute = NULL, - instance_profile_arn = NULL, - max_num_clusters = NULL, - min_num_clusters = NULL, - name = NULL, - spot_instance_policy = NULL, - tags = NULL, - warehouse_type = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{auto_stop_mins}{The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped.} - -\item{channel}{Channel Details.} - -\item{cluster_size}{Size of the clusters allocated for this warehouse.} - -\item{creator_name}{Warehouse creator name.} - -\item{enable_photon}{Configures whether the warehouse should use Photon optimized clusters.} - -\item{enable_serverless_compute}{Configures whether the warehouse should use serverless compute.} - -\item{instance_profile_arn}{Deprecated.} - -\item{max_num_clusters}{Maximum number of clusters that the autoscaler will create to handle concurrent queries.} - -\item{min_num_clusters}{Minimum number of available clusters that will be maintained for this SQL warehouse.} - -\item{name}{Logical name for the cluster.} - -\item{spot_instance_policy}{Configures whether the warehouse should use spot instances.} - -\item{tags}{A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse.} - -\item{warehouse_type}{Warehouse type: \code{PRO} or \code{CLASSIC}.} -} -\description{ -Creates a new SQL warehouse. -} diff --git a/man/warehousesCreateAndWait.Rd b/man/warehousesCreateAndWait.Rd deleted file mode 100644 index 59d4cca8..00000000 --- a/man/warehousesCreateAndWait.Rd +++ /dev/null @@ -1,67 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesCreateAndWait} -\alias{warehousesCreateAndWait} -\title{Create a warehouse.} -\usage{ -warehousesCreateAndWait( - client, - auto_stop_mins = NULL, - channel = NULL, - cluster_size = NULL, - creator_name = NULL, - enable_photon = NULL, - enable_serverless_compute = NULL, - instance_profile_arn = NULL, - max_num_clusters = NULL, - min_num_clusters = NULL, - name = NULL, - spot_instance_policy = NULL, - tags = NULL, - warehouse_type = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. 
Instance of DatabricksClient()} - -\item{auto_stop_mins}{The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped.} - -\item{channel}{Channel Details.} - -\item{cluster_size}{Size of the clusters allocated for this warehouse.} - -\item{creator_name}{Warehouse creator name.} - -\item{enable_photon}{Configures whether the warehouse should use Photon optimized clusters.} - -\item{enable_serverless_compute}{Configures whether the warehouse should use serverless compute.} - -\item{instance_profile_arn}{Deprecated.} - -\item{max_num_clusters}{Maximum number of clusters that the autoscaler will create to handle concurrent queries.} - -\item{min_num_clusters}{Minimum number of available clusters that will be maintained for this SQL warehouse.} - -\item{name}{Logical name for the cluster.} - -\item{spot_instance_policy}{Configures whether the warehouse should use spot instances.} - -\item{tags}{A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse.} - -\item{warehouse_type}{Warehouse type: \code{PRO} or \code{CLASSIC}.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to the console.} -} -\description{ -This is a long-running operation, which blocks until Warehouses on Databricks reach -the RUNNING state within a default timeout of 20 minutes, which you can change via the \code{timeout} parameter. -By default, the state of Databricks Warehouses is reported to the console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Creates a new SQL warehouse. -} diff --git a/man/warehousesDelete.Rd b/man/warehousesDelete.Rd deleted file mode 100644 index c9f89548..00000000 --- a/man/warehousesDelete.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesDelete} -\alias{warehousesDelete} -\title{Delete a warehouse.} -\usage{ -warehousesDelete(client, id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required.} -} -\description{ -Deletes a SQL warehouse. -} diff --git a/man/warehousesEdit.Rd b/man/warehousesEdit.Rd deleted file mode 100644 index f1a13846..00000000 --- a/man/warehousesEdit.Rd +++ /dev/null @@ -1,58 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesEdit} -\alias{warehousesEdit} -\title{Update a warehouse.} -\usage{ -warehousesEdit( - client, - id, - auto_stop_mins = NULL, - channel = NULL, - cluster_size = NULL, - creator_name = NULL, - enable_photon = NULL, - enable_serverless_compute = NULL, - instance_profile_arn = NULL, - max_num_clusters = NULL, - min_num_clusters = NULL, - name = NULL, - spot_instance_policy = NULL, - tags = NULL, - warehouse_type = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required.}
- -\item{auto_stop_mins}{The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped.} - -\item{channel}{Channel Details.} - -\item{cluster_size}{Size of the clusters allocated for this warehouse.} - -\item{creator_name}{Warehouse creator name.} - -\item{enable_photon}{Configures whether the warehouse should use Photon optimized clusters.} - -\item{enable_serverless_compute}{Configures whether the warehouse should use serverless compute.} - -\item{instance_profile_arn}{Deprecated.} - -\item{max_num_clusters}{Maximum number of clusters that the autoscaler will create to handle concurrent queries.} - -\item{min_num_clusters}{Minimum number of available clusters that will be maintained for this SQL warehouse.} - -\item{name}{Logical name for the cluster.} - -\item{spot_instance_policy}{Configures whether the warehouse should use spot instances.} - -\item{tags}{A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse.} - -\item{warehouse_type}{Warehouse type: \code{PRO} or \code{CLASSIC}.} -} -\description{ -Updates the configuration for a SQL warehouse. -} diff --git a/man/warehousesEditAndWait.Rd b/man/warehousesEditAndWait.Rd deleted file mode 100644 index 69952810..00000000 --- a/man/warehousesEditAndWait.Rd +++ /dev/null @@ -1,70 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesEditAndWait} -\alias{warehousesEditAndWait} -\title{Update a warehouse.} -\usage{ -warehousesEditAndWait( - client, - id, - auto_stop_mins = NULL, - channel = NULL, - cluster_size = NULL, - creator_name = NULL, - enable_photon = NULL, - enable_serverless_compute = NULL, - instance_profile_arn = NULL, - max_num_clusters = NULL, - min_num_clusters = NULL, - name = NULL, - spot_instance_policy = NULL, - tags = NULL, - warehouse_type = NULL, - timeout = 20, - callback = cli_reporter -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required.} - -\item{auto_stop_mins}{The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped.} - -\item{channel}{Channel Details.} - -\item{cluster_size}{Size of the clusters allocated for this warehouse.} - -\item{creator_name}{Warehouse creator name.} - -\item{enable_photon}{Configures whether the warehouse should use Photon optimized clusters.} - -\item{enable_serverless_compute}{Configures whether the warehouse should use serverless compute.} - -\item{instance_profile_arn}{Deprecated.} - -\item{max_num_clusters}{Maximum number of clusters that the autoscaler will create to handle concurrent queries.} - -\item{min_num_clusters}{Minimum number of available clusters that will be maintained for this SQL warehouse.} - -\item{name}{Logical name for the cluster.} - -\item{spot_instance_policy}{Configures whether the warehouse should use spot instances.} - -\item{tags}{A set of key-value pairs that will be tagged on all resources (e.g., AWS instances and EBS volumes) associated with this SQL warehouse.} - -\item{warehouse_type}{Warehouse type: \code{PRO} or \code{CLASSIC}.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to the console.} -} -\description{ -This is a long-running operation, which blocks until Warehouses on Databricks reach -the RUNNING state within a default timeout of 20 minutes, which you can change via the \code{timeout} parameter. -By default, the state of Databricks Warehouses is reported to the console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Updates the configuration for a SQL warehouse. -}
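A sketch of the edit-and-wait pattern documented above (the warehouse ID and sizing values are illustrative):

# Sketch: resize an existing SQL warehouse and block until it is RUNNING again.
client <- DatabricksClient()
warehousesEditAndWait(
  client,
  id = "1234567890abcdef",  # illustrative warehouse ID
  cluster_size = "Small",
  auto_stop_mins = 30,
  max_num_clusters = 2
)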
diff --git a/man/warehousesGet.Rd b/man/warehousesGet.Rd deleted file mode 100644 index 22028341..00000000 --- a/man/warehousesGet.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesGet} -\alias{warehousesGet} -\title{Get warehouse info.} -\usage{ -warehousesGet(client, id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required.} -} -\description{ -Gets the information for a single SQL warehouse. -} diff --git a/man/warehousesGetPermissionLevels.Rd b/man/warehousesGetPermissionLevels.Rd deleted file mode 100644 index 08439606..00000000 --- a/man/warehousesGetPermissionLevels.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesGetPermissionLevels} -\alias{warehousesGetPermissionLevels} -\title{Get SQL warehouse permission levels.} -\usage{ -warehousesGetPermissionLevels(client, warehouse_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{warehouse_id}{Required. The SQL warehouse for which to get or manage permissions.} -} -\description{ -Gets the permission levels that a user can have on an object. -} diff --git a/man/warehousesGetPermissions.Rd b/man/warehousesGetPermissions.Rd deleted file mode 100644 index 61dc06a0..00000000 --- a/man/warehousesGetPermissions.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesGetPermissions} -\alias{warehousesGetPermissions} -\title{Get SQL warehouse permissions.} -\usage{ -warehousesGetPermissions(client, warehouse_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{warehouse_id}{Required. The SQL warehouse for which to get or manage permissions.} -} -\description{ -Gets the permissions of a SQL warehouse. SQL warehouses can inherit -permissions from their root object. -} diff --git a/man/warehousesGetWorkspaceWarehouseConfig.Rd b/man/warehousesGetWorkspaceWarehouseConfig.Rd deleted file mode 100644 index 8b077293..00000000 --- a/man/warehousesGetWorkspaceWarehouseConfig.Rd +++ /dev/null @@ -1,15 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesGetWorkspaceWarehouseConfig} -\alias{warehousesGetWorkspaceWarehouseConfig} -\title{Get the workspace configuration.} -\usage{ -warehousesGetWorkspaceWarehouseConfig(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Gets the workspace level configuration that is shared by all SQL warehouses -in a workspace. 
-} diff --git a/man/warehousesList.Rd b/man/warehousesList.Rd deleted file mode 100644 index d370c692..00000000 --- a/man/warehousesList.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesList} -\alias{warehousesList} -\title{List warehouses.} -\usage{ -warehousesList(client, run_as_user_id = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{run_as_user_id}{Service Principal which will be used to fetch the list of warehouses.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Lists all SQL warehouses that a user has manager permissions on. -} diff --git a/man/warehousesSetPermissions.Rd b/man/warehousesSetPermissions.Rd deleted file mode 100644 index 95cdded2..00000000 --- a/man/warehousesSetPermissions.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesSetPermissions} -\alias{warehousesSetPermissions} -\title{Set SQL warehouse permissions.} -\usage{ -warehousesSetPermissions(client, warehouse_id, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{warehouse_id}{Required. The SQL warehouse for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets permissions on a SQL warehouse. SQL warehouses can inherit permissions -from their root object. -} diff --git a/man/warehousesSetWorkspaceWarehouseConfig.Rd b/man/warehousesSetWorkspaceWarehouseConfig.Rd deleted file mode 100644 index 583079e0..00000000 --- a/man/warehousesSetWorkspaceWarehouseConfig.Rd +++ /dev/null @@ -1,44 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesSetWorkspaceWarehouseConfig} -\alias{warehousesSetWorkspaceWarehouseConfig} -\title{Set the workspace configuration.} -\usage{ -warehousesSetWorkspaceWarehouseConfig( - client, - channel = NULL, - config_param = NULL, - data_access_config = NULL, - enabled_warehouse_types = NULL, - global_param = NULL, - google_service_account = NULL, - instance_profile_arn = NULL, - security_policy = NULL, - sql_configuration_parameters = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{channel}{Optional: Channel selection details.} - -\item{config_param}{Deprecated: Use sql_configuration_parameters.} - -\item{data_access_config}{Spark confs for external Hive metastore configuration; the JSON-serialized size must be at most 512K.} - -\item{enabled_warehouse_types}{List of Warehouse Types allowed in this workspace (limits allowed value of the type field in CreateWarehouse and EditWarehouse).} - -\item{global_param}{Deprecated: Use sql_configuration_parameters.} - -\item{google_service_account}{GCP only: Google Service Account used to pass to cluster to access Google Cloud Storage.} - -\item{instance_profile_arn}{AWS Only: Instance profile used to pass IAM role to the cluster.} - -\item{security_policy}{Security policy for warehouses.} - -\item{sql_configuration_parameters}{SQL configuration parameters.} -} -\description{ -Sets the workspace level configuration that is shared by all SQL warehouses -in a workspace. -}
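A sketch of the workspace-level warehouse configuration calls above (the security policy value is an assumed enum from the underlying API, not documented on these pages):

# Sketch: read the workspace-wide warehouse config, then set a security policy.
client <- DatabricksClient()
cfg <- warehousesGetWorkspaceWarehouseConfig(client)
warehousesSetWorkspaceWarehouseConfig(
  client,
  security_policy = "DATA_ACCESS_CONTROL"  # assumed policy value
)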
diff --git a/man/warehousesStart.Rd b/man/warehousesStart.Rd deleted file mode 100644 index 0fa6f51e..00000000 --- a/man/warehousesStart.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesStart} -\alias{warehousesStart} -\title{Start a warehouse.} -\usage{ -warehousesStart(client, id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required.} -} -\description{ -Starts a SQL warehouse. -} diff --git a/man/warehousesStartAndWait.Rd b/man/warehousesStartAndWait.Rd deleted file mode 100644 index 3fa813ec..00000000 --- a/man/warehousesStartAndWait.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesStartAndWait} -\alias{warehousesStartAndWait} -\title{Start a warehouse.} -\usage{ -warehousesStartAndWait(client, id, timeout = 20, callback = cli_reporter) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to the console.} -} -\description{ -This is a long-running operation, which blocks until Warehouses on Databricks reach -the RUNNING state within a default timeout of 20 minutes, which you can change via the \code{timeout} parameter. -By default, the state of Databricks Warehouses is reported to the console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Starts a SQL warehouse. -} diff --git a/man/warehousesStop.Rd b/man/warehousesStop.Rd deleted file mode 100644 index 8760ad17..00000000 --- a/man/warehousesStop.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesStop} -\alias{warehousesStop} -\title{Stop a warehouse.} -\usage{ -warehousesStop(client, id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required.} -} -\description{ -Stops a SQL warehouse. -} diff --git a/man/warehousesStopAndWait.Rd b/man/warehousesStopAndWait.Rd deleted file mode 100644 index 8b0b6f0e..00000000 --- a/man/warehousesStopAndWait.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesStopAndWait} -\alias{warehousesStopAndWait} -\title{Stop a warehouse.} -\usage{ -warehousesStopAndWait(client, id, timeout = 20, callback = cli_reporter) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{id}{Required.} - -\item{timeout}{Time to wait for the operation to complete in minutes.} - -\item{callback}{Function to report the status of the operation. By default, it reports to the console.} -} -\description{ -This is a long-running operation, which blocks until Warehouses on Databricks reach -the STOPPED state within a default timeout of 20 minutes, which you can change via the \code{timeout} parameter. -By default, the state of Databricks Warehouses is reported to the console. You can change this behavior -by changing the \code{callback} parameter. -} -\details{ -Stops a SQL warehouse. -}
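The stop/start pair above composes naturally; a minimal sketch with an illustrative warehouse ID:

# Sketch: stop a warehouse, then bring it back up, blocking on each transition.
client <- DatabricksClient()
warehousesStopAndWait(client, id = "1234567890abcdef")   # returns once STOPPED
warehousesStartAndWait(client, id = "1234567890abcdef")  # returns once RUNNING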
diff --git a/man/warehousesUpdatePermissions.Rd b/man/warehousesUpdatePermissions.Rd deleted file mode 100644 index e4bd9383..00000000 --- a/man/warehousesUpdatePermissions.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/warehouses.R -\name{warehousesUpdatePermissions} -\alias{warehousesUpdatePermissions} -\title{Update SQL warehouse permissions.} -\usage{ -warehousesUpdatePermissions(client, warehouse_id, access_control_list = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{warehouse_id}{Required. The SQL warehouse for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Updates the permissions on a SQL warehouse. SQL warehouses can inherit -permissions from their root object. -} diff --git a/man/workspaceBindingsGet.Rd b/man/workspaceBindingsGet.Rd deleted file mode 100644 index 4479fcd0..00000000 --- a/man/workspaceBindingsGet.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace_bindings.R -\name{workspaceBindingsGet} -\alias{workspaceBindingsGet} -\title{Get catalog workspace bindings.} -\usage{ -workspaceBindingsGet(client, name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the catalog.} -} -\description{ -Gets workspace bindings of the catalog. The caller must be a metastore admin -or an owner of the catalog. -} diff --git a/man/workspaceBindingsGetBindings.Rd b/man/workspaceBindingsGetBindings.Rd deleted file mode 100644 index ffe66fb6..00000000 --- a/man/workspaceBindingsGetBindings.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace_bindings.R -\name{workspaceBindingsGetBindings} -\alias{workspaceBindingsGetBindings} -\title{Get securable workspace bindings.} -\usage{ -workspaceBindingsGetBindings(client, securable_type, securable_name) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{securable_type}{Required. The type of the securable.} - -\item{securable_name}{Required. The name of the securable.} -} -\description{ -Gets workspace bindings of the securable. The caller must be a metastore -admin or an owner of the securable. -} diff --git a/man/workspaceBindingsUpdate.Rd b/man/workspaceBindingsUpdate.Rd deleted file mode 100644 index 85417f2c..00000000 --- a/man/workspaceBindingsUpdate.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace_bindings.R -\name{workspaceBindingsUpdate} -\alias{workspaceBindingsUpdate} -\title{Update catalog workspace bindings.} -\usage{ -workspaceBindingsUpdate( - client, - name, - assign_workspaces = NULL, - unassign_workspaces = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{name}{Required. The name of the catalog.} - -\item{assign_workspaces}{A list of workspace IDs.} - -\item{unassign_workspaces}{A list of workspace IDs.} -} -\description{ -Updates workspace bindings of the catalog. The caller must be a metastore -admin or an owner of the catalog. 
-} diff --git a/man/workspaceBindingsUpdateBindings.Rd b/man/workspaceBindingsUpdateBindings.Rd deleted file mode 100644 index 889b268c..00000000 --- a/man/workspaceBindingsUpdateBindings.Rd +++ /dev/null @@ -1,29 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace_bindings.R -\name{workspaceBindingsUpdateBindings} -\alias{workspaceBindingsUpdateBindings} -\title{Update securable workspace bindings.} -\usage{ -workspaceBindingsUpdateBindings( - client, - securable_type, - securable_name, - add = NULL, - remove = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{securable_type}{Required. The type of the securable.} - -\item{securable_name}{Required. The name of the securable.} - -\item{add}{List of workspace bindings.} - -\item{remove}{List of workspace bindings.} -} -\description{ -Updates workspace bindings of the securable. The caller must be a metastore -admin or an owner of the securable. -} diff --git a/man/workspaceConfGetStatus.Rd b/man/workspaceConfGetStatus.Rd deleted file mode 100644 index c02b2102..00000000 --- a/man/workspaceConfGetStatus.Rd +++ /dev/null @@ -1,16 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace_conf.R -\name{workspaceConfGetStatus} -\alias{workspaceConfGetStatus} -\title{Check configuration status.} -\usage{ -workspaceConfGetStatus(client, keys) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{keys}{Required. This field has no description yet.} -} -\description{ -Gets the configuration status for a workspace. -} diff --git a/man/workspaceConfSetStatus.Rd b/man/workspaceConfSetStatus.Rd deleted file mode 100644 index 295b0028..00000000 --- a/man/workspaceConfSetStatus.Rd +++ /dev/null @@ -1,15 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace_conf.R -\name{workspaceConfSetStatus} -\alias{workspaceConfSetStatus} -\title{Enable/disable features.} -\usage{ -workspaceConfSetStatus(client) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} -} -\description{ -Sets the configuration status for a workspace, including enabling or -disabling it. -} diff --git a/man/workspaceDelete.Rd b/man/workspaceDelete.Rd deleted file mode 100644 index 75d539ff..00000000 --- a/man/workspaceDelete.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace.R -\name{workspaceDelete} -\alias{workspaceDelete} -\title{Delete a workspace object.} -\usage{ -workspaceDelete(client, path, recursive = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{path}{Required. The absolute path of the notebook or directory.} - -\item{recursive}{The flag that specifies whether to delete the object recursively.} -} -\description{ -Deletes an object or a directory (and optionally recursively deletes all -objects in the directory). * If \code{path} does not exist, this call returns an -error \code{RESOURCE_DOES_NOT_EXIST}. * If \code{path} is a non-empty directory and -\code{recursive} is set to \code{false}, this call returns an error -\code{DIRECTORY_NOT_EMPTY}. -} -\details{ -Object deletion cannot be undone and deleting a directory recursively is not -atomic. -}
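A sketch of the recursive delete documented above (the path is illustrative; recall that recursive deletion is irreversible and not atomic):

# Sketch: recursively delete a scratch directory of workspace objects.
client <- DatabricksClient()
workspaceDelete(
  client,
  path = "/Users/someone@example.com/scratch",  # illustrative path
  recursive = TRUE
)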
diff --git a/man/workspaceExport.Rd b/man/workspaceExport.Rd deleted file mode 100644 index 0b892fcf..00000000 --- a/man/workspaceExport.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace.R -\name{workspaceExport} -\alias{workspaceExport} -\title{Export a workspace object.} -\usage{ -workspaceExport(client, path, format = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{path}{Required. The absolute path of the object or directory.} - -\item{format}{This specifies the format of the exported file.} -} -\description{ -Exports an object or the contents of an entire directory. -} -\details{ -If \code{path} does not exist, this call returns an error -\code{RESOURCE_DOES_NOT_EXIST}. - -If the exported data would exceed the size limit, this call returns -\code{MAX_NOTEBOOK_SIZE_EXCEEDED}. Currently, this API does not support exporting -a library. -} diff --git a/man/workspaceGetPermissionLevels.Rd b/man/workspaceGetPermissionLevels.Rd deleted file mode 100644 index 7300f77f..00000000 --- a/man/workspaceGetPermissionLevels.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace.R -\name{workspaceGetPermissionLevels} -\alias{workspaceGetPermissionLevels} -\title{Get workspace object permission levels.} -\usage{ -workspaceGetPermissionLevels( - client, - workspace_object_type, - workspace_object_id -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{workspace_object_type}{Required. The workspace object type for which to get or manage permissions.} - -\item{workspace_object_id}{Required. The workspace object for which to get or manage permissions.} -} -\description{ -Gets the permission levels that a user can have on an object. -} diff --git a/man/workspaceGetPermissions.Rd b/man/workspaceGetPermissions.Rd deleted file mode 100644 index 67e62310..00000000 --- a/man/workspaceGetPermissions.Rd +++ /dev/null @@ -1,19 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace.R -\name{workspaceGetPermissions} -\alias{workspaceGetPermissions} -\title{Get workspace object permissions.} -\usage{ -workspaceGetPermissions(client, workspace_object_type, workspace_object_id) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{workspace_object_type}{Required. The workspace object type for which to get or manage permissions.} - -\item{workspace_object_id}{Required. The workspace object for which to get or manage permissions.} -} -\description{ -Gets the permissions of a workspace object. Workspace objects can inherit -permissions from their parent objects or root object. -} diff --git a/man/workspaceGetStatus.Rd b/man/workspaceGetStatus.Rd deleted file mode 100644 index b1654f4b..00000000 --- a/man/workspaceGetStatus.Rd +++ /dev/null @@ -1,17 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace.R -\name{workspaceGetStatus} -\alias{workspaceGetStatus} -\title{Get status.} -\usage{ -workspaceGetStatus(client, path) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{path}{Required. The absolute path of the notebook or directory.} -} -\description{ -Gets the status of an object or a directory. If \code{path} does not exist, this -call returns an error \code{RESOURCE_DOES_NOT_EXIST}. 
-} diff --git a/man/workspaceImport.Rd b/man/workspaceImport.Rd deleted file mode 100644 index 82b8aa09..00000000 --- a/man/workspaceImport.Rd +++ /dev/null @@ -1,36 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace.R -\name{workspaceImport} -\alias{workspaceImport} -\title{Import a workspace object.} -\usage{ -workspaceImport( - client, - path, - content = NULL, - format = NULL, - language = NULL, - overwrite = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{path}{Required. The absolute path of the object or directory.} - -\item{content}{The base64-encoded content.} - -\item{format}{This specifies the format of the file to be imported.} - -\item{language}{The language of the object.} - -\item{overwrite}{The flag that specifies whether to overwrite an existing object.} -} -\description{ -Imports a workspace object (for example, a notebook or file) or the contents -of an entire directory. If \code{path} already exists and \code{overwrite} is set to -\code{false}, this call returns an error \code{RESOURCE_ALREADY_EXISTS}. To import a -directory, you can use either the \code{DBC} format or the \code{SOURCE} format with -the \code{language} field unset. To import a single file as \code{SOURCE}, you must set -the \code{language} field. -} diff --git a/man/workspaceList.Rd b/man/workspaceList.Rd deleted file mode 100644 index f0cf3c42..00000000 --- a/man/workspaceList.Rd +++ /dev/null @@ -1,23 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace.R -\name{workspaceList} -\alias{workspaceList} -\title{List contents.} -\usage{ -workspaceList(client, path, notebooks_modified_after = NULL) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{path}{Required. The absolute path of the notebook or directory.} - -\item{notebooks_modified_after}{UTC timestamp in milliseconds.} -} -\value{ -\code{data.frame} with all of the response pages. -} -\description{ -Lists the contents of a directory, or the object if it is not a directory. If -the input path does not exist, this call returns an error -\code{RESOURCE_DOES_NOT_EXIST}. -} diff --git a/man/workspaceMkdirs.Rd b/man/workspaceMkdirs.Rd deleted file mode 100644 index dbddf301..00000000 --- a/man/workspaceMkdirs.Rd +++ /dev/null @@ -1,22 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace.R -\name{workspaceMkdirs} -\alias{workspaceMkdirs} -\title{Create a directory.} -\usage{ -workspaceMkdirs(client, path) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{path}{Required. The absolute path of the directory.} -} -\description{ -Creates the specified directory (and necessary parent directories if they do -not exist). If there is an object (not a directory) at any prefix of the -input path, this call returns an error \code{RESOURCE_ALREADY_EXISTS}. -} -\details{ -Note that if this operation fails it may have succeeded in creating some of -the necessary parent directories. -}
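A sketch combining the mkdirs and import calls above (paths are illustrative; the encoding step assumes the base64enc package is available):

# Sketch: create a directory, then import a small Python source file into it.
client <- DatabricksClient()
workspaceMkdirs(client, path = "/Users/someone@example.com/etl")
workspaceImport(
  client,
  path = "/Users/someone@example.com/etl/job",
  content = base64enc::base64encode(charToRaw("print('hi')")),
  format = "SOURCE",
  language = "PYTHON",  # required when importing a single SOURCE file
  overwrite = TRUE
)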
diff --git a/man/workspaceSetPermissions.Rd b/man/workspaceSetPermissions.Rd deleted file mode 100644 index 828ddfb4..00000000 --- a/man/workspaceSetPermissions.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace.R -\name{workspaceSetPermissions} -\alias{workspaceSetPermissions} -\title{Set workspace object permissions.} -\usage{ -workspaceSetPermissions( - client, - workspace_object_type, - workspace_object_id, - access_control_list = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{workspace_object_type}{Required. The workspace object type for which to get or manage permissions.} - -\item{workspace_object_id}{Required. The workspace object for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Sets permissions on a workspace object. Workspace objects can inherit -permissions from their parent objects or root object. -} diff --git a/man/workspaceUpdatePermissions.Rd b/man/workspaceUpdatePermissions.Rd deleted file mode 100644 index 5bbe9810..00000000 --- a/man/workspaceUpdatePermissions.Rd +++ /dev/null @@ -1,26 +0,0 @@ -% Generated by roxygen2: do not edit by hand -% Please edit documentation in R/workspace.R -\name{workspaceUpdatePermissions} -\alias{workspaceUpdatePermissions} -\title{Update workspace object permissions.} -\usage{ -workspaceUpdatePermissions( - client, - workspace_object_type, - workspace_object_id, - access_control_list = NULL -) -} -\arguments{ -\item{client}{Required. Instance of DatabricksClient()} - -\item{workspace_object_type}{Required. The workspace object type for which to get or manage permissions.} - -\item{workspace_object_id}{Required. The workspace object for which to get or manage permissions.} - -\item{access_control_list}{This field has no description yet.} -} -\description{ -Updates the permissions on a workspace object. Workspace objects can inherit -permissions from their parent objects or root object. -}
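Finally, a hedged sketch of the permissions calls above; access_control_list has no documented shape on these pages, so the entry below assumes the underlying REST API's list-of-objects form, and the object type and ID values are illustrative:

# Sketch: grant a user CAN_RUN on a notebook (ACL shape is an assumption).
client <- DatabricksClient()
workspaceSetPermissions(
  client,
  workspace_object_type = "notebooks",  # assumed object-type value
  workspace_object_id = "123456",       # illustrative object ID
  access_control_list = list(list(
    user_name = "someone@example.com",
    permission_level = "CAN_RUN"
  ))
)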