From 768e853195ab2ab7cd699686bf0d1af1b140db91 Mon Sep 17 00:00:00 2001
From: Hector Castejon Diaz
Date: Tue, 3 Jun 2025 06:42:17 +0000
Subject: [PATCH 1/8] Before

---
 .codegen/_openapi_sha | 2 +-
 .gitattributes | 16 ----
 .../com/databricks/sdk/AccountClient.java | 11 ++-
 .../com/databricks/sdk/WorkspaceClient.java | 2 +-
 .../databricks/sdk/core/DatabricksConfig.java | 8 +-
 .../sdk/service/catalog/ConnectionInfo.java | 10 +--
 .../sdk/service/catalog/ConnectionType.java | 9 +-
 .../sdk/service/catalog/ConnectionsAPI.java | 4 +
 .../sdk/service/catalog/CreateConnection.java | 2 +-
 .../sdk/service/catalog/CredentialType.java | 12 +--
 .../sdk/service/catalog/SchemaInfo.java | 9 +-
 .../sdk/service/catalog/SchemasAPI.java | 4 +
 .../sdk/service/catalog/UpdateSchema.java | 2 +-
 .../service/compute/ClusterAttributes.java | 49 -----------
 .../sdk/service/compute/ClusterDetails.java | 49 -----------
 .../sdk/service/compute/ClusterSpec.java | 48 -----------
 .../sdk/service/compute/CreateCluster.java | 49 -----------
 .../sdk/service/compute/EditCluster.java | 48 -----------
 .../compute/UpdateClusterResource.java | 49 -----------
 .../sdk/service/iam/AccountGroupsAPI.java | 2 +-
 .../iam/AccountServicePrincipalsAPI.java | 2 +-
 .../sdk/service/iam/AccountUsersAPI.java | 2 +-
 .../databricks/sdk/service/iam/GroupsAPI.java | 2 +-
 .../sdk/service/iam/ServicePrincipalsAPI.java | 2 +-
 .../databricks/sdk/service/iam/UsersAPI.java | 2 +-
 .../sdk/service/pipelines/CreatePipeline.java | 20 -----
 .../sdk/service/pipelines/EditPipeline.java | 20 -----
 .../pipelines/IngestionSourceType.java | 1 -
 .../sdk/service/pipelines/PipelineSpec.java | 20 -----
 .../service/serving/ServedEntityInput.java | 41 +--------
 .../service/serving/ServedEntityOutput.java | 41 +--------
 .../sdk/service/serving/ServedModelInput.java | 41 +--------
 .../service/serving/ServedModelOutput.java | 41 +--------
 .../settings/DashboardEmailSubscriptions.java | 86 -------------------
 .../DashboardEmailSubscriptionsAPI.java | 70 ---------------
 .../DashboardEmailSubscriptionsImpl.java | 59 -------------
 .../DashboardEmailSubscriptionsService.java | 40 ---------
 ...eteDashboardEmailSubscriptionsRequest.java | 54 ------------
 ...teDashboardEmailSubscriptionsResponse.java | 52 -----------
 .../DeleteSqlResultsDownloadRequest.java | 52 -----------
 .../DeleteSqlResultsDownloadResponse.java | 50 -----------
 ...GetDashboardEmailSubscriptionsRequest.java | 52 -----------
 .../GetSqlResultsDownloadRequest.java | 52 -----------
 .../GetWorkspaceNetworkOptionRequest.java | 2 +-
 .../sdk/service/settings/SettingsAPI.java | 24 ------
 .../service/settings/SqlResultsDownload.java | 86 -------------------
 .../settings/SqlResultsDownloadAPI.java | 68 ---------------
 .../settings/SqlResultsDownloadImpl.java | 58 -------------
 .../settings/SqlResultsDownloadService.java | 37 --------
 ...ateDashboardEmailSubscriptionsRequest.java | 85 ------------------
 .../UpdateSqlResultsDownloadRequest.java | 85 ------------------
 .../UpdateWorkspaceNetworkOptionRequest.java | 2 +-
 .../WorkspaceNetworkConfigurationAPI.java | 25 +++---
 .../WorkspaceNetworkConfigurationService.java | 25 +++---
 .../service/sharing/AuthenticationType.java | 1 -
 .../sdk/service/sql/AlertsV2API.java | 2 +-
 .../sdk/service/sql/AlertsV2Service.java | 2 +-
 57 files changed, 72 insertions(+), 1617 deletions(-)

 delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptions.java
 delete mode 100755
databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsAPI.java delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsImpl.java delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsService.java delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsRequest.java delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsResponse.java delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadRequest.java delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadResponse.java delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDashboardEmailSubscriptionsRequest.java delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetSqlResultsDownloadRequest.java delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownload.java delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadAPI.java delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadImpl.java delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadService.java delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDashboardEmailSubscriptionsRequest.java delete mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateSqlResultsDownloadRequest.java diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index a74101922..4347f1a0e 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -b142b72bea6f30d8efb36dfa8c58e0d63ae5329b \ No newline at end of file +file:./../openapi/2cee201b2e8d656f7306b2f9ec98edfa721e9829.json \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 706329a62..067333c32 100755 --- a/.gitattributes +++ b/.gitattributes @@ -2050,10 +2050,6 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CspEnablem databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CspEnablementAccountImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CspEnablementAccountService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CspEnablementAccountSetting.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptions.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsAPI.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsImpl.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DefaultNamespaceAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DefaultNamespaceImpl.java 
linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DefaultNamespaceService.java linguist-generated=true @@ -2065,8 +2061,6 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteAibi databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteAibiDashboardEmbeddingAccessPolicySettingResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsRequest.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDefaultNamespaceSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDefaultNamespaceSettingResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDisableLegacyAccessRequest.java linguist-generated=true @@ -2089,8 +2083,6 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeletePriv databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteRestrictWorkspaceAdminsSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteRestrictWorkspaceAdminsSettingResponse.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadRequest.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteTokenManagementRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DestinationType.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DisableLegacyAccess.java linguist-generated=true @@ -2165,7 +2157,6 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetAibiDas databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetAutomaticClusterUpdateSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetComplianceSecurityProfileSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetCspEnablementAccountSettingRequest.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDashboardEmailSubscriptionsRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDefaultNamespaceSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDisableLegacyAccessRequest.java linguist-generated=true 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDisableLegacyDbfsRequest.java linguist-generated=true @@ -2184,7 +2175,6 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetNotific databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetPersonalComputeSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetPrivateEndpointRuleRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetRestrictWorkspaceAdminsSettingRequest.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetSqlResultsDownloadRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetStatusRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetTokenManagementRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetTokenPermissionLevelsResponse.java linguist-generated=true @@ -2264,10 +2254,6 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAP databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SlackConfig.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownload.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadAPI.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadImpl.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/StringMessage.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenAccessControlRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenAccessControlResponse.java linguist-generated=true @@ -2290,7 +2276,6 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateAibi databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateAutomaticClusterUpdateSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateComplianceSecurityProfileSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateCspEnablementAccountSettingRequest.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDashboardEmailSubscriptionsRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDefaultNamespaceSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDisableLegacyAccessRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDisableLegacyDbfsRequest.java linguist-generated=true @@ -2311,7 +2296,6 @@ 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdatePers databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdatePrivateEndpointRule.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateRestrictWorkspaceAdminsSettingRequest.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateSqlResultsDownloadRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceConfAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceConfImpl.java linguist-generated=true diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java index be96caf24..1c813a589 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java @@ -574,12 +574,11 @@ public WorkspaceAssignmentAPI workspaceAssignment() { } /** - * These APIs allow configuration of network settings for Databricks workspaces by selecting which - * network policy to associate with the workspace. Each workspace is always associated with - * exactly one network policy that controls which network destinations can be accessed from the - * Databricks environment. By default, workspaces are associated with the 'default-policy' network - * policy. You cannot create or delete a workspace's network option, only update it to associate - * the workspace with a different policy + * These APIs allow configuration of network settings for Databricks workspaces. Each workspace is + * always associated with exactly one network policy that controls which network destinations can + * be accessed from the Databricks environment. By default, workspaces are associated with the + * 'default-policy' network policy. You cannot create or delete a workspace's network + * configuration, only update it to associate the workspace with a different policy. 
*/ public WorkspaceNetworkConfigurationAPI workspaceNetworkConfiguration() { return workspaceNetworkConfigurationAPI; diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java index d4c066a69..014c0b852 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java @@ -483,7 +483,7 @@ public AlertsLegacyAPI alertsLegacy() { return alertsLegacyAPI; } - /** New version of SQL Alerts */ + /** TODO: Add description */ public AlertsV2API alertsV2() { return alertsV2API; } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java index de6548982..df16ebae3 100644 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java @@ -410,13 +410,17 @@ public DatabricksConfig setAzureUseMsi(boolean azureUseMsi) { return this; } - /** @deprecated Use {@link #getAzureUseMsi()} instead. */ + /** + * @deprecated Use {@link #getAzureUseMsi()} instead. + */ @Deprecated() public boolean getAzureUseMSI() { return azureUseMsi; } - /** @deprecated Use {@link #setAzureUseMsi(boolean)} instead. */ + /** + * @deprecated Use {@link #setAzureUseMsi(boolean)} instead. + */ @Deprecated public DatabricksConfig setAzureUseMSI(boolean azureUseMsi) { this.azureUseMsi = azureUseMsi; diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionInfo.java index 496800340..5e2e8d332 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionInfo.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionInfo.java @@ -54,7 +54,7 @@ public class ConnectionInfo { @JsonProperty("owner") private String owner; - /** A map of key-value properties attached to the securable. */ + /** An object containing map of key-value properties attached to the connection. */ @JsonProperty("properties") private Map properties; @@ -66,9 +66,9 @@ public class ConnectionInfo { @JsonProperty("read_only") private Boolean readOnly; - /** The type of Unity Catalog securable. */ + /** */ @JsonProperty("securable_type") - private SecurableType securableType; + private String securableType; /** Time at which this connection was updated, in epoch milliseconds. 
*/ @JsonProperty("updated_at") @@ -208,12 +208,12 @@ public Boolean getReadOnly() { return readOnly; } - public ConnectionInfo setSecurableType(SecurableType securableType) { + public ConnectionInfo setSecurableType(String securableType) { this.securableType = securableType; return this; } - public SecurableType getSecurableType() { + public String getSecurableType() { return securableType; } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionType.java index c43cb89bd..b6e6a3e33 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionType.java @@ -4,27 +4,20 @@ import com.databricks.sdk.support.Generated; -/** Next Id: 30 */ +/** The type of connection. */ @Generated public enum ConnectionType { BIGQUERY, DATABRICKS, - GA4_RAW_DATA, GLUE, HIVE_METASTORE, HTTP, MYSQL, ORACLE, POSTGRESQL, - POWER_BI, REDSHIFT, - SALESFORCE, - SALESFORCE_DATA_CLOUD, - SERVICENOW, SNOWFLAKE, SQLDW, SQLSERVER, TERADATA, - UNKNOWN_CONNECTION_TYPE, - WORKDAY_RAAS, } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java index ff8b2cda9..eb449c1dd 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java @@ -85,6 +85,10 @@ public ConnectionInfo get(GetConnectionRequest request) { *

List all connections. */ public Iterable list(ListConnectionsRequest request) { + + if (request.getMaxResults() == null) { + request.setMaxResults(0L); + } return new Paginator<>( request, impl::list, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateConnection.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateConnection.java index 3eea7832c..2836337ce 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateConnection.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateConnection.java @@ -26,7 +26,7 @@ public class CreateConnection { @JsonProperty("options") private Map options; - /** A map of key-value properties attached to the securable. */ + /** An object containing map of key-value properties attached to the connection. */ @JsonProperty("properties") private Map properties; diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CredentialType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CredentialType.java index b5f06caf4..7f8868e05 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CredentialType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CredentialType.java @@ -4,19 +4,9 @@ import com.databricks.sdk.support.Generated; -/** Next Id: 12 */ +/** The type of credential. */ @Generated public enum CredentialType { BEARER_TOKEN, - OAUTH_ACCESS_TOKEN, - OAUTH_M2M, - OAUTH_REFRESH_TOKEN, - OAUTH_RESOURCE_OWNER_PASSWORD, - OAUTH_U2M, - OAUTH_U2M_MAPPING, - OIDC_TOKEN, - PEM_PRIVATE_KEY, - SERVICE_CREDENTIAL, - UNKNOWN_CREDENTIAL_TYPE, USERNAME_PASSWORD, } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemaInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemaInfo.java index 6f7da5149..9358ca802 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemaInfo.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemaInfo.java @@ -8,7 +8,6 @@ import java.util.Map; import java.util.Objects; -/** Next ID: 40 */ @Generated public class SchemaInfo { /** @@ -24,7 +23,7 @@ public class SchemaInfo { /** The type of the parent catalog. */ @JsonProperty("catalog_type") - private CatalogType catalogType; + private String catalogType; /** User-provided free-form text description. */ @JsonProperty("comment") @@ -42,7 +41,7 @@ public class SchemaInfo { @JsonProperty("effective_predictive_optimization_flag") private EffectivePredictiveOptimizationFlag effectivePredictiveOptimizationFlag; - /** Whether predictive optimization should be enabled for this object and objects under it. 
*/ + /** */ @JsonProperty("enable_predictive_optimization") private EnablePredictiveOptimization enablePredictiveOptimization; @@ -104,12 +103,12 @@ public String getCatalogName() { return catalogName; } - public SchemaInfo setCatalogType(CatalogType catalogType) { + public SchemaInfo setCatalogType(String catalogType) { this.catalogType = catalogType; return this; } - public CatalogType getCatalogType() { + public String getCatalogType() { return catalogType; } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java index acaa88214..c1345a265 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java @@ -85,6 +85,10 @@ public Iterable list(String catalogName) { * the array. */ public Iterable list(ListSchemasRequest request) { + + if (request.getMaxResults() == null) { + request.setMaxResults(0L); + } return new Paginator<>( request, impl::list, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateSchema.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateSchema.java index 5c910b1e7..0cb434d26 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateSchema.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateSchema.java @@ -15,7 +15,7 @@ public class UpdateSchema { @JsonProperty("comment") private String comment; - /** Whether predictive optimization should be enabled for this object and objects under it. */ + /** */ @JsonProperty("enable_predictive_optimization") private EnablePredictiveOptimization enablePredictiveOptimization; diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java index c359b9f8a..ff9668106 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java @@ -191,14 +191,6 @@ public class ClusterAttributes { @JsonProperty("policy_id") private String policyId; - /** If set, what the configurable IOPS for the remote shuffle disk is. */ - @JsonProperty("remote_shuffle_disk_iops") - private Long remoteShuffleDiskIops; - - /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ - @JsonProperty("remote_shuffle_disk_throughput") - private Long remoteShuffleDiskThroughput; - /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -254,10 +246,6 @@ public class ClusterAttributes { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; - /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ - @JsonProperty("total_initial_remote_shuffle_disk_size") - private Long totalInitialRemoteShuffleDiskSize; - /** * This field can only be used when `kind = CLASSIC_PREVIEW`. 
* @@ -442,24 +430,6 @@ public String getPolicyId() { return policyId; } - public ClusterAttributes setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { - this.remoteShuffleDiskIops = remoteShuffleDiskIops; - return this; - } - - public Long getRemoteShuffleDiskIops() { - return remoteShuffleDiskIops; - } - - public ClusterAttributes setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { - this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; - return this; - } - - public Long getRemoteShuffleDiskThroughput() { - return remoteShuffleDiskThroughput; - } - public ClusterAttributes setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -514,16 +484,6 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } - public ClusterAttributes setTotalInitialRemoteShuffleDiskSize( - Long totalInitialRemoteShuffleDiskSize) { - this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; - return this; - } - - public Long getTotalInitialRemoteShuffleDiskSize() { - return totalInitialRemoteShuffleDiskSize; - } - public ClusterAttributes setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -566,15 +526,12 @@ public boolean equals(Object o) { && Objects.equals(kind, that.kind) && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(policyId, that.policyId) - && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) - && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) - && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -601,15 +558,12 @@ public int hashCode() { kind, nodeTypeId, policyId, - remoteShuffleDiskIops, - remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, - totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -636,15 +590,12 @@ public String toString() { .add("kind", kind) .add("nodeTypeId", nodeTypeId) .add("policyId", policyId) - .add("remoteShuffleDiskIops", remoteShuffleDiskIops) - .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) - .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java index b2309f101..8b957e3dc 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java @@ -287,14 +287,6 @@ public class ClusterDetails { @JsonProperty("policy_id") private String policyId; - /** If set, what the configurable IOPS for the remote shuffle disk 
is. */ - @JsonProperty("remote_shuffle_disk_iops") - private Long remoteShuffleDiskIops; - - /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ - @JsonProperty("remote_shuffle_disk_throughput") - private Long remoteShuffleDiskThroughput; - /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -394,10 +386,6 @@ public class ClusterDetails { @JsonProperty("termination_reason") private TerminationReason terminationReason; - /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ - @JsonProperty("total_initial_remote_shuffle_disk_size") - private Long totalInitialRemoteShuffleDiskSize; - /** * This field can only be used when `kind = CLASSIC_PREVIEW`. * @@ -708,24 +696,6 @@ public String getPolicyId() { return policyId; } - public ClusterDetails setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { - this.remoteShuffleDiskIops = remoteShuffleDiskIops; - return this; - } - - public Long getRemoteShuffleDiskIops() { - return remoteShuffleDiskIops; - } - - public ClusterDetails setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { - this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; - return this; - } - - public Long getRemoteShuffleDiskThroughput() { - return remoteShuffleDiskThroughput; - } - public ClusterDetails setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -843,16 +813,6 @@ public TerminationReason getTerminationReason() { return terminationReason; } - public ClusterDetails setTotalInitialRemoteShuffleDiskSize( - Long totalInitialRemoteShuffleDiskSize) { - this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; - return this; - } - - public Long getTotalInitialRemoteShuffleDiskSize() { - return totalInitialRemoteShuffleDiskSize; - } - public ClusterDetails setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -909,8 +869,6 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) - && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) - && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) @@ -924,7 +882,6 @@ public boolean equals(Object o) { && Objects.equals(stateMessage, that.stateMessage) && Objects.equals(terminatedTime, that.terminatedTime) && Objects.equals(terminationReason, that.terminationReason) - && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -965,8 +922,6 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, - remoteShuffleDiskIops, - remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, @@ -980,7 +935,6 @@ public int hashCode() { stateMessage, terminatedTime, terminationReason, - totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -1021,8 +975,6 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) - .add("remoteShuffleDiskIops", remoteShuffleDiskIops) - .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", 
singleUserName) .add("sparkConf", sparkConf) @@ -1036,7 +988,6 @@ public String toString() { .add("stateMessage", stateMessage) .add("terminatedTime", terminatedTime) .add("terminationReason", terminationReason) - .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java index 78c7ddbfa..08cd8a715 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java @@ -218,14 +218,6 @@ public class ClusterSpec { @JsonProperty("policy_id") private String policyId; - /** If set, what the configurable IOPS for the remote shuffle disk is. */ - @JsonProperty("remote_shuffle_disk_iops") - private Long remoteShuffleDiskIops; - - /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ - @JsonProperty("remote_shuffle_disk_throughput") - private Long remoteShuffleDiskThroughput; - /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -281,10 +273,6 @@ public class ClusterSpec { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; - /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ - @JsonProperty("total_initial_remote_shuffle_disk_size") - private Long totalInitialRemoteShuffleDiskSize; - /** * This field can only be used when `kind = CLASSIC_PREVIEW`. * @@ -496,24 +484,6 @@ public String getPolicyId() { return policyId; } - public ClusterSpec setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { - this.remoteShuffleDiskIops = remoteShuffleDiskIops; - return this; - } - - public Long getRemoteShuffleDiskIops() { - return remoteShuffleDiskIops; - } - - public ClusterSpec setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { - this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; - return this; - } - - public Long getRemoteShuffleDiskThroughput() { - return remoteShuffleDiskThroughput; - } - public ClusterSpec setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -568,15 +538,6 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } - public ClusterSpec setTotalInitialRemoteShuffleDiskSize(Long totalInitialRemoteShuffleDiskSize) { - this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; - return this; - } - - public Long getTotalInitialRemoteShuffleDiskSize() { - return totalInitialRemoteShuffleDiskSize; - } - public ClusterSpec setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -622,15 +583,12 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) - && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) - && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) - && 
Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -660,15 +618,12 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, - remoteShuffleDiskIops, - remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, - totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -698,15 +653,12 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) - .add("remoteShuffleDiskIops", remoteShuffleDiskIops) - .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) - .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java index 027bae1c8..79853eda0 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java @@ -221,14 +221,6 @@ public class CreateCluster { @JsonProperty("policy_id") private String policyId; - /** If set, what the configurable IOPS for the remote shuffle disk is. */ - @JsonProperty("remote_shuffle_disk_iops") - private Long remoteShuffleDiskIops; - - /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ - @JsonProperty("remote_shuffle_disk_throughput") - private Long remoteShuffleDiskThroughput; - /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -284,10 +276,6 @@ public class CreateCluster { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; - /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ - @JsonProperty("total_initial_remote_shuffle_disk_size") - private Long totalInitialRemoteShuffleDiskSize; - /** * This field can only be used when `kind = CLASSIC_PREVIEW`. 
* @@ -508,24 +496,6 @@ public String getPolicyId() { return policyId; } - public CreateCluster setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { - this.remoteShuffleDiskIops = remoteShuffleDiskIops; - return this; - } - - public Long getRemoteShuffleDiskIops() { - return remoteShuffleDiskIops; - } - - public CreateCluster setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { - this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; - return this; - } - - public Long getRemoteShuffleDiskThroughput() { - return remoteShuffleDiskThroughput; - } - public CreateCluster setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -580,16 +550,6 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } - public CreateCluster setTotalInitialRemoteShuffleDiskSize( - Long totalInitialRemoteShuffleDiskSize) { - this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; - return this; - } - - public Long getTotalInitialRemoteShuffleDiskSize() { - return totalInitialRemoteShuffleDiskSize; - } - public CreateCluster setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -636,15 +596,12 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) - && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) - && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) - && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -675,15 +632,12 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, - remoteShuffleDiskIops, - remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, - totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -714,15 +668,12 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) - .add("remoteShuffleDiskIops", remoteShuffleDiskIops) - .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) - .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java index bbf12f00d..81c1b7e85 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java @@ -218,14 +218,6 @@ public class EditCluster { @JsonProperty("policy_id") private String policyId; - /** If set, what the configurable IOPS for the remote shuffle disk is. 
*/ - @JsonProperty("remote_shuffle_disk_iops") - private Long remoteShuffleDiskIops; - - /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ - @JsonProperty("remote_shuffle_disk_throughput") - private Long remoteShuffleDiskThroughput; - /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -281,10 +273,6 @@ public class EditCluster { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; - /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ - @JsonProperty("total_initial_remote_shuffle_disk_size") - private Long totalInitialRemoteShuffleDiskSize; - /** * This field can only be used when `kind = CLASSIC_PREVIEW`. * @@ -505,24 +493,6 @@ public String getPolicyId() { return policyId; } - public EditCluster setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { - this.remoteShuffleDiskIops = remoteShuffleDiskIops; - return this; - } - - public Long getRemoteShuffleDiskIops() { - return remoteShuffleDiskIops; - } - - public EditCluster setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { - this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; - return this; - } - - public Long getRemoteShuffleDiskThroughput() { - return remoteShuffleDiskThroughput; - } - public EditCluster setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -577,15 +547,6 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } - public EditCluster setTotalInitialRemoteShuffleDiskSize(Long totalInitialRemoteShuffleDiskSize) { - this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; - return this; - } - - public Long getTotalInitialRemoteShuffleDiskSize() { - return totalInitialRemoteShuffleDiskSize; - } - public EditCluster setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -632,15 +593,12 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) - && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) - && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) - && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -671,15 +629,12 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, - remoteShuffleDiskIops, - remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, - totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -710,15 +665,12 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) - .add("remoteShuffleDiskIops", remoteShuffleDiskIops) - .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) - 
.add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java index 7d3e13c7d..151d44359 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java @@ -207,14 +207,6 @@ public class UpdateClusterResource { @JsonProperty("policy_id") private String policyId; - /** If set, what the configurable IOPS for the remote shuffle disk is. */ - @JsonProperty("remote_shuffle_disk_iops") - private Long remoteShuffleDiskIops; - - /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ - @JsonProperty("remote_shuffle_disk_throughput") - private Long remoteShuffleDiskThroughput; - /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -270,10 +262,6 @@ public class UpdateClusterResource { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; - /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ - @JsonProperty("total_initial_remote_shuffle_disk_size") - private Long totalInitialRemoteShuffleDiskSize; - /** * This field can only be used when `kind = CLASSIC_PREVIEW`. * @@ -476,24 +464,6 @@ public String getPolicyId() { return policyId; } - public UpdateClusterResource setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { - this.remoteShuffleDiskIops = remoteShuffleDiskIops; - return this; - } - - public Long getRemoteShuffleDiskIops() { - return remoteShuffleDiskIops; - } - - public UpdateClusterResource setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { - this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; - return this; - } - - public Long getRemoteShuffleDiskThroughput() { - return remoteShuffleDiskThroughput; - } - public UpdateClusterResource setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -548,16 +518,6 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } - public UpdateClusterResource setTotalInitialRemoteShuffleDiskSize( - Long totalInitialRemoteShuffleDiskSize) { - this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; - return this; - } - - public Long getTotalInitialRemoteShuffleDiskSize() { - return totalInitialRemoteShuffleDiskSize; - } - public UpdateClusterResource setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -602,15 +562,12 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) - && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) - && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) - && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, 
that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -639,15 +596,12 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, - remoteShuffleDiskIops, - remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, - totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -676,15 +630,12 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) - .add("remoteShuffleDiskIops", remoteShuffleDiskIops) - .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) - .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountGroupsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountGroupsAPI.java index 634e2397a..7016a0673 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountGroupsAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountGroupsAPI.java @@ -75,7 +75,7 @@ public Group get(GetAccountGroupRequest request) { public Iterable list(ListAccountGroupsRequest request) { request.setStartIndex(1L); if (request.getCount() == null) { - request.setCount(100L); + request.setCount(10000L); } return new Paginator<>( request, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountServicePrincipalsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountServicePrincipalsAPI.java index ff6280873..415577a5a 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountServicePrincipalsAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountServicePrincipalsAPI.java @@ -73,7 +73,7 @@ public ServicePrincipal get(GetAccountServicePrincipalRequest request) { public Iterable list(ListAccountServicePrincipalsRequest request) { request.setStartIndex(1L); if (request.getCount() == null) { - request.setCount(100L); + request.setCount(10000L); } return new Paginator<>( request, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountUsersAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountUsersAPI.java index b378db43c..77249ac5e 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountUsersAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountUsersAPI.java @@ -79,7 +79,7 @@ public User get(GetAccountUserRequest request) { public Iterable list(ListAccountUsersRequest request) { request.setStartIndex(1L); if (request.getCount() == null) { - request.setCount(100L); + request.setCount(10000L); } return new Paginator<>( request, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GroupsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GroupsAPI.java index 01a61454b..a6b7414d2 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GroupsAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GroupsAPI.java @@ -75,7 +75,7 @@ public Group get(GetGroupRequest 
request) { public Iterable list(ListGroupsRequest request) { request.setStartIndex(1L); if (request.getCount() == null) { - request.setCount(100L); + request.setCount(10000L); } return new Paginator<>( request, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/ServicePrincipalsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/ServicePrincipalsAPI.java index 28d5c5511..f28627207 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/ServicePrincipalsAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/ServicePrincipalsAPI.java @@ -73,7 +73,7 @@ public ServicePrincipal get(GetServicePrincipalRequest request) { public Iterable list(ListServicePrincipalsRequest request) { request.setStartIndex(1L); if (request.getCount() == null) { - request.setCount(100L); + request.setCount(10000L); } return new Paginator<>( request, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/UsersAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/UsersAPI.java index d079aba02..d5797926d 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/UsersAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/UsersAPI.java @@ -98,7 +98,7 @@ public PasswordPermissions getPermissions() { public Iterable list(ListUsersRequest request) { request.setStartIndex(1L); if (request.getCount() == null) { - request.setCount(100L); + request.setCount(10000L); } return new Paginator<>( request, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CreatePipeline.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CreatePipeline.java index b81984eca..7533ab6c4 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CreatePipeline.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CreatePipeline.java @@ -134,14 +134,6 @@ public class CreatePipeline { @JsonProperty("storage") private String storage; - /** - * A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, - * and are therefore subject to the same limitations. A maximum of 25 tags can be added to the - * pipeline. - */ - @JsonProperty("tags") - private Map tags; - /** * Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` * must be specified. To publish to Unity Catalog, also specify `catalog`. 
This legacy field is @@ -388,15 +380,6 @@ public String getStorage() { return storage; } - public CreatePipeline setTags(Map tags) { - this.tags = tags; - return this; - } - - public Map getTags() { - return tags; - } - public CreatePipeline setTarget(String target) { this.target = target; return this; @@ -446,7 +429,6 @@ public boolean equals(Object o) { && Objects.equals(schema, that.schema) && Objects.equals(serverless, that.serverless) && Objects.equals(storage, that.storage) - && Objects.equals(tags, that.tags) && Objects.equals(target, that.target) && Objects.equals(trigger, that.trigger); } @@ -480,7 +462,6 @@ public int hashCode() { schema, serverless, storage, - tags, target, trigger); } @@ -514,7 +495,6 @@ public String toString() { .add("schema", schema) .add("serverless", serverless) .add("storage", storage) - .add("tags", tags) .add("target", target) .add("trigger", trigger) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/EditPipeline.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/EditPipeline.java index 776b17166..444759473 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/EditPipeline.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/EditPipeline.java @@ -143,14 +143,6 @@ public class EditPipeline { @JsonProperty("storage") private String storage; - /** - * A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, - * and are therefore subject to the same limitations. A maximum of 25 tags can be added to the - * pipeline. - */ - @JsonProperty("tags") - private Map tags; - /** * Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` * must be specified. To publish to Unity Catalog, also specify `catalog`. 
This legacy field is @@ -406,15 +398,6 @@ public String getStorage() { return storage; } - public EditPipeline setTags(Map tags) { - this.tags = tags; - return this; - } - - public Map getTags() { - return tags; - } - public EditPipeline setTarget(String target) { this.target = target; return this; @@ -465,7 +448,6 @@ public boolean equals(Object o) { && Objects.equals(schema, that.schema) && Objects.equals(serverless, that.serverless) && Objects.equals(storage, that.storage) - && Objects.equals(tags, that.tags) && Objects.equals(target, that.target) && Objects.equals(trigger, that.trigger); } @@ -500,7 +482,6 @@ public int hashCode() { schema, serverless, storage, - tags, target, trigger); } @@ -535,7 +516,6 @@ public String toString() { .add("schema", schema) .add("serverless", serverless) .add("storage", storage) - .add("tags", tags) .add("target", target) .add("trigger", trigger) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/IngestionSourceType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/IngestionSourceType.java index 272a8235d..c7620bc7f 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/IngestionSourceType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/IngestionSourceType.java @@ -17,6 +17,5 @@ public enum IngestionSourceType { SERVICENOW, SHAREPOINT, SQLSERVER, - TERADATA, WORKDAY_RAAS, } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineSpec.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineSpec.java index b4c5c4d8e..913972a57 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineSpec.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineSpec.java @@ -115,14 +115,6 @@ public class PipelineSpec { @JsonProperty("storage") private String storage; - /** - * A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, - * and are therefore subject to the same limitations. A maximum of 25 tags can be added to the - * pipeline. - */ - @JsonProperty("tags") - private Map tags; - /** * Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` * must be specified. To publish to Unity Catalog, also specify `catalog`. 
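Across CreatePipeline, EditPipeline, and PipelineSpec, the hunks above remove the `tags` map (its setter, getter, and equals/hashCode/toString entries), so a pipeline request is now assembled from the remaining fluent setters. A hedged sketch of creating a Unity Catalog pipeline without tags (the name, catalog, and schema values are illustrative):

    import com.databricks.sdk.WorkspaceClient;
    import com.databricks.sdk.service.pipelines.CreatePipeline;
    import com.databricks.sdk.service.pipelines.CreatePipelineResponse;

    public class CreatePipelineExample {
      public static void main(String[] args) {
        WorkspaceClient w = new WorkspaceClient();

        // Exactly one of `schema` or `target` may be set; `catalog` is also
        // given so tables publish to Unity Catalog, per the field docs above.
        CreatePipelineResponse resp = w.pipelines().create(
            new CreatePipeline()
                .setName("example-pipeline") // illustrative
                .setCatalog("main")          // illustrative
                .setSchema("analytics"));    // illustrative
        System.out.println("pipeline id: " + resp.getPipelineId());
      }
    }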
This legacy field is @@ -342,15 +334,6 @@ public String getStorage() { return storage; } - public PipelineSpec setTags(Map tags) { - this.tags = tags; - return this; - } - - public Map getTags() { - return tags; - } - public PipelineSpec setTarget(String target) { this.target = target; return this; @@ -397,7 +380,6 @@ public boolean equals(Object o) { && Objects.equals(schema, that.schema) && Objects.equals(serverless, that.serverless) && Objects.equals(storage, that.storage) - && Objects.equals(tags, that.tags) && Objects.equals(target, that.target) && Objects.equals(trigger, that.trigger); } @@ -428,7 +410,6 @@ public int hashCode() { schema, serverless, storage, - tags, target, trigger); } @@ -459,7 +440,6 @@ public String toString() { .add("schema", schema) .add("serverless", serverless) .add("storage", storage) - .add("tags", tags) .add("target", target) .add("trigger", trigger) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityInput.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityInput.java index ca9ccf251..9e9593df2 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityInput.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityInput.java @@ -48,24 +48,10 @@ public class ServedEntityInput { @JsonProperty("instance_profile_arn") private String instanceProfileArn; - /** - * The maximum provisioned concurrency that the endpoint can scale up to. Do not use if - * workload_size is specified. - */ - @JsonProperty("max_provisioned_concurrency") - private Long maxProvisionedConcurrency; - /** The maximum tokens per second that the endpoint can scale up to. */ @JsonProperty("max_provisioned_throughput") private Long maxProvisionedThroughput; - /** - * The minimum provisioned concurrency that the endpoint can scale down to. Do not use if - * workload_size is specified. - */ - @JsonProperty("min_provisioned_concurrency") - private Long minProvisionedConcurrency; - /** The minimum tokens per second that the endpoint can scale down to. */ @JsonProperty("min_provisioned_throughput") private Long minProvisionedThroughput; @@ -94,8 +80,7 @@ public class ServedEntityInput { * concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned * concurrency). Additional custom workload sizes can also be used when available in the * workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each - * workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency - * are specified. + * workload size is 0. 
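With the min/max provisioned-concurrency fields removed by the serving hunks above, `workloadSize` plus scale-to-zero remains the sizing control on served entities. A hedged sketch of a served-entity config under that model (the entity name and version are illustrative, and `entityName`/`entityVersion` are fields of the generated model that this hunk does not show):

    import com.databricks.sdk.service.serving.ServedEntityInput;

    public class ServedEntityExample {
      public static void main(String[] args) {
        // "Small" corresponds to 4 provisioned concurrency; with scale-to-zero
        // enabled the lower bound drops to 0, per the workload_size docs above.
        ServedEntityInput entity = new ServedEntityInput()
            .setEntityName("main.default.my_model") // illustrative UC model path
            .setEntityVersion("1")
            .setWorkloadSize("Small")
            .setScaleToZeroEnabled(true);
        System.out.println(entity);
      }
    }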
*/ @JsonProperty("workload_size") private String workloadSize; @@ -157,15 +142,6 @@ public String getInstanceProfileArn() { return instanceProfileArn; } - public ServedEntityInput setMaxProvisionedConcurrency(Long maxProvisionedConcurrency) { - this.maxProvisionedConcurrency = maxProvisionedConcurrency; - return this; - } - - public Long getMaxProvisionedConcurrency() { - return maxProvisionedConcurrency; - } - public ServedEntityInput setMaxProvisionedThroughput(Long maxProvisionedThroughput) { this.maxProvisionedThroughput = maxProvisionedThroughput; return this; @@ -175,15 +151,6 @@ public Long getMaxProvisionedThroughput() { return maxProvisionedThroughput; } - public ServedEntityInput setMinProvisionedConcurrency(Long minProvisionedConcurrency) { - this.minProvisionedConcurrency = minProvisionedConcurrency; - return this; - } - - public Long getMinProvisionedConcurrency() { - return minProvisionedConcurrency; - } - public ServedEntityInput setMinProvisionedThroughput(Long minProvisionedThroughput) { this.minProvisionedThroughput = minProvisionedThroughput; return this; @@ -248,9 +215,7 @@ public boolean equals(Object o) { && Objects.equals(environmentVars, that.environmentVars) && Objects.equals(externalModel, that.externalModel) && Objects.equals(instanceProfileArn, that.instanceProfileArn) - && Objects.equals(maxProvisionedConcurrency, that.maxProvisionedConcurrency) && Objects.equals(maxProvisionedThroughput, that.maxProvisionedThroughput) - && Objects.equals(minProvisionedConcurrency, that.minProvisionedConcurrency) && Objects.equals(minProvisionedThroughput, that.minProvisionedThroughput) && Objects.equals(name, that.name) && Objects.equals(provisionedModelUnits, that.provisionedModelUnits) @@ -267,9 +232,7 @@ public int hashCode() { environmentVars, externalModel, instanceProfileArn, - maxProvisionedConcurrency, maxProvisionedThroughput, - minProvisionedConcurrency, minProvisionedThroughput, name, provisionedModelUnits, @@ -286,9 +249,7 @@ public String toString() { .add("environmentVars", environmentVars) .add("externalModel", externalModel) .add("instanceProfileArn", instanceProfileArn) - .add("maxProvisionedConcurrency", maxProvisionedConcurrency) .add("maxProvisionedThroughput", maxProvisionedThroughput) - .add("minProvisionedConcurrency", minProvisionedConcurrency) .add("minProvisionedThroughput", minProvisionedThroughput) .add("name", name) .add("provisionedModelUnits", provisionedModelUnits) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityOutput.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityOutput.java index 129841ac9..74b58f742 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityOutput.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityOutput.java @@ -63,24 +63,10 @@ public class ServedEntityOutput { @JsonProperty("instance_profile_arn") private String instanceProfileArn; - /** - * The maximum provisioned concurrency that the endpoint can scale up to. Do not use if - * workload_size is specified. - */ - @JsonProperty("max_provisioned_concurrency") - private Long maxProvisionedConcurrency; - /** The maximum tokens per second that the endpoint can scale up to. */ @JsonProperty("max_provisioned_throughput") private Long maxProvisionedThroughput; - /** - * The minimum provisioned concurrency that the endpoint can scale down to. Do not use if - * workload_size is specified. 
- */ - @JsonProperty("min_provisioned_concurrency") - private Long minProvisionedConcurrency; - /** The minimum tokens per second that the endpoint can scale down to. */ @JsonProperty("min_provisioned_throughput") private Long minProvisionedThroughput; @@ -113,8 +99,7 @@ public class ServedEntityOutput { * concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned * concurrency). Additional custom workload sizes can also be used when available in the * workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each - * workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency - * are specified. + * workload size is 0. */ @JsonProperty("workload_size") private String workloadSize; @@ -203,15 +188,6 @@ public String getInstanceProfileArn() { return instanceProfileArn; } - public ServedEntityOutput setMaxProvisionedConcurrency(Long maxProvisionedConcurrency) { - this.maxProvisionedConcurrency = maxProvisionedConcurrency; - return this; - } - - public Long getMaxProvisionedConcurrency() { - return maxProvisionedConcurrency; - } - public ServedEntityOutput setMaxProvisionedThroughput(Long maxProvisionedThroughput) { this.maxProvisionedThroughput = maxProvisionedThroughput; return this; @@ -221,15 +197,6 @@ public Long getMaxProvisionedThroughput() { return maxProvisionedThroughput; } - public ServedEntityOutput setMinProvisionedConcurrency(Long minProvisionedConcurrency) { - this.minProvisionedConcurrency = minProvisionedConcurrency; - return this; - } - - public Long getMinProvisionedConcurrency() { - return minProvisionedConcurrency; - } - public ServedEntityOutput setMinProvisionedThroughput(Long minProvisionedThroughput) { this.minProvisionedThroughput = minProvisionedThroughput; return this; @@ -306,9 +273,7 @@ public boolean equals(Object o) { && Objects.equals(externalModel, that.externalModel) && Objects.equals(foundationModel, that.foundationModel) && Objects.equals(instanceProfileArn, that.instanceProfileArn) - && Objects.equals(maxProvisionedConcurrency, that.maxProvisionedConcurrency) && Objects.equals(maxProvisionedThroughput, that.maxProvisionedThroughput) - && Objects.equals(minProvisionedConcurrency, that.minProvisionedConcurrency) && Objects.equals(minProvisionedThroughput, that.minProvisionedThroughput) && Objects.equals(name, that.name) && Objects.equals(provisionedModelUnits, that.provisionedModelUnits) @@ -329,9 +294,7 @@ public int hashCode() { externalModel, foundationModel, instanceProfileArn, - maxProvisionedConcurrency, maxProvisionedThroughput, - minProvisionedConcurrency, minProvisionedThroughput, name, provisionedModelUnits, @@ -352,9 +315,7 @@ public String toString() { .add("externalModel", externalModel) .add("foundationModel", foundationModel) .add("instanceProfileArn", instanceProfileArn) - .add("maxProvisionedConcurrency", maxProvisionedConcurrency) .add("maxProvisionedThroughput", maxProvisionedThroughput) - .add("minProvisionedConcurrency", minProvisionedConcurrency) .add("minProvisionedThroughput", minProvisionedThroughput) .add("name", name) .add("provisionedModelUnits", provisionedModelUnits) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelInput.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelInput.java index 93b608063..907d88d17 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelInput.java +++ 
b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelInput.java @@ -23,24 +23,10 @@ public class ServedModelInput { @JsonProperty("instance_profile_arn") private String instanceProfileArn; - /** - * The maximum provisioned concurrency that the endpoint can scale up to. Do not use if - * workload_size is specified. - */ - @JsonProperty("max_provisioned_concurrency") - private Long maxProvisionedConcurrency; - /** The maximum tokens per second that the endpoint can scale up to. */ @JsonProperty("max_provisioned_throughput") private Long maxProvisionedThroughput; - /** - * The minimum provisioned concurrency that the endpoint can scale down to. Do not use if - * workload_size is specified. - */ - @JsonProperty("min_provisioned_concurrency") - private Long minProvisionedConcurrency; - /** The minimum tokens per second that the endpoint can scale down to. */ @JsonProperty("min_provisioned_throughput") private Long minProvisionedThroughput; @@ -77,8 +63,7 @@ public class ServedModelInput { * concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned * concurrency). Additional custom workload sizes can also be used when available in the * workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each - * workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency - * are specified. + * workload size is 0. */ @JsonProperty("workload_size") private String workloadSize; @@ -113,15 +98,6 @@ public String getInstanceProfileArn() { return instanceProfileArn; } - public ServedModelInput setMaxProvisionedConcurrency(Long maxProvisionedConcurrency) { - this.maxProvisionedConcurrency = maxProvisionedConcurrency; - return this; - } - - public Long getMaxProvisionedConcurrency() { - return maxProvisionedConcurrency; - } - public ServedModelInput setMaxProvisionedThroughput(Long maxProvisionedThroughput) { this.maxProvisionedThroughput = maxProvisionedThroughput; return this; @@ -131,15 +107,6 @@ public Long getMaxProvisionedThroughput() { return maxProvisionedThroughput; } - public ServedModelInput setMinProvisionedConcurrency(Long minProvisionedConcurrency) { - this.minProvisionedConcurrency = minProvisionedConcurrency; - return this; - } - - public Long getMinProvisionedConcurrency() { - return minProvisionedConcurrency; - } - public ServedModelInput setMinProvisionedThroughput(Long minProvisionedThroughput) { this.minProvisionedThroughput = minProvisionedThroughput; return this; @@ -219,9 +186,7 @@ public boolean equals(Object o) { ServedModelInput that = (ServedModelInput) o; return Objects.equals(environmentVars, that.environmentVars) && Objects.equals(instanceProfileArn, that.instanceProfileArn) - && Objects.equals(maxProvisionedConcurrency, that.maxProvisionedConcurrency) && Objects.equals(maxProvisionedThroughput, that.maxProvisionedThroughput) - && Objects.equals(minProvisionedConcurrency, that.minProvisionedConcurrency) && Objects.equals(minProvisionedThroughput, that.minProvisionedThroughput) && Objects.equals(modelName, that.modelName) && Objects.equals(modelVersion, that.modelVersion) @@ -237,9 +202,7 @@ public int hashCode() { return Objects.hash( environmentVars, instanceProfileArn, - maxProvisionedConcurrency, maxProvisionedThroughput, - minProvisionedConcurrency, minProvisionedThroughput, modelName, modelVersion, @@ -255,9 +218,7 @@ public String toString() { return new ToStringer(ServedModelInput.class) .add("environmentVars", environmentVars) 
.add("instanceProfileArn", instanceProfileArn) - .add("maxProvisionedConcurrency", maxProvisionedConcurrency) .add("maxProvisionedThroughput", maxProvisionedThroughput) - .add("minProvisionedConcurrency", minProvisionedConcurrency) .add("minProvisionedThroughput", minProvisionedThroughput) .add("modelName", modelName) .add("modelVersion", modelVersion) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelOutput.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelOutput.java index dfdc57241..eabfc4a48 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelOutput.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelOutput.java @@ -31,20 +31,6 @@ public class ServedModelOutput { @JsonProperty("instance_profile_arn") private String instanceProfileArn; - /** - * The maximum provisioned concurrency that the endpoint can scale up to. Do not use if - * workload_size is specified. - */ - @JsonProperty("max_provisioned_concurrency") - private Long maxProvisionedConcurrency; - - /** - * The minimum provisioned concurrency that the endpoint can scale down to. Do not use if - * workload_size is specified. - */ - @JsonProperty("min_provisioned_concurrency") - private Long minProvisionedConcurrency; - /** */ @JsonProperty("model_name") private String modelName; @@ -81,8 +67,7 @@ public class ServedModelOutput { * concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned * concurrency). Additional custom workload sizes can also be used when available in the * workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each - * workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency - * are specified. + * workload size is 0. 
*/ @JsonProperty("workload_size") private String workloadSize; @@ -135,24 +120,6 @@ public String getInstanceProfileArn() { return instanceProfileArn; } - public ServedModelOutput setMaxProvisionedConcurrency(Long maxProvisionedConcurrency) { - this.maxProvisionedConcurrency = maxProvisionedConcurrency; - return this; - } - - public Long getMaxProvisionedConcurrency() { - return maxProvisionedConcurrency; - } - - public ServedModelOutput setMinProvisionedConcurrency(Long minProvisionedConcurrency) { - this.minProvisionedConcurrency = minProvisionedConcurrency; - return this; - } - - public Long getMinProvisionedConcurrency() { - return minProvisionedConcurrency; - } - public ServedModelOutput setModelName(String modelName) { this.modelName = modelName; return this; @@ -234,8 +201,6 @@ public boolean equals(Object o) { && Objects.equals(creator, that.creator) && Objects.equals(environmentVars, that.environmentVars) && Objects.equals(instanceProfileArn, that.instanceProfileArn) - && Objects.equals(maxProvisionedConcurrency, that.maxProvisionedConcurrency) - && Objects.equals(minProvisionedConcurrency, that.minProvisionedConcurrency) && Objects.equals(modelName, that.modelName) && Objects.equals(modelVersion, that.modelVersion) && Objects.equals(name, that.name) @@ -253,8 +218,6 @@ public int hashCode() { creator, environmentVars, instanceProfileArn, - maxProvisionedConcurrency, - minProvisionedConcurrency, modelName, modelVersion, name, @@ -272,8 +235,6 @@ public String toString() { .add("creator", creator) .add("environmentVars", environmentVars) .add("instanceProfileArn", instanceProfileArn) - .add("maxProvisionedConcurrency", maxProvisionedConcurrency) - .add("minProvisionedConcurrency", minProvisionedConcurrency) .add("modelName", modelName) .add("modelVersion", modelVersion) .add("name", name) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptions.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptions.java deleted file mode 100755 index 1ba9dcb49..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptions.java +++ /dev/null @@ -1,86 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.support.Generated; -import com.databricks.sdk.support.ToStringer; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -@Generated -public class DashboardEmailSubscriptions { - /** */ - @JsonProperty("boolean_val") - private BooleanMessage booleanVal; - - /** - * etag used for versioning. The response is at least as fresh as the eTag provided. This is used - * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting - * overwriting each other. It is strongly suggested that systems make use of the etag in the read - * -> update pattern to perform setting updates in order to avoid race conditions. That is, get an - * etag from a GET request, and pass it with the PATCH request to identify the setting version you - * are updating. - */ - @JsonProperty("etag") - private String etag; - - /** - * Name of the corresponding setting. This field is populated in the response, but it will not be - * respected even if it's set in the request body. The setting name in the path parameter will be - * respected instead. 
Setting name is required to be 'default' if the setting only has one - * instance per workspace. - */ - @JsonProperty("setting_name") - private String settingName; - - public DashboardEmailSubscriptions setBooleanVal(BooleanMessage booleanVal) { - this.booleanVal = booleanVal; - return this; - } - - public BooleanMessage getBooleanVal() { - return booleanVal; - } - - public DashboardEmailSubscriptions setEtag(String etag) { - this.etag = etag; - return this; - } - - public String getEtag() { - return etag; - } - - public DashboardEmailSubscriptions setSettingName(String settingName) { - this.settingName = settingName; - return this; - } - - public String getSettingName() { - return settingName; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - DashboardEmailSubscriptions that = (DashboardEmailSubscriptions) o; - return Objects.equals(booleanVal, that.booleanVal) - && Objects.equals(etag, that.etag) - && Objects.equals(settingName, that.settingName); - } - - @Override - public int hashCode() { - return Objects.hash(booleanVal, etag, settingName); - } - - @Override - public String toString() { - return new ToStringer(DashboardEmailSubscriptions.class) - .add("booleanVal", booleanVal) - .add("etag", etag) - .add("settingName", settingName) - .toString(); - } -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsAPI.java deleted file mode 100755 index daf85f77f..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsAPI.java +++ /dev/null @@ -1,70 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.core.ApiClient; -import com.databricks.sdk.support.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace can - * send subscription emails containing PDFs and/or images of the dashboard. By default, this setting - * is enabled (set to `true`) - */ -@Generated -public class DashboardEmailSubscriptionsAPI { - private static final Logger LOG = LoggerFactory.getLogger(DashboardEmailSubscriptionsAPI.class); - - private final DashboardEmailSubscriptionsService impl; - - /** Regular-use constructor */ - public DashboardEmailSubscriptionsAPI(ApiClient apiClient) { - impl = new DashboardEmailSubscriptionsImpl(apiClient); - } - - /** Constructor for mocks */ - public DashboardEmailSubscriptionsAPI(DashboardEmailSubscriptionsService mock) { - impl = mock; - } - - /** - * Delete the Dashboard Email Subscriptions setting. - * - *
<p>
Reverts the Dashboard Email Subscriptions setting to its default value. - */ - public DeleteDashboardEmailSubscriptionsResponse delete( - DeleteDashboardEmailSubscriptionsRequest request) { - return impl.delete(request); - } - - /** - * Get the Dashboard Email Subscriptions setting. - * - *
<p>
Gets the Dashboard Email Subscriptions setting. - */ - public DashboardEmailSubscriptions get(GetDashboardEmailSubscriptionsRequest request) { - return impl.get(request); - } - - public DashboardEmailSubscriptions update( - boolean allowMissing, DashboardEmailSubscriptions setting, String fieldMask) { - return update( - new UpdateDashboardEmailSubscriptionsRequest() - .setAllowMissing(allowMissing) - .setSetting(setting) - .setFieldMask(fieldMask)); - } - - /** - * Update the Dashboard Email Subscriptions setting. - * - *
<p>
Updates the Dashboard Email Subscriptions setting. - */ - public DashboardEmailSubscriptions update(UpdateDashboardEmailSubscriptionsRequest request) { - return impl.update(request); - } - - public DashboardEmailSubscriptionsService impl() { - return impl; - } -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsImpl.java deleted file mode 100755 index 767cb5e75..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsImpl.java +++ /dev/null @@ -1,59 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.core.ApiClient; -import com.databricks.sdk.core.DatabricksException; -import com.databricks.sdk.core.http.Request; -import com.databricks.sdk.support.Generated; -import java.io.IOException; - -/** Package-local implementation of DashboardEmailSubscriptions */ -@Generated -class DashboardEmailSubscriptionsImpl implements DashboardEmailSubscriptionsService { - private final ApiClient apiClient; - - public DashboardEmailSubscriptionsImpl(ApiClient apiClient) { - this.apiClient = apiClient; - } - - @Override - public DeleteDashboardEmailSubscriptionsResponse delete( - DeleteDashboardEmailSubscriptionsRequest request) { - String path = "/api/2.0/settings/types/dashboard_email_subscriptions/names/default"; - try { - Request req = new Request("DELETE", path); - ApiClient.setQuery(req, request); - req.withHeader("Accept", "application/json"); - return apiClient.execute(req, DeleteDashboardEmailSubscriptionsResponse.class); - } catch (IOException e) { - throw new DatabricksException("IO error: " + e.getMessage(), e); - } - } - - @Override - public DashboardEmailSubscriptions get(GetDashboardEmailSubscriptionsRequest request) { - String path = "/api/2.0/settings/types/dashboard_email_subscriptions/names/default"; - try { - Request req = new Request("GET", path); - ApiClient.setQuery(req, request); - req.withHeader("Accept", "application/json"); - return apiClient.execute(req, DashboardEmailSubscriptions.class); - } catch (IOException e) { - throw new DatabricksException("IO error: " + e.getMessage(), e); - } - } - - @Override - public DashboardEmailSubscriptions update(UpdateDashboardEmailSubscriptionsRequest request) { - String path = "/api/2.0/settings/types/dashboard_email_subscriptions/names/default"; - try { - Request req = new Request("PATCH", path, apiClient.serialize(request)); - ApiClient.setQuery(req, request); - req.withHeader("Accept", "application/json"); - req.withHeader("Content-Type", "application/json"); - return apiClient.execute(req, DashboardEmailSubscriptions.class); - } catch (IOException e) { - throw new DatabricksException("IO error: " + e.getMessage(), e); - } - } -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsService.java deleted file mode 100755 index 1dbc66188..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsService.java +++ /dev/null @@ -1,40 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
-package com.databricks.sdk.service.settings; - -import com.databricks.sdk.support.Generated; - -/** - * Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace can - * send subscription emails containing PDFs and/or images of the dashboard. By default, this setting - * is enabled (set to `true`) - * - *
<p>
This is the high-level interface, that contains generated methods. - * - *
<p>
Evolving: this interface is under development. Method signatures may change. - */ -@Generated -public interface DashboardEmailSubscriptionsService { - /** - * Delete the Dashboard Email Subscriptions setting. - * - *
<p>
Reverts the Dashboard Email Subscriptions setting to its default value. - */ - DeleteDashboardEmailSubscriptionsResponse delete( - DeleteDashboardEmailSubscriptionsRequest deleteDashboardEmailSubscriptionsRequest); - - /** - * Get the Dashboard Email Subscriptions setting. - * - *
<p>
Gets the Dashboard Email Subscriptions setting. - */ - DashboardEmailSubscriptions get( - GetDashboardEmailSubscriptionsRequest getDashboardEmailSubscriptionsRequest); - - /** - * Update the Dashboard Email Subscriptions setting. - * - *
<p>
Updates the Dashboard Email Subscriptions setting. - */ - DashboardEmailSubscriptions update( - UpdateDashboardEmailSubscriptionsRequest updateDashboardEmailSubscriptionsRequest); -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsRequest.java deleted file mode 100755 index 8d3d36912..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsRequest.java +++ /dev/null @@ -1,54 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.support.Generated; -import com.databricks.sdk.support.QueryParam; -import com.databricks.sdk.support.ToStringer; -import com.fasterxml.jackson.annotation.JsonIgnore; -import java.util.Objects; - -/** Delete the Dashboard Email Subscriptions setting */ -@Generated -public class DeleteDashboardEmailSubscriptionsRequest { - /** - * etag used for versioning. The response is at least as fresh as the eTag provided. This is used - * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting - * overwriting each other. It is strongly suggested that systems make use of the etag in the read - * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get - * an etag from a GET request, and pass it with the DELETE request to identify the rule set - * version you are deleting. - */ - @JsonIgnore - @QueryParam("etag") - private String etag; - - public DeleteDashboardEmailSubscriptionsRequest setEtag(String etag) { - this.etag = etag; - return this; - } - - public String getEtag() { - return etag; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - DeleteDashboardEmailSubscriptionsRequest that = (DeleteDashboardEmailSubscriptionsRequest) o; - return Objects.equals(etag, that.etag); - } - - @Override - public int hashCode() { - return Objects.hash(etag); - } - - @Override - public String toString() { - return new ToStringer(DeleteDashboardEmailSubscriptionsRequest.class) - .add("etag", etag) - .toString(); - } -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsResponse.java deleted file mode 100755 index 1cfa511ae..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsResponse.java +++ /dev/null @@ -1,52 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.support.Generated; -import com.databricks.sdk.support.ToStringer; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -/** The etag is returned. */ -@Generated -public class DeleteDashboardEmailSubscriptionsResponse { - /** - * etag used for versioning. The response is at least as fresh as the eTag provided. This is used - * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting - * overwriting each other. 
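The etag paragraphs repeated through these settings types all describe the same optimistic-concurrency flow: read the setting to obtain its etag, then send that etag back with the write so a concurrent modification surfaces as a conflict instead of being silently overwritten. A sketch of the read -> update variant, using the DashboardEmailSubscriptions types this patch deletes (shown only to illustrate the documented flow; the `update(allowMissing, setting, fieldMask)` convenience overload appears in the API class above, and `BooleanMessage.setValue` is assumed from the generated wrapper type):

    import com.databricks.sdk.WorkspaceClient;
    import com.databricks.sdk.service.settings.BooleanMessage;
    import com.databricks.sdk.service.settings.DashboardEmailSubscriptions;
    import com.databricks.sdk.service.settings.GetDashboardEmailSubscriptionsRequest;

    public class EtagReadUpdateExample {
      public static void main(String[] args) {
        WorkspaceClient w = new WorkspaceClient();

        // Read: the GET response is at least as fresh as the etag it carries.
        DashboardEmailSubscriptions current =
            w.settings()
                .DashboardEmailSubscriptions()
                .get(new GetDashboardEmailSubscriptionsRequest());

        // Update: pass the etag back so a concurrent write is detected rather
        // than overwritten; field_mask names the field being changed.
        DashboardEmailSubscriptions updated =
            w.settings()
                .DashboardEmailSubscriptions()
                .update(
                    true, // allow_missing: always true for the Settings API
                    new DashboardEmailSubscriptions()
                        .setEtag(current.getEtag())
                        .setBooleanVal(new BooleanMessage().setValue(false)),
                    "boolean_val");
        System.out.println(updated.getEtag());
      }
    }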
It is strongly suggested that systems make use of the etag in the read - * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get - * an etag from a GET request, and pass it with the DELETE request to identify the rule set - * version you are deleting. - */ - @JsonProperty("etag") - private String etag; - - public DeleteDashboardEmailSubscriptionsResponse setEtag(String etag) { - this.etag = etag; - return this; - } - - public String getEtag() { - return etag; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - DeleteDashboardEmailSubscriptionsResponse that = (DeleteDashboardEmailSubscriptionsResponse) o; - return Objects.equals(etag, that.etag); - } - - @Override - public int hashCode() { - return Objects.hash(etag); - } - - @Override - public String toString() { - return new ToStringer(DeleteDashboardEmailSubscriptionsResponse.class) - .add("etag", etag) - .toString(); - } -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadRequest.java deleted file mode 100755 index 3a5c3214a..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadRequest.java +++ /dev/null @@ -1,52 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.support.Generated; -import com.databricks.sdk.support.QueryParam; -import com.databricks.sdk.support.ToStringer; -import com.fasterxml.jackson.annotation.JsonIgnore; -import java.util.Objects; - -/** Delete the SQL Results Download setting */ -@Generated -public class DeleteSqlResultsDownloadRequest { - /** - * etag used for versioning. The response is at least as fresh as the eTag provided. This is used - * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting - * overwriting each other. It is strongly suggested that systems make use of the etag in the read - * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get - * an etag from a GET request, and pass it with the DELETE request to identify the rule set - * version you are deleting. 
- */ - @JsonIgnore - @QueryParam("etag") - private String etag; - - public DeleteSqlResultsDownloadRequest setEtag(String etag) { - this.etag = etag; - return this; - } - - public String getEtag() { - return etag; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - DeleteSqlResultsDownloadRequest that = (DeleteSqlResultsDownloadRequest) o; - return Objects.equals(etag, that.etag); - } - - @Override - public int hashCode() { - return Objects.hash(etag); - } - - @Override - public String toString() { - return new ToStringer(DeleteSqlResultsDownloadRequest.class).add("etag", etag).toString(); - } -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadResponse.java deleted file mode 100755 index bc2957210..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadResponse.java +++ /dev/null @@ -1,50 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.support.Generated; -import com.databricks.sdk.support.ToStringer; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -/** The etag is returned. */ -@Generated -public class DeleteSqlResultsDownloadResponse { - /** - * etag used for versioning. The response is at least as fresh as the eTag provided. This is used - * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting - * overwriting each other. It is strongly suggested that systems make use of the etag in the read - * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get - * an etag from a GET request, and pass it with the DELETE request to identify the rule set - * version you are deleting. - */ - @JsonProperty("etag") - private String etag; - - public DeleteSqlResultsDownloadResponse setEtag(String etag) { - this.etag = etag; - return this; - } - - public String getEtag() { - return etag; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - DeleteSqlResultsDownloadResponse that = (DeleteSqlResultsDownloadResponse) o; - return Objects.equals(etag, that.etag); - } - - @Override - public int hashCode() { - return Objects.hash(etag); - } - - @Override - public String toString() { - return new ToStringer(DeleteSqlResultsDownloadResponse.class).add("etag", etag).toString(); - } -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDashboardEmailSubscriptionsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDashboardEmailSubscriptionsRequest.java deleted file mode 100755 index 0c545ca9b..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDashboardEmailSubscriptionsRequest.java +++ /dev/null @@ -1,52 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
- -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.support.Generated; -import com.databricks.sdk.support.QueryParam; -import com.databricks.sdk.support.ToStringer; -import com.fasterxml.jackson.annotation.JsonIgnore; -import java.util.Objects; - -/** Get the Dashboard Email Subscriptions setting */ -@Generated -public class GetDashboardEmailSubscriptionsRequest { - /** - * etag used for versioning. The response is at least as fresh as the eTag provided. This is used - * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting - * overwriting each other. It is strongly suggested that systems make use of the etag in the read - * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get - * an etag from a GET request, and pass it with the DELETE request to identify the rule set - * version you are deleting. - */ - @JsonIgnore - @QueryParam("etag") - private String etag; - - public GetDashboardEmailSubscriptionsRequest setEtag(String etag) { - this.etag = etag; - return this; - } - - public String getEtag() { - return etag; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - GetDashboardEmailSubscriptionsRequest that = (GetDashboardEmailSubscriptionsRequest) o; - return Objects.equals(etag, that.etag); - } - - @Override - public int hashCode() { - return Objects.hash(etag); - } - - @Override - public String toString() { - return new ToStringer(GetDashboardEmailSubscriptionsRequest.class).add("etag", etag).toString(); - } -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetSqlResultsDownloadRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetSqlResultsDownloadRequest.java deleted file mode 100755 index c9cb75cc7..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetSqlResultsDownloadRequest.java +++ /dev/null @@ -1,52 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.support.Generated; -import com.databricks.sdk.support.QueryParam; -import com.databricks.sdk.support.ToStringer; -import com.fasterxml.jackson.annotation.JsonIgnore; -import java.util.Objects; - -/** Get the SQL Results Download setting */ -@Generated -public class GetSqlResultsDownloadRequest { - /** - * etag used for versioning. The response is at least as fresh as the eTag provided. This is used - * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting - * overwriting each other. It is strongly suggested that systems make use of the etag in the read - * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get - * an etag from a GET request, and pass it with the DELETE request to identify the rule set - * version you are deleting. 
- */ - @JsonIgnore - @QueryParam("etag") - private String etag; - - public GetSqlResultsDownloadRequest setEtag(String etag) { - this.etag = etag; - return this; - } - - public String getEtag() { - return etag; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - GetSqlResultsDownloadRequest that = (GetSqlResultsDownloadRequest) o; - return Objects.equals(etag, that.etag); - } - - @Override - public int hashCode() { - return Objects.hash(etag); - } - - @Override - public String toString() { - return new ToStringer(GetSqlResultsDownloadRequest.class).add("etag", etag).toString(); - } -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetWorkspaceNetworkOptionRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetWorkspaceNetworkOptionRequest.java index c3f99bf5d..5344f3325 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetWorkspaceNetworkOptionRequest.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetWorkspaceNetworkOptionRequest.java @@ -7,7 +7,7 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import java.util.Objects; -/** Get workspace network option */ +/** Get workspace network configuration */ @Generated public class GetWorkspaceNetworkOptionRequest { /** The workspace ID. */ diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java index 16fa226ef..f5eb3d0a5 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java @@ -21,8 +21,6 @@ public class SettingsAPI { private ComplianceSecurityProfileAPI complianceSecurityProfileAPI; - private DashboardEmailSubscriptionsAPI dashboardEmailSubscriptionsAPI; - private DefaultNamespaceAPI defaultNamespaceAPI; private DisableLegacyAccessAPI disableLegacyAccessAPI; @@ -41,8 +39,6 @@ public class SettingsAPI { private RestrictWorkspaceAdminsAPI restrictWorkspaceAdminsAPI; - private SqlResultsDownloadAPI sqlResultsDownloadAPI; - /** Regular-use constructor */ public SettingsAPI(ApiClient apiClient) { impl = new SettingsImpl(apiClient); @@ -56,8 +52,6 @@ public SettingsAPI(ApiClient apiClient) { complianceSecurityProfileAPI = new ComplianceSecurityProfileAPI(apiClient); - dashboardEmailSubscriptionsAPI = new DashboardEmailSubscriptionsAPI(apiClient); - defaultNamespaceAPI = new DefaultNamespaceAPI(apiClient); disableLegacyAccessAPI = new DisableLegacyAccessAPI(apiClient); @@ -75,8 +69,6 @@ public SettingsAPI(ApiClient apiClient) { llmProxyPartnerPoweredWorkspaceAPI = new LlmProxyPartnerPoweredWorkspaceAPI(apiClient); restrictWorkspaceAdminsAPI = new RestrictWorkspaceAdminsAPI(apiClient); - - sqlResultsDownloadAPI = new SqlResultsDownloadAPI(apiClient); } /** Constructor for mocks */ @@ -107,14 +99,6 @@ public ComplianceSecurityProfileAPI ComplianceSecurityProfile() { return complianceSecurityProfileAPI; } - /** - * Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace - * can send subscription emails containing PDFs and/or images of the dashboard. 
- */ - public DashboardEmailSubscriptionsAPI DashboardEmailSubscriptions() { - return dashboardEmailSubscriptionsAPI; - } - /** * The default namespace setting API allows users to configure the default namespace for a * Databricks workspace. @@ -165,14 +149,6 @@ public RestrictWorkspaceAdminsAPI RestrictWorkspaceAdmins() { return restrictWorkspaceAdminsAPI; } - /** - * Controls whether users within the workspace are allowed to download results from the SQL Editor - * and AI/BI Dashboards UIs. - */ - public SqlResultsDownloadAPI SqlResultsDownload() { - return sqlResultsDownloadAPI; - } - public SettingsService impl() { return impl; } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownload.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownload.java deleted file mode 100755 index b15b7f669..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownload.java +++ /dev/null @@ -1,86 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.support.Generated; -import com.databricks.sdk.support.ToStringer; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -@Generated -public class SqlResultsDownload { - /** */ - @JsonProperty("boolean_val") - private BooleanMessage booleanVal; - - /** - * etag used for versioning. The response is at least as fresh as the eTag provided. This is used - * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting - * overwriting each other. It is strongly suggested that systems make use of the etag in the read - * -> update pattern to perform setting updates in order to avoid race conditions. That is, get an - * etag from a GET request, and pass it with the PATCH request to identify the setting version you - * are updating. - */ - @JsonProperty("etag") - private String etag; - - /** - * Name of the corresponding setting. This field is populated in the response, but it will not be - * respected even if it's set in the request body. The setting name in the path parameter will be - * respected instead. Setting name is required to be 'default' if the setting only has one - * instance per workspace. 
- */ - @JsonProperty("setting_name") - private String settingName; - - public SqlResultsDownload setBooleanVal(BooleanMessage booleanVal) { - this.booleanVal = booleanVal; - return this; - } - - public BooleanMessage getBooleanVal() { - return booleanVal; - } - - public SqlResultsDownload setEtag(String etag) { - this.etag = etag; - return this; - } - - public String getEtag() { - return etag; - } - - public SqlResultsDownload setSettingName(String settingName) { - this.settingName = settingName; - return this; - } - - public String getSettingName() { - return settingName; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - SqlResultsDownload that = (SqlResultsDownload) o; - return Objects.equals(booleanVal, that.booleanVal) - && Objects.equals(etag, that.etag) - && Objects.equals(settingName, that.settingName); - } - - @Override - public int hashCode() { - return Objects.hash(booleanVal, etag, settingName); - } - - @Override - public String toString() { - return new ToStringer(SqlResultsDownload.class) - .add("booleanVal", booleanVal) - .add("etag", etag) - .add("settingName", settingName) - .toString(); - } -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadAPI.java deleted file mode 100755 index 7bc8f49d1..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadAPI.java +++ /dev/null @@ -1,68 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.core.ApiClient; -import com.databricks.sdk.support.Generated; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Controls whether users within the workspace are allowed to download results from the SQL Editor - * and AI/BI Dashboards UIs. By default, this setting is enabled (set to `true`) - */ -@Generated -public class SqlResultsDownloadAPI { - private static final Logger LOG = LoggerFactory.getLogger(SqlResultsDownloadAPI.class); - - private final SqlResultsDownloadService impl; - - /** Regular-use constructor */ - public SqlResultsDownloadAPI(ApiClient apiClient) { - impl = new SqlResultsDownloadImpl(apiClient); - } - - /** Constructor for mocks */ - public SqlResultsDownloadAPI(SqlResultsDownloadService mock) { - impl = mock; - } - - /** - * Delete the SQL Results Download setting. - * - *
<p>
Reverts the SQL Results Download setting to its default value. - */ - public DeleteSqlResultsDownloadResponse delete(DeleteSqlResultsDownloadRequest request) { - return impl.delete(request); - } - - /** - * Get the SQL Results Download setting. - * - *
<p>
Gets the SQL Results Download setting. - */ - public SqlResultsDownload get(GetSqlResultsDownloadRequest request) { - return impl.get(request); - } - - public SqlResultsDownload update( - boolean allowMissing, SqlResultsDownload setting, String fieldMask) { - return update( - new UpdateSqlResultsDownloadRequest() - .setAllowMissing(allowMissing) - .setSetting(setting) - .setFieldMask(fieldMask)); - } - - /** - * Update the SQL Results Download setting. - * - *
<p>
Updates the SQL Results Download setting. - */ - public SqlResultsDownload update(UpdateSqlResultsDownloadRequest request) { - return impl.update(request); - } - - public SqlResultsDownloadService impl() { - return impl; - } -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadImpl.java deleted file mode 100755 index db09dc70e..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadImpl.java +++ /dev/null @@ -1,58 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.core.ApiClient; -import com.databricks.sdk.core.DatabricksException; -import com.databricks.sdk.core.http.Request; -import com.databricks.sdk.support.Generated; -import java.io.IOException; - -/** Package-local implementation of SqlResultsDownload */ -@Generated -class SqlResultsDownloadImpl implements SqlResultsDownloadService { - private final ApiClient apiClient; - - public SqlResultsDownloadImpl(ApiClient apiClient) { - this.apiClient = apiClient; - } - - @Override - public DeleteSqlResultsDownloadResponse delete(DeleteSqlResultsDownloadRequest request) { - String path = "/api/2.0/settings/types/sql_results_download/names/default"; - try { - Request req = new Request("DELETE", path); - ApiClient.setQuery(req, request); - req.withHeader("Accept", "application/json"); - return apiClient.execute(req, DeleteSqlResultsDownloadResponse.class); - } catch (IOException e) { - throw new DatabricksException("IO error: " + e.getMessage(), e); - } - } - - @Override - public SqlResultsDownload get(GetSqlResultsDownloadRequest request) { - String path = "/api/2.0/settings/types/sql_results_download/names/default"; - try { - Request req = new Request("GET", path); - ApiClient.setQuery(req, request); - req.withHeader("Accept", "application/json"); - return apiClient.execute(req, SqlResultsDownload.class); - } catch (IOException e) { - throw new DatabricksException("IO error: " + e.getMessage(), e); - } - } - - @Override - public SqlResultsDownload update(UpdateSqlResultsDownloadRequest request) { - String path = "/api/2.0/settings/types/sql_results_download/names/default"; - try { - Request req = new Request("PATCH", path, apiClient.serialize(request)); - ApiClient.setQuery(req, request); - req.withHeader("Accept", "application/json"); - req.withHeader("Content-Type", "application/json"); - return apiClient.execute(req, SqlResultsDownload.class); - } catch (IOException e) { - throw new DatabricksException("IO error: " + e.getMessage(), e); - } - } -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadService.java deleted file mode 100755 index 0929fba03..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadService.java +++ /dev/null @@ -1,37 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.support.Generated; - -/** - * Controls whether users within the workspace are allowed to download results from the SQL Editor - * and AI/BI Dashboards UIs. By default, this setting is enabled (set to `true`) - * - *
<p>
This is the high-level interface, that contains generated methods. - * - *
<p>
Evolving: this interface is under development. Method signatures may change. - */ -@Generated -public interface SqlResultsDownloadService { - /** - * Delete the SQL Results Download setting. - * - *
<p>
Reverts the SQL Results Download setting to its default value. - */ - DeleteSqlResultsDownloadResponse delete( - DeleteSqlResultsDownloadRequest deleteSqlResultsDownloadRequest); - - /** - * Get the SQL Results Download setting. - * - *
<p>
Gets the SQL Results Download setting. - */ - SqlResultsDownload get(GetSqlResultsDownloadRequest getSqlResultsDownloadRequest); - - /** - * Update the SQL Results Download setting. - * - *
<p>
Updates the SQL Results Download setting. - */ - SqlResultsDownload update(UpdateSqlResultsDownloadRequest updateSqlResultsDownloadRequest); -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDashboardEmailSubscriptionsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDashboardEmailSubscriptionsRequest.java deleted file mode 100755 index 37613e037..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDashboardEmailSubscriptionsRequest.java +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.support.Generated; -import com.databricks.sdk.support.ToStringer; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -/** Details required to update a setting. */ -@Generated -public class UpdateDashboardEmailSubscriptionsRequest { - /** This should always be set to true for Settings API. Added for AIP compliance. */ - @JsonProperty("allow_missing") - private Boolean allowMissing; - - /** - * The field mask must be a single string, with multiple fields separated by commas (no spaces). - * The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields - * (e.g., `author.given_name`). Specification of elements in sequence or map fields is not - * allowed, as only the entire collection field can be specified. Field names must exactly match - * the resource field names. - * - *
<p>
A field mask of `*` indicates full replacement. It’s recommended to always explicitly list - * the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if - * the API changes in the future. - */ - @JsonProperty("field_mask") - private String fieldMask; - - /** */ - @JsonProperty("setting") - private DashboardEmailSubscriptions setting; - - public UpdateDashboardEmailSubscriptionsRequest setAllowMissing(Boolean allowMissing) { - this.allowMissing = allowMissing; - return this; - } - - public Boolean getAllowMissing() { - return allowMissing; - } - - public UpdateDashboardEmailSubscriptionsRequest setFieldMask(String fieldMask) { - this.fieldMask = fieldMask; - return this; - } - - public String getFieldMask() { - return fieldMask; - } - - public UpdateDashboardEmailSubscriptionsRequest setSetting(DashboardEmailSubscriptions setting) { - this.setting = setting; - return this; - } - - public DashboardEmailSubscriptions getSetting() { - return setting; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - UpdateDashboardEmailSubscriptionsRequest that = (UpdateDashboardEmailSubscriptionsRequest) o; - return Objects.equals(allowMissing, that.allowMissing) - && Objects.equals(fieldMask, that.fieldMask) - && Objects.equals(setting, that.setting); - } - - @Override - public int hashCode() { - return Objects.hash(allowMissing, fieldMask, setting); - } - - @Override - public String toString() { - return new ToStringer(UpdateDashboardEmailSubscriptionsRequest.class) - .add("allowMissing", allowMissing) - .add("fieldMask", fieldMask) - .add("setting", setting) - .toString(); - } -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateSqlResultsDownloadRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateSqlResultsDownloadRequest.java deleted file mode 100755 index a0d263a52..000000000 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateSqlResultsDownloadRequest.java +++ /dev/null @@ -1,85 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package com.databricks.sdk.service.settings; - -import com.databricks.sdk.support.Generated; -import com.databricks.sdk.support.ToStringer; -import com.fasterxml.jackson.annotation.JsonProperty; -import java.util.Objects; - -/** Details required to update a setting. */ -@Generated -public class UpdateSqlResultsDownloadRequest { - /** This should always be set to true for Settings API. Added for AIP compliance. */ - @JsonProperty("allow_missing") - private Boolean allowMissing; - - /** - * The field mask must be a single string, with multiple fields separated by commas (no spaces). - * The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields - * (e.g., `author.given_name`). Specification of elements in sequence or map fields is not - * allowed, as only the entire collection field can be specified. Field names must exactly match - * the resource field names. - * - *
<p>
A field mask of `*` indicates full replacement. It’s recommended to always explicitly list - * the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if - * the API changes in the future. - */ - @JsonProperty("field_mask") - private String fieldMask; - - /** */ - @JsonProperty("setting") - private SqlResultsDownload setting; - - public UpdateSqlResultsDownloadRequest setAllowMissing(Boolean allowMissing) { - this.allowMissing = allowMissing; - return this; - } - - public Boolean getAllowMissing() { - return allowMissing; - } - - public UpdateSqlResultsDownloadRequest setFieldMask(String fieldMask) { - this.fieldMask = fieldMask; - return this; - } - - public String getFieldMask() { - return fieldMask; - } - - public UpdateSqlResultsDownloadRequest setSetting(SqlResultsDownload setting) { - this.setting = setting; - return this; - } - - public SqlResultsDownload getSetting() { - return setting; - } - - @Override - public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - UpdateSqlResultsDownloadRequest that = (UpdateSqlResultsDownloadRequest) o; - return Objects.equals(allowMissing, that.allowMissing) - && Objects.equals(fieldMask, that.fieldMask) - && Objects.equals(setting, that.setting); - } - - @Override - public int hashCode() { - return Objects.hash(allowMissing, fieldMask, setting); - } - - @Override - public String toString() { - return new ToStringer(UpdateSqlResultsDownloadRequest.class) - .add("allowMissing", allowMissing) - .add("fieldMask", fieldMask) - .add("setting", setting) - .toString(); - } -} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java index 3dbcb2ba5..84c39c6b0 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java @@ -8,7 +8,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import java.util.Objects; -/** Update workspace network option */ +/** Update workspace network configuration */ @Generated public class UpdateWorkspaceNetworkOptionRequest { /** The workspace ID. */ diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationAPI.java index 825cf4f15..90fe8feba 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationAPI.java @@ -7,12 +7,11 @@ import org.slf4j.LoggerFactory; /** - * These APIs allow configuration of network settings for Databricks workspaces by selecting which - * network policy to associate with the workspace. Each workspace is always associated with exactly - * one network policy that controls which network destinations can be accessed from the Databricks - * environment. By default, workspaces are associated with the 'default-policy' network policy. 
You - * cannot create or delete a workspace's network option, only update it to associate the workspace - * with a different policy + * These APIs allow configuration of network settings for Databricks workspaces. Each workspace is + * always associated with exactly one network policy that controls which network destinations can be + * accessed from the Databricks environment. By default, workspaces are associated with the + * 'default-policy' network policy. You cannot create or delete a workspace's network configuration, + * only update it to associate the workspace with a different policy. */ @Generated public class WorkspaceNetworkConfigurationAPI { @@ -36,10 +35,10 @@ public WorkspaceNetworkOption getWorkspaceNetworkOptionRpc(long workspaceId) { } /** - * Get workspace network option. + * Get workspace network configuration. * - *
<p>
Gets the network option for a workspace. Every workspace has exactly one network policy - * binding, with 'default-policy' used if no explicit assignment exists. + *
<p>
Gets the network configuration for a workspace. Every workspace has exactly one network + * policy binding, with 'default-policy' used if no explicit assignment exists. */ public WorkspaceNetworkOption getWorkspaceNetworkOptionRpc( GetWorkspaceNetworkOptionRequest request) { @@ -55,11 +54,11 @@ public WorkspaceNetworkOption updateWorkspaceNetworkOptionRpc( } /** - * Update workspace network option. + * Update workspace network configuration. * - *
<p>
Updates the network option for a workspace. This operation associates the workspace with the - * specified network policy. To revert to the default policy, specify 'default-policy' as the - * network_policy_id. + *
<p>
Updates the network configuration for a workspace. This operation associates the workspace + * with the specified network policy. To revert to the default policy, specify 'default-policy' as + * the network_policy_id. */ public WorkspaceNetworkOption updateWorkspaceNetworkOptionRpc( UpdateWorkspaceNetworkOptionRequest request) { diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationService.java index 0a45ac324..7c414aa6d 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationService.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationService.java @@ -4,12 +4,11 @@ import com.databricks.sdk.support.Generated; /** - * These APIs allow configuration of network settings for Databricks workspaces by selecting which - * network policy to associate with the workspace. Each workspace is always associated with exactly - * one network policy that controls which network destinations can be accessed from the Databricks - * environment. By default, workspaces are associated with the 'default-policy' network policy. You - * cannot create or delete a workspace's network option, only update it to associate the workspace - * with a different policy + * These APIs allow configuration of network settings for Databricks workspaces. Each workspace is + * always associated with exactly one network policy that controls which network destinations can be + * accessed from the Databricks environment. By default, workspaces are associated with the + * 'default-policy' network policy. You cannot create or delete a workspace's network configuration, + * only update it to associate the workspace with a different policy. * *
<p>
This is the high-level interface, that contains generated methods. * @@ -18,20 +17,20 @@ @Generated public interface WorkspaceNetworkConfigurationService { /** - * Get workspace network option. + * Get workspace network configuration. * - *
<p>
Gets the network option for a workspace. Every workspace has exactly one network policy - * binding, with 'default-policy' used if no explicit assignment exists. + *
<p>
Gets the network configuration for a workspace. Every workspace has exactly one network + * policy binding, with 'default-policy' used if no explicit assignment exists. */ WorkspaceNetworkOption getWorkspaceNetworkOptionRpc( GetWorkspaceNetworkOptionRequest getWorkspaceNetworkOptionRequest); /** - * Update workspace network option. + * Update workspace network configuration. * - *
<p>
Updates the network option for a workspace. This operation associates the workspace with the - * specified network policy. To revert to the default policy, specify 'default-policy' as the - * network_policy_id. + *
<p>
Updates the network configuration for a workspace. This operation associates the workspace + * with the specified network policy. To revert to the default policy, specify 'default-policy' as + * the network_policy_id. */ WorkspaceNetworkOption updateWorkspaceNetworkOptionRpc( UpdateWorkspaceNetworkOptionRequest updateWorkspaceNetworkOptionRequest); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sharing/AuthenticationType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sharing/AuthenticationType.java index 05d2bd9c4..6b48f36b2 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sharing/AuthenticationType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sharing/AuthenticationType.java @@ -9,6 +9,5 @@ public enum AuthenticationType { DATABRICKS, OAUTH_CLIENT_CREDENTIALS, - OIDC_FEDERATION, TOKEN, } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2API.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2API.java index e868999eb..756895733 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2API.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2API.java @@ -7,7 +7,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** New version of SQL Alerts */ +/** TODO: Add description */ @Generated public class AlertsV2API { private static final Logger LOG = LoggerFactory.getLogger(AlertsV2API.class); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2Service.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2Service.java index cb9ec351a..f8740fa39 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2Service.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2Service.java @@ -4,7 +4,7 @@ import com.databricks.sdk.support.Generated; /** - * New version of SQL Alerts + * TODO: Add description * *
<p>
This is the high-level interface, that contains generated methods. * From da1ae5525034ea353de31e78b855592c4fbeb83c Mon Sep 17 00:00:00 2001 From: Hector Castejon Diaz Date: Thu, 19 Jun 2025 11:38:56 +0000 Subject: [PATCH 2/8] WIP --- .../serialization/DurationDeserializer.java | 23 +++++++++++++++++++ .../serialization/DurationSerializer.java | 21 +++++++++++++++++ .../serialization/FieldMaskDeserializer.java | 23 +++++++++++++++++++ .../serialization/FieldMaskSerializer.java | 21 +++++++++++++++++ .../serialization/TimestampDeserializer.java | 23 +++++++++++++++++++ .../serialization/TimestampSerializer.java | 21 +++++++++++++++++ 6 files changed, 132 insertions(+) create mode 100644 databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/DurationDeserializer.java create mode 100644 databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/DurationSerializer.java create mode 100644 databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/FieldMaskDeserializer.java create mode 100644 databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/FieldMaskSerializer.java create mode 100644 databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/TimestampDeserializer.java create mode 100644 databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/TimestampSerializer.java diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/DurationDeserializer.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/DurationDeserializer.java new file mode 100644 index 000000000..765dccd66 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/DurationDeserializer.java @@ -0,0 +1,23 @@ +package com.databricks.sdk.core.serialization; + +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.google.protobuf.Duration; +import com.google.protobuf.util.Durations; +import java.io.IOException; + +public class DurationDeserializer extends JsonDeserializer { + @Override + public Duration deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + String durationStr = p.getValueAsString(); + if (durationStr == null || durationStr.isEmpty()) { + return null; + } + try { + return Durations.parse(durationStr); // Parses duration format like "3.000s" + } catch (Exception e) { + throw new IOException("Failed to parse duration: " + durationStr, e); + } + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/DurationSerializer.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/DurationSerializer.java new file mode 100644 index 000000000..27fde56b6 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/DurationSerializer.java @@ -0,0 +1,21 @@ +package com.databricks.sdk.core.serialization; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.google.protobuf.Duration; +import com.google.protobuf.util.Durations; +import java.io.IOException; + +public class DurationSerializer extends JsonSerializer { + @Override + public void serialize(Duration value, JsonGenerator gen, SerializerProvider serializers) + throws IOException { + if (value != null) { + String durationStr = Durations.toString(value); 
// Converts to "3.000s" + gen.writeString(durationStr); + } else { + gen.writeNull(); + } + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/FieldMaskDeserializer.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/FieldMaskDeserializer.java new file mode 100644 index 000000000..5061f4b3e --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/FieldMaskDeserializer.java @@ -0,0 +1,23 @@ +package com.databricks.sdk.core.serialization; + +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.google.protobuf.FieldMask; +import com.google.protobuf.util.FieldMaskUtil; +import java.io.IOException; + +public class FieldMaskDeserializer extends JsonDeserializer { + @Override + public FieldMask deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + String fieldMaskStr = p.getValueAsString(); + if (fieldMaskStr == null || fieldMaskStr.isEmpty()) { + return null; + } + try { + return FieldMaskUtil.fromJsonString(fieldMaskStr); // Parses JSON string format + } catch (Exception e) { + throw new IOException("Failed to parse field mask: " + fieldMaskStr, e); + } + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/FieldMaskSerializer.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/FieldMaskSerializer.java new file mode 100644 index 000000000..301f0b5bb --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/FieldMaskSerializer.java @@ -0,0 +1,21 @@ +package com.databricks.sdk.core.serialization; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.google.protobuf.FieldMask; +import com.google.protobuf.util.FieldMaskUtil; +import java.io.IOException; + +public class FieldMaskSerializer extends JsonSerializer { + @Override + public void serialize(FieldMask value, JsonGenerator gen, SerializerProvider serializers) + throws IOException { + if (value != null) { + String fieldMaskStr = FieldMaskUtil.toJsonString(value); // Converts to JSON string format + gen.writeString(fieldMaskStr); + } else { + gen.writeNull(); + } + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/TimestampDeserializer.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/TimestampDeserializer.java new file mode 100644 index 000000000..b5474bdc6 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/TimestampDeserializer.java @@ -0,0 +1,23 @@ +package com.databricks.sdk.core.serialization; + +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.databind.DeserializationContext; +import com.fasterxml.jackson.databind.JsonDeserializer; +import com.google.protobuf.Timestamp; +import com.google.protobuf.util.Timestamps; +import java.io.IOException; + +public class TimestampDeserializer extends JsonDeserializer { + @Override + public Timestamp deserialize(JsonParser p, DeserializationContext ctxt) throws IOException { + String timestampStr = p.getValueAsString(); + if (timestampStr == null || timestampStr.isEmpty()) { + return null; + } + try { + return Timestamps.parse(timestampStr); // Parses RFC 3339 format + } catch (Exception e) { + throw new 
IOException("Failed to parse timestamp: " + timestampStr, e); + } + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/TimestampSerializer.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/TimestampSerializer.java new file mode 100644 index 000000000..a760b4749 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/serialization/TimestampSerializer.java @@ -0,0 +1,21 @@ +package com.databricks.sdk.core.serialization; + +import com.fasterxml.jackson.core.JsonGenerator; +import com.fasterxml.jackson.databind.JsonSerializer; +import com.fasterxml.jackson.databind.SerializerProvider; +import com.google.protobuf.Timestamp; +import com.google.protobuf.util.Timestamps; +import java.io.IOException; + +public class TimestampSerializer extends JsonSerializer { + @Override + public void serialize(Timestamp value, JsonGenerator gen, SerializerProvider serializers) + throws IOException { + if (value != null) { + String timestampStr = Timestamps.toString(value); // Converts to RFC 3339 format + gen.writeString(timestampStr); + } else { + gen.writeNull(); + } + } +} From c2bc729624e877475c019082bee70f97c5a12498 Mon Sep 17 00:00:00 2001 From: Hector Castejon Diaz Date: Thu, 19 Jun 2025 11:40:08 +0000 Subject: [PATCH 3/8] Revert "Before" This reverts commit 768e853195ab2ab7cd699686bf0d1af1b140db91. --- .codegen/_openapi_sha | 2 +- .gitattributes | 16 ++++ .../com/databricks/sdk/AccountClient.java | 11 +-- .../com/databricks/sdk/WorkspaceClient.java | 2 +- .../databricks/sdk/core/DatabricksConfig.java | 8 +- .../sdk/service/catalog/ConnectionInfo.java | 10 +-- .../sdk/service/catalog/ConnectionType.java | 9 +- .../sdk/service/catalog/ConnectionsAPI.java | 4 - .../sdk/service/catalog/CreateConnection.java | 2 +- .../sdk/service/catalog/CredentialType.java | 12 ++- .../sdk/service/catalog/SchemaInfo.java | 9 +- .../sdk/service/catalog/SchemasAPI.java | 4 - .../sdk/service/catalog/UpdateSchema.java | 2 +- .../service/compute/ClusterAttributes.java | 49 +++++++++++ .../sdk/service/compute/ClusterDetails.java | 49 +++++++++++ .../sdk/service/compute/ClusterSpec.java | 48 +++++++++++ .../sdk/service/compute/CreateCluster.java | 49 +++++++++++ .../sdk/service/compute/EditCluster.java | 48 +++++++++++ .../compute/UpdateClusterResource.java | 49 +++++++++++ .../sdk/service/iam/AccountGroupsAPI.java | 2 +- .../iam/AccountServicePrincipalsAPI.java | 2 +- .../sdk/service/iam/AccountUsersAPI.java | 2 +- .../databricks/sdk/service/iam/GroupsAPI.java | 2 +- .../sdk/service/iam/ServicePrincipalsAPI.java | 2 +- .../databricks/sdk/service/iam/UsersAPI.java | 2 +- .../sdk/service/pipelines/CreatePipeline.java | 20 +++++ .../sdk/service/pipelines/EditPipeline.java | 20 +++++ .../pipelines/IngestionSourceType.java | 1 + .../sdk/service/pipelines/PipelineSpec.java | 20 +++++ .../service/serving/ServedEntityInput.java | 41 ++++++++- .../service/serving/ServedEntityOutput.java | 41 ++++++++- .../sdk/service/serving/ServedModelInput.java | 41 ++++++++- .../service/serving/ServedModelOutput.java | 41 ++++++++- .../settings/DashboardEmailSubscriptions.java | 86 +++++++++++++++++++ .../DashboardEmailSubscriptionsAPI.java | 70 +++++++++++++++ .../DashboardEmailSubscriptionsImpl.java | 59 +++++++++++++ .../DashboardEmailSubscriptionsService.java | 40 +++++++++ ...eteDashboardEmailSubscriptionsRequest.java | 54 ++++++++++++ ...teDashboardEmailSubscriptionsResponse.java | 52 +++++++++++ .../DeleteSqlResultsDownloadRequest.java | 52 +++++++++++ 
.../DeleteSqlResultsDownloadResponse.java | 50 +++++++++++ ...GetDashboardEmailSubscriptionsRequest.java | 52 +++++++++++ .../GetSqlResultsDownloadRequest.java | 52 +++++++++++ .../GetWorkspaceNetworkOptionRequest.java | 2 +- .../sdk/service/settings/SettingsAPI.java | 24 ++++++ .../service/settings/SqlResultsDownload.java | 86 +++++++++++++++++++ .../settings/SqlResultsDownloadAPI.java | 68 +++++++++++++++ .../settings/SqlResultsDownloadImpl.java | 58 +++++++++++++ .../settings/SqlResultsDownloadService.java | 37 ++++++++ ...ateDashboardEmailSubscriptionsRequest.java | 85 ++++++++++++++++++ .../UpdateSqlResultsDownloadRequest.java | 85 ++++++++++++++++++ .../UpdateWorkspaceNetworkOptionRequest.java | 2 +- .../WorkspaceNetworkConfigurationAPI.java | 25 +++--- .../WorkspaceNetworkConfigurationService.java | 25 +++--- .../service/sharing/AuthenticationType.java | 1 + .../sdk/service/sql/AlertsV2API.java | 2 +- .../sdk/service/sql/AlertsV2Service.java | 2 +- 57 files changed, 1617 insertions(+), 72 deletions(-) create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptions.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsAPI.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsImpl.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsService.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsRequest.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsResponse.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadRequest.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadResponse.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDashboardEmailSubscriptionsRequest.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetSqlResultsDownloadRequest.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownload.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadAPI.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadImpl.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadService.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDashboardEmailSubscriptionsRequest.java create mode 100755 databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateSqlResultsDownloadRequest.java diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 4347f1a0e..a74101922 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -file:./../openapi/2cee201b2e8d656f7306b2f9ec98edfa721e9829.json \ No newline at end of file +b142b72bea6f30d8efb36dfa8c58e0d63ae5329b \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 067333c32..706329a62 100755 --- a/.gitattributes +++ b/.gitattributes @@ -2050,6 +2050,10 @@ 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CspEnablem databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CspEnablementAccountImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CspEnablementAccountService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/CspEnablementAccountSetting.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptions.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsAPI.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsImpl.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DefaultNamespaceAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DefaultNamespaceImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DefaultNamespaceService.java linguist-generated=true @@ -2061,6 +2065,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteAibi databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteAibiDashboardEmbeddingAccessPolicySettingResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDefaultNamespaceSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDefaultNamespaceSettingResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDisableLegacyAccessRequest.java linguist-generated=true @@ -2083,6 +2089,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeletePriv databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteRestrictWorkspaceAdminsSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteRestrictWorkspaceAdminsSettingResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteTokenManagementRequest.java linguist-generated=true 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DestinationType.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DisableLegacyAccess.java linguist-generated=true @@ -2157,6 +2165,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetAibiDas databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetAutomaticClusterUpdateSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetComplianceSecurityProfileSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetCspEnablementAccountSettingRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDashboardEmailSubscriptionsRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDefaultNamespaceSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDisableLegacyAccessRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDisableLegacyDbfsRequest.java linguist-generated=true @@ -2175,6 +2184,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetNotific databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetPersonalComputeSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetPrivateEndpointRuleRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetRestrictWorkspaceAdminsSettingRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetSqlResultsDownloadRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetStatusRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetTokenManagementRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetTokenPermissionLevelsResponse.java linguist-generated=true @@ -2254,6 +2264,10 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAP databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsImpl.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SlackConfig.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownload.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadAPI.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadImpl.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadService.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/StringMessage.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenAccessControlRequest.java linguist-generated=true 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/TokenAccessControlResponse.java linguist-generated=true @@ -2276,6 +2290,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateAibi databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateAutomaticClusterUpdateSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateComplianceSecurityProfileSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateCspEnablementAccountSettingRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDashboardEmailSubscriptionsRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDefaultNamespaceSettingRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDisableLegacyAccessRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDisableLegacyDbfsRequest.java linguist-generated=true @@ -2296,6 +2311,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdatePers databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdatePrivateEndpointRule.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateRestrictWorkspaceAdminsSettingRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateSqlResultsDownloadRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceConfAPI.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceConfImpl.java linguist-generated=true diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java index 1c813a589..be96caf24 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/AccountClient.java @@ -574,11 +574,12 @@ public WorkspaceAssignmentAPI workspaceAssignment() { } /** - * These APIs allow configuration of network settings for Databricks workspaces. Each workspace is - * always associated with exactly one network policy that controls which network destinations can - * be accessed from the Databricks environment. By default, workspaces are associated with the - * 'default-policy' network policy. You cannot create or delete a workspace's network - * configuration, only update it to associate the workspace with a different policy. + * These APIs allow configuration of network settings for Databricks workspaces by selecting which + * network policy to associate with the workspace. Each workspace is always associated with + * exactly one network policy that controls which network destinations can be accessed from the + * Databricks environment. By default, workspaces are associated with the 'default-policy' network + * policy. 
You cannot create or delete a workspace's network option, only update it to associate + * the workspace with a different policy */ public WorkspaceNetworkConfigurationAPI workspaceNetworkConfiguration() { return workspaceNetworkConfigurationAPI; diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java index 014c0b852..d4c066a69 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java @@ -483,7 +483,7 @@ public AlertsLegacyAPI alertsLegacy() { return alertsLegacyAPI; } - /** TODO: Add description */ + /** New version of SQL Alerts */ public AlertsV2API alertsV2() { return alertsV2API; } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java index df16ebae3..de6548982 100644 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java @@ -410,17 +410,13 @@ public DatabricksConfig setAzureUseMsi(boolean azureUseMsi) { return this; } - /** - * @deprecated Use {@link #getAzureUseMsi()} instead. - */ + /** @deprecated Use {@link #getAzureUseMsi()} instead. */ @Deprecated() public boolean getAzureUseMSI() { return azureUseMsi; } - /** - * @deprecated Use {@link #setAzureUseMsi(boolean)} instead. - */ + /** @deprecated Use {@link #setAzureUseMsi(boolean)} instead. */ @Deprecated public DatabricksConfig setAzureUseMSI(boolean azureUseMsi) { this.azureUseMsi = azureUseMsi; diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionInfo.java index 5e2e8d332..496800340 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionInfo.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionInfo.java @@ -54,7 +54,7 @@ public class ConnectionInfo { @JsonProperty("owner") private String owner; - /** An object containing map of key-value properties attached to the connection. */ + /** A map of key-value properties attached to the securable. */ @JsonProperty("properties") private Map properties; @@ -66,9 +66,9 @@ public class ConnectionInfo { @JsonProperty("read_only") private Boolean readOnly; - /** */ + /** The type of Unity Catalog securable. */ @JsonProperty("securable_type") - private String securableType; + private SecurableType securableType; /** Time at which this connection was updated, in epoch milliseconds. 
*/ @JsonProperty("updated_at") @@ -208,12 +208,12 @@ public Boolean getReadOnly() { return readOnly; } - public ConnectionInfo setSecurableType(String securableType) { + public ConnectionInfo setSecurableType(SecurableType securableType) { this.securableType = securableType; return this; } - public String getSecurableType() { + public SecurableType getSecurableType() { return securableType; } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionType.java index b6e6a3e33..c43cb89bd 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionType.java @@ -4,20 +4,27 @@ import com.databricks.sdk.support.Generated; -/** The type of connection. */ +/** Next Id: 30 */ @Generated public enum ConnectionType { BIGQUERY, DATABRICKS, + GA4_RAW_DATA, GLUE, HIVE_METASTORE, HTTP, MYSQL, ORACLE, POSTGRESQL, + POWER_BI, REDSHIFT, + SALESFORCE, + SALESFORCE_DATA_CLOUD, + SERVICENOW, SNOWFLAKE, SQLDW, SQLSERVER, TERADATA, + UNKNOWN_CONNECTION_TYPE, + WORKDAY_RAAS, } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java index eb449c1dd..ff8b2cda9 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ConnectionsAPI.java @@ -85,10 +85,6 @@ public ConnectionInfo get(GetConnectionRequest request) { *
<p>
List all connections. */ public Iterable list(ListConnectionsRequest request) { - - if (request.getMaxResults() == null) { - request.setMaxResults(0L); - } return new Paginator<>( request, impl::list, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateConnection.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateConnection.java index 2836337ce..3eea7832c 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateConnection.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateConnection.java @@ -26,7 +26,7 @@ public class CreateConnection { @JsonProperty("options") private Map options; - /** An object containing map of key-value properties attached to the connection. */ + /** A map of key-value properties attached to the securable. */ @JsonProperty("properties") private Map properties; diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CredentialType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CredentialType.java index 7f8868e05..b5f06caf4 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CredentialType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CredentialType.java @@ -4,9 +4,19 @@ import com.databricks.sdk.support.Generated; -/** The type of credential. */ +/** Next Id: 12 */ @Generated public enum CredentialType { BEARER_TOKEN, + OAUTH_ACCESS_TOKEN, + OAUTH_M2M, + OAUTH_REFRESH_TOKEN, + OAUTH_RESOURCE_OWNER_PASSWORD, + OAUTH_U2M, + OAUTH_U2M_MAPPING, + OIDC_TOKEN, + PEM_PRIVATE_KEY, + SERVICE_CREDENTIAL, + UNKNOWN_CREDENTIAL_TYPE, USERNAME_PASSWORD, } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemaInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemaInfo.java index 9358ca802..6f7da5149 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemaInfo.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemaInfo.java @@ -8,6 +8,7 @@ import java.util.Map; import java.util.Objects; +/** Next ID: 40 */ @Generated public class SchemaInfo { /** @@ -23,7 +24,7 @@ public class SchemaInfo { /** The type of the parent catalog. */ @JsonProperty("catalog_type") - private String catalogType; + private CatalogType catalogType; /** User-provided free-form text description. */ @JsonProperty("comment") @@ -41,7 +42,7 @@ public class SchemaInfo { @JsonProperty("effective_predictive_optimization_flag") private EffectivePredictiveOptimizationFlag effectivePredictiveOptimizationFlag; - /** */ + /** Whether predictive optimization should be enabled for this object and objects under it. 
*/ @JsonProperty("enable_predictive_optimization") private EnablePredictiveOptimization enablePredictiveOptimization; @@ -103,12 +104,12 @@ public String getCatalogName() { return catalogName; } - public SchemaInfo setCatalogType(String catalogType) { + public SchemaInfo setCatalogType(CatalogType catalogType) { this.catalogType = catalogType; return this; } - public String getCatalogType() { + public CatalogType getCatalogType() { return catalogType; } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java index c1345a265..acaa88214 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SchemasAPI.java @@ -85,10 +85,6 @@ public Iterable list(String catalogName) { * the array. */ public Iterable list(ListSchemasRequest request) { - - if (request.getMaxResults() == null) { - request.setMaxResults(0L); - } return new Paginator<>( request, impl::list, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateSchema.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateSchema.java index 0cb434d26..5c910b1e7 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateSchema.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateSchema.java @@ -15,7 +15,7 @@ public class UpdateSchema { @JsonProperty("comment") private String comment; - /** */ + /** Whether predictive optimization should be enabled for this object and objects under it. */ @JsonProperty("enable_predictive_optimization") private EnablePredictiveOptimization enablePredictiveOptimization; diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java index ff9668106..c359b9f8a 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterAttributes.java @@ -191,6 +191,14 @@ public class ClusterAttributes { @JsonProperty("policy_id") private String policyId; + /** If set, what the configurable IOPS for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_iops") + private Long remoteShuffleDiskIops; + + /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_throughput") + private Long remoteShuffleDiskThroughput; + /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -246,6 +254,10 @@ public class ClusterAttributes { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; + /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ + @JsonProperty("total_initial_remote_shuffle_disk_size") + private Long totalInitialRemoteShuffleDiskSize; + /** * This field can only be used when `kind = CLASSIC_PREVIEW`. 
* @@ -430,6 +442,24 @@ public String getPolicyId() { return policyId; } + public ClusterAttributes setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { + this.remoteShuffleDiskIops = remoteShuffleDiskIops; + return this; + } + + public Long getRemoteShuffleDiskIops() { + return remoteShuffleDiskIops; + } + + public ClusterAttributes setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { + this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; + return this; + } + + public Long getRemoteShuffleDiskThroughput() { + return remoteShuffleDiskThroughput; + } + public ClusterAttributes setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -484,6 +514,16 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public ClusterAttributes setTotalInitialRemoteShuffleDiskSize( + Long totalInitialRemoteShuffleDiskSize) { + this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; + return this; + } + + public Long getTotalInitialRemoteShuffleDiskSize() { + return totalInitialRemoteShuffleDiskSize; + } + public ClusterAttributes setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -526,12 +566,15 @@ public boolean equals(Object o) { && Objects.equals(kind, that.kind) && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(policyId, that.policyId) + && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) + && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -558,12 +601,15 @@ public int hashCode() { kind, nodeTypeId, policyId, + remoteShuffleDiskIops, + remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, + totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -590,12 +636,15 @@ public String toString() { .add("kind", kind) .add("nodeTypeId", nodeTypeId) .add("policyId", policyId) + .add("remoteShuffleDiskIops", remoteShuffleDiskIops) + .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java index 8b957e3dc..b2309f101 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterDetails.java @@ -287,6 +287,14 @@ public class ClusterDetails { @JsonProperty("policy_id") private String policyId; + /** If set, what the configurable IOPS for the remote shuffle disk 
is. */ + @JsonProperty("remote_shuffle_disk_iops") + private Long remoteShuffleDiskIops; + + /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_throughput") + private Long remoteShuffleDiskThroughput; + /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -386,6 +394,10 @@ public class ClusterDetails { @JsonProperty("termination_reason") private TerminationReason terminationReason; + /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ + @JsonProperty("total_initial_remote_shuffle_disk_size") + private Long totalInitialRemoteShuffleDiskSize; + /** * This field can only be used when `kind = CLASSIC_PREVIEW`. * @@ -696,6 +708,24 @@ public String getPolicyId() { return policyId; } + public ClusterDetails setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { + this.remoteShuffleDiskIops = remoteShuffleDiskIops; + return this; + } + + public Long getRemoteShuffleDiskIops() { + return remoteShuffleDiskIops; + } + + public ClusterDetails setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { + this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; + return this; + } + + public Long getRemoteShuffleDiskThroughput() { + return remoteShuffleDiskThroughput; + } + public ClusterDetails setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -813,6 +843,16 @@ public TerminationReason getTerminationReason() { return terminationReason; } + public ClusterDetails setTotalInitialRemoteShuffleDiskSize( + Long totalInitialRemoteShuffleDiskSize) { + this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; + return this; + } + + public Long getTotalInitialRemoteShuffleDiskSize() { + return totalInitialRemoteShuffleDiskSize; + } + public ClusterDetails setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -869,6 +909,8 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) + && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) + && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) @@ -882,6 +924,7 @@ public boolean equals(Object o) { && Objects.equals(stateMessage, that.stateMessage) && Objects.equals(terminatedTime, that.terminatedTime) && Objects.equals(terminationReason, that.terminationReason) + && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -922,6 +965,8 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, + remoteShuffleDiskIops, + remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, @@ -935,6 +980,7 @@ public int hashCode() { stateMessage, terminatedTime, terminationReason, + totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -975,6 +1021,8 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) + .add("remoteShuffleDiskIops", remoteShuffleDiskIops) + .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", 
singleUserName) .add("sparkConf", sparkConf) @@ -988,6 +1036,7 @@ public String toString() { .add("stateMessage", stateMessage) .add("terminatedTime", terminatedTime) .add("terminationReason", terminationReason) + .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java index 08cd8a715..78c7ddbfa 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/ClusterSpec.java @@ -218,6 +218,14 @@ public class ClusterSpec { @JsonProperty("policy_id") private String policyId; + /** If set, what the configurable IOPS for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_iops") + private Long remoteShuffleDiskIops; + + /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_throughput") + private Long remoteShuffleDiskThroughput; + /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -273,6 +281,10 @@ public class ClusterSpec { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; + /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ + @JsonProperty("total_initial_remote_shuffle_disk_size") + private Long totalInitialRemoteShuffleDiskSize; + /** * This field can only be used when `kind = CLASSIC_PREVIEW`. * @@ -484,6 +496,24 @@ public String getPolicyId() { return policyId; } + public ClusterSpec setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { + this.remoteShuffleDiskIops = remoteShuffleDiskIops; + return this; + } + + public Long getRemoteShuffleDiskIops() { + return remoteShuffleDiskIops; + } + + public ClusterSpec setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { + this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; + return this; + } + + public Long getRemoteShuffleDiskThroughput() { + return remoteShuffleDiskThroughput; + } + public ClusterSpec setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -538,6 +568,15 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public ClusterSpec setTotalInitialRemoteShuffleDiskSize(Long totalInitialRemoteShuffleDiskSize) { + this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; + return this; + } + + public Long getTotalInitialRemoteShuffleDiskSize() { + return totalInitialRemoteShuffleDiskSize; + } + public ClusterSpec setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -583,12 +622,15 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) + && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) + && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && 
Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -618,12 +660,15 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, + remoteShuffleDiskIops, + remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, + totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -653,12 +698,15 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) + .add("remoteShuffleDiskIops", remoteShuffleDiskIops) + .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java index 79853eda0..027bae1c8 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/CreateCluster.java @@ -221,6 +221,14 @@ public class CreateCluster { @JsonProperty("policy_id") private String policyId; + /** If set, what the configurable IOPS for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_iops") + private Long remoteShuffleDiskIops; + + /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_throughput") + private Long remoteShuffleDiskThroughput; + /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -276,6 +284,10 @@ public class CreateCluster { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; + /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ + @JsonProperty("total_initial_remote_shuffle_disk_size") + private Long totalInitialRemoteShuffleDiskSize; + /** * This field can only be used when `kind = CLASSIC_PREVIEW`. 
* @@ -496,6 +508,24 @@ public String getPolicyId() { return policyId; } + public CreateCluster setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { + this.remoteShuffleDiskIops = remoteShuffleDiskIops; + return this; + } + + public Long getRemoteShuffleDiskIops() { + return remoteShuffleDiskIops; + } + + public CreateCluster setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { + this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; + return this; + } + + public Long getRemoteShuffleDiskThroughput() { + return remoteShuffleDiskThroughput; + } + public CreateCluster setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -550,6 +580,16 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public CreateCluster setTotalInitialRemoteShuffleDiskSize( + Long totalInitialRemoteShuffleDiskSize) { + this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; + return this; + } + + public Long getTotalInitialRemoteShuffleDiskSize() { + return totalInitialRemoteShuffleDiskSize; + } + public CreateCluster setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -596,12 +636,15 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) + && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) + && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -632,12 +675,15 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, + remoteShuffleDiskIops, + remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, + totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -668,12 +714,15 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) + .add("remoteShuffleDiskIops", remoteShuffleDiskIops) + .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java index 81c1b7e85..bbf12f00d 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditCluster.java @@ -218,6 +218,14 @@ public class EditCluster { @JsonProperty("policy_id") private String policyId; + /** If set, what the configurable IOPS for the remote shuffle disk is. 
*/ + @JsonProperty("remote_shuffle_disk_iops") + private Long remoteShuffleDiskIops; + + /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_throughput") + private Long remoteShuffleDiskThroughput; + /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -273,6 +281,10 @@ public class EditCluster { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; + /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ + @JsonProperty("total_initial_remote_shuffle_disk_size") + private Long totalInitialRemoteShuffleDiskSize; + /** * This field can only be used when `kind = CLASSIC_PREVIEW`. * @@ -493,6 +505,24 @@ public String getPolicyId() { return policyId; } + public EditCluster setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { + this.remoteShuffleDiskIops = remoteShuffleDiskIops; + return this; + } + + public Long getRemoteShuffleDiskIops() { + return remoteShuffleDiskIops; + } + + public EditCluster setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { + this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; + return this; + } + + public Long getRemoteShuffleDiskThroughput() { + return remoteShuffleDiskThroughput; + } + public EditCluster setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -547,6 +577,15 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public EditCluster setTotalInitialRemoteShuffleDiskSize(Long totalInitialRemoteShuffleDiskSize) { + this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; + return this; + } + + public Long getTotalInitialRemoteShuffleDiskSize() { + return totalInitialRemoteShuffleDiskSize; + } + public EditCluster setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -593,12 +632,15 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) + && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) + && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -629,12 +671,15 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, + remoteShuffleDiskIops, + remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, + totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -665,12 +710,15 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) + .add("remoteShuffleDiskIops", remoteShuffleDiskIops) + .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + 
.add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java index 151d44359..7d3e13c7d 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/UpdateClusterResource.java @@ -207,6 +207,14 @@ public class UpdateClusterResource { @JsonProperty("policy_id") private String policyId; + /** If set, what the configurable IOPS for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_iops") + private Long remoteShuffleDiskIops; + + /** If set, what the configurable throughput (in Mb/s) for the remote shuffle disk is. */ + @JsonProperty("remote_shuffle_disk_throughput") + private Long remoteShuffleDiskThroughput; + /** * Determines the cluster's runtime engine, either standard or Photon. * @@ -262,6 +270,10 @@ public class UpdateClusterResource { @JsonProperty("ssh_public_keys") private Collection sshPublicKeys; + /** If set, what the total initial volume size (in GB) of the remote shuffle disks should be. */ + @JsonProperty("total_initial_remote_shuffle_disk_size") + private Long totalInitialRemoteShuffleDiskSize; + /** * This field can only be used when `kind = CLASSIC_PREVIEW`. * @@ -464,6 +476,24 @@ public String getPolicyId() { return policyId; } + public UpdateClusterResource setRemoteShuffleDiskIops(Long remoteShuffleDiskIops) { + this.remoteShuffleDiskIops = remoteShuffleDiskIops; + return this; + } + + public Long getRemoteShuffleDiskIops() { + return remoteShuffleDiskIops; + } + + public UpdateClusterResource setRemoteShuffleDiskThroughput(Long remoteShuffleDiskThroughput) { + this.remoteShuffleDiskThroughput = remoteShuffleDiskThroughput; + return this; + } + + public Long getRemoteShuffleDiskThroughput() { + return remoteShuffleDiskThroughput; + } + public UpdateClusterResource setRuntimeEngine(RuntimeEngine runtimeEngine) { this.runtimeEngine = runtimeEngine; return this; @@ -518,6 +548,16 @@ public Collection getSshPublicKeys() { return sshPublicKeys; } + public UpdateClusterResource setTotalInitialRemoteShuffleDiskSize( + Long totalInitialRemoteShuffleDiskSize) { + this.totalInitialRemoteShuffleDiskSize = totalInitialRemoteShuffleDiskSize; + return this; + } + + public Long getTotalInitialRemoteShuffleDiskSize() { + return totalInitialRemoteShuffleDiskSize; + } + public UpdateClusterResource setUseMlRuntime(Boolean useMlRuntime) { this.useMlRuntime = useMlRuntime; return this; @@ -562,12 +602,15 @@ public boolean equals(Object o) { && Objects.equals(nodeTypeId, that.nodeTypeId) && Objects.equals(numWorkers, that.numWorkers) && Objects.equals(policyId, that.policyId) + && Objects.equals(remoteShuffleDiskIops, that.remoteShuffleDiskIops) + && Objects.equals(remoteShuffleDiskThroughput, that.remoteShuffleDiskThroughput) && Objects.equals(runtimeEngine, that.runtimeEngine) && Objects.equals(singleUserName, that.singleUserName) && Objects.equals(sparkConf, that.sparkConf) && Objects.equals(sparkEnvVars, that.sparkEnvVars) && Objects.equals(sparkVersion, that.sparkVersion) && Objects.equals(sshPublicKeys, that.sshPublicKeys) + && Objects.equals(totalInitialRemoteShuffleDiskSize, that.totalInitialRemoteShuffleDiskSize) && Objects.equals(useMlRuntime, 
that.useMlRuntime) && Objects.equals(workloadType, that.workloadType); } @@ -596,12 +639,15 @@ public int hashCode() { nodeTypeId, numWorkers, policyId, + remoteShuffleDiskIops, + remoteShuffleDiskThroughput, runtimeEngine, singleUserName, sparkConf, sparkEnvVars, sparkVersion, sshPublicKeys, + totalInitialRemoteShuffleDiskSize, useMlRuntime, workloadType); } @@ -630,12 +676,15 @@ public String toString() { .add("nodeTypeId", nodeTypeId) .add("numWorkers", numWorkers) .add("policyId", policyId) + .add("remoteShuffleDiskIops", remoteShuffleDiskIops) + .add("remoteShuffleDiskThroughput", remoteShuffleDiskThroughput) .add("runtimeEngine", runtimeEngine) .add("singleUserName", singleUserName) .add("sparkConf", sparkConf) .add("sparkEnvVars", sparkEnvVars) .add("sparkVersion", sparkVersion) .add("sshPublicKeys", sshPublicKeys) + .add("totalInitialRemoteShuffleDiskSize", totalInitialRemoteShuffleDiskSize) .add("useMlRuntime", useMlRuntime) .add("workloadType", workloadType) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountGroupsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountGroupsAPI.java index 7016a0673..634e2397a 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountGroupsAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountGroupsAPI.java @@ -75,7 +75,7 @@ public Group get(GetAccountGroupRequest request) { public Iterable list(ListAccountGroupsRequest request) { request.setStartIndex(1L); if (request.getCount() == null) { - request.setCount(10000L); + request.setCount(100L); } return new Paginator<>( request, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountServicePrincipalsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountServicePrincipalsAPI.java index 415577a5a..ff6280873 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountServicePrincipalsAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountServicePrincipalsAPI.java @@ -73,7 +73,7 @@ public ServicePrincipal get(GetAccountServicePrincipalRequest request) { public Iterable list(ListAccountServicePrincipalsRequest request) { request.setStartIndex(1L); if (request.getCount() == null) { - request.setCount(10000L); + request.setCount(100L); } return new Paginator<>( request, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountUsersAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountUsersAPI.java index 77249ac5e..b378db43c 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountUsersAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/AccountUsersAPI.java @@ -79,7 +79,7 @@ public User get(GetAccountUserRequest request) { public Iterable list(ListAccountUsersRequest request) { request.setStartIndex(1L); if (request.getCount() == null) { - request.setCount(10000L); + request.setCount(100L); } return new Paginator<>( request, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GroupsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GroupsAPI.java index a6b7414d2..01a61454b 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GroupsAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GroupsAPI.java @@ -75,7 +75,7 @@ public Group get(GetGroupRequest 
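The remote shuffle disk fields added above to ClusterSpec, CreateCluster, EditCluster, and UpdateClusterResource are plain builder-style setters. A minimal sketch of setting them at cluster creation follows; the Spark version, node type, and sizing values are placeholders, not values taken from this change:

    import com.databricks.sdk.WorkspaceClient;
    import com.databricks.sdk.service.compute.CreateCluster;

    public class RemoteShuffleDiskExample {
      public static void main(String[] args) {
        WorkspaceClient w = new WorkspaceClient();
        w.clusters()
            .create(
                new CreateCluster()
                    .setClusterName("shuffle-tuned")
                    .setSparkVersion("15.4.x-scala2.12")
                    .setNodeTypeId("i3.xlarge")
                    .setNumWorkers(2L)
                    // New fields from this change: remote shuffle disk sizing.
                    .setRemoteShuffleDiskIops(3000L)
                    .setRemoteShuffleDiskThroughput(125L) // Mb/s
                    .setTotalInitialRemoteShuffleDiskSize(100L)); // GB
      }
    }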
request) { public Iterable list(ListGroupsRequest request) { request.setStartIndex(1L); if (request.getCount() == null) { - request.setCount(10000L); + request.setCount(100L); } return new Paginator<>( request, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/ServicePrincipalsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/ServicePrincipalsAPI.java index f28627207..28d5c5511 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/ServicePrincipalsAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/ServicePrincipalsAPI.java @@ -73,7 +73,7 @@ public ServicePrincipal get(GetServicePrincipalRequest request) { public Iterable list(ListServicePrincipalsRequest request) { request.setStartIndex(1L); if (request.getCount() == null) { - request.setCount(10000L); + request.setCount(100L); } return new Paginator<>( request, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/UsersAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/UsersAPI.java index d5797926d..d079aba02 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/UsersAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/UsersAPI.java @@ -98,7 +98,7 @@ public PasswordPermissions getPermissions() { public Iterable list(ListUsersRequest request) { request.setStartIndex(1L); if (request.getCount() == null) { - request.setCount(10000L); + request.setCount(100L); } return new Paginator<>( request, diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CreatePipeline.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CreatePipeline.java index 7533ab6c4..b81984eca 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CreatePipeline.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/CreatePipeline.java @@ -134,6 +134,14 @@ public class CreatePipeline { @JsonProperty("storage") private String storage; + /** + * A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, + * and are therefore subject to the same limitations. A maximum of 25 tags can be added to the + * pipeline. + */ + @JsonProperty("tags") + private Map tags; + /** * Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` * must be specified. To publish to Unity Catalog, also specify `catalog`. 
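The SCIM list changes above lower the default page size from 10000 to 100 when `count` is unset; the Paginator still walks every page, so callers who want larger pages can set `count` explicitly. A sketch, shown for workspace-level groups (the account-level APIs behave the same):

    import com.databricks.sdk.WorkspaceClient;
    import com.databricks.sdk.service.iam.Group;
    import com.databricks.sdk.service.iam.ListGroupsRequest;

    public class ListGroupsExample {
      public static void main(String[] args) {
        WorkspaceClient w = new WorkspaceClient();
        // count only sets the page size; iteration still covers all groups.
        for (Group g : w.groups().list(new ListGroupsRequest().setCount(500L))) {
          System.out.println(g.getDisplayName());
        }
      }
    }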
This legacy field is @@ -380,6 +388,15 @@ public String getStorage() { return storage; } + public CreatePipeline setTags(Map tags) { + this.tags = tags; + return this; + } + + public Map getTags() { + return tags; + } + public CreatePipeline setTarget(String target) { this.target = target; return this; @@ -429,6 +446,7 @@ public boolean equals(Object o) { && Objects.equals(schema, that.schema) && Objects.equals(serverless, that.serverless) && Objects.equals(storage, that.storage) + && Objects.equals(tags, that.tags) && Objects.equals(target, that.target) && Objects.equals(trigger, that.trigger); } @@ -462,6 +480,7 @@ public int hashCode() { schema, serverless, storage, + tags, target, trigger); } @@ -495,6 +514,7 @@ public String toString() { .add("schema", schema) .add("serverless", serverless) .add("storage", storage) + .add("tags", tags) .add("target", target) .add("trigger", trigger) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/EditPipeline.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/EditPipeline.java index 444759473..776b17166 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/EditPipeline.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/EditPipeline.java @@ -143,6 +143,14 @@ public class EditPipeline { @JsonProperty("storage") private String storage; + /** + * A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, + * and are therefore subject to the same limitations. A maximum of 25 tags can be added to the + * pipeline. + */ + @JsonProperty("tags") + private Map tags; + /** * Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` * must be specified. To publish to Unity Catalog, also specify `catalog`. 
This legacy field is @@ -398,6 +406,15 @@ public String getStorage() { return storage; } + public EditPipeline setTags(Map tags) { + this.tags = tags; + return this; + } + + public Map getTags() { + return tags; + } + public EditPipeline setTarget(String target) { this.target = target; return this; @@ -448,6 +465,7 @@ public boolean equals(Object o) { && Objects.equals(schema, that.schema) && Objects.equals(serverless, that.serverless) && Objects.equals(storage, that.storage) + && Objects.equals(tags, that.tags) && Objects.equals(target, that.target) && Objects.equals(trigger, that.trigger); } @@ -482,6 +500,7 @@ public int hashCode() { schema, serverless, storage, + tags, target, trigger); } @@ -516,6 +535,7 @@ public String toString() { .add("schema", schema) .add("serverless", serverless) .add("storage", storage) + .add("tags", tags) .add("target", target) .add("trigger", trigger) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/IngestionSourceType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/IngestionSourceType.java index c7620bc7f..272a8235d 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/IngestionSourceType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/IngestionSourceType.java @@ -17,5 +17,6 @@ public enum IngestionSourceType { SERVICENOW, SHAREPOINT, SQLSERVER, + TERADATA, WORKDAY_RAAS, } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineSpec.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineSpec.java index 913972a57..b4c5c4d8e 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineSpec.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/PipelineSpec.java @@ -115,6 +115,14 @@ public class PipelineSpec { @JsonProperty("storage") private String storage; + /** + * A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, + * and are therefore subject to the same limitations. A maximum of 25 tags can be added to the + * pipeline. + */ + @JsonProperty("tags") + private Map tags; + /** * Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` * must be specified. To publish to Unity Catalog, also specify `catalog`. 
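The new `tags` field on CreatePipeline, EditPipeline, and PipelineSpec takes a string map that is forwarded to the cluster as cluster tags (at most 25 per pipeline). A minimal sketch; the pipeline name and tag values are placeholders:

    import com.databricks.sdk.WorkspaceClient;
    import com.databricks.sdk.service.pipelines.CreatePipeline;
    import java.util.HashMap;
    import java.util.Map;

    public class PipelineTagsExample {
      public static void main(String[] args) {
        WorkspaceClient w = new WorkspaceClient();
        Map<String, String> tags = new HashMap<>();
        tags.put("team", "data-eng"); // forwarded as a cluster tag
        tags.put("cost-center", "1234"); // subject to cluster tag limits
        w.pipelines().create(new CreatePipeline().setName("tagged-pipeline").setTags(tags));
      }
    }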
This legacy field is @@ -334,6 +342,15 @@ public String getStorage() { return storage; } + public PipelineSpec setTags(Map tags) { + this.tags = tags; + return this; + } + + public Map getTags() { + return tags; + } + public PipelineSpec setTarget(String target) { this.target = target; return this; @@ -380,6 +397,7 @@ public boolean equals(Object o) { && Objects.equals(schema, that.schema) && Objects.equals(serverless, that.serverless) && Objects.equals(storage, that.storage) + && Objects.equals(tags, that.tags) && Objects.equals(target, that.target) && Objects.equals(trigger, that.trigger); } @@ -410,6 +428,7 @@ public int hashCode() { schema, serverless, storage, + tags, target, trigger); } @@ -440,6 +459,7 @@ public String toString() { .add("schema", schema) .add("serverless", serverless) .add("storage", storage) + .add("tags", tags) .add("target", target) .add("trigger", trigger) .toString(); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityInput.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityInput.java index 9e9593df2..ca9ccf251 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityInput.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityInput.java @@ -48,10 +48,24 @@ public class ServedEntityInput { @JsonProperty("instance_profile_arn") private String instanceProfileArn; + /** + * The maximum provisioned concurrency that the endpoint can scale up to. Do not use if + * workload_size is specified. + */ + @JsonProperty("max_provisioned_concurrency") + private Long maxProvisionedConcurrency; + /** The maximum tokens per second that the endpoint can scale up to. */ @JsonProperty("max_provisioned_throughput") private Long maxProvisionedThroughput; + /** + * The minimum provisioned concurrency that the endpoint can scale down to. Do not use if + * workload_size is specified. + */ + @JsonProperty("min_provisioned_concurrency") + private Long minProvisionedConcurrency; + /** The minimum tokens per second that the endpoint can scale down to. */ @JsonProperty("min_provisioned_throughput") private Long minProvisionedThroughput; @@ -80,7 +94,8 @@ public class ServedEntityInput { * concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned * concurrency). Additional custom workload sizes can also be used when available in the * workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each - * workload size is 0. + * workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency + * are specified. 
*/ @JsonProperty("workload_size") private String workloadSize; @@ -142,6 +157,15 @@ public String getInstanceProfileArn() { return instanceProfileArn; } + public ServedEntityInput setMaxProvisionedConcurrency(Long maxProvisionedConcurrency) { + this.maxProvisionedConcurrency = maxProvisionedConcurrency; + return this; + } + + public Long getMaxProvisionedConcurrency() { + return maxProvisionedConcurrency; + } + public ServedEntityInput setMaxProvisionedThroughput(Long maxProvisionedThroughput) { this.maxProvisionedThroughput = maxProvisionedThroughput; return this; @@ -151,6 +175,15 @@ public Long getMaxProvisionedThroughput() { return maxProvisionedThroughput; } + public ServedEntityInput setMinProvisionedConcurrency(Long minProvisionedConcurrency) { + this.minProvisionedConcurrency = minProvisionedConcurrency; + return this; + } + + public Long getMinProvisionedConcurrency() { + return minProvisionedConcurrency; + } + public ServedEntityInput setMinProvisionedThroughput(Long minProvisionedThroughput) { this.minProvisionedThroughput = minProvisionedThroughput; return this; @@ -215,7 +248,9 @@ public boolean equals(Object o) { && Objects.equals(environmentVars, that.environmentVars) && Objects.equals(externalModel, that.externalModel) && Objects.equals(instanceProfileArn, that.instanceProfileArn) + && Objects.equals(maxProvisionedConcurrency, that.maxProvisionedConcurrency) && Objects.equals(maxProvisionedThroughput, that.maxProvisionedThroughput) + && Objects.equals(minProvisionedConcurrency, that.minProvisionedConcurrency) && Objects.equals(minProvisionedThroughput, that.minProvisionedThroughput) && Objects.equals(name, that.name) && Objects.equals(provisionedModelUnits, that.provisionedModelUnits) @@ -232,7 +267,9 @@ public int hashCode() { environmentVars, externalModel, instanceProfileArn, + maxProvisionedConcurrency, maxProvisionedThroughput, + minProvisionedConcurrency, minProvisionedThroughput, name, provisionedModelUnits, @@ -249,7 +286,9 @@ public String toString() { .add("environmentVars", environmentVars) .add("externalModel", externalModel) .add("instanceProfileArn", instanceProfileArn) + .add("maxProvisionedConcurrency", maxProvisionedConcurrency) .add("maxProvisionedThroughput", maxProvisionedThroughput) + .add("minProvisionedConcurrency", minProvisionedConcurrency) .add("minProvisionedThroughput", minProvisionedThroughput) .add("name", name) .add("provisionedModelUnits", provisionedModelUnits) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityOutput.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityOutput.java index 74b58f742..129841ac9 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityOutput.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedEntityOutput.java @@ -63,10 +63,24 @@ public class ServedEntityOutput { @JsonProperty("instance_profile_arn") private String instanceProfileArn; + /** + * The maximum provisioned concurrency that the endpoint can scale up to. Do not use if + * workload_size is specified. + */ + @JsonProperty("max_provisioned_concurrency") + private Long maxProvisionedConcurrency; + /** The maximum tokens per second that the endpoint can scale up to. */ @JsonProperty("max_provisioned_throughput") private Long maxProvisionedThroughput; + /** + * The minimum provisioned concurrency that the endpoint can scale down to. Do not use if + * workload_size is specified. 
+ */ + @JsonProperty("min_provisioned_concurrency") + private Long minProvisionedConcurrency; + /** The minimum tokens per second that the endpoint can scale down to. */ @JsonProperty("min_provisioned_throughput") private Long minProvisionedThroughput; @@ -99,7 +113,8 @@ public class ServedEntityOutput { * concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned * concurrency). Additional custom workload sizes can also be used when available in the * workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each - * workload size is 0. + * workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency + * are specified. */ @JsonProperty("workload_size") private String workloadSize; @@ -188,6 +203,15 @@ public String getInstanceProfileArn() { return instanceProfileArn; } + public ServedEntityOutput setMaxProvisionedConcurrency(Long maxProvisionedConcurrency) { + this.maxProvisionedConcurrency = maxProvisionedConcurrency; + return this; + } + + public Long getMaxProvisionedConcurrency() { + return maxProvisionedConcurrency; + } + public ServedEntityOutput setMaxProvisionedThroughput(Long maxProvisionedThroughput) { this.maxProvisionedThroughput = maxProvisionedThroughput; return this; @@ -197,6 +221,15 @@ public Long getMaxProvisionedThroughput() { return maxProvisionedThroughput; } + public ServedEntityOutput setMinProvisionedConcurrency(Long minProvisionedConcurrency) { + this.minProvisionedConcurrency = minProvisionedConcurrency; + return this; + } + + public Long getMinProvisionedConcurrency() { + return minProvisionedConcurrency; + } + public ServedEntityOutput setMinProvisionedThroughput(Long minProvisionedThroughput) { this.minProvisionedThroughput = minProvisionedThroughput; return this; @@ -273,7 +306,9 @@ public boolean equals(Object o) { && Objects.equals(externalModel, that.externalModel) && Objects.equals(foundationModel, that.foundationModel) && Objects.equals(instanceProfileArn, that.instanceProfileArn) + && Objects.equals(maxProvisionedConcurrency, that.maxProvisionedConcurrency) && Objects.equals(maxProvisionedThroughput, that.maxProvisionedThroughput) + && Objects.equals(minProvisionedConcurrency, that.minProvisionedConcurrency) && Objects.equals(minProvisionedThroughput, that.minProvisionedThroughput) && Objects.equals(name, that.name) && Objects.equals(provisionedModelUnits, that.provisionedModelUnits) @@ -294,7 +329,9 @@ public int hashCode() { externalModel, foundationModel, instanceProfileArn, + maxProvisionedConcurrency, maxProvisionedThroughput, + minProvisionedConcurrency, minProvisionedThroughput, name, provisionedModelUnits, @@ -315,7 +352,9 @@ public String toString() { .add("externalModel", externalModel) .add("foundationModel", foundationModel) .add("instanceProfileArn", instanceProfileArn) + .add("maxProvisionedConcurrency", maxProvisionedConcurrency) .add("maxProvisionedThroughput", maxProvisionedThroughput) + .add("minProvisionedConcurrency", minProvisionedConcurrency) .add("minProvisionedThroughput", minProvisionedThroughput) .add("name", name) .add("provisionedModelUnits", provisionedModelUnits) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelInput.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelInput.java index 907d88d17..93b608063 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelInput.java +++ 
b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelInput.java @@ -23,10 +23,24 @@ public class ServedModelInput { @JsonProperty("instance_profile_arn") private String instanceProfileArn; + /** + * The maximum provisioned concurrency that the endpoint can scale up to. Do not use if + * workload_size is specified. + */ + @JsonProperty("max_provisioned_concurrency") + private Long maxProvisionedConcurrency; + /** The maximum tokens per second that the endpoint can scale up to. */ @JsonProperty("max_provisioned_throughput") private Long maxProvisionedThroughput; + /** + * The minimum provisioned concurrency that the endpoint can scale down to. Do not use if + * workload_size is specified. + */ + @JsonProperty("min_provisioned_concurrency") + private Long minProvisionedConcurrency; + /** The minimum tokens per second that the endpoint can scale down to. */ @JsonProperty("min_provisioned_throughput") private Long minProvisionedThroughput; @@ -63,7 +77,8 @@ public class ServedModelInput { * concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned * concurrency). Additional custom workload sizes can also be used when available in the * workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each - * workload size is 0. + * workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency + * are specified. */ @JsonProperty("workload_size") private String workloadSize; @@ -98,6 +113,15 @@ public String getInstanceProfileArn() { return instanceProfileArn; } + public ServedModelInput setMaxProvisionedConcurrency(Long maxProvisionedConcurrency) { + this.maxProvisionedConcurrency = maxProvisionedConcurrency; + return this; + } + + public Long getMaxProvisionedConcurrency() { + return maxProvisionedConcurrency; + } + public ServedModelInput setMaxProvisionedThroughput(Long maxProvisionedThroughput) { this.maxProvisionedThroughput = maxProvisionedThroughput; return this; @@ -107,6 +131,15 @@ public Long getMaxProvisionedThroughput() { return maxProvisionedThroughput; } + public ServedModelInput setMinProvisionedConcurrency(Long minProvisionedConcurrency) { + this.minProvisionedConcurrency = minProvisionedConcurrency; + return this; + } + + public Long getMinProvisionedConcurrency() { + return minProvisionedConcurrency; + } + public ServedModelInput setMinProvisionedThroughput(Long minProvisionedThroughput) { this.minProvisionedThroughput = minProvisionedThroughput; return this; @@ -186,7 +219,9 @@ public boolean equals(Object o) { ServedModelInput that = (ServedModelInput) o; return Objects.equals(environmentVars, that.environmentVars) && Objects.equals(instanceProfileArn, that.instanceProfileArn) + && Objects.equals(maxProvisionedConcurrency, that.maxProvisionedConcurrency) && Objects.equals(maxProvisionedThroughput, that.maxProvisionedThroughput) + && Objects.equals(minProvisionedConcurrency, that.minProvisionedConcurrency) && Objects.equals(minProvisionedThroughput, that.minProvisionedThroughput) && Objects.equals(modelName, that.modelName) && Objects.equals(modelVersion, that.modelVersion) @@ -202,7 +237,9 @@ public int hashCode() { return Objects.hash( environmentVars, instanceProfileArn, + maxProvisionedConcurrency, maxProvisionedThroughput, + minProvisionedConcurrency, minProvisionedThroughput, modelName, modelVersion, @@ -218,7 +255,9 @@ public String toString() { return new ToStringer(ServedModelInput.class) .add("environmentVars", environmentVars) 
.add("instanceProfileArn", instanceProfileArn) + .add("maxProvisionedConcurrency", maxProvisionedConcurrency) .add("maxProvisionedThroughput", maxProvisionedThroughput) + .add("minProvisionedConcurrency", minProvisionedConcurrency) .add("minProvisionedThroughput", minProvisionedThroughput) .add("modelName", modelName) .add("modelVersion", modelVersion) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelOutput.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelOutput.java index eabfc4a48..dfdc57241 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelOutput.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServedModelOutput.java @@ -31,6 +31,20 @@ public class ServedModelOutput { @JsonProperty("instance_profile_arn") private String instanceProfileArn; + /** + * The maximum provisioned concurrency that the endpoint can scale up to. Do not use if + * workload_size is specified. + */ + @JsonProperty("max_provisioned_concurrency") + private Long maxProvisionedConcurrency; + + /** + * The minimum provisioned concurrency that the endpoint can scale down to. Do not use if + * workload_size is specified. + */ + @JsonProperty("min_provisioned_concurrency") + private Long minProvisionedConcurrency; + /** */ @JsonProperty("model_name") private String modelName; @@ -67,7 +81,8 @@ public class ServedModelOutput { * concurrency), "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned * concurrency). Additional custom workload sizes can also be used when available in the * workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each - * workload size is 0. + * workload size is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency + * are specified. 
*/ @JsonProperty("workload_size") private String workloadSize; @@ -120,6 +135,24 @@ public String getInstanceProfileArn() { return instanceProfileArn; } + public ServedModelOutput setMaxProvisionedConcurrency(Long maxProvisionedConcurrency) { + this.maxProvisionedConcurrency = maxProvisionedConcurrency; + return this; + } + + public Long getMaxProvisionedConcurrency() { + return maxProvisionedConcurrency; + } + + public ServedModelOutput setMinProvisionedConcurrency(Long minProvisionedConcurrency) { + this.minProvisionedConcurrency = minProvisionedConcurrency; + return this; + } + + public Long getMinProvisionedConcurrency() { + return minProvisionedConcurrency; + } + public ServedModelOutput setModelName(String modelName) { this.modelName = modelName; return this; @@ -201,6 +234,8 @@ public boolean equals(Object o) { && Objects.equals(creator, that.creator) && Objects.equals(environmentVars, that.environmentVars) && Objects.equals(instanceProfileArn, that.instanceProfileArn) + && Objects.equals(maxProvisionedConcurrency, that.maxProvisionedConcurrency) + && Objects.equals(minProvisionedConcurrency, that.minProvisionedConcurrency) && Objects.equals(modelName, that.modelName) && Objects.equals(modelVersion, that.modelVersion) && Objects.equals(name, that.name) @@ -218,6 +253,8 @@ public int hashCode() { creator, environmentVars, instanceProfileArn, + maxProvisionedConcurrency, + minProvisionedConcurrency, modelName, modelVersion, name, @@ -235,6 +272,8 @@ public String toString() { .add("creator", creator) .add("environmentVars", environmentVars) .add("instanceProfileArn", instanceProfileArn) + .add("maxProvisionedConcurrency", maxProvisionedConcurrency) + .add("minProvisionedConcurrency", minProvisionedConcurrency) .add("modelName", modelName) .add("modelVersion", modelVersion) .add("name", name) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptions.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptions.java new file mode 100755 index 000000000..1ba9dcb49 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptions.java @@ -0,0 +1,86 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +@Generated +public class DashboardEmailSubscriptions { + /** */ + @JsonProperty("boolean_val") + private BooleanMessage booleanVal; + + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. It is strongly suggested that systems make use of the etag in the read + * -> update pattern to perform setting updates in order to avoid race conditions. That is, get an + * etag from a GET request, and pass it with the PATCH request to identify the setting version you + * are updating. + */ + @JsonProperty("etag") + private String etag; + + /** + * Name of the corresponding setting. This field is populated in the response, but it will not be + * respected even if it's set in the request body. The setting name in the path parameter will be + * respected instead. 
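The serving changes above add min/max provisioned concurrency as an alternative to `workload_size` on the served entity and served model types; the two styles are mutually exclusive. A sketch building a ServedEntityInput both ways; the entity name, version, and bounds are placeholders:

    import com.databricks.sdk.service.serving.ServedEntityInput;

    public class ServingConcurrencyExample {
      public static void main(String[] args) {
        // Existing style: a named workload size.
        ServedEntityInput sized =
            new ServedEntityInput()
                .setEntityName("main.default.my_model")
                .setEntityVersion("1")
                .setWorkloadSize("Small")
                .setScaleToZeroEnabled(true);

        // New style: explicit bounds; do not combine with workload_size.
        ServedEntityInput bounded =
            new ServedEntityInput()
                .setEntityName("main.default.my_model")
                .setEntityVersion("1")
                .setMinProvisionedConcurrency(4L)
                .setMaxProvisionedConcurrency(16L)
                .setScaleToZeroEnabled(true);
        System.out.println(sized + "\n" + bounded);
      }
    }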
Setting name is required to be 'default' if the setting only has one + * instance per workspace. + */ + @JsonProperty("setting_name") + private String settingName; + + public DashboardEmailSubscriptions setBooleanVal(BooleanMessage booleanVal) { + this.booleanVal = booleanVal; + return this; + } + + public BooleanMessage getBooleanVal() { + return booleanVal; + } + + public DashboardEmailSubscriptions setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + public DashboardEmailSubscriptions setSettingName(String settingName) { + this.settingName = settingName; + return this; + } + + public String getSettingName() { + return settingName; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DashboardEmailSubscriptions that = (DashboardEmailSubscriptions) o; + return Objects.equals(booleanVal, that.booleanVal) + && Objects.equals(etag, that.etag) + && Objects.equals(settingName, that.settingName); + } + + @Override + public int hashCode() { + return Objects.hash(booleanVal, etag, settingName); + } + + @Override + public String toString() { + return new ToStringer(DashboardEmailSubscriptions.class) + .add("booleanVal", booleanVal) + .add("etag", etag) + .add("settingName", settingName) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsAPI.java new file mode 100755 index 000000000..daf85f77f --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsAPI.java @@ -0,0 +1,70 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.core.ApiClient; +import com.databricks.sdk.support.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace can + * send subscription emails containing PDFs and/or images of the dashboard. By default, this setting + * is enabled (set to `true`) + */ +@Generated +public class DashboardEmailSubscriptionsAPI { + private static final Logger LOG = LoggerFactory.getLogger(DashboardEmailSubscriptionsAPI.class); + + private final DashboardEmailSubscriptionsService impl; + + /** Regular-use constructor */ + public DashboardEmailSubscriptionsAPI(ApiClient apiClient) { + impl = new DashboardEmailSubscriptionsImpl(apiClient); + } + + /** Constructor for mocks */ + public DashboardEmailSubscriptionsAPI(DashboardEmailSubscriptionsService mock) { + impl = mock; + } + + /** + * Delete the Dashboard Email Subscriptions setting. + * + *

<p>Reverts the Dashboard Email Subscriptions setting to its default value. + */ + public DeleteDashboardEmailSubscriptionsResponse delete( + DeleteDashboardEmailSubscriptionsRequest request) { + return impl.delete(request); + } + + /** + * Get the Dashboard Email Subscriptions setting. + * + *

<p>Gets the Dashboard Email Subscriptions setting. + */ + public DashboardEmailSubscriptions get(GetDashboardEmailSubscriptionsRequest request) { + return impl.get(request); + } + + public DashboardEmailSubscriptions update( + boolean allowMissing, DashboardEmailSubscriptions setting, String fieldMask) { + return update( + new UpdateDashboardEmailSubscriptionsRequest() + .setAllowMissing(allowMissing) + .setSetting(setting) + .setFieldMask(fieldMask)); + } + + /** + * Update the Dashboard Email Subscriptions setting. + * + *

Updates the Dashboard Email Subscriptions setting. + */ + public DashboardEmailSubscriptions update(UpdateDashboardEmailSubscriptionsRequest request) { + return impl.update(request); + } + + public DashboardEmailSubscriptionsService impl() { + return impl; + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsImpl.java new file mode 100755 index 000000000..767cb5e75 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsImpl.java @@ -0,0 +1,59 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.core.ApiClient; +import com.databricks.sdk.core.DatabricksException; +import com.databricks.sdk.core.http.Request; +import com.databricks.sdk.support.Generated; +import java.io.IOException; + +/** Package-local implementation of DashboardEmailSubscriptions */ +@Generated +class DashboardEmailSubscriptionsImpl implements DashboardEmailSubscriptionsService { + private final ApiClient apiClient; + + public DashboardEmailSubscriptionsImpl(ApiClient apiClient) { + this.apiClient = apiClient; + } + + @Override + public DeleteDashboardEmailSubscriptionsResponse delete( + DeleteDashboardEmailSubscriptionsRequest request) { + String path = "/api/2.0/settings/types/dashboard_email_subscriptions/names/default"; + try { + Request req = new Request("DELETE", path); + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + return apiClient.execute(req, DeleteDashboardEmailSubscriptionsResponse.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } + + @Override + public DashboardEmailSubscriptions get(GetDashboardEmailSubscriptionsRequest request) { + String path = "/api/2.0/settings/types/dashboard_email_subscriptions/names/default"; + try { + Request req = new Request("GET", path); + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + return apiClient.execute(req, DashboardEmailSubscriptions.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } + + @Override + public DashboardEmailSubscriptions update(UpdateDashboardEmailSubscriptionsRequest request) { + String path = "/api/2.0/settings/types/dashboard_email_subscriptions/names/default"; + try { + Request req = new Request("PATCH", path, apiClient.serialize(request)); + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + req.withHeader("Content-Type", "application/json"); + return apiClient.execute(req, DashboardEmailSubscriptions.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsService.java new file mode 100755 index 000000000..1dbc66188 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DashboardEmailSubscriptionsService.java @@ -0,0 +1,40 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
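Taken together, the new classes support the read -> update etag handshake described in the field docs: fetch the setting, then send its etag back with the PATCH. A sketch, assuming the SettingsAPI accessor follows the same naming pattern as the existing sub-API accessors and that `boolean_val` is the field-mask path; the desired value is a placeholder:

    import com.databricks.sdk.WorkspaceClient;
    import com.databricks.sdk.service.settings.BooleanMessage;
    import com.databricks.sdk.service.settings.DashboardEmailSubscriptions;
    import com.databricks.sdk.service.settings.GetDashboardEmailSubscriptionsRequest;

    public class DashboardEmailSubscriptionsExample {
      public static void main(String[] args) {
        WorkspaceClient w = new WorkspaceClient();
        // Read first so the etag can be carried into the update.
        DashboardEmailSubscriptions current =
            w.settings()
                .DashboardEmailSubscriptions()
                .get(new GetDashboardEmailSubscriptionsRequest());
        DashboardEmailSubscriptions updated =
            w.settings()
                .DashboardEmailSubscriptions()
                .update(
                    true, // allowMissing
                    new DashboardEmailSubscriptions()
                        .setBooleanVal(new BooleanMessage().setValue(false))
                        .setEtag(current.getEtag()),
                    "boolean_val"); // fieldMask path (assumed)
        System.out.println(updated.getEtag());
      }
    }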
+package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; + +/** + * Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace can + * send subscription emails containing PDFs and/or images of the dashboard. By default, this setting + * is enabled (set to `true`). + * + *

<p>This is the high-level interface that contains generated methods. + * + *

<p>Evolving: this interface is under development. Method signatures may change. + */ +@Generated +public interface DashboardEmailSubscriptionsService { + /** + * Delete the Dashboard Email Subscriptions setting. + * + *

<p>Reverts the Dashboard Email Subscriptions setting to its default value. + */ + DeleteDashboardEmailSubscriptionsResponse delete( + DeleteDashboardEmailSubscriptionsRequest deleteDashboardEmailSubscriptionsRequest); + + /** + * Get the Dashboard Email Subscriptions setting. + * + *

<p>Gets the Dashboard Email Subscriptions setting. + */ + DashboardEmailSubscriptions get( + GetDashboardEmailSubscriptionsRequest getDashboardEmailSubscriptionsRequest); + + /** + * Update the Dashboard Email Subscriptions setting. + * + *

Updates the Dashboard Email Subscriptions setting. + */ + DashboardEmailSubscriptions update( + UpdateDashboardEmailSubscriptionsRequest updateDashboardEmailSubscriptionsRequest); +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsRequest.java new file mode 100755 index 000000000..8d3d36912 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsRequest.java @@ -0,0 +1,54 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +/** Delete the Dashboard Email Subscriptions setting */ +@Generated +public class DeleteDashboardEmailSubscriptionsRequest { + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. It is strongly suggested that systems make use of the etag in the read + * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + * an etag from a GET request, and pass it with the DELETE request to identify the rule set + * version you are deleting. + */ + @JsonIgnore + @QueryParam("etag") + private String etag; + + public DeleteDashboardEmailSubscriptionsRequest setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeleteDashboardEmailSubscriptionsRequest that = (DeleteDashboardEmailSubscriptionsRequest) o; + return Objects.equals(etag, that.etag); + } + + @Override + public int hashCode() { + return Objects.hash(etag); + } + + @Override + public String toString() { + return new ToStringer(DeleteDashboardEmailSubscriptionsRequest.class) + .add("etag", etag) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsResponse.java new file mode 100755 index 000000000..1cfa511ae --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteDashboardEmailSubscriptionsResponse.java @@ -0,0 +1,52 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** The etag is returned. */ +@Generated +public class DeleteDashboardEmailSubscriptionsResponse { + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. 
It is strongly suggested that systems make use of the etag in the read + * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + * an etag from a GET request, and pass it with the DELETE request to identify the rule set + * version you are deleting. + */ + @JsonProperty("etag") + private String etag; + + public DeleteDashboardEmailSubscriptionsResponse setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeleteDashboardEmailSubscriptionsResponse that = (DeleteDashboardEmailSubscriptionsResponse) o; + return Objects.equals(etag, that.etag); + } + + @Override + public int hashCode() { + return Objects.hash(etag); + } + + @Override + public String toString() { + return new ToStringer(DeleteDashboardEmailSubscriptionsResponse.class) + .add("etag", etag) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadRequest.java new file mode 100755 index 000000000..3a5c3214a --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadRequest.java @@ -0,0 +1,52 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +/** Delete the SQL Results Download setting */ +@Generated +public class DeleteSqlResultsDownloadRequest { + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. It is strongly suggested that systems make use of the etag in the read + * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + * an etag from a GET request, and pass it with the DELETE request to identify the rule set + * version you are deleting. 
+ */ + @JsonIgnore + @QueryParam("etag") + private String etag; + + public DeleteSqlResultsDownloadRequest setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeleteSqlResultsDownloadRequest that = (DeleteSqlResultsDownloadRequest) o; + return Objects.equals(etag, that.etag); + } + + @Override + public int hashCode() { + return Objects.hash(etag); + } + + @Override + public String toString() { + return new ToStringer(DeleteSqlResultsDownloadRequest.class).add("etag", etag).toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadResponse.java new file mode 100755 index 000000000..bc2957210 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/DeleteSqlResultsDownloadResponse.java @@ -0,0 +1,50 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** The etag is returned. */ +@Generated +public class DeleteSqlResultsDownloadResponse { + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. It is strongly suggested that systems make use of the etag in the read + * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + * an etag from a GET request, and pass it with the DELETE request to identify the rule set + * version you are deleting. + */ + @JsonProperty("etag") + private String etag; + + public DeleteSqlResultsDownloadResponse setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + DeleteSqlResultsDownloadResponse that = (DeleteSqlResultsDownloadResponse) o; + return Objects.equals(etag, that.etag); + } + + @Override + public int hashCode() { + return Objects.hash(etag); + } + + @Override + public String toString() { + return new ToStringer(DeleteSqlResultsDownloadResponse.class).add("etag", etag).toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDashboardEmailSubscriptionsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDashboardEmailSubscriptionsRequest.java new file mode 100755 index 000000000..0c545ca9b --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetDashboardEmailSubscriptionsRequest.java @@ -0,0 +1,52 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
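The delete request/response pair supports the same optimistic-concurrency handshake when reverting the setting to its default. A sketch, continuing the accessor-name assumption above:

    import com.databricks.sdk.WorkspaceClient;
    import com.databricks.sdk.service.settings.DeleteDashboardEmailSubscriptionsRequest;
    import com.databricks.sdk.service.settings.DeleteDashboardEmailSubscriptionsResponse;
    import com.databricks.sdk.service.settings.GetDashboardEmailSubscriptionsRequest;

    public class RevertSettingExample {
      public static void main(String[] args) {
        WorkspaceClient w = new WorkspaceClient();
        String etag =
            w.settings()
                .DashboardEmailSubscriptions()
                .get(new GetDashboardEmailSubscriptionsRequest())
                .getEtag();
        // Passing the last-seen etag makes a racing concurrent write fail the
        // delete instead of being silently clobbered.
        DeleteDashboardEmailSubscriptionsResponse resp =
            w.settings()
                .DashboardEmailSubscriptions()
                .delete(new DeleteDashboardEmailSubscriptionsRequest().setEtag(etag));
        System.out.println(resp.getEtag());
      }
    }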
+ +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +/** Get the Dashboard Email Subscriptions setting */ +@Generated +public class GetDashboardEmailSubscriptionsRequest { + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. It is strongly suggested that systems make use of the etag in the read + * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + * an etag from a GET request, and pass it with the DELETE request to identify the rule set + * version you are deleting. + */ + @JsonIgnore + @QueryParam("etag") + private String etag; + + public GetDashboardEmailSubscriptionsRequest setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetDashboardEmailSubscriptionsRequest that = (GetDashboardEmailSubscriptionsRequest) o; + return Objects.equals(etag, that.etag); + } + + @Override + public int hashCode() { + return Objects.hash(etag); + } + + @Override + public String toString() { + return new ToStringer(GetDashboardEmailSubscriptionsRequest.class).add("etag", etag).toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetSqlResultsDownloadRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetSqlResultsDownloadRequest.java new file mode 100755 index 000000000..c9cb75cc7 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetSqlResultsDownloadRequest.java @@ -0,0 +1,52 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.QueryParam; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonIgnore; +import java.util.Objects; + +/** Get the SQL Results Download setting */ +@Generated +public class GetSqlResultsDownloadRequest { + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. It is strongly suggested that systems make use of the etag in the read + * -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + * an etag from a GET request, and pass it with the DELETE request to identify the rule set + * version you are deleting. 
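// Editor's aside: the same etag can also be sent on the GET itself, which asks the server
// for a version at least as fresh as the one previously observed; a sketch under the same
// assumed apiClient wiring.
SqlResultsDownloadAPI api = new SqlResultsDownloadAPI(apiClient);
String lastSeen = api.get(new GetSqlResultsDownloadRequest()).getEtag();
SqlResultsDownload fresh = api.get(new GetSqlResultsDownloadRequest().setEtag(lastSeen));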
+ */ + @JsonIgnore + @QueryParam("etag") + private String etag; + + public GetSqlResultsDownloadRequest setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + GetSqlResultsDownloadRequest that = (GetSqlResultsDownloadRequest) o; + return Objects.equals(etag, that.etag); + } + + @Override + public int hashCode() { + return Objects.hash(etag); + } + + @Override + public String toString() { + return new ToStringer(GetSqlResultsDownloadRequest.class).add("etag", etag).toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetWorkspaceNetworkOptionRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetWorkspaceNetworkOptionRequest.java index 5344f3325..c3f99bf5d 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetWorkspaceNetworkOptionRequest.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/GetWorkspaceNetworkOptionRequest.java @@ -7,7 +7,7 @@ import com.fasterxml.jackson.annotation.JsonIgnore; import java.util.Objects; -/** Get workspace network configuration */ +/** Get workspace network option */ @Generated public class GetWorkspaceNetworkOptionRequest { /** The workspace ID. */ diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java index f5eb3d0a5..16fa226ef 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java @@ -21,6 +21,8 @@ public class SettingsAPI { private ComplianceSecurityProfileAPI complianceSecurityProfileAPI; + private DashboardEmailSubscriptionsAPI dashboardEmailSubscriptionsAPI; + private DefaultNamespaceAPI defaultNamespaceAPI; private DisableLegacyAccessAPI disableLegacyAccessAPI; @@ -39,6 +41,8 @@ public class SettingsAPI { private RestrictWorkspaceAdminsAPI restrictWorkspaceAdminsAPI; + private SqlResultsDownloadAPI sqlResultsDownloadAPI; + /** Regular-use constructor */ public SettingsAPI(ApiClient apiClient) { impl = new SettingsImpl(apiClient); @@ -52,6 +56,8 @@ public SettingsAPI(ApiClient apiClient) { complianceSecurityProfileAPI = new ComplianceSecurityProfileAPI(apiClient); + dashboardEmailSubscriptionsAPI = new DashboardEmailSubscriptionsAPI(apiClient); + defaultNamespaceAPI = new DefaultNamespaceAPI(apiClient); disableLegacyAccessAPI = new DisableLegacyAccessAPI(apiClient); @@ -69,6 +75,8 @@ public SettingsAPI(ApiClient apiClient) { llmProxyPartnerPoweredWorkspaceAPI = new LlmProxyPartnerPoweredWorkspaceAPI(apiClient); restrictWorkspaceAdminsAPI = new RestrictWorkspaceAdminsAPI(apiClient); + + sqlResultsDownloadAPI = new SqlResultsDownloadAPI(apiClient); } /** Constructor for mocks */ @@ -99,6 +107,14 @@ public ComplianceSecurityProfileAPI ComplianceSecurityProfile() { return complianceSecurityProfileAPI; } + /** + * Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace + * can send subscription emails containing PDFs and/or images of the dashboard. 
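// Editor's aside: a sketch of reaching the new setting through the SettingsAPI accessor
// wired above. The get(...) call mirrors the other per-workspace settings APIs and is an
// assumption here, since DashboardEmailSubscriptionsAPI's methods are not part of this hunk.
SettingsAPI settings = new SettingsAPI(apiClient);
DashboardEmailSubscriptions current =
    settings.DashboardEmailSubscriptions().get(new GetDashboardEmailSubscriptionsRequest());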
+ */ + public DashboardEmailSubscriptionsAPI DashboardEmailSubscriptions() { + return dashboardEmailSubscriptionsAPI; + } + /** * The default namespace setting API allows users to configure the default namespace for a * Databricks workspace. @@ -149,6 +165,14 @@ public RestrictWorkspaceAdminsAPI RestrictWorkspaceAdmins() { return restrictWorkspaceAdminsAPI; } + /** + * Controls whether users within the workspace are allowed to download results from the SQL Editor + * and AI/BI Dashboards UIs. + */ + public SqlResultsDownloadAPI SqlResultsDownload() { + return sqlResultsDownloadAPI; + } + public SettingsService impl() { return impl; } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownload.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownload.java new file mode 100755 index 000000000..b15b7f669 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownload.java @@ -0,0 +1,86 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +@Generated +public class SqlResultsDownload { + /** */ + @JsonProperty("boolean_val") + private BooleanMessage booleanVal; + + /** + * etag used for versioning. The response is at least as fresh as the eTag provided. This is used + * for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + * overwriting each other. It is strongly suggested that systems make use of the etag in the read + * -> update pattern to perform setting updates in order to avoid race conditions. That is, get an + * etag from a GET request, and pass it with the PATCH request to identify the setting version you + * are updating. + */ + @JsonProperty("etag") + private String etag; + + /** + * Name of the corresponding setting. This field is populated in the response, but it will not be + * respected even if it's set in the request body. The setting name in the path parameter will be + * respected instead. Setting name is required to be 'default' if the setting only has one + * instance per workspace. 
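// Editor's aside: the read -> update counterpart of the etag pattern, as a sketch. The
// update(allowMissing, setting, fieldMask) overload appears later in this patch; the
// BooleanMessage setter and the "boolean_val" mask value are assumptions based on the
// wire name of the field above.
SqlResultsDownloadAPI api = new SqlResultsDownloadAPI(apiClient);
SqlResultsDownload setting = api.get(new GetSqlResultsDownloadRequest()); // etag rides along
setting.setBooleanVal(new BooleanMessage().setValue(false)); // e.g. disable downloads
SqlResultsDownload updated = api.update(true, setting, "boolean_val"); // allowMissing = true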
+ */ + @JsonProperty("setting_name") + private String settingName; + + public SqlResultsDownload setBooleanVal(BooleanMessage booleanVal) { + this.booleanVal = booleanVal; + return this; + } + + public BooleanMessage getBooleanVal() { + return booleanVal; + } + + public SqlResultsDownload setEtag(String etag) { + this.etag = etag; + return this; + } + + public String getEtag() { + return etag; + } + + public SqlResultsDownload setSettingName(String settingName) { + this.settingName = settingName; + return this; + } + + public String getSettingName() { + return settingName; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + SqlResultsDownload that = (SqlResultsDownload) o; + return Objects.equals(booleanVal, that.booleanVal) + && Objects.equals(etag, that.etag) + && Objects.equals(settingName, that.settingName); + } + + @Override + public int hashCode() { + return Objects.hash(booleanVal, etag, settingName); + } + + @Override + public String toString() { + return new ToStringer(SqlResultsDownload.class) + .add("booleanVal", booleanVal) + .add("etag", etag) + .add("settingName", settingName) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadAPI.java new file mode 100755 index 000000000..7bc8f49d1 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadAPI.java @@ -0,0 +1,68 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.core.ApiClient; +import com.databricks.sdk.support.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Controls whether users within the workspace are allowed to download results from the SQL Editor + * and AI/BI Dashboards UIs. By default, this setting is enabled (set to `true`) + */ +@Generated +public class SqlResultsDownloadAPI { + private static final Logger LOG = LoggerFactory.getLogger(SqlResultsDownloadAPI.class); + + private final SqlResultsDownloadService impl; + + /** Regular-use constructor */ + public SqlResultsDownloadAPI(ApiClient apiClient) { + impl = new SqlResultsDownloadImpl(apiClient); + } + + /** Constructor for mocks */ + public SqlResultsDownloadAPI(SqlResultsDownloadService mock) { + impl = mock; + } + + /** + * Delete the SQL Results Download setting. + * + *
<p>
Reverts the SQL Results Download setting to its default value. + */ + public DeleteSqlResultsDownloadResponse delete(DeleteSqlResultsDownloadRequest request) { + return impl.delete(request); + } + + /** + * Get the SQL Results Download setting. + * + *
<p>
Gets the SQL Results Download setting. + */ + public SqlResultsDownload get(GetSqlResultsDownloadRequest request) { + return impl.get(request); + } + + public SqlResultsDownload update( + boolean allowMissing, SqlResultsDownload setting, String fieldMask) { + return update( + new UpdateSqlResultsDownloadRequest() + .setAllowMissing(allowMissing) + .setSetting(setting) + .setFieldMask(fieldMask)); + } + + /** + * Update the SQL Results Download setting. + * + *
<p>
Updates the SQL Results Download setting. + */ + public SqlResultsDownload update(UpdateSqlResultsDownloadRequest request) { + return impl.update(request); + } + + public SqlResultsDownloadService impl() { + return impl; + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadImpl.java new file mode 100755 index 000000000..db09dc70e --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadImpl.java @@ -0,0 +1,58 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.core.ApiClient; +import com.databricks.sdk.core.DatabricksException; +import com.databricks.sdk.core.http.Request; +import com.databricks.sdk.support.Generated; +import java.io.IOException; + +/** Package-local implementation of SqlResultsDownload */ +@Generated +class SqlResultsDownloadImpl implements SqlResultsDownloadService { + private final ApiClient apiClient; + + public SqlResultsDownloadImpl(ApiClient apiClient) { + this.apiClient = apiClient; + } + + @Override + public DeleteSqlResultsDownloadResponse delete(DeleteSqlResultsDownloadRequest request) { + String path = "/api/2.0/settings/types/sql_results_download/names/default"; + try { + Request req = new Request("DELETE", path); + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + return apiClient.execute(req, DeleteSqlResultsDownloadResponse.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } + + @Override + public SqlResultsDownload get(GetSqlResultsDownloadRequest request) { + String path = "/api/2.0/settings/types/sql_results_download/names/default"; + try { + Request req = new Request("GET", path); + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + return apiClient.execute(req, SqlResultsDownload.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } + + @Override + public SqlResultsDownload update(UpdateSqlResultsDownloadRequest request) { + String path = "/api/2.0/settings/types/sql_results_download/names/default"; + try { + Request req = new Request("PATCH", path, apiClient.serialize(request)); + ApiClient.setQuery(req, request); + req.withHeader("Accept", "application/json"); + req.withHeader("Content-Type", "application/json"); + return apiClient.execute(req, SqlResultsDownload.class); + } catch (IOException e) { + throw new DatabricksException("IO error: " + e.getMessage(), e); + } + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadService.java new file mode 100755 index 000000000..0929fba03 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SqlResultsDownloadService.java @@ -0,0 +1,37 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; + +/** + * Controls whether users within the workspace are allowed to download results from the SQL Editor + * and AI/BI Dashboards UIs. By default, this setting is enabled (set to `true`) + * + *
<p>
This is the high-level interface, that contains generated methods. + * + *
<p>
Evolving: this interface is under development. Method signatures may change. + */ +@Generated +public interface SqlResultsDownloadService { + /** + * Delete the SQL Results Download setting. + * + *
<p>
Reverts the SQL Results Download setting to its default value. + */ + DeleteSqlResultsDownloadResponse delete( + DeleteSqlResultsDownloadRequest deleteSqlResultsDownloadRequest); + + /** + * Get the SQL Results Download setting. + * + *
<p>
Gets the SQL Results Download setting. + */ + SqlResultsDownload get(GetSqlResultsDownloadRequest getSqlResultsDownloadRequest); + + /** + * Update the SQL Results Download setting. + * + *
<p>
Updates the SQL Results Download setting. + */ + SqlResultsDownload update(UpdateSqlResultsDownloadRequest updateSqlResultsDownloadRequest); +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDashboardEmailSubscriptionsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDashboardEmailSubscriptionsRequest.java new file mode 100755 index 000000000..37613e037 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateDashboardEmailSubscriptionsRequest.java @@ -0,0 +1,85 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** Details required to update a setting. */ +@Generated +public class UpdateDashboardEmailSubscriptionsRequest { + /** This should always be set to true for Settings API. Added for AIP compliance. */ + @JsonProperty("allow_missing") + private Boolean allowMissing; + + /** + * The field mask must be a single string, with multiple fields separated by commas (no spaces). + * The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields + * (e.g., `author.given_name`). Specification of elements in sequence or map fields is not + * allowed, as only the entire collection field can be specified. Field names must exactly match + * the resource field names. + * + *
<p>
A field mask of `*` indicates full replacement. It’s recommended to always explicitly list + * the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if + * the API changes in the future. + */ + @JsonProperty("field_mask") + private String fieldMask; + + /** */ + @JsonProperty("setting") + private DashboardEmailSubscriptions setting; + + public UpdateDashboardEmailSubscriptionsRequest setAllowMissing(Boolean allowMissing) { + this.allowMissing = allowMissing; + return this; + } + + public Boolean getAllowMissing() { + return allowMissing; + } + + public UpdateDashboardEmailSubscriptionsRequest setFieldMask(String fieldMask) { + this.fieldMask = fieldMask; + return this; + } + + public String getFieldMask() { + return fieldMask; + } + + public UpdateDashboardEmailSubscriptionsRequest setSetting(DashboardEmailSubscriptions setting) { + this.setting = setting; + return this; + } + + public DashboardEmailSubscriptions getSetting() { + return setting; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + UpdateDashboardEmailSubscriptionsRequest that = (UpdateDashboardEmailSubscriptionsRequest) o; + return Objects.equals(allowMissing, that.allowMissing) + && Objects.equals(fieldMask, that.fieldMask) + && Objects.equals(setting, that.setting); + } + + @Override + public int hashCode() { + return Objects.hash(allowMissing, fieldMask, setting); + } + + @Override + public String toString() { + return new ToStringer(UpdateDashboardEmailSubscriptionsRequest.class) + .add("allowMissing", allowMissing) + .add("fieldMask", fieldMask) + .add("setting", setting) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateSqlResultsDownloadRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateSqlResultsDownloadRequest.java new file mode 100755 index 000000000..a0d263a52 --- /dev/null +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateSqlResultsDownloadRequest.java @@ -0,0 +1,85 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package com.databricks.sdk.service.settings; + +import com.databricks.sdk.support.Generated; +import com.databricks.sdk.support.ToStringer; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + +/** Details required to update a setting. */ +@Generated +public class UpdateSqlResultsDownloadRequest { + /** This should always be set to true for Settings API. Added for AIP compliance. */ + @JsonProperty("allow_missing") + private Boolean allowMissing; + + /** + * The field mask must be a single string, with multiple fields separated by commas (no spaces). + * The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields + * (e.g., `author.given_name`). Specification of elements in sequence or map fields is not + * allowed, as only the entire collection field can be specified. Field names must exactly match + * the resource field names. + * + *
<p>
A field mask of `*` indicates full replacement. It’s recommended to always explicitly list + * the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if + * the API changes in the future. + */ + @JsonProperty("field_mask") + private String fieldMask; + + /** */ + @JsonProperty("setting") + private SqlResultsDownload setting; + + public UpdateSqlResultsDownloadRequest setAllowMissing(Boolean allowMissing) { + this.allowMissing = allowMissing; + return this; + } + + public Boolean getAllowMissing() { + return allowMissing; + } + + public UpdateSqlResultsDownloadRequest setFieldMask(String fieldMask) { + this.fieldMask = fieldMask; + return this; + } + + public String getFieldMask() { + return fieldMask; + } + + public UpdateSqlResultsDownloadRequest setSetting(SqlResultsDownload setting) { + this.setting = setting; + return this; + } + + public SqlResultsDownload getSetting() { + return setting; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + UpdateSqlResultsDownloadRequest that = (UpdateSqlResultsDownloadRequest) o; + return Objects.equals(allowMissing, that.allowMissing) + && Objects.equals(fieldMask, that.fieldMask) + && Objects.equals(setting, that.setting); + } + + @Override + public int hashCode() { + return Objects.hash(allowMissing, fieldMask, setting); + } + + @Override + public String toString() { + return new ToStringer(UpdateSqlResultsDownloadRequest.class) + .add("allowMissing", allowMissing) + .add("fieldMask", fieldMask) + .add("setting", setting) + .toString(); + } +} diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java index 84c39c6b0..3dbcb2ba5 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/UpdateWorkspaceNetworkOptionRequest.java @@ -8,7 +8,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import java.util.Objects; -/** Update workspace network configuration */ +/** Update workspace network option */ @Generated public class UpdateWorkspaceNetworkOptionRequest { /** The workspace ID. */ diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationAPI.java index 90fe8feba..825cf4f15 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationAPI.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationAPI.java @@ -7,11 +7,12 @@ import org.slf4j.LoggerFactory; /** - * These APIs allow configuration of network settings for Databricks workspaces. Each workspace is - * always associated with exactly one network policy that controls which network destinations can be - * accessed from the Databricks environment. By default, workspaces are associated with the - * 'default-policy' network policy. You cannot create or delete a workspace's network configuration, - * only update it to associate the workspace with a different policy. 
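// Editor's aside: a sketch of the renamed workspace network option flow. The
// getWorkspaceNetworkOptionRpc(workspaceId) convenience overload is shown below; the
// setters on WorkspaceNetworkOption and the update request are assumptions, and the
// workspace ID is hypothetical.
WorkspaceNetworkConfigurationAPI netConf = new WorkspaceNetworkConfigurationAPI(apiClient);
WorkspaceNetworkOption option = netConf.getWorkspaceNetworkOptionRpc(1234567890L);
option.setNetworkPolicyId("default-policy"); // revert to the default policy
netConf.updateWorkspaceNetworkOptionRpc(
    new UpdateWorkspaceNetworkOptionRequest()
        .setWorkspaceId(1234567890L)
        .setWorkspaceNetworkOption(option));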
+ * These APIs allow configuration of network settings for Databricks workspaces by selecting which + * network policy to associate with the workspace. Each workspace is always associated with exactly + * one network policy that controls which network destinations can be accessed from the Databricks + * environment. By default, workspaces are associated with the 'default-policy' network policy. You + * cannot create or delete a workspace's network option, only update it to associate the workspace + * with a different policy */ @Generated public class WorkspaceNetworkConfigurationAPI { @@ -35,10 +36,10 @@ public WorkspaceNetworkOption getWorkspaceNetworkOptionRpc(long workspaceId) { } /** - * Get workspace network configuration. + * Get workspace network option. * - *
<p>
Gets the network configuration for a workspace. Every workspace has exactly one network - * policy binding, with 'default-policy' used if no explicit assignment exists. + *
<p>
Gets the network option for a workspace. Every workspace has exactly one network policy + * binding, with 'default-policy' used if no explicit assignment exists. */ public WorkspaceNetworkOption getWorkspaceNetworkOptionRpc( GetWorkspaceNetworkOptionRequest request) { @@ -54,11 +55,11 @@ public WorkspaceNetworkOption updateWorkspaceNetworkOptionRpc( } /** - * Update workspace network configuration. + * Update workspace network option. * - *
<p>
Updates the network configuration for a workspace. This operation associates the workspace - * with the specified network policy. To revert to the default policy, specify 'default-policy' as - * the network_policy_id. + *
<p>
Updates the network option for a workspace. This operation associates the workspace with the + * specified network policy. To revert to the default policy, specify 'default-policy' as the + * network_policy_id. */ public WorkspaceNetworkOption updateWorkspaceNetworkOptionRpc( UpdateWorkspaceNetworkOptionRequest request) { diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationService.java index 7c414aa6d..0a45ac324 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationService.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/WorkspaceNetworkConfigurationService.java @@ -4,11 +4,12 @@ import com.databricks.sdk.support.Generated; /** - * These APIs allow configuration of network settings for Databricks workspaces. Each workspace is - * always associated with exactly one network policy that controls which network destinations can be - * accessed from the Databricks environment. By default, workspaces are associated with the - * 'default-policy' network policy. You cannot create or delete a workspace's network configuration, - * only update it to associate the workspace with a different policy. + * These APIs allow configuration of network settings for Databricks workspaces by selecting which + * network policy to associate with the workspace. Each workspace is always associated with exactly + * one network policy that controls which network destinations can be accessed from the Databricks + * environment. By default, workspaces are associated with the 'default-policy' network policy. You + * cannot create or delete a workspace's network option, only update it to associate the workspace + * with a different policy * *
<p>
This is the high-level interface, that contains generated methods. * @@ -17,20 +18,20 @@ @Generated public interface WorkspaceNetworkConfigurationService { /** - * Get workspace network configuration. + * Get workspace network option. * - *
<p>
Gets the network configuration for a workspace. Every workspace has exactly one network - * policy binding, with 'default-policy' used if no explicit assignment exists. + *
<p>
Gets the network option for a workspace. Every workspace has exactly one network policy + * binding, with 'default-policy' used if no explicit assignment exists. */ WorkspaceNetworkOption getWorkspaceNetworkOptionRpc( GetWorkspaceNetworkOptionRequest getWorkspaceNetworkOptionRequest); /** - * Update workspace network configuration. + * Update workspace network option. * - *
<p>
Updates the network configuration for a workspace. This operation associates the workspace - * with the specified network policy. To revert to the default policy, specify 'default-policy' as - * the network_policy_id. + *
<p>
Updates the network option for a workspace. This operation associates the workspace with the + * specified network policy. To revert to the default policy, specify 'default-policy' as the + * network_policy_id. */ WorkspaceNetworkOption updateWorkspaceNetworkOptionRpc( UpdateWorkspaceNetworkOptionRequest updateWorkspaceNetworkOptionRequest); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sharing/AuthenticationType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sharing/AuthenticationType.java index 6b48f36b2..05d2bd9c4 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sharing/AuthenticationType.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sharing/AuthenticationType.java @@ -9,5 +9,6 @@ public enum AuthenticationType { DATABRICKS, OAUTH_CLIENT_CREDENTIALS, + OIDC_FEDERATION, TOKEN, } diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2API.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2API.java index 756895733..e868999eb 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2API.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2API.java @@ -7,7 +7,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -/** TODO: Add description */ +/** New version of SQL Alerts */ @Generated public class AlertsV2API { private static final Logger LOG = LoggerFactory.getLogger(AlertsV2API.class); diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2Service.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2Service.java index f8740fa39..cb9ec351a 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2Service.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/AlertsV2Service.java @@ -4,7 +4,7 @@ import com.databricks.sdk.support.Generated; /** - * TODO: Add description + * New version of SQL Alerts * *
<p>
This is the high-level interface, that contains generated methods. * From 3f85c2211631400d89522edfe1f82765ab6d13d8 Mon Sep 17 00:00:00 2001 From: Hector Castejon Diaz Date: Thu, 19 Jun 2025 12:08:59 +0000 Subject: [PATCH 4/8] POM --- databricks-sdk-java/pom.xml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/databricks-sdk-java/pom.xml b/databricks-sdk-java/pom.xml index cdcea322f..0eed8fa0c 100644 --- a/databricks-sdk-java/pom.xml +++ b/databricks-sdk-java/pom.xml @@ -103,5 +103,16 @@ jackson-datatype-jsr310 ${jackson.version} + + + com.google.protobuf + protobuf-java + 3.25.1 + + + com.google.protobuf + protobuf-java-util + 3.25.1 + From d397983151427daf3a2ea53c6f08594fdd65df39 Mon Sep 17 00:00:00 2001 From: Hector Castejon Diaz Date: Fri, 20 Jun 2025 07:37:23 +0000 Subject: [PATCH 5/8] Tests --- .../DurationDeserializerTest.java | 36 +++++++++ .../serialization/DurationSerializerTest.java | 76 +++++++++++++++++++ .../FieldMaskDeserializerTest.java | 36 +++++++++ .../FieldMaskSerializerTest.java | 37 +++++++++ .../TimestampDeserializerTest.java | 48 ++++++++++++ .../TimestampSerializerTest.java | 50 ++++++++++++ 6 files changed, 283 insertions(+) create mode 100644 databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationDeserializerTest.java create mode 100644 databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationSerializerTest.java create mode 100644 databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskDeserializerTest.java create mode 100644 databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskSerializerTest.java create mode 100644 databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampDeserializerTest.java create mode 100644 databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampSerializerTest.java diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationDeserializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationDeserializerTest.java new file mode 100644 index 000000000..81fd35665 --- /dev/null +++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationDeserializerTest.java @@ -0,0 +1,36 @@ +package com.databricks.sdk.core.serialization; + +import static org.junit.jupiter.api.Assertions.*; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.google.protobuf.Duration; +import com.google.protobuf.util.Durations; +import org.junit.jupiter.api.Test; + +public class DurationDeserializerTest { + private static class TestClass { + @JsonDeserialize(using = DurationDeserializer.class) + private Duration duration; + + public Duration getDuration() { + return duration; + } + } + + @Test + public void testDurationDeserialization() throws Exception { + String json = "{\"duration\":\"3.500s\"}"; + ObjectMapper mapper = new ObjectMapper(); + TestClass obj = mapper.readValue(json, TestClass.class); + assertEquals(Durations.parse("3.500s"), obj.getDuration()); + } + + @Test + public void testNullDurationDeserialization() throws Exception { + String json = "{\"duration\":null}"; + ObjectMapper mapper = new ObjectMapper(); + TestClass obj = mapper.readValue(json, TestClass.class); + assertNull(obj.getDuration()); + } +} \ No newline at end of file diff --git 
a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationSerializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationSerializerTest.java new file mode 100644 index 000000000..0500d5c36 --- /dev/null +++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationSerializerTest.java @@ -0,0 +1,76 @@ +package com.databricks.sdk.core.serialization; + +import static org.junit.jupiter.api.Assertions.*; + +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.google.protobuf.Duration; +import org.junit.jupiter.api.Test; + +public class DurationSerializerTest { + + private static class TestClass { + @JsonSerialize(using = DurationSerializer.class) + private Duration duration; + + public TestClass(Duration duration) { + this.duration = duration; + } + } + + @Test + public void testDurationSerialization() throws Exception { + // Create a Duration of 3 seconds + Duration duration = Duration.newBuilder().setSeconds(3).build(); + + TestClass testObject = new TestClass(duration); + + ObjectMapper mapper = new ObjectMapper(); + String json = mapper.writeValueAsString(testObject); + + // The Duration should be serialized as "3s" by Durations.toString() + assertEquals("{\"duration\":\"3s\"}", json); + } + + @Test + public void testDurationSerializationWithNanos() throws Exception { + // Create a Duration of 3.5 seconds (3 seconds + 500000000 nanoseconds) + Duration duration = Duration.newBuilder() + .setSeconds(3) + .setNanos(500000000) + .build(); + + TestClass testObject = new TestClass(duration); + + ObjectMapper mapper = new ObjectMapper(); + String json = mapper.writeValueAsString(testObject); + + // The Duration should be serialized as "3.500s" by Durations.toString() + assertEquals("{\"duration\":\"3.500s\"}", json); + } + + @Test + public void testNullDurationSerialization() throws Exception { + TestClass testObject = new TestClass(null); + + ObjectMapper mapper = new ObjectMapper(); + String json = mapper.writeValueAsString(testObject); + + // Null duration should be serialized as null + assertEquals("{\"duration\":null}", json); + } + + @Test + public void testZeroDurationSerialization() throws Exception { + // Create a Duration of 0 seconds + Duration duration = Duration.newBuilder().setSeconds(0).build(); + + TestClass testObject = new TestClass(duration); + + ObjectMapper mapper = new ObjectMapper(); + String json = mapper.writeValueAsString(testObject); + + // The Duration should be serialized as "0s" by Durations.toString() + assertEquals("{\"duration\":\"0s\"}", json); + } +} \ No newline at end of file diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskDeserializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskDeserializerTest.java new file mode 100644 index 000000000..f27152ae9 --- /dev/null +++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskDeserializerTest.java @@ -0,0 +1,36 @@ +package com.databricks.sdk.core.serialization; + +import static org.junit.jupiter.api.Assertions.*; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.google.protobuf.FieldMask; +import com.google.protobuf.util.FieldMaskUtil; +import org.junit.jupiter.api.Test; + +public class FieldMaskDeserializerTest { + private static class TestClass { + 
@JsonDeserialize(using = FieldMaskDeserializer.class) + private FieldMask fieldMask; + + public FieldMask getFieldMask() { + return fieldMask; + } + } + + @Test + public void testFieldMaskDeserialization() throws Exception { + String json = "{\"fieldMask\":\"foo,bar.baz\"}"; + ObjectMapper mapper = new ObjectMapper(); + TestClass obj = mapper.readValue(json, TestClass.class); + assertEquals(FieldMaskUtil.fromStringList(java.util.Arrays.asList("foo", "bar.baz")), obj.getFieldMask()); + } + + @Test + public void testNullFieldMaskDeserialization() throws Exception { + String json = "{\"fieldMask\":null}"; + ObjectMapper mapper = new ObjectMapper(); + TestClass obj = mapper.readValue(json, TestClass.class); + assertNull(obj.getFieldMask()); + } +} \ No newline at end of file diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskSerializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskSerializerTest.java new file mode 100644 index 000000000..b7ebed54b --- /dev/null +++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskSerializerTest.java @@ -0,0 +1,37 @@ +package com.databricks.sdk.core.serialization; + +import static org.junit.jupiter.api.Assertions.*; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.google.protobuf.FieldMask; +import com.google.protobuf.util.FieldMaskUtil; +import org.junit.jupiter.api.Test; + +public class FieldMaskSerializerTest { + private static class TestClass { + @JsonSerialize(using = FieldMaskSerializer.class) + private FieldMask fieldMask; + + public TestClass(FieldMask fieldMask) { + this.fieldMask = fieldMask; + } + } + + @Test + public void testFieldMaskSerialization() throws Exception { + FieldMask mask = FieldMaskUtil.fromStringList(java.util.Arrays.asList("foo", "bar.baz")); + TestClass testObject = new TestClass(mask); + ObjectMapper mapper = new ObjectMapper(); + String json = mapper.writeValueAsString(testObject); + assertEquals("{\"fieldMask\":\"foo,bar.baz\"}", json); + } + + @Test + public void testNullFieldMaskSerialization() throws Exception { + TestClass testObject = new TestClass(null); + ObjectMapper mapper = new ObjectMapper(); + String json = mapper.writeValueAsString(testObject); + assertEquals("{\"fieldMask\":null}", json); + } +} \ No newline at end of file diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampDeserializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampDeserializerTest.java new file mode 100644 index 000000000..e86625624 --- /dev/null +++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampDeserializerTest.java @@ -0,0 +1,48 @@ +package com.databricks.sdk.core.serialization; + +import static org.junit.jupiter.api.Assertions.*; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.annotation.JsonDeserialize; +import com.google.protobuf.Timestamp; +import com.google.protobuf.util.Timestamps; +import org.junit.jupiter.api.Test; + +public class TimestampDeserializerTest { + private static class TestClass { + @JsonDeserialize(using = TimestampDeserializer.class) + private Timestamp timestamp; + + public Timestamp getTimestamp() { + return timestamp; + } + } + + @Test + public void testTimestampDeserialization() throws Exception { + String json = 
"{\"timestamp\":\"2024-06-20T12:34:56Z\"}"; + ObjectMapper mapper = new ObjectMapper(); + TestClass obj = mapper.readValue(json, TestClass.class); + assertEquals(Timestamps.parse("2024-06-20T12:34:56Z"), obj.getTimestamp()); + } + + @Test + public void testNullTimestampDeserialization() throws Exception { + String json = "{\"timestamp\":null}"; + ObjectMapper mapper = new ObjectMapper(); + TestClass obj = mapper.readValue(json, TestClass.class); + assertNull(obj.getTimestamp()); + } + + @Test + public void testTimestampDeserializationWithNanos() throws Exception { + String json = "{\"timestamp\":\"2024-06-20T12:34:56.123456789Z\"}"; + ObjectMapper mapper = new ObjectMapper(); + TestClass obj = mapper.readValue(json, TestClass.class); + Timestamp expected = Timestamp.newBuilder() + .setSeconds(1718886896L) + .setNanos(123456789) + .build(); + assertEquals(expected, obj.getTimestamp()); + } +} \ No newline at end of file diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampSerializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampSerializerTest.java new file mode 100644 index 000000000..7a4aa68e1 --- /dev/null +++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampSerializerTest.java @@ -0,0 +1,50 @@ +package com.databricks.sdk.core.serialization; + +import static org.junit.jupiter.api.Assertions.*; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; +import com.google.protobuf.Timestamp; +import com.google.protobuf.util.Timestamps; +import org.junit.jupiter.api.Test; + +public class TimestampSerializerTest { + private static class TestClass { + @JsonSerialize(using = TimestampSerializer.class) + private Timestamp timestamp; + + public TestClass(Timestamp timestamp) { + this.timestamp = timestamp; + } + } + + @Test + public void testTimestampSerialization() throws Exception { + Timestamp ts = Timestamps.parse("2024-06-20T12:34:56Z"); + TestClass testObject = new TestClass(ts); + ObjectMapper mapper = new ObjectMapper(); + String json = mapper.writeValueAsString(testObject); + assertEquals("{\"timestamp\":\"2024-06-20T12:34:56Z\"}", json); + } + + @Test + public void testNullTimestampSerialization() throws Exception { + TestClass testObject = new TestClass(null); + ObjectMapper mapper = new ObjectMapper(); + String json = mapper.writeValueAsString(testObject); + assertEquals("{\"timestamp\":null}", json); + } + + @Test + public void testTimestampSerializationWithNanos() throws Exception { + Timestamp ts = Timestamp.newBuilder() + .setSeconds(1718886896L) // 2024-06-20T12:34:56Z + .setNanos(123456789) + .build(); + TestClass testObject = new TestClass(ts); + ObjectMapper mapper = new ObjectMapper(); + String json = mapper.writeValueAsString(testObject); + // Should match the RFC 3339 format with fractional seconds + assertEquals("{\"timestamp\":\"2024-06-20T12:34:56.123456789Z\"}", json); + } +} \ No newline at end of file From c8bd5af6b25b3fc710ce3153cdac395299bc8af0 Mon Sep 17 00:00:00 2001 From: Hector Castejon Diaz Date: Fri, 20 Jun 2025 07:40:42 +0000 Subject: [PATCH 6/8] Fmt --- .../databricks/sdk/core/DatabricksConfig.java | 8 +++-- .../DurationDeserializerTest.java | 2 +- .../serialization/DurationSerializerTest.java | 31 +++++++++---------- .../FieldMaskDeserializerTest.java | 6 ++-- .../FieldMaskSerializerTest.java | 2 +- .../TimestampDeserializerTest.java | 7 ++--- .../TimestampSerializerTest.java | 
11 ++++--- 7 files changed, 34 insertions(+), 33 deletions(-) diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java index de6548982..df16ebae3 100644 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java @@ -410,13 +410,17 @@ public DatabricksConfig setAzureUseMsi(boolean azureUseMsi) { return this; } - /** @deprecated Use {@link #getAzureUseMsi()} instead. */ + /** + * @deprecated Use {@link #getAzureUseMsi()} instead. + */ @Deprecated() public boolean getAzureUseMSI() { return azureUseMsi; } - /** @deprecated Use {@link #setAzureUseMsi(boolean)} instead. */ + /** + * @deprecated Use {@link #setAzureUseMsi(boolean)} instead. + */ @Deprecated public DatabricksConfig setAzureUseMSI(boolean azureUseMsi) { this.azureUseMsi = azureUseMsi; diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationDeserializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationDeserializerTest.java index 81fd35665..e905303b1 100644 --- a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationDeserializerTest.java +++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationDeserializerTest.java @@ -33,4 +33,4 @@ public void testNullDurationDeserialization() throws Exception { TestClass obj = mapper.readValue(json, TestClass.class); assertNull(obj.getDuration()); } -} \ No newline at end of file +} diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationSerializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationSerializerTest.java index 0500d5c36..8a71993c7 100644 --- a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationSerializerTest.java +++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationSerializerTest.java @@ -2,8 +2,8 @@ import static org.junit.jupiter.api.Assertions.*; -import com.fasterxml.jackson.databind.annotation.JsonSerialize; import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.annotation.JsonSerialize; import com.google.protobuf.Duration; import org.junit.jupiter.api.Test; @@ -22,12 +22,12 @@ public TestClass(Duration duration) { public void testDurationSerialization() throws Exception { // Create a Duration of 3 seconds Duration duration = Duration.newBuilder().setSeconds(3).build(); - + TestClass testObject = new TestClass(duration); - + ObjectMapper mapper = new ObjectMapper(); String json = mapper.writeValueAsString(testObject); - + // The Duration should be serialized as "3s" by Durations.toString() assertEquals("{\"duration\":\"3s\"}", json); } @@ -35,16 +35,13 @@ public void testDurationSerialization() throws Exception { @Test public void testDurationSerializationWithNanos() throws Exception { // Create a Duration of 3.5 seconds (3 seconds + 500000000 nanoseconds) - Duration duration = Duration.newBuilder() - .setSeconds(3) - .setNanos(500000000) - .build(); - + Duration duration = Duration.newBuilder().setSeconds(3).setNanos(500000000).build(); + TestClass testObject = new TestClass(duration); - + ObjectMapper mapper = new ObjectMapper(); String json = mapper.writeValueAsString(testObject); - + // The Duration should be serialized as "3.500s" by 
Durations.toString() assertEquals("{\"duration\":\"3.500s\"}", json); } @@ -52,10 +49,10 @@ public void testDurationSerializationWithNanos() throws Exception { @Test public void testNullDurationSerialization() throws Exception { TestClass testObject = new TestClass(null); - + ObjectMapper mapper = new ObjectMapper(); String json = mapper.writeValueAsString(testObject); - + // Null duration should be serialized as null assertEquals("{\"duration\":null}", json); } @@ -64,13 +61,13 @@ public void testNullDurationSerialization() throws Exception { public void testZeroDurationSerialization() throws Exception { // Create a Duration of 0 seconds Duration duration = Duration.newBuilder().setSeconds(0).build(); - + TestClass testObject = new TestClass(duration); - + ObjectMapper mapper = new ObjectMapper(); String json = mapper.writeValueAsString(testObject); - + // The Duration should be serialized as "0s" by Durations.toString() assertEquals("{\"duration\":\"0s\"}", json); } -} \ No newline at end of file +} diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskDeserializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskDeserializerTest.java index f27152ae9..20f69f45c 100644 --- a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskDeserializerTest.java +++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskDeserializerTest.java @@ -23,7 +23,9 @@ public void testFieldMaskDeserialization() throws Exception { String json = "{\"fieldMask\":\"foo,bar.baz\"}"; ObjectMapper mapper = new ObjectMapper(); TestClass obj = mapper.readValue(json, TestClass.class); - assertEquals(FieldMaskUtil.fromStringList(java.util.Arrays.asList("foo", "bar.baz")), obj.getFieldMask()); + assertEquals( + FieldMaskUtil.fromStringList(java.util.Arrays.asList("foo", "bar.baz")), + obj.getFieldMask()); } @Test @@ -33,4 +35,4 @@ public void testNullFieldMaskDeserialization() throws Exception { TestClass obj = mapper.readValue(json, TestClass.class); assertNull(obj.getFieldMask()); } -} \ No newline at end of file +} diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskSerializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskSerializerTest.java index b7ebed54b..81da81467 100644 --- a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskSerializerTest.java +++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskSerializerTest.java @@ -34,4 +34,4 @@ public void testNullFieldMaskSerialization() throws Exception { String json = mapper.writeValueAsString(testObject); assertEquals("{\"fieldMask\":null}", json); } -} \ No newline at end of file +} diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampDeserializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampDeserializerTest.java index e86625624..88c03ea2a 100644 --- a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampDeserializerTest.java +++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampDeserializerTest.java @@ -39,10 +39,7 @@ public void testTimestampDeserializationWithNanos() throws Exception { String json = "{\"timestamp\":\"2024-06-20T12:34:56.123456789Z\"}"; ObjectMapper mapper = new ObjectMapper(); TestClass obj = mapper.readValue(json, 
TestClass.class);
-    Timestamp expected = Timestamp.newBuilder()
-        .setSeconds(1718886896L)
-        .setNanos(123456789)
-        .build();
+    Timestamp expected = Timestamp.newBuilder().setSeconds(1718886896L).setNanos(123456789).build();
     assertEquals(expected, obj.getTimestamp());
   }
-}
\ No newline at end of file
+}
diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampSerializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampSerializerTest.java
index 7a4aa68e1..4433ca506 100644
--- a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampSerializerTest.java
+++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampSerializerTest.java
@@ -37,14 +37,15 @@ public void testNullTimestampSerialization() throws Exception {
 
   @Test
   public void testTimestampSerializationWithNanos() throws Exception {
-    Timestamp ts = Timestamp.newBuilder()
-        .setSeconds(1718886896L) // 2024-06-20T12:34:56Z
-        .setNanos(123456789)
-        .build();
+    Timestamp ts =
+        Timestamp.newBuilder()
+            .setSeconds(1718886896L) // 2024-06-20T12:34:56Z
+            .setNanos(123456789)
+            .build();
     TestClass testObject = new TestClass(ts);
     ObjectMapper mapper = new ObjectMapper();
     String json = mapper.writeValueAsString(testObject);
     // Should match the RFC 3339 format with fractional seconds
     assertEquals("{\"timestamp\":\"2024-06-20T12:34:56.123456789Z\"}", json);
   }
-}
\ No newline at end of file
+}

From 43e2218c203c57ad970903587cf3693078a76176 Mon Sep 17 00:00:00 2001
From: Hector Castejon Diaz
Date: Fri, 20 Jun 2025 08:00:30 +0000
Subject: [PATCH 7/8] param

---
 .../DurationDeserializerTest.java             | 36 ++++++----
 .../serialization/DurationSerializerTest.java | 70 ++++++-------
 .../FieldMaskDeserializerTest.java            | 44 ++++++++----
 .../FieldMaskSerializerTest.java              | 41 +++++++----
 .../TimestampDeserializerTest.java            | 44 ++++++------
 .../TimestampSerializerTest.java              | 49 ++++++-------
 6 files changed, 149 insertions(+), 135 deletions(-)

diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationDeserializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationDeserializerTest.java
index e905303b1..19a33ff18 100644
--- a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationDeserializerTest.java
+++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationDeserializerTest.java
@@ -5,8 +5,10 @@
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
 import com.google.protobuf.Duration;
-import com.google.protobuf.util.Durations;
-import org.junit.jupiter.api.Test;
+import java.util.stream.Stream;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 
 public class DurationDeserializerTest {
   private static class TestClass {
@@ -18,19 +20,27 @@ public Duration getDuration() {
     }
   }
 
-  @Test
-  public void testDurationDeserialization() throws Exception {
-    String json = "{\"duration\":\"3.500s\"}";
+  @ParameterizedTest
+  @MethodSource("durationDeserializationTestCases")
+  public void testDurationDeserialization(String inputJson, Duration expectedDuration)
+      throws Exception {
     ObjectMapper mapper = new ObjectMapper();
-    TestClass obj = mapper.readValue(json, TestClass.class);
-    assertEquals(Durations.parse("3.500s"), obj.getDuration());
+    TestClass obj = mapper.readValue(inputJson, TestClass.class);
+    assertEquals(expectedDuration, obj.getDuration());
   }
 
-  @Test
-  public void testNullDurationDeserialization() throws Exception {
-    String json = "{\"duration\":null}";
-    ObjectMapper mapper = new ObjectMapper();
-    TestClass obj = mapper.readValue(json, TestClass.class);
-    assertNull(obj.getDuration());
+  static Stream<Arguments> durationDeserializationTestCases() {
+    return Stream.of(
+        // Duration with seconds and nanos
+        Arguments.of(
+            "{\"duration\":\"3.500s\"}",
+            Duration.newBuilder().setSeconds(3).setNanos(500000000).build()),
+        // Duration with only seconds
+        Arguments.of("{\"duration\":\"5s\"}", Duration.newBuilder().setSeconds(5).build()),
+        // Duration with only nanos
+        Arguments.of(
+            "{\"duration\":\"0.123456789s\"}", Duration.newBuilder().setNanos(123456789).build()),
+        // Null duration
+        Arguments.of("{\"duration\":null}", null));
   }
 }
diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationSerializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationSerializerTest.java
index 8a71993c7..f703972a1 100644
--- a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationSerializerTest.java
+++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/DurationSerializerTest.java
@@ -5,7 +5,10 @@
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.annotation.JsonSerialize;
 import com.google.protobuf.Duration;
-import org.junit.jupiter.api.Test;
+import java.util.stream.Stream;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 
 public class DurationSerializerTest {
 
@@ -18,56 +21,29 @@ public TestClass(Duration duration) {
     }
   }
 
-  @Test
-  public void testDurationSerialization() throws Exception {
-    // Create a Duration of 3 seconds
-    Duration duration = Duration.newBuilder().setSeconds(3).build();
-
-    TestClass testObject = new TestClass(duration);
-
-    ObjectMapper mapper = new ObjectMapper();
-    String json = mapper.writeValueAsString(testObject);
-
-    // The Duration should be serialized as "3s" by Durations.toString()
-    assertEquals("{\"duration\":\"3s\"}", json);
-  }
-
-  @Test
-  public void testDurationSerializationWithNanos() throws Exception {
-    // Create a Duration of 3.5 seconds (3 seconds + 500000000 nanoseconds)
-    Duration duration = Duration.newBuilder().setSeconds(3).setNanos(500000000).build();
-
+  @ParameterizedTest
+  @MethodSource("durationSerializationTestCases")
+  public void testDurationSerialization(Duration duration, String expectedJson) throws Exception {
     TestClass testObject = new TestClass(duration);
-
     ObjectMapper mapper = new ObjectMapper();
     String json = mapper.writeValueAsString(testObject);
-
-    // The Duration should be serialized as "3.500s" by Durations.toString()
-    assertEquals("{\"duration\":\"3.500s\"}", json);
+    assertEquals(expectedJson, json);
   }
 
-  @Test
-  public void testNullDurationSerialization() throws Exception {
-    TestClass testObject = new TestClass(null);
-
-    ObjectMapper mapper = new ObjectMapper();
-    String json = mapper.writeValueAsString(testObject);
-
-    // Null duration should be serialized as null
-    assertEquals("{\"duration\":null}", json);
-  }
-
-  @Test
-  public void testZeroDurationSerialization() throws Exception {
-    // Create a Duration of 0 seconds
-    Duration duration = Duration.newBuilder().setSeconds(0).build();
-
-    TestClass testObject = new TestClass(duration);
-
-    ObjectMapper mapper = new ObjectMapper();
-    String json = mapper.writeValueAsString(testObject);
-
-    // The Duration should be serialized as "0s" by Durations.toString()
-    assertEquals("{\"duration\":\"0s\"}", json);
+  static Stream<Arguments> durationSerializationTestCases() {
+    return Stream.of(
+        // Duration of 3 seconds
+        Arguments.of(Duration.newBuilder().setSeconds(3).build(), "{\"duration\":\"3s\"}"),
+        // Duration of 3.5 seconds (3 seconds + 500000000 nanoseconds)
+        Arguments.of(
+            Duration.newBuilder().setSeconds(3).setNanos(500000000).build(),
+            "{\"duration\":\"3.500s\"}"),
+        // Duration of 0 seconds
+        Arguments.of(Duration.newBuilder().setSeconds(0).build(), "{\"duration\":\"0s\"}"),
+        // Duration with only nanos
+        Arguments.of(
+            Duration.newBuilder().setNanos(123456789).build(), "{\"duration\":\"0.123456789s\"}"),
+        // Null duration
+        Arguments.of(null, "{\"duration\":null}"));
   }
 }
diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskDeserializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskDeserializerTest.java
index 20f69f45c..0fe41e2a6 100644
--- a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskDeserializerTest.java
+++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskDeserializerTest.java
@@ -6,7 +6,12 @@
 import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
 import com.google.protobuf.FieldMask;
 import com.google.protobuf.util.FieldMaskUtil;
-import org.junit.jupiter.api.Test;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Stream;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 
 public class FieldMaskDeserializerTest {
   private static class TestClass {
@@ -18,21 +23,32 @@ public FieldMask getFieldMask() {
     }
   }
 
-  @Test
-  public void testFieldMaskDeserialization() throws Exception {
-    String json = "{\"fieldMask\":\"foo,bar.baz\"}";
+  @ParameterizedTest
+  @MethodSource("fieldMaskDeserializationTestCases")
+  public void testFieldMaskDeserialization(String inputJson, List<String> expectedFieldPaths)
+      throws Exception {
     ObjectMapper mapper = new ObjectMapper();
-    TestClass obj = mapper.readValue(json, TestClass.class);
-    assertEquals(
-        FieldMaskUtil.fromStringList(java.util.Arrays.asList("foo", "bar.baz")),
-        obj.getFieldMask());
+    TestClass obj = mapper.readValue(inputJson, TestClass.class);
+
+    if (expectedFieldPaths == null) {
+      assertNull(obj.getFieldMask());
+    } else {
+      FieldMask expected = FieldMaskUtil.fromStringList(expectedFieldPaths);
+      assertEquals(expected, obj.getFieldMask());
+    }
   }
 
-  @Test
-  public void testNullFieldMaskDeserialization() throws Exception {
-    String json = "{\"fieldMask\":null}";
-    ObjectMapper mapper = new ObjectMapper();
-    TestClass obj = mapper.readValue(json, TestClass.class);
-    assertNull(obj.getFieldMask());
+  static Stream<Arguments> fieldMaskDeserializationTestCases() {
+    return Stream.of(
+        // Simple field mask
+        Arguments.of("{\"fieldMask\":\"foo,bar.baz\"}", Arrays.asList("foo", "bar.baz")),
+        // Single field
+        Arguments.of("{\"fieldMask\":\"name\"}", Arrays.asList("name")),
+        // Nested fields
+        Arguments.of(
+            "{\"fieldMask\":\"user.profile.email,user.profile.name\"}",
+            Arrays.asList("user.profile.email", "user.profile.name")),
+        // Null field mask
+        Arguments.of("{\"fieldMask\":null}", null));
   }
 }
diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskSerializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskSerializerTest.java
index 81da81467..83baf0936 100644
--- a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskSerializerTest.java
+++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/FieldMaskSerializerTest.java
@@ -6,7 +6,12 @@
 import com.fasterxml.jackson.databind.annotation.JsonSerialize;
 import com.google.protobuf.FieldMask;
 import com.google.protobuf.util.FieldMaskUtil;
-import org.junit.jupiter.api.Test;
+import java.util.Arrays;
+import java.util.List;
+import java.util.stream.Stream;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 
 public class FieldMaskSerializerTest {
   private static class TestClass {
@@ -18,20 +23,32 @@ public TestClass(FieldMask fieldMask) {
     }
   }
 
-  @Test
-  public void testFieldMaskSerialization() throws Exception {
-    FieldMask mask = FieldMaskUtil.fromStringList(java.util.Arrays.asList("foo", "bar.baz"));
-    TestClass testObject = new TestClass(mask);
+  @ParameterizedTest
+  @MethodSource("fieldMaskSerializationTestCases")
+  public void testFieldMaskSerialization(List<String> fieldPaths, String expectedJson)
+      throws Exception {
+    FieldMask fieldMask = null;
+    if (fieldPaths != null) {
+      fieldMask = FieldMaskUtil.fromStringList(fieldPaths);
+    }
+
+    TestClass testObject = new TestClass(fieldMask);
     ObjectMapper mapper = new ObjectMapper();
     String json = mapper.writeValueAsString(testObject);
-    assertEquals("{\"fieldMask\":\"foo,bar.baz\"}", json);
+    assertEquals(expectedJson, json);
   }
 
-  @Test
-  public void testNullFieldMaskSerialization() throws Exception {
-    TestClass testObject = new TestClass(null);
-    ObjectMapper mapper = new ObjectMapper();
-    String json = mapper.writeValueAsString(testObject);
-    assertEquals("{\"fieldMask\":null}", json);
+  static Stream<Arguments> fieldMaskSerializationTestCases() {
+    return Stream.of(
+        // Simple field mask
+        Arguments.of(Arrays.asList("foo", "bar.baz"), "{\"fieldMask\":\"foo,bar.baz\"}"),
+        // Single field
+        Arguments.of(Arrays.asList("name"), "{\"fieldMask\":\"name\"}"),
+        // Nested fields
+        Arguments.of(
+            Arrays.asList("user.profile.email", "user.profile.name"),
+            "{\"fieldMask\":\"user.profile.email,user.profile.name\"}"),
+        // Null field mask
+        Arguments.of(null, "{\"fieldMask\":null}"));
   }
 }
diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampDeserializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampDeserializerTest.java
index 88c03ea2a..270bd56bd 100644
--- a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampDeserializerTest.java
+++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampDeserializerTest.java
@@ -5,8 +5,10 @@
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.annotation.JsonDeserialize;
 import com.google.protobuf.Timestamp;
-import com.google.protobuf.util.Timestamps;
-import org.junit.jupiter.api.Test;
+import java.util.stream.Stream;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 
 public class TimestampDeserializerTest {
   private static class TestClass {
@@ -18,28 +20,26 @@ public Timestamp getTimestamp() {
     }
   }
 
-  @Test
-  public void testTimestampDeserialization() throws Exception {
-    String json = "{\"timestamp\":\"2024-06-20T12:34:56Z\"}";
+  @ParameterizedTest
+  @MethodSource("timestampDeserializationTestCases")
+  public void testTimestampDeserialization(String inputJson, Timestamp expectedTimestamp)
+      throws Exception {
     ObjectMapper mapper = new ObjectMapper();
-    TestClass obj = mapper.readValue(json, TestClass.class);
-    assertEquals(Timestamps.parse("2024-06-20T12:34:56Z"), obj.getTimestamp());
+    TestClass obj = mapper.readValue(inputJson, TestClass.class);
+    assertEquals(expectedTimestamp, obj.getTimestamp());
   }
 
-  @Test
-  public void testNullTimestampDeserialization() throws Exception {
-    String json = "{\"timestamp\":null}";
-    ObjectMapper mapper = new ObjectMapper();
-    TestClass obj = mapper.readValue(json, TestClass.class);
-    assertNull(obj.getTimestamp());
-  }
-
-  @Test
-  public void testTimestampDeserializationWithNanos() throws Exception {
-    String json = "{\"timestamp\":\"2024-06-20T12:34:56.123456789Z\"}";
-    ObjectMapper mapper = new ObjectMapper();
-    TestClass obj = mapper.readValue(json, TestClass.class);
-    Timestamp expected = Timestamp.newBuilder().setSeconds(1718886896L).setNanos(123456789).build();
-    assertEquals(expected, obj.getTimestamp());
+  static Stream<Arguments> timestampDeserializationTestCases() {
+    return Stream.of(
+        // Timestamp without nanos
+        Arguments.of(
+            "{\"timestamp\":\"2024-06-20T12:34:56Z\"}",
+            Timestamp.newBuilder().setSeconds(1718886896L).build()),
+        // Timestamp with nanos
+        Arguments.of(
+            "{\"timestamp\":\"2024-06-20T12:34:56.123456789Z\"}",
+            Timestamp.newBuilder().setSeconds(1718886896L).setNanos(123456789).build()),
+        // Null timestamp
+        Arguments.of("{\"timestamp\":null}", null));
   }
 }
diff --git a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampSerializerTest.java b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampSerializerTest.java
index 4433ca506..05b9c567d 100644
--- a/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampSerializerTest.java
+++ b/databricks-sdk-java/src/test/java/com/databricks/sdk/core/serialization/TimestampSerializerTest.java
@@ -5,8 +5,10 @@
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.annotation.JsonSerialize;
 import com.google.protobuf.Timestamp;
-import com.google.protobuf.util.Timestamps;
-import org.junit.jupiter.api.Test;
+import java.util.stream.Stream;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
 
 public class TimestampSerializerTest {
   private static class TestClass {
@@ -18,34 +20,27 @@ public TestClass(Timestamp timestamp) {
     }
   }
 
-  @Test
-  public void testTimestampSerialization() throws Exception {
-    Timestamp ts = Timestamps.parse("2024-06-20T12:34:56Z");
-    TestClass testObject = new TestClass(ts);
+  @ParameterizedTest
+  @MethodSource("timestampSerializationTestCases")
+  public void testTimestampSerialization(Timestamp timestamp, String expectedJson)
+      throws Exception {
+    TestClass testObject = new TestClass(timestamp);
     ObjectMapper mapper = new ObjectMapper();
     String json = mapper.writeValueAsString(testObject);
-    assertEquals("{\"timestamp\":\"2024-06-20T12:34:56Z\"}", json);
+    assertEquals(expectedJson, json);
   }
 
-  @Test
-  public void testNullTimestampSerialization() throws Exception {
-    TestClass testObject = new TestClass(null);
-    ObjectMapper mapper = new ObjectMapper();
-    String json = mapper.writeValueAsString(testObject);
-    assertEquals("{\"timestamp\":null}", json);
-  }
-
-  @Test
-  public void testTimestampSerializationWithNanos() throws Exception {
-    Timestamp ts =
-        Timestamp.newBuilder()
-            .setSeconds(1718886896L) // 2024-06-20T12:34:56Z
-            .setNanos(123456789)
-            .build();
-    TestClass testObject = new TestClass(ts);
-    ObjectMapper mapper = new ObjectMapper();
-    String json = mapper.writeValueAsString(testObject);
-    // Should match the RFC 3339 format with fractional seconds
-    assertEquals("{\"timestamp\":\"2024-06-20T12:34:56.123456789Z\"}", json);
+  static Stream<Arguments> timestampSerializationTestCases() {
+    return Stream.of(
+        // Basic timestamp without nanos
+        Arguments.of(
+            Timestamp.newBuilder().setSeconds(1718886896L).build(),
+            "{\"timestamp\":\"2024-06-20T12:34:56Z\"}"),
+        // Timestamp with nanos
+        Arguments.of(
+            Timestamp.newBuilder().setSeconds(1718886896L).setNanos(123456789).build(),
+            "{\"timestamp\":\"2024-06-20T12:34:56.123456789Z\"}"),
+        // Null timestamp
+        Arguments.of(null, "{\"timestamp\":null}"));
   }
 }

From 8ac621fc37d80d99489f7a5aacc8b14f78275eb9 Mon Sep 17 00:00:00 2001
From: Hector Castejon Diaz
Date: Fri, 20 Jun 2025 08:05:23 +0000
Subject: [PATCH 8/8] fmt

---
 .../java/com/databricks/sdk/core/DatabricksConfig.java | 8 ++------
 1 file changed, 2 insertions(+), 6 deletions(-)

diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java
index df16ebae3..de6548982 100644
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/core/DatabricksConfig.java
@@ -410,17 +410,13 @@ public DatabricksConfig setAzureUseMsi(boolean azureUseMsi) {
     return this;
   }
 
-  /**
-   * @deprecated Use {@link #getAzureUseMsi()} instead.
-   */
+  /** @deprecated Use {@link #getAzureUseMsi()} instead. */
   @Deprecated()
   public boolean getAzureUseMSI() {
     return azureUseMsi;
   }
 
-  /**
-   * @deprecated Use {@link #setAzureUseMsi(boolean)} instead.
-   */
+  /** @deprecated Use {@link #setAzureUseMsi(boolean)} instead. */
   @Deprecated
   public DatabricksConfig setAzureUseMSI(boolean azureUseMsi) {
     this.azureUseMsi = azureUseMsi;