Skip to content

Commit b348970

Browse files
Google APIs and copybara-github authored and committed
feat: add support for Salesforce connections, which are usable only by allowlisted partners
feat: add cloud spanner connection properties - use_data_boost
feat: add cloud spanner connection properties - max_parallelism
PiperOrigin-RevId: 546084599
1 parent b98e88f commit b348970

1 file changed

Lines changed: 46 additions & 10 deletions

File tree

google/cloud/bigquery/connection/v1/connection.proto

Lines changed: 46 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -263,6 +263,12 @@ message Connection {
263263

264264
// Spark properties.
265265
SparkProperties spark = 23;
266+
267+
// Optional. Salesforce DataCloud properties. This field is intended for
268+
// use only by Salesforce partner projects. This field contains properties
269+
// for your Salesforce DataCloud connection.
270+
SalesforceDataCloudProperties salesforce_data_cloud = 24
271+
[(google.api.field_behavior) = OPTIONAL];
266272
}
267273

268274
// Output only. The creation timestamp of the connection.
@@ -327,23 +333,39 @@ message CloudSpannerProperties {
327333
// If parallelism should be used when reading from Cloud Spanner
328334
bool use_parallelism = 2;
329335

336+
// Allows setting max parallelism per query when executing on Spanner
337+
// independent compute resources. If unspecified, default values of
338+
// parallelism are chosen that are dependent on the Cloud Spanner instance
339+
// configuration.
340+
//
341+
// REQUIRES: `use_parallelism` must be set.
342+
// REQUIRES: Either `use_data_boost` or `use_serverless_analytics` must be
343+
// set.
344+
int32 max_parallelism = 5;
345+
330346
// If the serverless analytics service should be used to read data from Cloud
331347
// Spanner.
332348
// Note: `use_parallelism` must be set when using serverless analytics.
333349
bool use_serverless_analytics = 3;
334350

351+
// If set, the request will be executed via Spanner independent compute
352+
// resources.
353+
// REQUIRES: `use_parallelism` must be set.
354+
//
355+
// NOTE: `use_serverless_analytics` will be deprecated. Prefer
356+
// `use_data_boost` over `use_serverless_analytics`.
357+
bool use_data_boost = 6;
358+
335359
// Optional. Cloud Spanner database role for fine-grained access control.
336-
// A database role is a collection of fine-grained access privileges. Example:
337-
// Admin predefines roles that provides user a set of permissions (SELECT,
338-
// INSERT, ..). The user can then specify a predefined role on a connection to
339-
// execute their Cloud Spanner query. The role is passthrough here. If the
340-
// user is not authorized to use the specified role, they get an error. This
341-
// validation happens on Cloud Spanner.
360+
// The Cloud Spanner admin should have provisioned the database role with
361+
// appropriate permissions, such as `SELECT` and `INSERT`. Other users should
362+
// only use roles provided by their Cloud Spanner admins.
342363
//
343-
// See https://cloud.google.com/spanner/docs/fgac-about for more details.
364+
// For more details, see [About fine-grained access control]
365+
// (https://cloud.google.com/spanner/docs/fgac-about).
344366
//
345-
// REQUIRES: database role name must start with uppercase/lowercase letter
346-
// and only contain uppercase/lowercase letters, numbers, and underscores.
367+
// REQUIRES: The database role name must start with a letter, and can only
368+
// contain letters, numbers, and underscores.
347369
string database_role = 4 [(google.api.field_behavior) = OPTIONAL];
348370
}
349371

@@ -476,7 +498,7 @@ message SparkProperties {
476498
// The service account does not have any permissions associated with it when
477499
// it is created. After creation, customers delegate permissions to the
478500
// service account. When the connection is used in the context of a stored
479-
// procedure for Apache Spark in BigQuery, the service account will be used to
501+
// procedure for Apache Spark in BigQuery, the service account is used to
480502
// connect to the desired resources in Google Cloud.
481503
//
482504
// The account ID is in the form of:
@@ -491,3 +513,17 @@ message SparkProperties {
491513
SparkHistoryServerConfig spark_history_server_config = 4
492514
[(google.api.field_behavior) = OPTIONAL];
493515
}
516+
517+
// Connection properties specific to Salesforce DataCloud. This message is
// intended for use only by allowlisted Salesforce partner projects (see the
// `salesforce_data_cloud` field on `Connection`).
message SalesforceDataCloudProperties {
  // The URL to the user's Salesforce DataCloud instance.
  // NOTE(review): no field_behavior annotation here — presumably required at
  // connection creation time; confirm against the service implementation.
  string instance_uri = 1;

  // Output only. A unique Google-owned and Google-generated service account
  // identity for the connection. Server-populated; values set by clients are
  // ignored per `OUTPUT_ONLY` field behavior.
  string identity = 2 [(google.api.field_behavior) = OUTPUT_ONLY];

  // The ID of the user's Salesforce tenant.
  string tenant_id = 3;
}

0 commit comments

Comments
 (0)