
Commit 2b006af

Google APIs authored and copybara-github committed

feat: add ResourceExhausted to retryable error for Write API unary calls
docs: add multiplexing documentation

PiperOrigin-RevId: 545839491
1 parent 2e20c05 commit 2b006af

3 files changed: 45 additions & 19 deletions


google/cloud/bigquery/storage/v1/bigquerystorage_grpc_service_config.json

Lines changed: 2 additions & 1 deletion
@@ -96,7 +96,8 @@
       "backoffMultiplier": 1.3,
       "retryableStatusCodes": [
         "DEADLINE_EXCEEDED",
-        "UNAVAILABLE"
+        "UNAVAILABLE",
+        "RESOURCE_EXHAUSTED"
       ]
     }
   }, {
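With RESOURCE_EXHAUSTED added to retryableStatusCodes, unary Write API calls such as CreateWriteStream now retry on quota pushback. Below is a minimal sketch of an equivalent client-side retry policy, assuming the google-cloud-bigquery-storage Python client; the project, dataset, and table names are hypothetical, and the backoff values are illustrative (the multiplier mirrors the service config above).

# A minimal sketch of a client-side retry policy for a unary Write API
# call that also retries RESOURCE_EXHAUSTED. Assumes the
# google-cloud-bigquery-storage Python client; names and backoff values
# are illustrative.
from google.api_core import exceptions, retry
from google.cloud.bigquery_storage_v1 import BigQueryWriteClient, types

unary_retry = retry.Retry(
    predicate=retry.if_exception_type(
        exceptions.DeadlineExceeded,
        exceptions.ServiceUnavailable,
        exceptions.ResourceExhausted,  # newly retryable per this commit
    ),
    initial=10.0,    # seconds before the first retry
    multiplier=1.3,  # matches "backoffMultiplier" above
    maximum=120.0,   # backoff ceiling, seconds
)

client = BigQueryWriteClient()
write_stream = client.create_write_stream(
    parent="projects/my-project/datasets/my_dataset/tables/my_table",
    write_stream=types.WriteStream(type_=types.WriteStream.Type.PENDING),
    retry=unary_retry,
)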

google/cloud/bigquery/storage/v1/storage.proto

Lines changed: 39 additions & 14 deletions
@@ -397,19 +397,25 @@ message CreateWriteStreamRequest {
 
 // Request message for `AppendRows`.
 //
-// Due to the nature of AppendRows being a bidirectional streaming RPC, certain
-// parts of the AppendRowsRequest need only be specified for the first request
-// sent each time the gRPC network connection is opened/reopened.
+// Because AppendRows is a bidirectional streaming RPC, certain parts of the
+// AppendRowsRequest need only be specified for the first request before
+// switching table destinations. You can also switch table destinations within
+// the same connection for the default stream.
 //
 // The size of a single AppendRowsRequest must be less than 10 MB in size.
 // Requests larger than this return an error, typically `INVALID_ARGUMENT`.
 message AppendRowsRequest {
   // ProtoData contains the data rows and schema when constructing append
   // requests.
   message ProtoData {
-    // Proto schema used to serialize the data. This value only needs to be
-    // provided as part of the first request on a gRPC network connection,
-    // and will be ignored for subsequent requests on the connection.
+    // The protocol buffer schema used to serialize the data. Provide this value
+    // whenever:
+    //
+    // * You send the first request of an RPC connection.
+    //
+    // * You change the input schema.
+    //
+    // * You specify a new destination table.
     ProtoSchema writer_schema = 1;
 
     // Serialized row data in protobuf message format.
@@ -419,10 +425,9 @@ message AppendRowsRequest {
     ProtoRows rows = 2;
   }
 
-  // An enum to indicate how to interpret missing values. Missing values are
-  // fields present in user schema but missing in rows. A missing value can
-  // represent a NULL or a column default value defined in BigQuery table
-  // schema.
+  // An enum to indicate how to interpret missing values of fields that are
+  // present in user schema but missing in rows. A missing value can represent a
+  // NULL or a column default value defined in BigQuery table schema.
   enum MissingValueInterpretation {
     // Invalid missing value interpretation. Requests with this value will be
     // rejected.
@@ -436,10 +441,14 @@ message AppendRowsRequest {
     DEFAULT_VALUE = 2;
   }
 
-  // Required. The write_stream identifies the target of the append operation,
-  // and only needs to be specified as part of the first request on the gRPC
-  // connection. If provided for subsequent requests, it must match the value of
-  // the first request.
+  // Required. The write_stream identifies the append operation. It must be
+  // provided in the following scenarios:
+  //
+  // * In the first request to an AppendRows connection.
+  //
+  // * In all subsequent requests to an AppendRows connection, if you use the
+  // same connection to write to multiple tables or change the input schema for
+  // default streams.
   //
   // For explicitly created write streams, the format is:
   //
@@ -448,6 +457,22 @@ message AppendRowsRequest {
   // For the special default stream, the format is:
   //
   // * `projects/{project}/datasets/{dataset}/tables/{table}/streams/_default`.
+  //
+  // An example of a possible sequence of requests with write_stream fields
+  // within a single connection:
+  //
+  // * r1: {write_stream: stream_name_1}
+  //
+  // * r2: {write_stream: /*omit*/}
+  //
+  // * r3: {write_stream: /*omit*/}
+  //
+  // * r4: {write_stream: stream_name_2}
+  //
+  // * r5: {write_stream: stream_name_2}
+  //
+  // The destination changed in request_4, so the write_stream field must be
+  // populated in all subsequent requests in this stream.
   string write_stream = 1 [
     (google.api.field_behavior) = REQUIRED,
     (google.api.resource_reference) = {
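To make the multiplexing rules above concrete, here is a hedged sketch of the r1-r5 sequence over a single AppendRows connection, assuming the google-cloud-bigquery-storage Python client; the table paths, schemas, and row payloads are hypothetical placeholders, not working data.

# A hedged sketch of the r1-r5 sequence above: multiplexing two default
# streams over one AppendRows connection. Table paths, schemas, and row
# payloads are hypothetical stand-ins.
from google.cloud.bigquery_storage_v1 import BigQueryWriteClient, types

stream_1 = "projects/p/datasets/d/tables/t1/streams/_default"
stream_2 = "projects/p/datasets/d/tables/t2/streams/_default"

# Stand-ins: populate ProtoSchema from your message descriptor and
# ProtoRows with real serialized rows.
schema_1, schema_2 = types.ProtoSchema(), types.ProtoSchema()
rows = types.ProtoRows(serialized_rows=[b"<serialized row bytes>"])

def request(write_stream="", writer_schema=None):
    data = types.AppendRowsRequest.ProtoData(rows=rows)
    if writer_schema is not None:
        data.writer_schema = writer_schema
    return types.AppendRowsRequest(write_stream=write_stream, proto_rows=data)

requests = iter([
    request(stream_1, schema_1),  # r1: first request, stream and schema required
    request(),                    # r2: same destination, write_stream may be omitted
    request(),                    # r3
    request(stream_2, schema_2),  # r4: new destination, resend stream and schema
    request(stream_2),            # r5: destination changed in r4, keep it populated
])

client = BigQueryWriteClient()
for response in client.append_rows(requests):
    print(response.append_result)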

google/cloud/bigquery/storage/v1/stream.proto

Lines changed: 4 additions & 4 deletions
@@ -194,10 +194,10 @@ message ReadSession {
   int64 estimated_total_bytes_scanned = 12
       [(google.api.field_behavior) = OUTPUT_ONLY];
 
-  // Output only. A pre-projected estimate of the total physical size (in bytes)
-  // of files this session will scan when all streams are completely consumed.
-  // This estimate does not depend on the selected columns and can be based on
-  // metadata from the table which might be incomplete or stale. Only set for
+  // Output only. A pre-projected estimate of the total physical size of files
+  // (in bytes) that this session will scan when all streams are consumed. This
+  // estimate is independent of the selected columns and can be based on
+  // incomplete or stale metadata from the table. This field is only set for
   // BigLake tables.
   int64 estimated_total_physical_file_size = 15
       [(google.api.field_behavior) = OUTPUT_ONLY];
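For reference, a short sketch of reading the estimate documented above, assuming the google-cloud-bigquery-storage Python client; the project and table names are hypothetical, and the field is only populated for BigLake tables.

# Reads estimated_total_physical_file_size from a read session.
# Hypothetical project/table names; the estimate is independent of the
# selected columns and may rely on incomplete or stale table metadata.
from google.cloud.bigquery_storage_v1 import BigQueryReadClient, types

client = BigQueryReadClient()
session = client.create_read_session(
    parent="projects/my-project",
    read_session=types.ReadSession(
        table="projects/my-project/datasets/my_dataset/tables/my_biglake_table",
        data_format=types.DataFormat.AVRO,
    ),
    max_stream_count=1,
)
print(session.estimated_total_physical_file_size)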
