@@ -17,6 +17,9 @@ syntax = "proto3";
1717package google.bigtable.v2 ;
1818
1919import "google/api/field_behavior.proto" ;
20+ import "google/bigtable/v2/types.proto" ;
21+ import "google/protobuf/timestamp.proto" ;
22+ import "google/type/date.proto" ;
2023
2124option csharp_namespace = "Google.Cloud.Bigtable.V2" ;
2225option go_package = "google.golang.org/genproto/googleapis/bigtable/v2;bigtable" ;
@@ -92,6 +95,21 @@ message Cell {
9295// value (which may be of a more complex type). See the documentation of the
9396// `Type` message for more details.
9497message Value {
98+ // The verified `Type` of this `Value`, if it cannot be inferred.
99+ //
100+ // Read results will never specify the encoding for `type` since the value
101+ // will already have been decoded by the server. Furthermore, the `type` will
102+ // be omitted entirely if it can be inferred from a previous response. The
103+ // exact semantics for inferring `type` will vary, and are therefore
104+ // documented separately for each read method.
105+ //
106+ // When using composite types (Struct, Array, Map) only the outermost `Value`
107+ // will specify the `type`. This top-level `type` will define the types for
108+  // any nested `Struct` fields, `Array` elements, or `Map` key/value pairs.
109+ // If a nested `Value` provides a `type` on write, the request will be
110+ // rejected with INVALID_ARGUMENT.
111+ Type type = 7 ;
112+
95113 // Options for transporting values within the protobuf type system. A given
96114 // `kind` may support more than one `type` and vice versa. On write, this is
97115 // roughly analogous to a GoogleSQL literal.
@@ -107,12 +125,42 @@ message Value {
107125 // The `type` field must be omitted.
108126 int64 raw_timestamp_micros = 9 ;
109127
128+ // Represents a typed value transported as a byte sequence.
129+ bytes bytes_value = 2 ;
130+
131+ // Represents a typed value transported as a string.
132+ string string_value = 3 ;
133+
110134 // Represents a typed value transported as an integer.
111- // Default type for writes: `Int64`
112135 int64 int_value = 6 ;
136+
137+ // Represents a typed value transported as a boolean.
138+ bool bool_value = 10 ;
139+
140+ // Represents a typed value transported as a floating point number.
141+ double float_value = 11 ;
142+
143+ // Represents a typed value transported as a timestamp.
144+ google.protobuf.Timestamp timestamp_value = 12 ;
145+
146+ // Represents a typed value transported as a date.
147+ google.type.Date date_value = 13 ;
148+
149+ // Represents a typed value transported as a sequence of values.
150+ // To differentiate between `Struct`, `Array`, and `Map`, the outermost
151+ // `Value` must provide an explicit `type` on write. This `type` will
152+ // apply recursively to the nested `Struct` fields, `Array` elements,
153+ // or `Map` key/value pairs, which *must not* supply their own `type`.
154+ ArrayValue array_value = 4 ;
113155 }
114156}
115157
// `ArrayValue` is an ordered list of `Value`.
message ArrayValue {
  // The ordered elements in the array.
  repeated Value values = 1;
}
163+
116164// Specifies a contiguous range of rows.
117165message RowRange {
118166 // The row key at which to start the range.
@@ -609,3 +657,84 @@ message StreamContinuationToken {
609657 // An encoded position in the stream to restart reading from.
610658 string token = 2 ;
611659}
660+
// Protocol buffers format descriptor, as described by messages `ProtoSchema`
// and `ProtoRows`.
message ProtoFormat {}
664+
// Describes a column in a Bigtable Query Language result set.
message ColumnMetadata {
  // The name of the column.
  string name = 1;

  // The type of the column.
  Type type = 2;
}
673+
// ResultSet schema in proto format.
message ProtoSchema {
  // The columns in the result set.
  repeated ColumnMetadata columns = 1;
}
679+
// Describes the structure of a Bigtable result set.
message ResultSetMetadata {
  // The schema of the ResultSet: an ordered list of column names with their
  // types.
  oneof schema {
    // Schema in proto format.
    ProtoSchema proto_schema = 1;
  }
}
689+
// A batch of serialized `ProtoRows`.
message ProtoRowsBatch {
  // Merge partial results by concatenating these bytes, then parsing the
  // overall value as a `ProtoRows` message.
  bytes batch_data = 1;
}
696+
// A partial result set from the streaming query API.
// The client will buffer `partial_rows` from successive responses until it
// receives a `resume_token`.
message PartialResultSet {
  // Partial rows in one of the supported formats. It may take many
  // `PartialResultSet`s to stream a batch of rows that can be decoded on the
  // client. The client should buffer `partial_rows` until it receives a
  // `resume_token`, at which point the batch is complete and can be decoded
  // and yielded to the user. Each sub-message documents the appropriate way
  // to combine results.
  oneof partial_rows {
    // Partial rows in serialized `ProtoRows` format.
    ProtoRowsBatch proto_rows_batch = 3;
  }

  // An opaque token sent by the server to allow query resumption and to
  // signal that the client should accumulate `partial_rows` since the last
  // non-empty `resume_token`. On resumption, the resumed query will return
  // the remaining rows for this query.
  //
  // If there is a batch in progress, a non-empty `resume_token` means that
  // the batch of `partial_rows` will be complete after merging the
  // `partial_rows` from this response. The client must only yield completed
  // batches to the application, and must ensure that any future retries send
  // the latest token to avoid returning duplicate data.
  //
  // The server may set `resume_token` without any `partial_rows`. If there is
  // a batch in progress the client should yield it.
  //
  // The server will also send a sentinel `resume_token` when the last batch
  // of `partial_rows` is sent. If the client retries the ExecuteQueryRequest
  // with the sentinel `resume_token`, the server will emit it again without
  // any `partial_rows`, then return OK.
  bytes resume_token = 5;

  // Estimated size of a new batch. The server will always set this when
  // returning the first `partial_rows` of a batch, and will not set it at
  // any other time.
  //
  // The client can use this estimate to allocate an initial buffer for the
  // batched results. This helps minimize the number of allocations required,
  // though the buffer size may still need to be increased if the estimate is
  // too low.
  int32 estimated_batch_size = 4;
}
0 commit comments