bigtable_service_messages.proto

// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package google.bigtable.v1;
import "google/bigtable/v1/bigtable_data.proto";
import "google/rpc/status.proto";
option java_multiple_files = true;
option java_outer_classname = "BigtableServiceMessagesProto";
option java_package = "com.google.bigtable.v1";
// Request message for BigtableService.ReadRows.
message ReadRowsRequest {
  // The unique name of the table from which to read.
  string table_name = 1;
  // If neither row_key nor row_range is set, reads from all rows.
  oneof target {
    // The key of a single row from which to read.
    bytes row_key = 2;
    // A range of rows from which to read.
    RowRange row_range = 3;
    // A set of rows from which to read. Entries need not be in order, and will
    // be deduplicated before reading.
    // The total serialized size of the set must not exceed 1MB.
    RowSet row_set = 8;
  }
  // The filter to apply to the contents of the specified row(s). If unset,
  // reads the entire table.
  RowFilter filter = 5;
  // By default, rows are read sequentially, producing results which are
  // guaranteed to arrive in increasing row order. Setting
  // "allow_row_interleaving" to true allows multiple rows to be interleaved in
  // the response stream, which increases throughput but breaks this guarantee,
  // and may force the client to use more memory to buffer partially-received
  // rows. Cannot be set to true when specifying "num_rows_limit".
  bool allow_row_interleaving = 6;
  // The read will terminate after committing to N rows' worth of results. The
  // default (zero) is to return all results.
  // Note that "allow_row_interleaving" cannot be set to true when this is set.
  int64 num_rows_limit = 7;
}
// Response message for BigtableService.ReadRows.
message ReadRowsResponse {
  // Specifies a piece of a row's contents returned as part of the read
  // response stream.
  message Chunk {
    oneof chunk {
      // A subset of the data from a particular row. As long as no "reset_row"
      // is received in between, multiple "row_contents" from the same row are
      // from the same atomic view of that row, and will be received in the
      // expected family/column/timestamp order.
      Family row_contents = 1;
      // Indicates that the client should drop all previous chunks for
      // "row_key", as it will be re-read from the beginning.
      bool reset_row = 2;
      // Indicates that the client can safely process all previous chunks for
      // "row_key", as its data has been fully read.
      bool commit_row = 3;
    }
  }
  // The key of the row for which we're receiving data.
  // Results will be received in increasing row key order, unless
  // "allow_row_interleaving" was specified in the request.
  bytes row_key = 1;
  // One or more chunks of the row specified by "row_key".
  repeated Chunk chunks = 2;
}
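/*
 * Illustration (hypothetical example; the row key and family name are
 * invented, and the handling rules simply restate the comments on Chunk
 * above): a read of row "r1" might deliver the chunk sequence
 *
 *   { row_contents: <cells in family "cf"> }   // buffer these cells for "r1"
 *   { reset_row: true }                        // discard everything buffered so far
 *   { row_contents: <cells in family "cf"> }   // buffer again, from a fresh view
 *   { commit_row: true }                       // "r1" is complete; emit the buffer
 *
 * Only chunks received after the last "reset_row" contribute to the final
 * contents of "r1".
 */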
// Request message for BigtableService.SampleRowKeys.
message SampleRowKeysRequest {
  // The unique name of the table from which to sample row keys.
  string table_name = 1;
}
// Response message for BigtableService.SampleRowKeys.
message SampleRowKeysResponse {
  // Sorted streamed sequence of sample row keys in the table. The table might
  // have contents before the first row key in the list and after the last one,
  // but a key containing the empty string indicates "end of table" and will be
  // the last response given, if present.
  // Note that row keys in this list may not have ever been written to or read
  // from, and users should therefore not make any assumptions about the row key
  // structure that are specific to their use case.
  bytes row_key = 1;
  // Approximate total storage space used by all rows in the table which precede
  // "row_key". Buffering the contents of all rows between two subsequent
  // samples would require space roughly equal to the difference in their
  // "offset_bytes" fields.
  int64 offset_bytes = 2;
}
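/*
 * Illustration (hypothetical numbers): if the stream yields the samples
 * ("key1", offset_bytes: 100000000) and then ("key2", offset_bytes: 150000000),
 * the rows from "key1" up to but not including "key2" occupy roughly
 * 150000000 - 100000000 = 50000000 bytes, so buffering that span would need
 * about 50 MB of space.
 */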
// Request message for BigtableService.MutateRow.
message MutateRowRequest {
  // The unique name of the table to which the mutation should be applied.
  string table_name = 1;
  // The key of the row to which the mutation should be applied.
  bytes row_key = 2;
  // Changes to be atomically applied to the specified row. Entries are applied
  // in order, meaning that earlier mutations can be masked by later ones.
  // Must contain at least one entry and at most 100000.
  repeated Mutation mutations = 3;
}
// Request message for BigtableService.MutateRows.
message MutateRowsRequest {
  message Entry {
    // The key of the row to which the `mutations` should be applied.
    bytes row_key = 1;
    // Changes to be atomically applied to the specified row. Mutations are
    // applied in order, meaning that earlier mutations can be masked by
    // later ones.
    // At least one mutation must be specified.
    repeated Mutation mutations = 2;
  }
  // The unique name of the table to which the mutations should be applied.
  string table_name = 1;
  // The row keys/mutations to be applied in bulk.
  // Each entry is applied as an atomic mutation, but the entries may be
  // applied in arbitrary order (even between entries for the same row).
  // At least one entry must be specified, and in total the entries may
  // contain at most 100000 mutations.
  repeated Entry entries = 2;
}
// Response message for BigtableService.MutateRows.
message MutateRowsResponse {
  // The results for each Entry from the request, presented in the order
  // in which the entries were originally given.
  // Depending on how requests are batched during execution, it is possible
  // for one Entry to fail due to an error with another Entry. In the event
  // that this occurs, the same error will be reported for both entries.
  repeated google.rpc.Status statuses = 1;
}
// Request message for BigtableService.CheckAndMutateRow.
message CheckAndMutateRowRequest {
  // The unique name of the table to which the conditional mutation should be
  // applied.
  string table_name = 1;
  // The key of the row to which the conditional mutation should be applied.
  bytes row_key = 2;
  // The filter to be applied to the contents of the specified row. Depending
  // on whether or not any results are yielded, either "true_mutations" or
  // "false_mutations" will be executed. If unset, checks that the row contains
  // any values at all.
  RowFilter predicate_filter = 6;
  // Changes to be atomically applied to the specified row if "predicate_filter"
  // yields at least one cell when applied to "row_key". Entries are applied in
  // order, meaning that earlier mutations can be masked by later ones.
  // Must contain at least one entry if "false_mutations" is empty, and at most
  // 100000.
  repeated Mutation true_mutations = 4;
  // Changes to be atomically applied to the specified row if "predicate_filter"
  // does not yield any cells when applied to "row_key". Entries are applied in
  // order, meaning that earlier mutations can be masked by later ones.
  // Must contain at least one entry if "true_mutations" is empty, and at most
  // 100000.
  repeated Mutation false_mutations = 5;
}
// Response message for BigtableService.CheckAndMutateRow.
message CheckAndMutateRowResponse {
  // Whether or not the request's "predicate_filter" yielded any results for
  // the specified row.
  bool predicate_matched = 1;
}
// Request message for BigtableService.ReadModifyWriteRow.
message ReadModifyWriteRowRequest {
  // The unique name of the table to which the read/modify/write rules should be
  // applied.
  string table_name = 1;
  // The key of the row to which the read/modify/write rules should be applied.
  bytes row_key = 2;
  // Rules specifying how the specified row's contents are to be transformed
  // into writes. Entries are applied in order, meaning that earlier rules will
  // affect the results of later ones.
  repeated ReadModifyWriteRule rules = 3;
}