// Copyright 2016 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";

package google.bigtable.v2;

import "google/api/annotations.proto";
import "google/bigtable/v2/data.proto";
import "google/protobuf/wrappers.proto";
import "google/rpc/status.proto";

option java_multiple_files = true;
option java_outer_classname = "BigtableProto";
option java_package = "com.google.bigtable.v2";
  23. // Service for reading from and writing to existing Bigtable tables.
  24. service Bigtable {
  25. // Streams back the contents of all requested rows, optionally
  26. // applying the same Reader filter to each. Depending on their size,
  27. // rows and cells may be broken up across multiple responses, but
  28. // atomicity of each row will still be preserved. See the
  29. // ReadRowsResponse documentation for details.
  30. rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
  31. option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readRows" body: "*" };
  32. }
  33. // Returns a sample of row keys in the table. The returned row keys will
  34. // delimit contiguous sections of the table of approximately equal size,
  35. // which can be used to break up the data for distributed tasks like
  36. // mapreduces.
  37. rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) {
  38. option (google.api.http) = { get: "/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" };
  39. }
  40. // Mutates a row atomically. Cells already present in the row are left
  41. // unchanged unless explicitly changed by `mutation`.
  42. rpc MutateRow(MutateRowRequest) returns (MutateRowResponse) {
  43. option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" body: "*" };
  44. }
  45. // Mutates multiple rows in a batch. Each individual row is mutated
  46. // atomically as in MutateRow, but the entire batch is not executed
  47. // atomically.
  48. rpc MutateRows(MutateRowsRequest) returns (stream MutateRowsResponse) {
  49. option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" body: "*" };
  50. }
  51. // Mutates a row atomically based on the output of a predicate Reader filter.
  52. rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) {
  53. option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" body: "*" };
  54. }
  55. // Modifies a row atomically. The method reads the latest existing timestamp
  56. // and value from the specified columns and writes a new entry based on
  57. // pre-defined read/modify/write rules. The new value for the timestamp is the
  58. // greater of the existing timestamp or the current server time. The method
  59. // returns the new contents of all modified cells.
  60. rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (ReadModifyWriteRowResponse) {
  61. option (google.api.http) = { post: "/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" body: "*" };
  62. }
  63. }
  64. // Request message for Bigtable.ReadRows.
  65. message ReadRowsRequest {
  66. // The unique name of the table from which to read.
  67. // Values are of the form
  68. // projects/<project>/instances/<instance>/tables/<table>
  69. string table_name = 1;
  70. // The row keys and/or ranges to read. If not specified, reads from all rows.
  71. RowSet rows = 2;
  72. // The filter to apply to the contents of the specified row(s). If unset,
  73. // reads the entirety of each row.
  74. RowFilter filter = 3;
  75. // The read will terminate after committing to N rows' worth of results. The
  76. // default (zero) is to return all results.
  77. int64 rows_limit = 4;
  78. }
  79. // Response message for Bigtable.ReadRows.
  80. message ReadRowsResponse {
  81. // Specifies a piece of a row's contents returned as part of the read
  82. // response stream.
  83. message CellChunk {
  84. // The row key for this chunk of data. If the row key is empty,
  85. // this CellChunk is a continuation of the same row as the previous
  86. // CellChunk in the response stream, even if that CellChunk was in a
  87. // previous ReadRowsResponse message.
  88. bytes row_key = 1;
  89. // The column family name for this chunk of data. If this message
  90. // is not present this CellChunk is a continuation of the same column
  91. // family as the previous CellChunk. The empty string can occur as a
  92. // column family name in a response so clients must check
  93. // explicitly for the presence of this message, not just for
  94. // `family_name.value` being non-empty.
  95. google.protobuf.StringValue family_name = 2;
  96. // The column qualifier for this chunk of data. If this message
  97. // is not present, this CellChunk is a continuation of the same column
  98. // as the previous CellChunk. Column qualifiers may be empty so
  99. // clients must check for the presence of this message, not just
  100. // for `qualifier.value` being non-empty.
  101. google.protobuf.BytesValue qualifier = 3;
  102. // The cell's stored timestamp, which also uniquely identifies it
  103. // within its column. Values are always expressed in
  104. // microseconds, but individual tables may set a coarser
  105. // granularity to further restrict the allowed values. For
  106. // example, a table which specifies millisecond granularity will
  107. // only allow values of `timestamp_micros` which are multiples of
  108. // 1000. Timestamps are only set in the first CellChunk per cell
  109. // (for cells split into multiple chunks).
  110. int64 timestamp_micros = 4;
  111. // Labels applied to the cell by a
  112. // [RowFilter][google.bigtable.v2.RowFilter]. Labels are only set
  113. // on the first CellChunk per cell.
  114. repeated string labels = 5;
  115. // The value stored in the cell. Cell values can be split across
  116. // multiple CellChunks. In that case only the value field will be
  117. // set in CellChunks after the first: the timestamp and labels
  118. // will only be present in the first CellChunk, even if the first
  119. // CellChunk came in a previous ReadRowsResponse.
  120. bytes value = 6;
  121. // If this CellChunk is part of a chunked cell value and this is
  122. // not the final chunk of that cell, value_size will be set to the
  123. // total length of the cell value. The client can use this size
  124. // to pre-allocate memory to hold the full cell value.
  125. int32 value_size = 7;
  126. oneof row_status {
  127. // Indicates that the client should drop all previous chunks for
  128. // `row_key`, as it will be re-read from the beginning.
  129. bool reset_row = 8;
  130. // Indicates that the client can safely process all previous chunks for
  131. // `row_key`, as its data has been fully read.
  132. bool commit_row = 9;
  133. }
  134. }
  135. repeated CellChunk chunks = 1;
  136. // Optionally the server might return the row key of the last row it
  137. // has scanned. The client can use this to construct a more
  138. // efficient retry request if needed: any row keys or portions of
  139. // ranges less than this row key can be dropped from the request.
  140. // This is primarily useful for cases where the server has read a
  141. // lot of data that was filtered out since the last committed row
  142. // key, allowing the client to skip that work on a retry.
  143. bytes last_scanned_row_key = 2;
  144. }
  145. // Request message for Bigtable.SampleRowKeys.
  146. message SampleRowKeysRequest {
  147. // The unique name of the table from which to sample row keys.
  148. // Values are of the form
  149. // projects/<project>/instances/<instance>/tables/<table>
  150. string table_name = 1;
  151. }
  152. // Response message for Bigtable.SampleRowKeys.
  153. message SampleRowKeysResponse {
  154. // Sorted streamed sequence of sample row keys in the table. The table might
  155. // have contents before the first row key in the list and after the last one,
  156. // but a key containing the empty string indicates "end of table" and will be
  157. // the last response given, if present.
  158. // Note that row keys in this list may not have ever been written to or read
  159. // from, and users should therefore not make any assumptions about the row key
  160. // structure that are specific to their use case.
  161. bytes row_key = 1;
  162. // Approximate total storage space used by all rows in the table which precede
  163. // `row_key`. Buffering the contents of all rows between two subsequent
  164. // samples would require space roughly equal to the difference in their
  165. // `offset_bytes` fields.
  166. int64 offset_bytes = 2;
  167. }
  168. // Request message for Bigtable.MutateRow.
  169. message MutateRowRequest {
  170. // The unique name of the table to which the mutation should be applied.
  171. // Values are of the form
  172. // projects/<project>/instances/<instance>/tables/<table>
  173. string table_name = 1;
  174. // The key of the row to which the mutation should be applied.
  175. bytes row_key = 2;
  176. // Changes to be atomically applied to the specified row. Entries are applied
  177. // in order, meaning that earlier mutations can be masked by later ones.
  178. // Must contain at least one entry and at most 100000.
  179. repeated Mutation mutations = 3;
  180. }
  181. // Response message for Bigtable.MutateRow.
  182. message MutateRowResponse {
  183. }
  184. // Request message for BigtableService.MutateRows.
  185. message MutateRowsRequest {
  186. message Entry {
  187. // The key of the row to which the `mutations` should be applied.
  188. bytes row_key = 1;
  189. // Changes to be atomically applied to the specified row. Mutations are
  190. // applied in order, meaning that earlier mutations can be masked by
  191. // later ones.
  192. // You must specify at least one mutation.
  193. repeated Mutation mutations = 2;
  194. }
  195. // The unique name of the table to which the mutations should be applied.
  196. string table_name = 1;
  197. // The row keys and corresponding mutations to be applied in bulk.
  198. // Each entry is applied as an atomic mutation, but the entries may be
  199. // applied in arbitrary order (even between entries for the same row).
  200. // At least one entry must be specified, and in total the entries can
  201. // contain at most 100000 mutations.
  202. repeated Entry entries = 2;
  203. }
  204. // Response message for BigtableService.MutateRows.
  205. message MutateRowsResponse {
  206. message Entry {
  207. // The index into the original request's `entries` list of the Entry
  208. // for which a result is being reported.
  209. int64 index = 1;
  210. // The result of the request Entry identified by `index`.
  211. // Depending on how requests are batched during execution, it is possible
  212. // for one Entry to fail due to an error with another Entry. In the event
  213. // that this occurs, the same error will be reported for both entries.
  214. google.rpc.Status status = 2;
  215. }
  216. // One or more results for Entries from the batch request.
  217. repeated Entry entries = 1;
  218. }
  219. // Request message for Bigtable.CheckAndMutateRow.
  220. message CheckAndMutateRowRequest {
  221. // The unique name of the table to which the conditional mutation should be
  222. // applied.
  223. // Values are of the form
  224. // projects/<project>/instances/<instance>/tables/<table>
  225. string table_name = 1;
  226. // The key of the row to which the conditional mutation should be applied.
  227. bytes row_key = 2;
  228. // The filter to be applied to the contents of the specified row. Depending
  229. // on whether or not any results are yielded, either `true_mutations` or
  230. // `false_mutations` will be executed. If unset, checks that the row contains
  231. // any values at all.
  232. RowFilter predicate_filter = 6;
  233. // Changes to be atomically applied to the specified row if `predicate_filter`
  234. // yields at least one cell when applied to `row_key`. Entries are applied in
  235. // order, meaning that earlier mutations can be masked by later ones.
  236. // Must contain at least one entry if `false_mutations` is empty, and at most
  237. // 100000.
  238. repeated Mutation true_mutations = 4;
  239. // Changes to be atomically applied to the specified row if `predicate_filter`
  240. // does not yield any cells when applied to `row_key`. Entries are applied in
  241. // order, meaning that earlier mutations can be masked by later ones.
  242. // Must contain at least one entry if `true_mutations` is empty, and at most
  243. // 100000.
  244. repeated Mutation false_mutations = 5;
  245. }
  246. // Response message for Bigtable.CheckAndMutateRow.
  247. message CheckAndMutateRowResponse {
  248. // Whether or not the request's `predicate_filter` yielded any results for
  249. // the specified row.
  250. bool predicate_matched = 1;
  251. }
  252. // Request message for Bigtable.ReadModifyWriteRow.
  253. message ReadModifyWriteRowRequest {
  254. // The unique name of the table to which the read/modify/write rules should be
  255. // applied.
  256. // Values are of the form
  257. // projects/<project>/instances/<instance>/tables/<table>
  258. string table_name = 1;
  259. // The key of the row to which the read/modify/write rules should be applied.
  260. bytes row_key = 2;
  261. // Rules specifying how the specified row's contents are to be transformed
  262. // into writes. Entries are applied in order, meaning that earlier rules will
  263. // affect the results of later ones.
  264. repeated ReadModifyWriteRule rules = 3;
  265. }
  266. // Response message for Bigtable.ReadModifyWriteRow.
  267. message ReadModifyWriteRowResponse {
  268. // A Row containing the new contents of all cells modified by the request.
  269. Row row = 1;
  270. }