// bigtable_service.proto
  1. // Copyright 2016 Google Inc.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. syntax = "proto3";
  15. package google.bigtable.v1;
  16. import "google/api/annotations.proto";
  17. import "google/bigtable/v1/bigtable_data.proto";
  18. import "google/bigtable/v1/bigtable_service_messages.proto";
  19. import "google/protobuf/empty.proto";
  20. option java_generic_services = true;
  21. option java_multiple_files = true;
  22. option java_outer_classname = "BigtableServicesProto";
  23. option java_package = "com.google.bigtable.v1";
  24. // Service for reading from and writing to existing Bigtables.
  25. service BigtableService {
  26. // Streams back the contents of all requested rows, optionally applying
  27. // the same Reader filter to each. Depending on their size, rows may be
  28. // broken up across multiple responses, but atomicity of each row will still
  29. // be preserved.
  30. rpc ReadRows(ReadRowsRequest) returns (stream ReadRowsResponse) {
  31. option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:read" body: "*" };
  32. }
  33. // Returns a sample of row keys in the table. The returned row keys will
  34. // delimit contiguous sections of the table of approximately equal size,
  35. // which can be used to break up the data for distributed tasks like
  36. // mapreduces.
  37. rpc SampleRowKeys(SampleRowKeysRequest) returns (stream SampleRowKeysResponse) {
  38. option (google.api.http) = { get: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows:sampleKeys" };
  39. }
  40. // Mutates a row atomically. Cells already present in the row are left
  41. // unchanged unless explicitly changed by 'mutation'.
  42. rpc MutateRow(MutateRowRequest) returns (google.protobuf.Empty) {
  43. option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:mutate" body: "*" };
  44. }
  45. // Mutates multiple rows in a batch. Each individual row is mutated
  46. // atomically as in MutateRow, but the entire batch is not executed
  47. // atomically.
  48. rpc MutateRows(MutateRowsRequest) returns (MutateRowsResponse) {
  49. option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}:mutateRows" body: "*" };
  50. }
  51. // Mutates a row atomically based on the output of a predicate Reader filter.
  52. rpc CheckAndMutateRow(CheckAndMutateRowRequest) returns (CheckAndMutateRowResponse) {
  53. option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:checkAndMutate" body: "*" };
  54. }
  55. // Modifies a row atomically, reading the latest existing timestamp/value from
  56. // the specified columns and writing a new value at
  57. // max(existing timestamp, current server time) based on pre-defined
  58. // read/modify/write rules. Returns the new contents of all modified cells.
  59. rpc ReadModifyWriteRow(ReadModifyWriteRowRequest) returns (Row) {
  60. option (google.api.http) = { post: "/v1/{table_name=projects/*/zones/*/clusters/*/tables/*}/rows/{row_key}:readModifyWrite" body: "*" };
  61. }
  62. }