| // Code generated by protoc-gen-go. DO NOT EDIT. |
| // source: google/cloud/bigquery/storage/v1beta1/storage.proto |
| |
| package storage |
| |
| import ( |
| context "context" |
| fmt "fmt" |
| proto "github.com/golang/protobuf/proto" |
| empty "github.com/golang/protobuf/ptypes/empty" |
| timestamp "github.com/golang/protobuf/ptypes/timestamp" |
| grpc "google.golang.org/grpc" |
| math "math" |
| ) |
| |
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
| |
// Data format for input or output data.
type DataFormat int32

const (
	// Zero value: no explicit data format chosen.
	DataFormat_DATA_FORMAT_UNSPECIFIED DataFormat = 0
	// Avro is a standard open source row based file format.
	// See https://avro.apache.org/ for more details.
	DataFormat_AVRO DataFormat = 1
)

// DataFormat_name maps enum numbers to their proto names (used by String).
var DataFormat_name = map[int32]string{
	0: "DATA_FORMAT_UNSPECIFIED",
	1: "AVRO",
}

// DataFormat_value maps proto names back to enum numbers.
var DataFormat_value = map[string]int32{
	"DATA_FORMAT_UNSPECIFIED": 0,
	"AVRO":                    1,
}

// String returns the proto name of the enum value.
func (x DataFormat) String() string {
	return proto.EnumName(DataFormat_name, int32(x))
}

// EnumDescriptor returns the raw file descriptor bytes and the index path
// of this enum within it.
func (DataFormat) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_2a3518a93fa439fd, []int{0}
}
| |
// Stream is a single stream of table rows within a read session.
type Stream struct {
	// Name of the stream. In the form
	// `/projects/{project_id}/stream/{stream_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Rows in the stream.
	RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount,proto3" json:"row_count,omitempty"`
	// Internal fields used by the proto runtime; not part of the API.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *Stream) Reset() { *m = Stream{} }

// String renders the message in the compact proto text format.
func (m *Stream) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a proto message.
func (*Stream) ProtoMessage() {}

// Descriptor returns the raw file descriptor bytes and the index path of
// this message within it.
func (*Stream) Descriptor() ([]byte, []int) {
	return fileDescriptor_2a3518a93fa439fd, []int{0}
}

// XXX_Unmarshal is for internal use by the proto package.
func (m *Stream) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_Stream.Unmarshal(m, b)
}

// XXX_Marshal is for internal use by the proto package.
func (m *Stream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_Stream.Marshal(b, m, deterministic)
}

// XXX_Merge is for internal use by the proto package.
func (m *Stream) XXX_Merge(src proto.Message) {
	xxx_messageInfo_Stream.Merge(m, src)
}

// XXX_Size is for internal use by the proto package.
func (m *Stream) XXX_Size() int {
	return xxx_messageInfo_Stream.Size(m)
}

// XXX_DiscardUnknown is for internal use by the proto package.
func (m *Stream) XXX_DiscardUnknown() {
	xxx_messageInfo_Stream.DiscardUnknown(m)
}

// xxx_messageInfo_Stream caches marshaling information for Stream.
var xxx_messageInfo_Stream proto.InternalMessageInfo

// GetName returns the Name field, or "" if the receiver is nil.
func (m *Stream) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}

// GetRowCount returns the RowCount field, or 0 if the receiver is nil.
func (m *Stream) GetRowCount() int64 {
	if m != nil {
		return m.RowCount
	}
	return 0
}
| |
// StreamPosition identifies an offset within a stream.
type StreamPosition struct {
	// Stream being read from.
	Stream *Stream `protobuf:"bytes,1,opt,name=stream,proto3" json:"stream,omitempty"`
	// Position in the stream.
	Offset int64 `protobuf:"varint,2,opt,name=offset,proto3" json:"offset,omitempty"`
	// Internal fields used by the proto runtime; not part of the API.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *StreamPosition) Reset() { *m = StreamPosition{} }

// String renders the message in the compact proto text format.
func (m *StreamPosition) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a proto message.
func (*StreamPosition) ProtoMessage() {}

// Descriptor returns the raw file descriptor bytes and the index path of
// this message within it.
func (*StreamPosition) Descriptor() ([]byte, []int) {
	return fileDescriptor_2a3518a93fa439fd, []int{1}
}

// XXX_Unmarshal is for internal use by the proto package.
func (m *StreamPosition) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamPosition.Unmarshal(m, b)
}

// XXX_Marshal is for internal use by the proto package.
func (m *StreamPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamPosition.Marshal(b, m, deterministic)
}

// XXX_Merge is for internal use by the proto package.
func (m *StreamPosition) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamPosition.Merge(m, src)
}

// XXX_Size is for internal use by the proto package.
func (m *StreamPosition) XXX_Size() int {
	return xxx_messageInfo_StreamPosition.Size(m)
}

// XXX_DiscardUnknown is for internal use by the proto package.
func (m *StreamPosition) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamPosition.DiscardUnknown(m)
}

// xxx_messageInfo_StreamPosition caches marshaling information for StreamPosition.
var xxx_messageInfo_StreamPosition proto.InternalMessageInfo

// GetStream returns the Stream field, or nil if the receiver is nil.
func (m *StreamPosition) GetStream() *Stream {
	if m != nil {
		return m.Stream
	}
	return nil
}

// GetOffset returns the Offset field, or 0 if the receiver is nil.
func (m *StreamPosition) GetOffset() int64 {
	if m != nil {
		return m.Offset
	}
	return 0
}
| |
// ReadSession describes a session of reading from a table, including the
// schema in use and the set of streams created for the session.
type ReadSession struct {
	// Unique identifier for the session. In the form
	// `projects/{project_id}/sessions/{session_id}`
	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
	// Time at which the session becomes invalid. After this time, subsequent
	// requests to read this Session will return errors.
	ExpireTime *timestamp.Timestamp `protobuf:"bytes,2,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"`
	// The schema for the read. If read_options.selected_fields is set, the
	// schema may be different from the table schema as it will only contain
	// the selected fields.
	//
	// Types that are valid to be assigned to Schema:
	//	*ReadSession_AvroSchema
	Schema isReadSession_Schema `protobuf_oneof:"schema"`
	// Streams associated with this session.
	Streams []*Stream `protobuf:"bytes,4,rep,name=streams,proto3" json:"streams,omitempty"`
	// Table that this ReadSession is reading from.
	TableReference *TableReference `protobuf:"bytes,7,opt,name=table_reference,json=tableReference,proto3" json:"table_reference,omitempty"`
	// Any modifiers which are applied when reading from the specified table.
	TableModifiers *TableModifiers `protobuf:"bytes,8,opt,name=table_modifiers,json=tableModifiers,proto3" json:"table_modifiers,omitempty"`
	// Internal fields used by the proto runtime; not part of the API.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *ReadSession) Reset() { *m = ReadSession{} }

// String renders the message in the compact proto text format.
func (m *ReadSession) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a proto message.
func (*ReadSession) ProtoMessage() {}

// Descriptor returns the raw file descriptor bytes and the index path of
// this message within it.
func (*ReadSession) Descriptor() ([]byte, []int) {
	return fileDescriptor_2a3518a93fa439fd, []int{2}
}

// XXX_Unmarshal is for internal use by the proto package.
func (m *ReadSession) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadSession.Unmarshal(m, b)
}

// XXX_Marshal is for internal use by the proto package.
func (m *ReadSession) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadSession.Marshal(b, m, deterministic)
}

// XXX_Merge is for internal use by the proto package.
func (m *ReadSession) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadSession.Merge(m, src)
}

// XXX_Size is for internal use by the proto package.
func (m *ReadSession) XXX_Size() int {
	return xxx_messageInfo_ReadSession.Size(m)
}

// XXX_DiscardUnknown is for internal use by the proto package.
func (m *ReadSession) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadSession.DiscardUnknown(m)
}

// xxx_messageInfo_ReadSession caches marshaling information for ReadSession.
var xxx_messageInfo_ReadSession proto.InternalMessageInfo

// GetName returns the Name field, or "" if the receiver is nil.
func (m *ReadSession) GetName() string {
	if m != nil {
		return m.Name
	}
	return ""
}

// GetExpireTime returns the ExpireTime field, or nil if the receiver is nil.
func (m *ReadSession) GetExpireTime() *timestamp.Timestamp {
	if m != nil {
		return m.ExpireTime
	}
	return nil
}

// isReadSession_Schema is the interface satisfied by all valid wrappers of
// the `schema` oneof field.
type isReadSession_Schema interface {
	isReadSession_Schema()
}

// ReadSession_AvroSchema wraps AvroSchema as a `schema` oneof member.
type ReadSession_AvroSchema struct {
	AvroSchema *AvroSchema `protobuf:"bytes,5,opt,name=avro_schema,json=avroSchema,proto3,oneof"`
}

func (*ReadSession_AvroSchema) isReadSession_Schema() {}

// GetSchema returns the set `schema` oneof wrapper, or nil if the receiver
// is nil or no member is set.
func (m *ReadSession) GetSchema() isReadSession_Schema {
	if m != nil {
		return m.Schema
	}
	return nil
}

// GetAvroSchema returns the AvroSchema oneof member if it is the one set,
// otherwise nil.
func (m *ReadSession) GetAvroSchema() *AvroSchema {
	if x, ok := m.GetSchema().(*ReadSession_AvroSchema); ok {
		return x.AvroSchema
	}
	return nil
}

// GetStreams returns the Streams field, or nil if the receiver is nil.
func (m *ReadSession) GetStreams() []*Stream {
	if m != nil {
		return m.Streams
	}
	return nil
}

// GetTableReference returns the TableReference field, or nil if the
// receiver is nil.
func (m *ReadSession) GetTableReference() *TableReference {
	if m != nil {
		return m.TableReference
	}
	return nil
}

// GetTableModifiers returns the TableModifiers field, or nil if the
// receiver is nil.
func (m *ReadSession) GetTableModifiers() *TableModifiers {
	if m != nil {
		return m.TableModifiers
	}
	return nil
}

// XXX_OneofFuncs is for the internal use of the proto package.
// It exposes the marshal/unmarshal/size functions for the `schema` oneof.
func (*ReadSession) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _ReadSession_OneofMarshaler, _ReadSession_OneofUnmarshaler, _ReadSession_OneofSizer, []interface{}{
		(*ReadSession_AvroSchema)(nil),
	}
}

// _ReadSession_OneofMarshaler encodes whichever `schema` oneof member is
// set into the buffer.
func _ReadSession_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*ReadSession)
	// schema
	switch x := m.Schema.(type) {
	case *ReadSession_AvroSchema:
		// Field number 5, wire type "bytes" (length-delimited message).
		b.EncodeVarint(5<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.AvroSchema); err != nil {
			return err
		}
	case nil:
		// No oneof member set; encode nothing.
	default:
		return fmt.Errorf("ReadSession.Schema has unexpected type %T", x)
	}
	return nil
}

// _ReadSession_OneofUnmarshaler decodes a `schema` oneof field from the
// buffer. It reports whether the tag belonged to this oneof.
func _ReadSession_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*ReadSession)
	switch tag {
	case 5: // schema.avro_schema
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(AvroSchema)
		err := b.DecodeMessage(msg)
		m.Schema = &ReadSession_AvroSchema{msg}
		return true, err
	default:
		return false, nil
	}
}

// _ReadSession_OneofSizer reports the encoded size of the set `schema`
// oneof member.
func _ReadSession_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*ReadSession)
	// schema
	switch x := m.Schema.(type) {
	case *ReadSession_AvroSchema:
		s := proto.Size(x.AvroSchema)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
		// No oneof member set; contributes no bytes.
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
| |
// CreateReadSessionRequest is the request for creating a new ReadSession.
type CreateReadSessionRequest struct {
	// Required. Reference to the table to read.
	TableReference *TableReference `protobuf:"bytes,1,opt,name=table_reference,json=tableReference,proto3" json:"table_reference,omitempty"`
	// Required. Project which this ReadSession is associated with. This is the
	// project that will be billed for usage.
	Parent string `protobuf:"bytes,6,opt,name=parent,proto3" json:"parent,omitempty"`
	// Optional. Any modifiers to the Table (e.g. snapshot timestamp).
	TableModifiers *TableModifiers `protobuf:"bytes,2,opt,name=table_modifiers,json=tableModifiers,proto3" json:"table_modifiers,omitempty"`
	// Optional. Initial number of streams. If unset or 0, we will
	// provide a value of streams so as to produce reasonable throughput. Must be
	// non-negative. The number of streams may be lower than the requested number,
	// depending on the amount of parallelism that is reasonable for the table and
	// the maximum amount of parallelism allowed by the system.
	//
	// Streams must be read starting from offset 0.
	RequestedStreams int32 `protobuf:"varint,3,opt,name=requested_streams,json=requestedStreams,proto3" json:"requested_streams,omitempty"`
	// Optional. Read options for this session (e.g. column selection, filters).
	ReadOptions *TableReadOptions `protobuf:"bytes,4,opt,name=read_options,json=readOptions,proto3" json:"read_options,omitempty"`
	// Data output format. Currently default to Avro.
	Format DataFormat `protobuf:"varint,5,opt,name=format,proto3,enum=google.cloud.bigquery.storage.v1beta1.DataFormat" json:"format,omitempty"`
	// Internal fields used by the proto runtime; not part of the API.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *CreateReadSessionRequest) Reset() { *m = CreateReadSessionRequest{} }

// String renders the message in the compact proto text format.
func (m *CreateReadSessionRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a proto message.
func (*CreateReadSessionRequest) ProtoMessage() {}

// Descriptor returns the raw file descriptor bytes and the index path of
// this message within it.
func (*CreateReadSessionRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_2a3518a93fa439fd, []int{3}
}

// XXX_Unmarshal is for internal use by the proto package.
func (m *CreateReadSessionRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_CreateReadSessionRequest.Unmarshal(m, b)
}

// XXX_Marshal is for internal use by the proto package.
func (m *CreateReadSessionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_CreateReadSessionRequest.Marshal(b, m, deterministic)
}

// XXX_Merge is for internal use by the proto package.
func (m *CreateReadSessionRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_CreateReadSessionRequest.Merge(m, src)
}

// XXX_Size is for internal use by the proto package.
func (m *CreateReadSessionRequest) XXX_Size() int {
	return xxx_messageInfo_CreateReadSessionRequest.Size(m)
}

// XXX_DiscardUnknown is for internal use by the proto package.
func (m *CreateReadSessionRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_CreateReadSessionRequest.DiscardUnknown(m)
}

// xxx_messageInfo_CreateReadSessionRequest caches marshaling information
// for CreateReadSessionRequest.
var xxx_messageInfo_CreateReadSessionRequest proto.InternalMessageInfo

// GetTableReference returns the TableReference field, or nil if the
// receiver is nil.
func (m *CreateReadSessionRequest) GetTableReference() *TableReference {
	if m != nil {
		return m.TableReference
	}
	return nil
}

// GetParent returns the Parent field, or "" if the receiver is nil.
func (m *CreateReadSessionRequest) GetParent() string {
	if m != nil {
		return m.Parent
	}
	return ""
}

// GetTableModifiers returns the TableModifiers field, or nil if the
// receiver is nil.
func (m *CreateReadSessionRequest) GetTableModifiers() *TableModifiers {
	if m != nil {
		return m.TableModifiers
	}
	return nil
}

// GetRequestedStreams returns the RequestedStreams field, or 0 if the
// receiver is nil.
func (m *CreateReadSessionRequest) GetRequestedStreams() int32 {
	if m != nil {
		return m.RequestedStreams
	}
	return 0
}

// GetReadOptions returns the ReadOptions field, or nil if the receiver is nil.
func (m *CreateReadSessionRequest) GetReadOptions() *TableReadOptions {
	if m != nil {
		return m.ReadOptions
	}
	return nil
}

// GetFormat returns the Format field, or DATA_FORMAT_UNSPECIFIED if the
// receiver is nil.
func (m *CreateReadSessionRequest) GetFormat() DataFormat {
	if m != nil {
		return m.Format
	}
	return DataFormat_DATA_FORMAT_UNSPECIFIED
}
| |
// ReadRowsRequest is the request for reading rows from a stream position.
type ReadRowsRequest struct {
	// Required. Identifier of the position in the stream to start reading from.
	// The offset requested must be less than the last row read from ReadRows.
	// Requesting a larger offset is undefined.
	ReadPosition *StreamPosition `protobuf:"bytes,1,opt,name=read_position,json=readPosition,proto3" json:"read_position,omitempty"`
	// Internal fields used by the proto runtime; not part of the API.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *ReadRowsRequest) Reset() { *m = ReadRowsRequest{} }

// String renders the message in the compact proto text format.
func (m *ReadRowsRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a proto message.
func (*ReadRowsRequest) ProtoMessage() {}

// Descriptor returns the raw file descriptor bytes and the index path of
// this message within it.
func (*ReadRowsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_2a3518a93fa439fd, []int{4}
}

// XXX_Unmarshal is for internal use by the proto package.
func (m *ReadRowsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadRowsRequest.Unmarshal(m, b)
}

// XXX_Marshal is for internal use by the proto package.
func (m *ReadRowsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadRowsRequest.Marshal(b, m, deterministic)
}

// XXX_Merge is for internal use by the proto package.
func (m *ReadRowsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadRowsRequest.Merge(m, src)
}

// XXX_Size is for internal use by the proto package.
func (m *ReadRowsRequest) XXX_Size() int {
	return xxx_messageInfo_ReadRowsRequest.Size(m)
}

// XXX_DiscardUnknown is for internal use by the proto package.
func (m *ReadRowsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadRowsRequest.DiscardUnknown(m)
}

// xxx_messageInfo_ReadRowsRequest caches marshaling information for
// ReadRowsRequest.
var xxx_messageInfo_ReadRowsRequest proto.InternalMessageInfo

// GetReadPosition returns the ReadPosition field, or nil if the receiver
// is nil.
func (m *ReadRowsRequest) GetReadPosition() *StreamPosition {
	if m != nil {
		return m.ReadPosition
	}
	return nil
}
| |
// StreamStatus carries progress information about a single stream.
type StreamStatus struct {
	// Number of estimated rows in the current stream. May change over time as
	// different readers in the stream progress at rates which are relatively fast
	// or slow.
	EstimatedRowCount int64 `protobuf:"varint,1,opt,name=estimated_row_count,json=estimatedRowCount,proto3" json:"estimated_row_count,omitempty"`
	// Internal fields used by the proto runtime; not part of the API.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *StreamStatus) Reset() { *m = StreamStatus{} }

// String renders the message in the compact proto text format.
func (m *StreamStatus) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a proto message.
func (*StreamStatus) ProtoMessage() {}

// Descriptor returns the raw file descriptor bytes and the index path of
// this message within it.
func (*StreamStatus) Descriptor() ([]byte, []int) {
	return fileDescriptor_2a3518a93fa439fd, []int{5}
}

// XXX_Unmarshal is for internal use by the proto package.
func (m *StreamStatus) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_StreamStatus.Unmarshal(m, b)
}

// XXX_Marshal is for internal use by the proto package.
func (m *StreamStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_StreamStatus.Marshal(b, m, deterministic)
}

// XXX_Merge is for internal use by the proto package.
func (m *StreamStatus) XXX_Merge(src proto.Message) {
	xxx_messageInfo_StreamStatus.Merge(m, src)
}

// XXX_Size is for internal use by the proto package.
func (m *StreamStatus) XXX_Size() int {
	return xxx_messageInfo_StreamStatus.Size(m)
}

// XXX_DiscardUnknown is for internal use by the proto package.
func (m *StreamStatus) XXX_DiscardUnknown() {
	xxx_messageInfo_StreamStatus.DiscardUnknown(m)
}

// xxx_messageInfo_StreamStatus caches marshaling information for StreamStatus.
var xxx_messageInfo_StreamStatus proto.InternalMessageInfo

// GetEstimatedRowCount returns the EstimatedRowCount field, or 0 if the
// receiver is nil.
func (m *StreamStatus) GetEstimatedRowCount() int64 {
	if m != nil {
		return m.EstimatedRowCount
	}
	return 0
}
| |
// Information on whether the current connection is being throttled.
type ThrottleStatus struct {
	// How much this connection is being throttled.
	// 0 is no throttling, 100 is completely throttled.
	ThrottlePercent int32 `protobuf:"varint,1,opt,name=throttle_percent,json=throttlePercent,proto3" json:"throttle_percent,omitempty"`
	// Internal fields used by the proto runtime; not part of the API.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *ThrottleStatus) Reset() { *m = ThrottleStatus{} }

// String renders the message in the compact proto text format.
func (m *ThrottleStatus) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a proto message.
func (*ThrottleStatus) ProtoMessage() {}

// Descriptor returns the raw file descriptor bytes and the index path of
// this message within it.
func (*ThrottleStatus) Descriptor() ([]byte, []int) {
	return fileDescriptor_2a3518a93fa439fd, []int{6}
}

// XXX_Unmarshal is for internal use by the proto package.
func (m *ThrottleStatus) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ThrottleStatus.Unmarshal(m, b)
}

// XXX_Marshal is for internal use by the proto package.
func (m *ThrottleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ThrottleStatus.Marshal(b, m, deterministic)
}

// XXX_Merge is for internal use by the proto package.
func (m *ThrottleStatus) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ThrottleStatus.Merge(m, src)
}

// XXX_Size is for internal use by the proto package.
func (m *ThrottleStatus) XXX_Size() int {
	return xxx_messageInfo_ThrottleStatus.Size(m)
}

// XXX_DiscardUnknown is for internal use by the proto package.
func (m *ThrottleStatus) XXX_DiscardUnknown() {
	xxx_messageInfo_ThrottleStatus.DiscardUnknown(m)
}

// xxx_messageInfo_ThrottleStatus caches marshaling information for
// ThrottleStatus.
var xxx_messageInfo_ThrottleStatus proto.InternalMessageInfo

// GetThrottlePercent returns the ThrottlePercent field, or 0 if the
// receiver is nil.
func (m *ThrottleStatus) GetThrottlePercent() int32 {
	if m != nil {
		return m.ThrottlePercent
	}
	return 0
}
| |
// ReadRowsResponse is a single response message in a ReadRows stream,
// carrying a batch of rows plus stream and throttling status.
type ReadRowsResponse struct {
	// Row data is returned in one of the format-specific oneof members.
	//
	// Types that are valid to be assigned to Rows:
	//	*ReadRowsResponse_AvroRows
	Rows isReadRowsResponse_Rows `protobuf_oneof:"rows"`
	// Estimated stream statistics.
	Status *StreamStatus `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"`
	// Throttling status. If unset, the latest response still describes
	// the current throttling status.
	ThrottleStatus *ThrottleStatus `protobuf:"bytes,5,opt,name=throttle_status,json=throttleStatus,proto3" json:"throttle_status,omitempty"`
	// Internal fields used by the proto runtime; not part of the API.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *ReadRowsResponse) Reset() { *m = ReadRowsResponse{} }

// String renders the message in the compact proto text format.
func (m *ReadRowsResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a proto message.
func (*ReadRowsResponse) ProtoMessage() {}

// Descriptor returns the raw file descriptor bytes and the index path of
// this message within it.
func (*ReadRowsResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_2a3518a93fa439fd, []int{7}
}

// XXX_Unmarshal is for internal use by the proto package.
func (m *ReadRowsResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ReadRowsResponse.Unmarshal(m, b)
}

// XXX_Marshal is for internal use by the proto package.
func (m *ReadRowsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ReadRowsResponse.Marshal(b, m, deterministic)
}

// XXX_Merge is for internal use by the proto package.
func (m *ReadRowsResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ReadRowsResponse.Merge(m, src)
}

// XXX_Size is for internal use by the proto package.
func (m *ReadRowsResponse) XXX_Size() int {
	return xxx_messageInfo_ReadRowsResponse.Size(m)
}

// XXX_DiscardUnknown is for internal use by the proto package.
func (m *ReadRowsResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ReadRowsResponse.DiscardUnknown(m)
}

// xxx_messageInfo_ReadRowsResponse caches marshaling information for
// ReadRowsResponse.
var xxx_messageInfo_ReadRowsResponse proto.InternalMessageInfo

// isReadRowsResponse_Rows is the interface satisfied by all valid wrappers
// of the `rows` oneof field.
type isReadRowsResponse_Rows interface {
	isReadRowsResponse_Rows()
}

// ReadRowsResponse_AvroRows wraps AvroRows as a `rows` oneof member.
type ReadRowsResponse_AvroRows struct {
	AvroRows *AvroRows `protobuf:"bytes,3,opt,name=avro_rows,json=avroRows,proto3,oneof"`
}

func (*ReadRowsResponse_AvroRows) isReadRowsResponse_Rows() {}

// GetRows returns the set `rows` oneof wrapper, or nil if the receiver is
// nil or no member is set.
func (m *ReadRowsResponse) GetRows() isReadRowsResponse_Rows {
	if m != nil {
		return m.Rows
	}
	return nil
}

// GetAvroRows returns the AvroRows oneof member if it is the one set,
// otherwise nil.
func (m *ReadRowsResponse) GetAvroRows() *AvroRows {
	if x, ok := m.GetRows().(*ReadRowsResponse_AvroRows); ok {
		return x.AvroRows
	}
	return nil
}

// GetStatus returns the Status field, or nil if the receiver is nil.
func (m *ReadRowsResponse) GetStatus() *StreamStatus {
	if m != nil {
		return m.Status
	}
	return nil
}

// GetThrottleStatus returns the ThrottleStatus field, or nil if the
// receiver is nil.
func (m *ReadRowsResponse) GetThrottleStatus() *ThrottleStatus {
	if m != nil {
		return m.ThrottleStatus
	}
	return nil
}

// XXX_OneofFuncs is for the internal use of the proto package.
// It exposes the marshal/unmarshal/size functions for the `rows` oneof.
func (*ReadRowsResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
	return _ReadRowsResponse_OneofMarshaler, _ReadRowsResponse_OneofUnmarshaler, _ReadRowsResponse_OneofSizer, []interface{}{
		(*ReadRowsResponse_AvroRows)(nil),
	}
}

// _ReadRowsResponse_OneofMarshaler encodes whichever `rows` oneof member is
// set into the buffer.
func _ReadRowsResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
	m := msg.(*ReadRowsResponse)
	// rows
	switch x := m.Rows.(type) {
	case *ReadRowsResponse_AvroRows:
		// Field number 3, wire type "bytes" (length-delimited message).
		b.EncodeVarint(3<<3 | proto.WireBytes)
		if err := b.EncodeMessage(x.AvroRows); err != nil {
			return err
		}
	case nil:
		// No oneof member set; encode nothing.
	default:
		return fmt.Errorf("ReadRowsResponse.Rows has unexpected type %T", x)
	}
	return nil
}

// _ReadRowsResponse_OneofUnmarshaler decodes a `rows` oneof field from the
// buffer. It reports whether the tag belonged to this oneof.
func _ReadRowsResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
	m := msg.(*ReadRowsResponse)
	switch tag {
	case 3: // rows.avro_rows
		if wire != proto.WireBytes {
			return true, proto.ErrInternalBadWireType
		}
		msg := new(AvroRows)
		err := b.DecodeMessage(msg)
		m.Rows = &ReadRowsResponse_AvroRows{msg}
		return true, err
	default:
		return false, nil
	}
}

// _ReadRowsResponse_OneofSizer reports the encoded size of the set `rows`
// oneof member.
func _ReadRowsResponse_OneofSizer(msg proto.Message) (n int) {
	m := msg.(*ReadRowsResponse)
	// rows
	switch x := m.Rows.(type) {
	case *ReadRowsResponse_AvroRows:
		s := proto.Size(x.AvroRows)
		n += 1 // tag and wire
		n += proto.SizeVarint(uint64(s))
		n += s
	case nil:
		// No oneof member set; contributes no bytes.
	default:
		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
	}
	return n
}
| |
// BatchCreateReadSessionStreamsRequest asks the service to add streams to
// an existing read session.
type BatchCreateReadSessionStreamsRequest struct {
	// Required. Must be a non-expired session obtained from a call to
	// CreateReadSession. Only the name field needs to be set.
	Session *ReadSession `protobuf:"bytes,1,opt,name=session,proto3" json:"session,omitempty"`
	// Required. Number of new streams requested. Must be positive.
	// Number of added streams may be less than this, see CreateReadSessionRequest
	// for more information.
	RequestedStreams int32 `protobuf:"varint,2,opt,name=requested_streams,json=requestedStreams,proto3" json:"requested_streams,omitempty"`
	// Internal fields used by the proto runtime; not part of the API.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *BatchCreateReadSessionStreamsRequest) Reset() { *m = BatchCreateReadSessionStreamsRequest{} }

// String renders the message in the compact proto text format.
func (m *BatchCreateReadSessionStreamsRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a proto message.
func (*BatchCreateReadSessionStreamsRequest) ProtoMessage() {}

// Descriptor returns the raw file descriptor bytes and the index path of
// this message within it.
func (*BatchCreateReadSessionStreamsRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_2a3518a93fa439fd, []int{8}
}

// XXX_Unmarshal is for internal use by the proto package.
func (m *BatchCreateReadSessionStreamsRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BatchCreateReadSessionStreamsRequest.Unmarshal(m, b)
}

// XXX_Marshal is for internal use by the proto package.
func (m *BatchCreateReadSessionStreamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BatchCreateReadSessionStreamsRequest.Marshal(b, m, deterministic)
}

// XXX_Merge is for internal use by the proto package.
func (m *BatchCreateReadSessionStreamsRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BatchCreateReadSessionStreamsRequest.Merge(m, src)
}

// XXX_Size is for internal use by the proto package.
func (m *BatchCreateReadSessionStreamsRequest) XXX_Size() int {
	return xxx_messageInfo_BatchCreateReadSessionStreamsRequest.Size(m)
}

// XXX_DiscardUnknown is for internal use by the proto package.
func (m *BatchCreateReadSessionStreamsRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_BatchCreateReadSessionStreamsRequest.DiscardUnknown(m)
}

// xxx_messageInfo_BatchCreateReadSessionStreamsRequest caches marshaling
// information for BatchCreateReadSessionStreamsRequest.
var xxx_messageInfo_BatchCreateReadSessionStreamsRequest proto.InternalMessageInfo

// GetSession returns the Session field, or nil if the receiver is nil.
func (m *BatchCreateReadSessionStreamsRequest) GetSession() *ReadSession {
	if m != nil {
		return m.Session
	}
	return nil
}

// GetRequestedStreams returns the RequestedStreams field, or 0 if the
// receiver is nil.
func (m *BatchCreateReadSessionStreamsRequest) GetRequestedStreams() int32 {
	if m != nil {
		return m.RequestedStreams
	}
	return 0
}
| |
// BatchCreateReadSessionStreamsResponse returns the streams that were added
// to a session.
type BatchCreateReadSessionStreamsResponse struct {
	// Newly added streams.
	Streams []*Stream `protobuf:"bytes,1,rep,name=streams,proto3" json:"streams,omitempty"`
	// Internal fields used by the proto runtime; not part of the API.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *BatchCreateReadSessionStreamsResponse) Reset() { *m = BatchCreateReadSessionStreamsResponse{} }

// String renders the message in the compact proto text format.
func (m *BatchCreateReadSessionStreamsResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a proto message.
func (*BatchCreateReadSessionStreamsResponse) ProtoMessage() {}

// Descriptor returns the raw file descriptor bytes and the index path of
// this message within it.
func (*BatchCreateReadSessionStreamsResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_2a3518a93fa439fd, []int{9}
}

// XXX_Unmarshal is for internal use by the proto package.
func (m *BatchCreateReadSessionStreamsResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BatchCreateReadSessionStreamsResponse.Unmarshal(m, b)
}

// XXX_Marshal is for internal use by the proto package.
func (m *BatchCreateReadSessionStreamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BatchCreateReadSessionStreamsResponse.Marshal(b, m, deterministic)
}

// XXX_Merge is for internal use by the proto package.
func (m *BatchCreateReadSessionStreamsResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BatchCreateReadSessionStreamsResponse.Merge(m, src)
}

// XXX_Size is for internal use by the proto package.
func (m *BatchCreateReadSessionStreamsResponse) XXX_Size() int {
	return xxx_messageInfo_BatchCreateReadSessionStreamsResponse.Size(m)
}

// XXX_DiscardUnknown is for internal use by the proto package.
func (m *BatchCreateReadSessionStreamsResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_BatchCreateReadSessionStreamsResponse.DiscardUnknown(m)
}

// xxx_messageInfo_BatchCreateReadSessionStreamsResponse caches marshaling
// information for BatchCreateReadSessionStreamsResponse.
var xxx_messageInfo_BatchCreateReadSessionStreamsResponse proto.InternalMessageInfo

// GetStreams returns the Streams field, or nil if the receiver is nil.
func (m *BatchCreateReadSessionStreamsResponse) GetStreams() []*Stream {
	if m != nil {
		return m.Streams
	}
	return nil
}
| |
// FinalizeStreamRequest names the stream to finalize.
type FinalizeStreamRequest struct {
	// Stream to finalize.
	Stream *Stream `protobuf:"bytes,2,opt,name=stream,proto3" json:"stream,omitempty"`
	// Internal fields used by the proto runtime; not part of the API.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *FinalizeStreamRequest) Reset() { *m = FinalizeStreamRequest{} }

// String renders the message in the compact proto text format.
func (m *FinalizeStreamRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a proto message.
func (*FinalizeStreamRequest) ProtoMessage() {}

// Descriptor returns the raw file descriptor bytes and the index path of
// this message within it.
func (*FinalizeStreamRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_2a3518a93fa439fd, []int{10}
}

// XXX_Unmarshal is for internal use by the proto package.
func (m *FinalizeStreamRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_FinalizeStreamRequest.Unmarshal(m, b)
}

// XXX_Marshal is for internal use by the proto package.
func (m *FinalizeStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_FinalizeStreamRequest.Marshal(b, m, deterministic)
}

// XXX_Merge is for internal use by the proto package.
func (m *FinalizeStreamRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_FinalizeStreamRequest.Merge(m, src)
}

// XXX_Size is for internal use by the proto package.
func (m *FinalizeStreamRequest) XXX_Size() int {
	return xxx_messageInfo_FinalizeStreamRequest.Size(m)
}

// XXX_DiscardUnknown is for internal use by the proto package.
func (m *FinalizeStreamRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_FinalizeStreamRequest.DiscardUnknown(m)
}

// xxx_messageInfo_FinalizeStreamRequest caches marshaling information for
// FinalizeStreamRequest.
var xxx_messageInfo_FinalizeStreamRequest proto.InternalMessageInfo

// GetStream returns the Stream field, or nil if the receiver is nil.
func (m *FinalizeStreamRequest) GetStream() *Stream {
	if m != nil {
		return m.Stream
	}
	return nil
}
| |
// SplitReadStreamRequest names the stream to split into two.
type SplitReadStreamRequest struct {
	// Stream to split.
	OriginalStream *Stream `protobuf:"bytes,1,opt,name=original_stream,json=originalStream,proto3" json:"original_stream,omitempty"`
	// Internal fields used by the proto runtime; not part of the API.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *SplitReadStreamRequest) Reset() { *m = SplitReadStreamRequest{} }

// String renders the message in the compact proto text format.
func (m *SplitReadStreamRequest) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a proto message.
func (*SplitReadStreamRequest) ProtoMessage() {}

// Descriptor returns the raw file descriptor bytes and the index path of
// this message within it.
func (*SplitReadStreamRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_2a3518a93fa439fd, []int{11}
}

// XXX_Unmarshal is for internal use by the proto package.
func (m *SplitReadStreamRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SplitReadStreamRequest.Unmarshal(m, b)
}

// XXX_Marshal is for internal use by the proto package.
func (m *SplitReadStreamRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SplitReadStreamRequest.Marshal(b, m, deterministic)
}

// XXX_Merge is for internal use by the proto package.
func (m *SplitReadStreamRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SplitReadStreamRequest.Merge(m, src)
}

// XXX_Size is for internal use by the proto package.
func (m *SplitReadStreamRequest) XXX_Size() int {
	return xxx_messageInfo_SplitReadStreamRequest.Size(m)
}

// XXX_DiscardUnknown is for internal use by the proto package.
func (m *SplitReadStreamRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_SplitReadStreamRequest.DiscardUnknown(m)
}

// xxx_messageInfo_SplitReadStreamRequest caches marshaling information for
// SplitReadStreamRequest.
var xxx_messageInfo_SplitReadStreamRequest proto.InternalMessageInfo

// GetOriginalStream returns the OriginalStream field, or nil if the
// receiver is nil.
func (m *SplitReadStreamRequest) GetOriginalStream() *Stream {
	if m != nil {
		return m.OriginalStream
	}
	return nil
}
| |
// SplitReadStreamResponse carries the two streams produced by a split.
type SplitReadStreamResponse struct {
	// Primary stream. Will contain the beginning portion of
	// |original_stream|.
	PrimaryStream *Stream `protobuf:"bytes,1,opt,name=primary_stream,json=primaryStream,proto3" json:"primary_stream,omitempty"`
	// Remainder stream. Will contain the tail of |original_stream|.
	RemainderStream *Stream `protobuf:"bytes,2,opt,name=remainder_stream,json=remainderStream,proto3" json:"remainder_stream,omitempty"`
	// Internal fields used by the proto runtime; not part of the API.
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

// Reset restores the message to its zero value.
func (m *SplitReadStreamResponse) Reset() { *m = SplitReadStreamResponse{} }

// String renders the message in the compact proto text format.
func (m *SplitReadStreamResponse) String() string { return proto.CompactTextString(m) }

// ProtoMessage marks the type as a proto message.
func (*SplitReadStreamResponse) ProtoMessage() {}

// Descriptor returns the raw file descriptor bytes and the index path of
// this message within it.
func (*SplitReadStreamResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_2a3518a93fa439fd, []int{12}
}

// XXX_Unmarshal is for internal use by the proto package.
func (m *SplitReadStreamResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_SplitReadStreamResponse.Unmarshal(m, b)
}

// XXX_Marshal is for internal use by the proto package.
func (m *SplitReadStreamResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_SplitReadStreamResponse.Marshal(b, m, deterministic)
}

// XXX_Merge is for internal use by the proto package.
func (m *SplitReadStreamResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_SplitReadStreamResponse.Merge(m, src)
}

// XXX_Size is for internal use by the proto package.
func (m *SplitReadStreamResponse) XXX_Size() int {
	return xxx_messageInfo_SplitReadStreamResponse.Size(m)
}

// XXX_DiscardUnknown is for internal use by the proto package.
func (m *SplitReadStreamResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_SplitReadStreamResponse.DiscardUnknown(m)
}

// xxx_messageInfo_SplitReadStreamResponse caches marshaling information for
// SplitReadStreamResponse.
var xxx_messageInfo_SplitReadStreamResponse proto.InternalMessageInfo

// GetPrimaryStream returns the PrimaryStream field, or nil if the receiver
// is nil.
func (m *SplitReadStreamResponse) GetPrimaryStream() *Stream {
	if m != nil {
		return m.PrimaryStream
	}
	return nil
}

// GetRemainderStream returns the RemainderStream field, or nil if the
// receiver is nil.
func (m *SplitReadStreamResponse) GetRemainderStream() *Stream {
	if m != nil {
		return m.RemainderStream
	}
	return nil
}
| |
// init registers the DataFormat enum and every message type in this
// file with the proto runtime under their fully-qualified names, so
// they can be looked up by name (e.g. for Any unpacking and reflection).
func init() {
	proto.RegisterEnum("google.cloud.bigquery.storage.v1beta1.DataFormat", DataFormat_name, DataFormat_value)
	proto.RegisterType((*Stream)(nil), "google.cloud.bigquery.storage.v1beta1.Stream")
	proto.RegisterType((*StreamPosition)(nil), "google.cloud.bigquery.storage.v1beta1.StreamPosition")
	proto.RegisterType((*ReadSession)(nil), "google.cloud.bigquery.storage.v1beta1.ReadSession")
	proto.RegisterType((*CreateReadSessionRequest)(nil), "google.cloud.bigquery.storage.v1beta1.CreateReadSessionRequest")
	proto.RegisterType((*ReadRowsRequest)(nil), "google.cloud.bigquery.storage.v1beta1.ReadRowsRequest")
	proto.RegisterType((*StreamStatus)(nil), "google.cloud.bigquery.storage.v1beta1.StreamStatus")
	proto.RegisterType((*ThrottleStatus)(nil), "google.cloud.bigquery.storage.v1beta1.ThrottleStatus")
	proto.RegisterType((*ReadRowsResponse)(nil), "google.cloud.bigquery.storage.v1beta1.ReadRowsResponse")
	proto.RegisterType((*BatchCreateReadSessionStreamsRequest)(nil), "google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsRequest")
	proto.RegisterType((*BatchCreateReadSessionStreamsResponse)(nil), "google.cloud.bigquery.storage.v1beta1.BatchCreateReadSessionStreamsResponse")
	proto.RegisterType((*FinalizeStreamRequest)(nil), "google.cloud.bigquery.storage.v1beta1.FinalizeStreamRequest")
	proto.RegisterType((*SplitReadStreamRequest)(nil), "google.cloud.bigquery.storage.v1beta1.SplitReadStreamRequest")
	proto.RegisterType((*SplitReadStreamResponse)(nil), "google.cloud.bigquery.storage.v1beta1.SplitReadStreamResponse")
}
| |
// init registers the source .proto file's compressed descriptor with
// the proto runtime under its import path.
func init() {
	proto.RegisterFile("google/cloud/bigquery/storage/v1beta1/storage.proto", fileDescriptor_2a3518a93fa439fd)
}
| |
// fileDescriptor_2a3518a93fa439fd is the gzip-compressed
// FileDescriptorProto for storage.proto. It is opaque generated data:
// never edit these bytes by hand — regenerate from the .proto instead.
var fileDescriptor_2a3518a93fa439fd = []byte{
	// 1014 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0x5b, 0x6f, 0x1b, 0x45,
	0x14, 0xf6, 0xc6, 0x97, 0x38, 0xc7, 0xad, 0xed, 0x0c, 0x22, 0xb5, 0x1c, 0x21, 0xa2, 0x15, 0x95,
	0x52, 0x10, 0x76, 0xe3, 0x08, 0x0a, 0x0a, 0x14, 0xd9, 0xb9, 0xd0, 0xa8, 0x69, 0x13, 0xc6, 0xa6,
	0x42, 0x79, 0xe8, 0x6a, 0x6c, 0x8f, 0x37, 0x2b, 0x79, 0x77, 0xb6, 0xb3, 0xe3, 0x84, 0xf0, 0xc2,
	0x3b, 0x8f, 0xbc, 0xf0, 0xcc, 0xff, 0xe0, 0x19, 0xc4, 0x8f, 0xe0, 0xbf, 0xa0, 0x9d, 0x8b, 0x2f,
	0x89, 0xa1, 0xeb, 0x3a, 0x6f, 0x3e, 0x73, 0xf6, 0xfb, 0xce, 0xf9, 0xce, 0x9c, 0x73, 0x76, 0x0d,
	0xbb, 0x2e, 0x63, 0xee, 0x90, 0xd6, 0x7b, 0x43, 0x36, 0xea, 0xd7, 0xbb, 0x9e, 0xfb, 0x66, 0x44,
	0xf9, 0x75, 0x3d, 0x12, 0x8c, 0x13, 0x97, 0xd6, 0x2f, 0x77, 0xba, 0x54, 0x90, 0x1d, 0x63, 0xd7,
	0x42, 0xce, 0x04, 0x43, 0x0f, 0x15, 0xa8, 0x26, 0x41, 0x35, 0x03, 0xaa, 0x99, 0x87, 0x34, 0xa8,
	0xfa, 0x38, 0x19, 0x37, 0xb9, 0xe4, 0x4c, 0x11, 0x57, 0xbf, 0x48, 0x86, 0xe0, 0x94, 0xf4, 0x1d,
	0x16, 0x0a, 0x8f, 0x05, 0x91, 0x46, 0xee, 0x25, 0x43, 0x0a, 0xd2, 0x1d, 0x52, 0x87, 0xd3, 0x01,
	0xe5, 0x34, 0xe8, 0x69, 0x3d, 0xd5, 0x4d, 0x0d, 0x96, 0x56, 0x77, 0x34, 0xa8, 0x53, 0x3f, 0x14,
	0xd7, 0xda, 0xf9, 0xe1, 0x4d, 0xa7, 0xf0, 0x7c, 0x1a, 0x09, 0xe2, 0x87, 0xea, 0x01, 0xfb, 0x4b,
	0xc8, 0xb5, 0x05, 0xa7, 0xc4, 0x47, 0x08, 0x32, 0x01, 0xf1, 0x69, 0xc5, 0xda, 0xb2, 0xb6, 0xd7,
	0xb0, 0xfc, 0x8d, 0x36, 0x61, 0x8d, 0xb3, 0x2b, 0xa7, 0xc7, 0x46, 0x81, 0xa8, 0xac, 0x6c, 0x59,
	0xdb, 0x69, 0x9c, 0xe7, 0xec, 0x6a, 0x3f, 0xb6, 0x6d, 0x06, 0x45, 0x05, 0x3d, 0x63, 0x91, 0x17,
	0xcb, 0x41, 0x87, 0x90, 0x8b, 0xe4, 0x89, 0x24, 0x29, 0x34, 0x3e, 0xad, 0x25, 0xaa, 0x75, 0x4d,
	0xd1, 0x60, 0x0d, 0x46, 0x1b, 0x90, 0x63, 0x83, 0x41, 0x44, 0x4d, 0x48, 0x6d, 0xd9, 0x7f, 0xa5,
	0xa1, 0x80, 0x29, 0xe9, 0xb7, 0x69, 0x14, 0xc5, 0xe1, 0xe6, 0x65, 0xbc, 0x07, 0x05, 0xfa, 0x63,
	0xe8, 0x71, 0xea, 0xc4, 0x4a, 0x25, 0x41, 0xa1, 0x51, 0x35, 0x79, 0x98, 0x32, 0xd4, 0x3a, 0xa6,
	0x0c, 0x18, 0xd4, 0xe3, 0xf1, 0x01, 0xea, 0x40, 0x21, 0xbe, 0x4f, 0x27, 0xea, 0x5d, 0x50, 0x9f,
	0x54, 0xb2, 0x12, 0xbc, 0x93, 0x50, 0x44, 0xf3, 0x92, 0xb3, 0xb6, 0x04, 0x3e, 0x4b, 0x61, 0x20,
	0x63, 0x0b, 0x7d, 0x0b, 0xab, 0x4a, 0x58, 0x54, 0xc9, 0x6c, 0xa5, 0x17, 0x2f, 0x8b, 0x41, 0xa3,
	0xd7, 0x50, 0xba, 0xd1, 0x02, 0x95, 0x55, 0x99, 0xe2, 0x67, 0x09, 0x09, 0x3b, 0x31, 0x1a, 0x1b,
	0x30, 0x2e, 0x8a, 0x19, 0x7b, 0xc2, 0xef, 0xb3, 0xbe, 0x37, 0xf0, 0x28, 0x8f, 0x2a, 0xf9, 0xc5,
	0xf9, 0x5f, 0x18, 0xb0, 0xe6, 0x1f, 0xdb, 0xad, 0x3c, 0xe4, 0x54, 0x65, 0xed, 0x3f, 0xd3, 0x50,
	0xd9, 0xe7, 0x94, 0x08, 0x3a, 0x75, 0x9f, 0x98, 0xbe, 0x19, 0xd1, 0x48, 0xcc, 0x93, 0x69, 0xdd,
	0xa5, 0xcc, 0x0d, 0xc8, 0x85, 0x84, 0xd3, 0x40, 0x54, 0x72, 0xb2, 0x71, 0xb4, 0x35, 0x4f, 0xfe,
	0xca, 0x1d, 0xca, 0x47, 0x9f, 0xc0, 0x3a, 0x57, 0x12, 0x69, 0xdf, 0x31, 0x1d, 0x91, 0xde, 0xb2,
	0xb6, 0xb3, 0xb8, 0x3c, 0x76, 0xb4, 0xf5, 0x5d, 0x9f, 0xc3, 0xbd, 0xe9, 0x45, 0x51, 0xc9, 0xc8,
	0x4c, 0x9e, 0x2c, 0x56, 0x01, 0xd2, 0x3f, 0x55, 0x70, 0x5c, 0xe0, 0x13, 0x03, 0x1d, 0x43, 0x6e,
	0xc0, 0xb8, 0x4f, 0x84, 0xec, 0xf0, 0x62, 0xe2, 0x0e, 0x3f, 0x20, 0x82, 0x1c, 0x49, 0x20, 0xd6,
	0x04, 0xb6, 0x0f, 0xa5, 0x38, 0x0c, 0x66, 0x57, 0x91, 0xb9, 0xbe, 0x73, 0xb8, 0x2f, 0x33, 0x0f,
	0xf5, 0x56, 0x58, 0xf0, 0xf2, 0x66, 0x57, 0x0a, 0x96, 0x55, 0x30, 0x96, 0xfd, 0x14, 0xee, 0x29,
	0x7f, 0x5b, 0x10, 0x31, 0x8a, 0x50, 0x0d, 0xde, 0xa3, 0x91, 0xf0, 0x7c, 0x12, 0x97, 0x74, 0xb2,
	0xa9, 0x2c, 0xb9, 0x36, 0xd6, 0xc7, 0x2e, 0x6c, 0x56, 0xd6, 0x1e, 0x14, 0x3b, 0x17, 0x9c, 0x09,
	0x31, 0xa4, 0x9a, 0xe1, 0x11, 0x94, 0x85, 0x3e, 0x71, 0x42, 0xca, 0x7b, 0x54, 0xc3, 0xb3, 0xb8,
	0x64, 0xce, 0xcf, 0xd4, 0xb1, 0xfd, 0xdb, 0x0a, 0x94, 0x27, 0x62, 0xa3, 0x90, 0x05, 0x11, 0x45,
	0x2f, 0x61, 0x4d, 0xae, 0x0c, 0xce, 0xae, 0xd4, 0x65, 0x16, 0x1a, 0xf5, 0x05, 0x16, 0x46, 0xcc,
	0xf5, 0x2c, 0x85, 0xf3, 0x44, 0xff, 0x46, 0xcf, 0xe3, 0x15, 0x1a, 0x67, 0xa6, 0x7b, 0x6f, 0x77,
	0xa1, 0xb2, 0x29, 0x51, 0x58, 0x53, 0xc8, 0x8e, 0x36, 0xe2, 0x34, 0x6b, 0x76, 0xb1, 0x8e, 0x9e,
	0x29, 0x16, 0x2e, 0x8a, 0x19, 0xbb, 0x95, 0x83, 0x4c, 0xac, 0xdb, 0xfe, 0xdd, 0x82, 0x8f, 0x5a,
	0x44, 0xf4, 0x2e, 0x6e, 0xcd, 0xb4, 0x6e, 0x67, 0xd3, 0x1b, 0x27, 0xb0, 0x1a, 0x29, 0x87, 0xee,
	0x8a, 0x46, 0xc2, 0x44, 0xa6, 0xd7, 0x84, 0xa1, 0x98, 0x3f, 0x50, 0x2b, 0xf3, 0x07, 0xca, 0x0e,
	0xe1, 0xe1, 0x5b, 0x52, 0xd4, 0x37, 0x3a, 0xb5, 0xae, 0xad, 0x65, 0xd6, 0xb5, 0xfd, 0x1a, 0xde,
	0x3f, 0xf2, 0x02, 0x32, 0xf4, 0x7e, 0xa2, 0xda, 0xa5, 0xab, 0x30, 0x79, 0x4d, 0xae, 0x2c, 0xf1,
	0x9a, 0xb4, 0x43, 0xd8, 0x68, 0x87, 0x43, 0x4f, 0x48, 0x2d, 0x33, 0x01, 0x5e, 0x41, 0x89, 0x71,
	0xcf, 0x8d, 0x83, 0x3b, 0xcb, 0xbc, 0x90, 0x8b, 0x86, 0x45, 0xd9, 0xf6, 0xdf, 0x16, 0x3c, 0xb8,
	0x15, 0x52, 0x97, 0xad, 0x03, 0xc5, 0x90, 0x7b, 0x3e, 0xe1, 0xd7, 0x4b, 0x85, 0xbc, 0xaf, 0x49,
	0xf4, 0x47, 0xc9, 0x0f, 0x50, 0xe6, 0xd4, 0x27, 0x5e, 0xd0, 0xa7, 0xdc, 0x59, 0xa6, 0x68, 0xa5,
	0x31, 0x8d, 0x3a, 0xf8, 0x78, 0x17, 0x60, 0xb2, 0xcf, 0xd0, 0x26, 0x3c, 0x38, 0x68, 0x76, 0x9a,
	0xce, 0xd1, 0x29, 0x7e, 0xd1, 0xec, 0x38, 0xdf, 0xbf, 0x6c, 0x9f, 0x1d, 0xee, 0x1f, 0x1f, 0x1d,
	0x1f, 0x1e, 0x94, 0x53, 0x28, 0x0f, 0x99, 0xe6, 0x2b, 0x7c, 0x5a, 0xb6, 0x1a, 0xff, 0x64, 0xa1,
	0xd4, 0xf2, 0xdc, 0xef, 0xe2, 0x48, 0x6d, 0x15, 0x08, 0xfd, 0x62, 0xc1, 0xfa, 0xad, 0xa6, 0x42,
	0xdf, 0x24, 0x4c, 0xef, 0xbf, 0xde, 0x82, 0xd5, 0x77, 0x98, 0x0c, 0x3b, 0x85, 0x7e, 0x86, 0xbc,
	0x59, 0x51, 0xe8, 0xf3, 0x05, 0x18, 0xa6, 0x16, 0x78, 0xf5, 0xc9, 0xc2, 0x38, 0xd5, 0x02, 0x76,
	0xea, 0xb1, 0x85, 0xfe, 0xb0, 0xe0, 0x83, 0xff, 0x9d, 0x33, 0xf4, 0x3c, 0x21, 0x7d, 0x92, 0x85,
	0x52, 0x3d, 0xb9, 0x1b, 0x32, 0x23, 0x00, 0x5d, 0x40, 0x71, 0x76, 0x66, 0xd1, 0x57, 0x09, 0x23,
	0xcc, 0x1d, 0xf5, 0xea, 0xc6, 0xad, 0x2f, 0xcf, 0xc3, 0xf8, 0xeb, 0xdc, 0x4e, 0xa1, 0x5f, 0x2d,
	0x28, 0xdd, 0x98, 0x25, 0xf4, 0x75, 0xd2, 0x9e, 0x9e, 0x3b, 0xf6, 0xd5, 0xa7, 0xef, 0x0a, 0x37,
	0xf2, 0x5b, 0xd7, 0xf0, 0xa8, 0xc7, 0xfc, 0x64, 0x34, 0xe7, 0x27, 0xfa, 0x31, 0x97, 0x0d, 0x49,
	0xe0, 0xd6, 0x18, 0x77, 0xeb, 0x2e, 0x0d, 0xa4, 0xd0, 0xba, 0x72, 0x91, 0xd0, 0x8b, 0xde, 0xf2,
	0xa7, 0x66, 0x4f, 0xdb, 0xdd, 0x9c, 0x04, 0xee, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x2a, 0x1b,
	0xa2, 0x6a, 0xd4, 0x0d, 0x00, 0x00,
}
| |
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn

// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// (SupportPackageIsVersion4 must exist in the linked grpc package.)
const _ = grpc.SupportPackageIsVersion4
| |
// BigQueryStorageClient is the client API for BigQueryStorage service.
// It mirrors BigQueryStorageServer method-for-method; obtain an
// implementation with NewBigQueryStorageClient.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type BigQueryStorageClient interface {
	// Creates a new read session. A read session divides the contents of a
	// BigQuery table into one or more streams, which can then be used to read
	// data from the table. The read session also specifies properties of the
	// data to be read, such as a list of columns or a push-down filter describing
	// the rows to be returned.
	//
	// A particular row can be read by at most one stream. When the caller has
	// reached the end of each stream in the session, then all the data in the
	// table has been read.
	//
	// Read sessions automatically expire 24 hours after they are created and do
	// not require manual clean-up by the caller.
	CreateReadSession(ctx context.Context, in *CreateReadSessionRequest, opts ...grpc.CallOption) (*ReadSession, error)
	// Reads rows from the table in the format prescribed by the read session.
	// Each response contains one or more table rows, up to a maximum of 10 MiB
	// per response; read requests which attempt to read individual rows larger
	// than this will fail.
	//
	// Each request also returns a set of stream statistics reflecting the
	// estimated total number of rows in the read stream. This number is computed
	// based on the total table size and the number of active streams in the read
	// session, and may change as other streams continue to read data.
	//
	// The returned client is a server-streaming handle; call Recv until
	// it reports io.EOF.
	ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigQueryStorage_ReadRowsClient, error)
	// Creates additional streams for a ReadSession. This API can be used to
	// dynamically adjust the parallelism of a batch processing task upwards by
	// adding additional workers.
	BatchCreateReadSessionStreams(ctx context.Context, in *BatchCreateReadSessionStreamsRequest, opts ...grpc.CallOption) (*BatchCreateReadSessionStreamsResponse, error)
	// Triggers the graceful termination of a single stream in a ReadSession. This
	// API can be used to dynamically adjust the parallelism of a batch processing
	// task downwards without losing data.
	//
	// This API does not delete the stream -- it remains visible in the
	// ReadSession, and any data processed by the stream is not released to other
	// streams. However, no additional data will be assigned to the stream once
	// this call completes. Callers must continue reading data on the stream until
	// the end of the stream is reached so that data which has already been
	// assigned to the stream will be processed.
	//
	// This method will return an error if there are no other live streams
	// in the Session, or if SplitReadStream() has been called on the given
	// Stream.
	FinalizeStream(ctx context.Context, in *FinalizeStreamRequest, opts ...grpc.CallOption) (*empty.Empty, error)
	// Splits a given read stream into two Streams. These streams are referred to
	// as the primary and the residual of the split. The original stream can still
	// be read from in the same manner as before. Both of the returned streams can
	// also be read from, and the total rows return by both child streams will be
	// the same as the rows read from the original stream.
	//
	// Moreover, the two child streams will be allocated back to back in the
	// original Stream. Concretely, it is guaranteed that for streams Original,
	// Primary, and Residual, that Original[0-j] = Primary[0-j] and
	// Original[j-n] = Residual[0-m] once the streams have been read to
	// completion.
	//
	// This method is guaranteed to be idempotent.
	SplitReadStream(ctx context.Context, in *SplitReadStreamRequest, opts ...grpc.CallOption) (*SplitReadStreamResponse, error)
}
| |
// bigQueryStorageClient is the concrete BigQueryStorageClient backed by
// a single gRPC client connection.
type bigQueryStorageClient struct {
	cc *grpc.ClientConn
}
| |
| func NewBigQueryStorageClient(cc *grpc.ClientConn) BigQueryStorageClient { |
| return &bigQueryStorageClient{cc} |
| } |
| |
| func (c *bigQueryStorageClient) CreateReadSession(ctx context.Context, in *CreateReadSessionRequest, opts ...grpc.CallOption) (*ReadSession, error) { |
| out := new(ReadSession) |
| err := c.cc.Invoke(ctx, "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/CreateReadSession", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
// ReadRows opens the server-streaming ReadRows RPC. It sends the single
// request, half-closes the send direction, and returns a handle whose
// Recv yields successive ReadRowsResponse messages.
func (c *bigQueryStorageClient) ReadRows(ctx context.Context, in *ReadRowsRequest, opts ...grpc.CallOption) (BigQueryStorage_ReadRowsClient, error) {
	// Streams[0] is the ReadRows stream descriptor in _BigQueryStorage_serviceDesc.
	stream, err := c.cc.NewStream(ctx, &_BigQueryStorage_serviceDesc.Streams[0], "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/ReadRows", opts...)
	if err != nil {
		return nil, err
	}
	x := &bigQueryStorageReadRowsClient{stream}
	// Send the one request message, then half-close: the server streams
	// responses from here on.
	if err := x.ClientStream.SendMsg(in); err != nil {
		return nil, err
	}
	if err := x.ClientStream.CloseSend(); err != nil {
		return nil, err
	}
	return x, nil
}
| |
// BigQueryStorage_ReadRowsClient is the client-side handle for the
// ReadRows server-streaming RPC.
type BigQueryStorage_ReadRowsClient interface {
	// Recv blocks for the next streamed response.
	Recv() (*ReadRowsResponse, error)
	grpc.ClientStream
}

// bigQueryStorageReadRowsClient implements BigQueryStorage_ReadRowsClient
// by embedding the raw grpc.ClientStream.
type bigQueryStorageReadRowsClient struct {
	grpc.ClientStream
}
| |
| func (x *bigQueryStorageReadRowsClient) Recv() (*ReadRowsResponse, error) { |
| m := new(ReadRowsResponse) |
| if err := x.ClientStream.RecvMsg(m); err != nil { |
| return nil, err |
| } |
| return m, nil |
| } |
| |
| func (c *bigQueryStorageClient) BatchCreateReadSessionStreams(ctx context.Context, in *BatchCreateReadSessionStreamsRequest, opts ...grpc.CallOption) (*BatchCreateReadSessionStreamsResponse, error) { |
| out := new(BatchCreateReadSessionStreamsResponse) |
| err := c.cc.Invoke(ctx, "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/BatchCreateReadSessionStreams", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *bigQueryStorageClient) FinalizeStream(ctx context.Context, in *FinalizeStreamRequest, opts ...grpc.CallOption) (*empty.Empty, error) { |
| out := new(empty.Empty) |
| err := c.cc.Invoke(ctx, "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/FinalizeStream", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
| func (c *bigQueryStorageClient) SplitReadStream(ctx context.Context, in *SplitReadStreamRequest, opts ...grpc.CallOption) (*SplitReadStreamResponse, error) { |
| out := new(SplitReadStreamResponse) |
| err := c.cc.Invoke(ctx, "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/SplitReadStream", in, out, opts...) |
| if err != nil { |
| return nil, err |
| } |
| return out, nil |
| } |
| |
// BigQueryStorageServer is the server API for BigQueryStorage service.
// Register an implementation with RegisterBigQueryStorageServer.
type BigQueryStorageServer interface {
	// Creates a new read session. A read session divides the contents of a
	// BigQuery table into one or more streams, which can then be used to read
	// data from the table. The read session also specifies properties of the
	// data to be read, such as a list of columns or a push-down filter describing
	// the rows to be returned.
	//
	// A particular row can be read by at most one stream. When the caller has
	// reached the end of each stream in the session, then all the data in the
	// table has been read.
	//
	// Read sessions automatically expire 24 hours after they are created and do
	// not require manual clean-up by the caller.
	CreateReadSession(context.Context, *CreateReadSessionRequest) (*ReadSession, error)
	// Reads rows from the table in the format prescribed by the read session.
	// Each response contains one or more table rows, up to a maximum of 10 MiB
	// per response; read requests which attempt to read individual rows larger
	// than this will fail.
	//
	// Each request also returns a set of stream statistics reflecting the
	// estimated total number of rows in the read stream. This number is computed
	// based on the total table size and the number of active streams in the read
	// session, and may change as other streams continue to read data.
	//
	// Responses are written via the provided server-stream handle's Send.
	ReadRows(*ReadRowsRequest, BigQueryStorage_ReadRowsServer) error
	// Creates additional streams for a ReadSession. This API can be used to
	// dynamically adjust the parallelism of a batch processing task upwards by
	// adding additional workers.
	BatchCreateReadSessionStreams(context.Context, *BatchCreateReadSessionStreamsRequest) (*BatchCreateReadSessionStreamsResponse, error)
	// Triggers the graceful termination of a single stream in a ReadSession. This
	// API can be used to dynamically adjust the parallelism of a batch processing
	// task downwards without losing data.
	//
	// This API does not delete the stream -- it remains visible in the
	// ReadSession, and any data processed by the stream is not released to other
	// streams. However, no additional data will be assigned to the stream once
	// this call completes. Callers must continue reading data on the stream until
	// the end of the stream is reached so that data which has already been
	// assigned to the stream will be processed.
	//
	// This method will return an error if there are no other live streams
	// in the Session, or if SplitReadStream() has been called on the given
	// Stream.
	FinalizeStream(context.Context, *FinalizeStreamRequest) (*empty.Empty, error)
	// Splits a given read stream into two Streams. These streams are referred to
	// as the primary and the residual of the split. The original stream can still
	// be read from in the same manner as before. Both of the returned streams can
	// also be read from, and the total rows return by both child streams will be
	// the same as the rows read from the original stream.
	//
	// Moreover, the two child streams will be allocated back to back in the
	// original Stream. Concretely, it is guaranteed that for streams Original,
	// Primary, and Residual, that Original[0-j] = Primary[0-j] and
	// Original[j-n] = Residual[0-m] once the streams have been read to
	// completion.
	//
	// This method is guaranteed to be idempotent.
	SplitReadStream(context.Context, *SplitReadStreamRequest) (*SplitReadStreamResponse, error)
}
| |
// RegisterBigQueryStorageServer attaches srv's BigQueryStorage
// implementation to the gRPC server s via the generated service
// descriptor.
func RegisterBigQueryStorageServer(s *grpc.Server, srv BigQueryStorageServer) {
	s.RegisterService(&_BigQueryStorage_serviceDesc, srv)
}
| |
| func _BigQueryStorage_CreateReadSession_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(CreateReadSessionRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(BigQueryStorageServer).CreateReadSession(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/CreateReadSession", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(BigQueryStorageServer).CreateReadSession(ctx, req.(*CreateReadSessionRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
// _BigQueryStorage_ReadRows_Handler receives the single ReadRowsRequest
// from the stream and hands srv a send-only server-stream wrapper to
// emit responses on.
func _BigQueryStorage_ReadRows_Handler(srv interface{}, stream grpc.ServerStream) error {
	m := new(ReadRowsRequest)
	if err := stream.RecvMsg(m); err != nil {
		return err
	}
	return srv.(BigQueryStorageServer).ReadRows(m, &bigQueryStorageReadRowsServer{stream})
}
| |
// BigQueryStorage_ReadRowsServer is the server-side handle for the
// ReadRows server-streaming RPC.
type BigQueryStorage_ReadRowsServer interface {
	// Send writes one response message to the client.
	Send(*ReadRowsResponse) error
	grpc.ServerStream
}

// bigQueryStorageReadRowsServer implements BigQueryStorage_ReadRowsServer
// by embedding the raw grpc.ServerStream.
type bigQueryStorageReadRowsServer struct {
	grpc.ServerStream
}
| |
// Send marshals m onto the underlying server stream.
func (x *bigQueryStorageReadRowsServer) Send(m *ReadRowsResponse) error {
	return x.ServerStream.SendMsg(m)
}
| |
| func _BigQueryStorage_BatchCreateReadSessionStreams_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(BatchCreateReadSessionStreamsRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(BigQueryStorageServer).BatchCreateReadSessionStreams(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/BatchCreateReadSessionStreams", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(BigQueryStorageServer).BatchCreateReadSessionStreams(ctx, req.(*BatchCreateReadSessionStreamsRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _BigQueryStorage_FinalizeStream_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(FinalizeStreamRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(BigQueryStorageServer).FinalizeStream(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/FinalizeStream", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(BigQueryStorageServer).FinalizeStream(ctx, req.(*FinalizeStreamRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
| func _BigQueryStorage_SplitReadStream_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { |
| in := new(SplitReadStreamRequest) |
| if err := dec(in); err != nil { |
| return nil, err |
| } |
| if interceptor == nil { |
| return srv.(BigQueryStorageServer).SplitReadStream(ctx, in) |
| } |
| info := &grpc.UnaryServerInfo{ |
| Server: srv, |
| FullMethod: "/google.cloud.bigquery.storage.v1beta1.BigQueryStorage/SplitReadStream", |
| } |
| handler := func(ctx context.Context, req interface{}) (interface{}, error) { |
| return srv.(BigQueryStorageServer).SplitReadStream(ctx, req.(*SplitReadStreamRequest)) |
| } |
| return interceptor(ctx, in, info, handler) |
| } |
| |
// _BigQueryStorage_serviceDesc wires the BigQueryStorage service name,
// its four unary methods, and the ReadRows server-streaming method to
// their generated handler functions. It is consumed by
// RegisterBigQueryStorageServer and by the client's ReadRows
// (Streams[0]).
var _BigQueryStorage_serviceDesc = grpc.ServiceDesc{
	ServiceName: "google.cloud.bigquery.storage.v1beta1.BigQueryStorage",
	HandlerType: (*BigQueryStorageServer)(nil),
	Methods: []grpc.MethodDesc{
		{
			MethodName: "CreateReadSession",
			Handler:    _BigQueryStorage_CreateReadSession_Handler,
		},
		{
			MethodName: "BatchCreateReadSessionStreams",
			Handler:    _BigQueryStorage_BatchCreateReadSessionStreams_Handler,
		},
		{
			MethodName: "FinalizeStream",
			Handler:    _BigQueryStorage_FinalizeStream_Handler,
		},
		{
			MethodName: "SplitReadStream",
			Handler:    _BigQueryStorage_SplitReadStream_Handler,
		},
	},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "ReadRows",
			Handler:       _BigQueryStorage_ReadRows_Handler,
			ServerStreams: true,
		},
	},
	Metadata: "google/cloud/bigquery/storage/v1beta1/storage.proto",
}