| // Generated by the protocol buffer compiler. DO NOT EDIT! |
| // source: LlapPluginProtocol.proto |
| |
| package org.apache.hadoop.hive.llap.plugin.rpc; |
| |
| public final class LlapPluginProtocolProtos { |
  // Static holder for the generated message/service types; never instantiated.
  private LlapPluginProtocolProtos() {}
  /**
   * Registers protobuf extensions declared by this file with the given registry.
   * The source .proto declares no extensions, so this is intentionally a no-op;
   * it exists so callers can treat all generated files uniformly.
   */
  public static void registerAllExtensions(
      com.google.protobuf.ExtensionRegistry registry) {
  }
  /**
   * Read-only accessor interface shared by {@code UpdateQueryRequestProto}
   * and its {@code Builder}.
   */
  public interface UpdateQueryRequestProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {

    // optional int32 guaranteed_task_count = 1;
    /**
     * <code>optional int32 guaranteed_task_count = 1;</code>
     *
     * @return whether the field was explicitly set (proto2 presence bit).
     */
    boolean hasGuaranteedTaskCount();
    /**
     * <code>optional int32 guaranteed_task_count = 1;</code>
     *
     * @return the field value, or 0 (the proto2 int32 default) if unset.
     */
    int getGuaranteedTaskCount();
  }
| /** |
| * Protobuf type {@code UpdateQueryRequestProto} |
| */ |
  // Immutable message carrying a single optional int32 field
  // (guaranteed_task_count = 1). Instances are built via Builder or parsed
  // from the wire; the wire format must not be altered by hand.
  public static final class UpdateQueryRequestProto extends
      com.google.protobuf.GeneratedMessage
      implements UpdateQueryRequestProtoOrBuilder {
    // Use UpdateQueryRequestProto.newBuilder() to construct.
    private UpdateQueryRequestProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Minimal constructor used only for the singleton default instance below;
    // bypasses the builder machinery entirely.
    private UpdateQueryRequestProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance, initialized in the static block at the
    // bottom of this class.
    private static final UpdateQueryRequestProto defaultInstance;
    public static UpdateQueryRequestProto getDefaultInstance() {
      return defaultInstance;
    }

    public UpdateQueryRequestProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields present on the wire but not in this message's schema; retained
    // so they round-trip through re-serialization.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Parsing constructor: consumes tags from the input stream until tag 0
    // (end of message/stream) or an unparseable unknown field is hit.
    private UpdateQueryRequestProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      int mutable_bitField0_ = 0;
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          // NOTE(review): the 'default' label precedes 'case 8'; Java switch
          // dispatch ignores label order, so tag 8 still reaches its case.
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
            case 8: {
              // tag 8 = field number 1, wire type 0 (varint):
              // guaranteed_task_count.
              bitField0_ |= 0x00000001;
              guaranteedTaskCount_ = input.readInt32();
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        // Attach the partially-parsed message so callers can inspect it.
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryRequestProto_descriptor;
    }

    // Wires this class and its Builder into the reflection-based field
    // accessor machinery of GeneratedMessage.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryRequestProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto.class, org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto.Builder.class);
    }

    // NOTE(review): public mutable static field is the protobuf 2.5 generated
    // convention; callers must treat it as read-only.
    public static com.google.protobuf.Parser<UpdateQueryRequestProto> PARSER =
        new com.google.protobuf.AbstractParser<UpdateQueryRequestProto>() {
      public UpdateQueryRequestProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new UpdateQueryRequestProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<UpdateQueryRequestProto> getParserForType() {
      return PARSER;
    }

    // Presence bits for optional fields; bit 0 tracks guaranteed_task_count.
    private int bitField0_;
    // optional int32 guaranteed_task_count = 1;
    public static final int GUARANTEED_TASK_COUNT_FIELD_NUMBER = 1;
    private int guaranteedTaskCount_;
    /**
     * <code>optional int32 guaranteed_task_count = 1;</code>
     */
    public boolean hasGuaranteedTaskCount() {
      return ((bitField0_ & 0x00000001) == 0x00000001);
    }
    /**
     * <code>optional int32 guaranteed_task_count = 1;</code>
     */
    public int getGuaranteedTaskCount() {
      return guaranteedTaskCount_;
    }

    // Resets all fields to their proto2 defaults.
    private void initFields() {
      guaranteedTaskCount_ = 0;
    }
    // -1 = not computed yet; 0 = not initialized; 1 = initialized.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // No required fields, so the message is always initialized.
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
        throws java.io.IOException {
      // Called for its side effect of populating memoizedSerializedSize.
      getSerializedSize();
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        output.writeInt32(1, guaranteedTaskCount_);
      }
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      if (((bitField0_ & 0x00000001) == 0x00000001)) {
        size += com.google.protobuf.CodedOutputStream
            .computeInt32Size(1, guaranteedTaskCount_);
      }
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    // Value equality: same presence bit, same field value, same unknown fields.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto other = (org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto) obj;

      boolean result = true;
      result = result && (hasGuaranteedTaskCount() == other.hasGuaranteedTaskCount());
      if (hasGuaranteedTaskCount()) {
        result = result && (getGuaranteedTaskCount()
            == other.getGuaranteedTaskCount());
      }
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Hash code consistent with equals(); memoized (0 means "not computed").
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      if (hasGuaranteedTaskCount()) {
        hash = (37 * hash) + GUARANTEED_TASK_COUNT_FIELD_NUMBER;
        hash = (53 * hash) + getGuaranteedTaskCount();
      }
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    // --- Static parse helpers: thin wrappers over PARSER for each input kind. ---
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code UpdateQueryRequestProto}
     *
     * Mutable builder companion; produces immutable messages via build().
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
        implements org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryRequestProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryRequestProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto.class, org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // No message-typed sub-fields, so there is nothing to eagerly initialize.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      // Resets the builder to default values and clears all presence bits.
      public Builder clear() {
        super.clear();
        guaranteedTaskCount_ = 0;
        bitField0_ = (bitField0_ & ~0x00000001);
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryRequestProto_descriptor;
      }

      public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto getDefaultInstanceForType() {
        return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto.getDefaultInstance();
      }

      // Builds and verifies required fields (none here, so never throws).
      public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto build() {
        org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      // Copies the builder state into a new immutable message, translating
      // builder presence bits into message presence bits.
      public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto buildPartial() {
        org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto result = new org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto(this);
        int from_bitField0_ = bitField0_;
        int to_bitField0_ = 0;
        if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
          to_bitField0_ |= 0x00000001;
        }
        result.guaranteedTaskCount_ = guaranteedTaskCount_;
        result.bitField0_ = to_bitField0_;
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto) {
          return mergeFrom((org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // Field-wise merge: only fields explicitly set on 'other' overwrite ours.
      public Builder mergeFrom(org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto other) {
        if (other == org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto.getDefaultInstance()) return this;
        if (other.hasGuaranteedTaskCount()) {
          setGuaranteedTaskCount(other.getGuaranteedTaskCount());
        }
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        return true;
      }

      // Stream merge: on parse failure, the finally block still merges any
      // partially-parsed message before the exception propagates.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }
      private int bitField0_;

      // optional int32 guaranteed_task_count = 1;
      private int guaranteedTaskCount_ ;
      /**
       * <code>optional int32 guaranteed_task_count = 1;</code>
       */
      public boolean hasGuaranteedTaskCount() {
        return ((bitField0_ & 0x00000001) == 0x00000001);
      }
      /**
       * <code>optional int32 guaranteed_task_count = 1;</code>
       */
      public int getGuaranteedTaskCount() {
        return guaranteedTaskCount_;
      }
      /**
       * <code>optional int32 guaranteed_task_count = 1;</code>
       *
       * Sets the value, marks the field present, and notifies parent builders.
       */
      public Builder setGuaranteedTaskCount(int value) {
        bitField0_ |= 0x00000001;
        guaranteedTaskCount_ = value;
        onChanged();
        return this;
      }
      /**
       * <code>optional int32 guaranteed_task_count = 1;</code>
       *
       * Clears the presence bit and restores the default value (0).
       */
      public Builder clearGuaranteedTaskCount() {
        bitField0_ = (bitField0_ & ~0x00000001);
        guaranteedTaskCount_ = 0;
        onChanged();
        return this;
      }

      // @@protoc_insertion_point(builder_scope:UpdateQueryRequestProto)
    }

    static {
      defaultInstance = new UpdateQueryRequestProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:UpdateQueryRequestProto)
  }
| |
  /**
   * Read-only accessor interface for {@code UpdateQueryResponseProto}.
   * The message declares no fields, so no accessors are generated.
   */
  public interface UpdateQueryResponseProtoOrBuilder
      extends com.google.protobuf.MessageOrBuilder {
  }
| /** |
| * Protobuf type {@code UpdateQueryResponseProto} |
| */ |
  // Empty (field-less) response message: acts as an acknowledgement for the
  // updateQuery RPC. Only unknown fields can carry data on the wire.
  public static final class UpdateQueryResponseProto extends
      com.google.protobuf.GeneratedMessage
      implements UpdateQueryResponseProtoOrBuilder {
    // Use UpdateQueryResponseProto.newBuilder() to construct.
    private UpdateQueryResponseProto(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
      super(builder);
      this.unknownFields = builder.getUnknownFields();
    }
    // Minimal constructor used only for the singleton default instance below.
    private UpdateQueryResponseProto(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }

    // Singleton default instance, initialized in the static block at the
    // bottom of this class.
    private static final UpdateQueryResponseProto defaultInstance;
    public static UpdateQueryResponseProto getDefaultInstance() {
      return defaultInstance;
    }

    public UpdateQueryResponseProto getDefaultInstanceForType() {
      return defaultInstance;
    }

    // Fields present on the wire but not in this message's schema.
    private final com.google.protobuf.UnknownFieldSet unknownFields;
    @java.lang.Override
    public final com.google.protobuf.UnknownFieldSet
        getUnknownFields() {
      return this.unknownFields;
    }
    // Parsing constructor: with no declared fields, everything except the
    // terminating tag 0 is preserved as an unknown field.
    private UpdateQueryResponseProto(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      initFields();
      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
          com.google.protobuf.UnknownFieldSet.newBuilder();
      try {
        boolean done = false;
        while (!done) {
          int tag = input.readTag();
          switch (tag) {
            case 0:
              done = true;
              break;
            default: {
              if (!parseUnknownField(input, unknownFields,
                                     extensionRegistry, tag)) {
                done = true;
              }
              break;
            }
          }
        }
      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
        // Attach the partially-parsed message so callers can inspect it.
        throw e.setUnfinishedMessage(this);
      } catch (java.io.IOException e) {
        throw new com.google.protobuf.InvalidProtocolBufferException(
            e.getMessage()).setUnfinishedMessage(this);
      } finally {
        this.unknownFields = unknownFields.build();
        makeExtensionsImmutable();
      }
    }
    public static final com.google.protobuf.Descriptors.Descriptor
        getDescriptor() {
      return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryResponseProto_descriptor;
    }

    // Wires this class and its Builder into GeneratedMessage's reflection
    // machinery.
    protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
        internalGetFieldAccessorTable() {
      return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryResponseProto_fieldAccessorTable
          .ensureFieldAccessorsInitialized(
              org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.class, org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.Builder.class);
    }

    // NOTE(review): public mutable static field is the protobuf 2.5 generated
    // convention; callers must treat it as read-only.
    public static com.google.protobuf.Parser<UpdateQueryResponseProto> PARSER =
        new com.google.protobuf.AbstractParser<UpdateQueryResponseProto>() {
      public UpdateQueryResponseProto parsePartialFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws com.google.protobuf.InvalidProtocolBufferException {
        return new UpdateQueryResponseProto(input, extensionRegistry);
      }
    };

    @java.lang.Override
    public com.google.protobuf.Parser<UpdateQueryResponseProto> getParserForType() {
      return PARSER;
    }

    // No declared fields, so there is nothing to reset.
    private void initFields() {
    }
    // -1 = not computed yet; 0 = not initialized; 1 = initialized.
    private byte memoizedIsInitialized = -1;
    public final boolean isInitialized() {
      byte isInitialized = memoizedIsInitialized;
      if (isInitialized != -1) return isInitialized == 1;

      // No required fields, so the message is always initialized.
      memoizedIsInitialized = 1;
      return true;
    }

    public void writeTo(com.google.protobuf.CodedOutputStream output)
        throws java.io.IOException {
      // Called for its side effect of populating memoizedSerializedSize.
      getSerializedSize();
      getUnknownFields().writeTo(output);
    }

    private int memoizedSerializedSize = -1;
    public int getSerializedSize() {
      int size = memoizedSerializedSize;
      if (size != -1) return size;

      size = 0;
      size += getUnknownFields().getSerializedSize();
      memoizedSerializedSize = size;
      return size;
    }

    private static final long serialVersionUID = 0L;
    @java.lang.Override
    protected java.lang.Object writeReplace()
        throws java.io.ObjectStreamException {
      return super.writeReplace();
    }

    // Value equality: with no declared fields, only unknown fields matter.
    @java.lang.Override
    public boolean equals(final java.lang.Object obj) {
      if (obj == this) {
        return true;
      }
      if (!(obj instanceof org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto)) {
        return super.equals(obj);
      }
      org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto other = (org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto) obj;

      boolean result = true;
      result = result &&
          getUnknownFields().equals(other.getUnknownFields());
      return result;
    }

    // Hash code consistent with equals(); memoized (0 means "not computed").
    private int memoizedHashCode = 0;
    @java.lang.Override
    public int hashCode() {
      if (memoizedHashCode != 0) {
        return memoizedHashCode;
      }
      int hash = 41;
      hash = (19 * hash) + getDescriptorForType().hashCode();
      hash = (29 * hash) + getUnknownFields().hashCode();
      memoizedHashCode = hash;
      return hash;
    }

    // --- Static parse helpers: thin wrappers over PARSER for each input kind. ---
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom(
        com.google.protobuf.ByteString data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom(
        com.google.protobuf.ByteString data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom(byte[] data)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom(
        byte[] data,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws com.google.protobuf.InvalidProtocolBufferException {
      return PARSER.parseFrom(data, extensionRegistry);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseDelimitedFrom(java.io.InputStream input)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseDelimitedFrom(
        java.io.InputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseDelimitedFrom(input, extensionRegistry);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input)
        throws java.io.IOException {
      return PARSER.parseFrom(input);
    }
    public static org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parseFrom(
        com.google.protobuf.CodedInputStream input,
        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
        throws java.io.IOException {
      return PARSER.parseFrom(input, extensionRegistry);
    }

    public static Builder newBuilder() { return Builder.create(); }
    public Builder newBuilderForType() { return newBuilder(); }
    public static Builder newBuilder(org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto prototype) {
      return newBuilder().mergeFrom(prototype);
    }
    public Builder toBuilder() { return newBuilder(this); }

    @java.lang.Override
    protected Builder newBuilderForType(
        com.google.protobuf.GeneratedMessage.BuilderParent parent) {
      Builder builder = new Builder(parent);
      return builder;
    }
    /**
     * Protobuf type {@code UpdateQueryResponseProto}
     *
     * Mutable builder companion; produces immutable messages via build().
     */
    public static final class Builder extends
        com.google.protobuf.GeneratedMessage.Builder<Builder>
        implements org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProtoOrBuilder {
      public static final com.google.protobuf.Descriptors.Descriptor
          getDescriptor() {
        return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryResponseProto_descriptor;
      }

      protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
          internalGetFieldAccessorTable() {
        return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryResponseProto_fieldAccessorTable
            .ensureFieldAccessorsInitialized(
                org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.class, org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.Builder.class);
      }

      // Construct using org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.newBuilder()
      private Builder() {
        maybeForceBuilderInitialization();
      }

      private Builder(
          com.google.protobuf.GeneratedMessage.BuilderParent parent) {
        super(parent);
        maybeForceBuilderInitialization();
      }
      // No message-typed sub-fields, so there is nothing to eagerly initialize.
      private void maybeForceBuilderInitialization() {
        if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
        }
      }
      private static Builder create() {
        return new Builder();
      }

      public Builder clear() {
        super.clear();
        return this;
      }

      public Builder clone() {
        return create().mergeFrom(buildPartial());
      }

      public com.google.protobuf.Descriptors.Descriptor
          getDescriptorForType() {
        return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.internal_static_UpdateQueryResponseProto_descriptor;
      }

      public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto getDefaultInstanceForType() {
        return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.getDefaultInstance();
      }

      // Builds and verifies required fields (none here, so never throws).
      public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto build() {
        org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto result = buildPartial();
        if (!result.isInitialized()) {
          throw newUninitializedMessageException(result);
        }
        return result;
      }

      public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto buildPartial() {
        org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto result = new org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto(this);
        onBuilt();
        return result;
      }

      public Builder mergeFrom(com.google.protobuf.Message other) {
        if (other instanceof org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto) {
          return mergeFrom((org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto)other);
        } else {
          super.mergeFrom(other);
          return this;
        }
      }

      // With no declared fields, merging only transfers unknown fields.
      public Builder mergeFrom(org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto other) {
        if (other == org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.getDefaultInstance()) return this;
        this.mergeUnknownFields(other.getUnknownFields());
        return this;
      }

      public final boolean isInitialized() {
        return true;
      }

      // Stream merge: on parse failure, the finally block still merges any
      // partially-parsed message before the exception propagates.
      public Builder mergeFrom(
          com.google.protobuf.CodedInputStream input,
          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
          throws java.io.IOException {
        org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto parsedMessage = null;
        try {
          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
          parsedMessage = (org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto) e.getUnfinishedMessage();
          throw e;
        } finally {
          if (parsedMessage != null) {
            mergeFrom(parsedMessage);
          }
        }
        return this;
      }

      // @@protoc_insertion_point(builder_scope:UpdateQueryResponseProto)
    }

    static {
      defaultInstance = new UpdateQueryResponseProto(true);
      defaultInstance.initFields();
    }

    // @@protoc_insertion_point(class_scope:UpdateQueryResponseProto)
  }
| |
| /** |
| * Protobuf service {@code LlapPluginProtocol} |
| */ |
  /**
   * Protobuf service {@code LlapPluginProtocol}
   *
   * <p>Generated service wrapper exposing a single RPC, {@code updateQuery},
   * mapping {@code UpdateQueryRequestProto} to {@code UpdateQueryResponseProto}.
   * Provides both callback-style ({@link Interface}, {@link Stub}) and
   * blocking ({@link BlockingInterface}, {@code BlockingStub}) views.
   */
  public static abstract class LlapPluginProtocol
      implements com.google.protobuf.Service {
    protected LlapPluginProtocol() {}

    // Callback-style (asynchronous) view of the service for implementors.
    public interface Interface {
      /**
       * <code>rpc updateQuery(.UpdateQueryRequestProto) returns (.UpdateQueryResponseProto);</code>
       */
      public abstract void updateQuery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto> done);

    }

    // Wraps an Interface implementation as a reflective Service by
    // delegating each generated RPC method to the matching impl method.
    public static com.google.protobuf.Service newReflectiveService(
        final Interface impl) {
      return new LlapPluginProtocol() {
        @java.lang.Override
        public void updateQuery(
            com.google.protobuf.RpcController controller,
            org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto request,
            com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto> done) {
          impl.updateQuery(controller, request, done);
        }

      };
    }

    // Wraps a BlockingInterface implementation as a reflective
    // BlockingService. Dispatch is by method descriptor index, which must
    // match the method order in the .proto service definition.
    public static com.google.protobuf.BlockingService
        newReflectiveBlockingService(final BlockingInterface impl) {
      return new com.google.protobuf.BlockingService() {
        public final com.google.protobuf.Descriptors.ServiceDescriptor
            getDescriptorForType() {
          return getDescriptor();
        }

        public final com.google.protobuf.Message callBlockingMethod(
            com.google.protobuf.Descriptors.MethodDescriptor method,
            com.google.protobuf.RpcController controller,
            com.google.protobuf.Message request)
            throws com.google.protobuf.ServiceException {
          // Reject descriptors belonging to a different service definition.
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.callBlockingMethod() given method descriptor for " +
              "wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return impl.updateQuery(controller, (org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto)request);
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

        public final com.google.protobuf.Message
            getRequestPrototype(
            com.google.protobuf.Descriptors.MethodDescriptor method) {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.getRequestPrototype() given method " +
              "descriptor for wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto.getDefaultInstance();
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

        public final com.google.protobuf.Message
            getResponsePrototype(
            com.google.protobuf.Descriptors.MethodDescriptor method) {
          if (method.getService() != getDescriptor()) {
            throw new java.lang.IllegalArgumentException(
              "Service.getResponsePrototype() given method " +
              "descriptor for wrong service type.");
          }
          switch(method.getIndex()) {
            case 0:
              return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.getDefaultInstance();
            default:
              throw new java.lang.AssertionError("Can't get here.");
          }
        }

      };
    }

    /**
     * <code>rpc updateQuery(.UpdateQueryRequestProto) returns (.UpdateQueryResponseProto);</code>
     */
    public abstract void updateQuery(
        com.google.protobuf.RpcController controller,
        org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto request,
        com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto> done);

    // Returns this service's descriptor: the first (index 0) service
    // declared in LlapPluginProtocol.proto.
    public static final
        com.google.protobuf.Descriptors.ServiceDescriptor
        getDescriptor() {
      return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.getDescriptor().getServices().get(0);
    }
    public final com.google.protobuf.Descriptors.ServiceDescriptor
        getDescriptorForType() {
      return getDescriptor();
    }

    // Reflective entry point: routes a generic Message request to the typed
    // abstract method, narrowing the callback via RpcUtil.specializeCallback.
    public final void callMethod(
        com.google.protobuf.Descriptors.MethodDescriptor method,
        com.google.protobuf.RpcController controller,
        com.google.protobuf.Message request,
        com.google.protobuf.RpcCallback<
          com.google.protobuf.Message> done) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.callMethod() given method descriptor for wrong " +
          "service type.");
      }
      switch(method.getIndex()) {
        case 0:
          this.updateQuery(controller, (org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto)request,
            com.google.protobuf.RpcUtil.<org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto>specializeCallback(
              done));
          return;
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }

    public final com.google.protobuf.Message
        getRequestPrototype(
        com.google.protobuf.Descriptors.MethodDescriptor method) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.getRequestPrototype() given method " +
          "descriptor for wrong service type.");
      }
      switch(method.getIndex()) {
        case 0:
          return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto.getDefaultInstance();
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }

    public final com.google.protobuf.Message
        getResponsePrototype(
        com.google.protobuf.Descriptors.MethodDescriptor method) {
      if (method.getService() != getDescriptor()) {
        throw new java.lang.IllegalArgumentException(
          "Service.getResponsePrototype() given method " +
          "descriptor for wrong service type.");
      }
      switch(method.getIndex()) {
        case 0:
          return org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.getDefaultInstance();
        default:
          throw new java.lang.AssertionError("Can't get here.");
      }
    }

    // Creates a callback-style client stub bound to the given channel.
    public static Stub newStub(
        com.google.protobuf.RpcChannel channel) {
      return new Stub(channel);
    }

    // Client-side stub: forwards each call to the channel with this
    // service's method descriptor and the expected response prototype.
    public static final class Stub extends org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.LlapPluginProtocol implements Interface {
      private Stub(com.google.protobuf.RpcChannel channel) {
        this.channel = channel;
      }

      private final com.google.protobuf.RpcChannel channel;

      public com.google.protobuf.RpcChannel getChannel() {
        return channel;
      }

      public void updateQuery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto request,
          com.google.protobuf.RpcCallback<org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto> done) {
        channel.callMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.getDefaultInstance(),
          // generalizeCallback re-widens the typed callback so the generic
          // channel can deliver the response as a plain Message.
          com.google.protobuf.RpcUtil.generalizeCallback(
            done,
            org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.class,
            org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.getDefaultInstance()));
      }
    }

    // Creates a blocking (synchronous) client stub bound to the given channel.
    public static BlockingInterface newBlockingStub(
        com.google.protobuf.BlockingRpcChannel channel) {
      return new BlockingStub(channel);
    }

    // Blocking (synchronous) view of the service.
    public interface BlockingInterface {
      public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto updateQuery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto request)
          throws com.google.protobuf.ServiceException;
    }

    // Blocking client stub: synchronously invokes the channel and casts the
    // returned Message to the typed response.
    private static final class BlockingStub implements BlockingInterface {
      private BlockingStub(com.google.protobuf.BlockingRpcChannel channel) {
        this.channel = channel;
      }

      private final com.google.protobuf.BlockingRpcChannel channel;

      public org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto updateQuery(
          com.google.protobuf.RpcController controller,
          org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryRequestProto request)
          throws com.google.protobuf.ServiceException {
        return (org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto) channel.callBlockingMethod(
          getDescriptor().getMethods().get(0),
          controller,
          request,
          org.apache.hadoop.hive.llap.plugin.rpc.LlapPluginProtocolProtos.UpdateQueryResponseProto.getDefaultInstance());
      }

    }

    // @@protoc_insertion_point(class_scope:LlapPluginProtocol)
  }
| |
  // Cached message descriptors and reflective field-accessor tables for the
  // two message types. Assigned exactly once by the descriptor assigner in
  // this class's static initializer.
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_UpdateQueryRequestProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_UpdateQueryRequestProto_fieldAccessorTable;
  private static com.google.protobuf.Descriptors.Descriptor
    internal_static_UpdateQueryResponseProto_descriptor;
  private static
    com.google.protobuf.GeneratedMessage.FieldAccessorTable
      internal_static_UpdateQueryResponseProto_fieldAccessorTable;
| |
  /**
   * Returns the file-level descriptor for {@code LlapPluginProtocol.proto},
   * built once during class initialization.
   */
  public static com.google.protobuf.Descriptors.FileDescriptor
      getDescriptor() {
    return descriptor;
  }
  private static com.google.protobuf.Descriptors.FileDescriptor
    descriptor;
  static {
    // Serialized FileDescriptorProto for LlapPluginProtocol.proto, embedded
    // by protoc as an escaped string constant. Must not be edited by hand:
    // message/field/service indices below depend on its exact contents.
    java.lang.String[] descriptorData = {
      "\n\030LlapPluginProtocol.proto\"8\n\027UpdateQuer" +
      "yRequestProto\022\035\n\025guaranteed_task_count\030\001" +
      " \001(\005\"\032\n\030UpdateQueryResponseProto2X\n\022Llap" +
      "PluginProtocol\022B\n\013updateQuery\022\030.UpdateQu" +
      "eryRequestProto\032\031.UpdateQueryResponsePro" +
      "toBH\n&org.apache.hadoop.hive.llap.plugin" +
      ".rpcB\030LlapPluginProtocolProtos\210\001\001\240\001\001"
    };
    // Callback invoked once the FileDescriptor is built: caches the message
    // descriptors (by declaration order) and their field-accessor tables.
    com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
      new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
        public com.google.protobuf.ExtensionRegistry assignDescriptors(
            com.google.protobuf.Descriptors.FileDescriptor root) {
          descriptor = root;
          internal_static_UpdateQueryRequestProto_descriptor =
            getDescriptor().getMessageTypes().get(0);
          internal_static_UpdateQueryRequestProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_UpdateQueryRequestProto_descriptor,
              new java.lang.String[] { "GuaranteedTaskCount", });
          internal_static_UpdateQueryResponseProto_descriptor =
            getDescriptor().getMessageTypes().get(1);
          internal_static_UpdateQueryResponseProto_fieldAccessorTable = new
            com.google.protobuf.GeneratedMessage.FieldAccessorTable(
              internal_static_UpdateQueryResponseProto_descriptor,
              new java.lang.String[] { });
          // No extensions to register for this file.
          return null;
        }
      };
    // Build the descriptor now; this file imports no other .proto files,
    // hence the empty dependency array.
    com.google.protobuf.Descriptors.FileDescriptor
      .internalBuildGeneratedFileFrom(descriptorData,
        new com.google.protobuf.Descriptors.FileDescriptor[] {
        }, assigner);
  }
| |
| // @@protoc_insertion_point(outer_class_scope) |
| } |