// Generated by the protocol buffer compiler. DO NOT EDIT!
// source: ClusterStatus.proto
package org.apache.hadoop.hbase.protobuf.generated;
public final class ClusterStatusProtos {
private ClusterStatusProtos() {}
public static void registerAllExtensions(
com.google.protobuf.ExtensionRegistry registry) {
}
public interface RegionStateOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .RegionInfo region_info = 1;
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
boolean hasRegionInfo();
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo();
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder();
// required .RegionState.State state = 2;
/**
* <code>required .RegionState.State state = 2;</code>
*/
boolean hasState();
/**
* <code>required .RegionState.State state = 2;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState();
// optional uint64 stamp = 3;
/**
* <code>optional uint64 stamp = 3;</code>
*/
boolean hasStamp();
/**
* <code>optional uint64 stamp = 3;</code>
*/
long getStamp();
}
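// Illustrative sketch, not generated code: with proto2 semantics a get*()
// accessor returns the field's default when the field is unset, so callers
// should consult has*() first. The helper name below is hypothetical.
static long stampOrDefault(RegionStateOrBuilder rs, long fallback) {
  // hasStamp() reports the presence bit recorded while parsing or building.
  return rs.hasStamp() ? rs.getStamp() : fallback;
}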
/**
* Protobuf type {@code RegionState}
*/
public static final class RegionState extends
com.google.protobuf.GeneratedMessage
implements RegionStateOrBuilder {
// Use RegionState.newBuilder() to construct.
private RegionState(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RegionState(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RegionState defaultInstance;
public static RegionState getDefaultInstance() {
return defaultInstance;
}
public RegionState getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RegionState(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = regionInfo_.toBuilder();
}
regionInfo_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(regionInfo_);
regionInfo_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
int rawValue = input.readEnum();
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State value = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.valueOf(rawValue);
if (value == null) {
unknownFields.mergeVarintField(2, rawValue);
} else {
bitField0_ |= 0x00000002;
state_ = value;
}
break;
}
case 24: {
bitField0_ |= 0x00000004;
stamp_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionState_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionState_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder.class);
}
public static com.google.protobuf.Parser<RegionState> PARSER =
new com.google.protobuf.AbstractParser<RegionState>() {
public RegionState parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new RegionState(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<RegionState> getParserForType() {
return PARSER;
}
/**
* Protobuf enum {@code RegionState.State}
*/
public enum State
implements com.google.protobuf.ProtocolMessageEnum {
/**
* <code>OFFLINE = 0;</code>
*
* <pre>
* region is in an offline state
* </pre>
*/
OFFLINE(0, 0),
/**
* <code>PENDING_OPEN = 1;</code>
*
* <pre>
* sent rpc to server to open but has not begun
* </pre>
*/
PENDING_OPEN(1, 1),
/**
* <code>OPENING = 2;</code>
*
* <pre>
* server has begun to open but not yet done
* </pre>
*/
OPENING(2, 2),
/**
* <code>OPEN = 3;</code>
*
* <pre>
* server opened region and updated meta
* </pre>
*/
OPEN(3, 3),
/**
* <code>PENDING_CLOSE = 4;</code>
*
* <pre>
* sent rpc to server to close but has not begun
* </pre>
*/
PENDING_CLOSE(4, 4),
/**
* <code>CLOSING = 5;</code>
*
* <pre>
* server has begun to close but not yet done
* </pre>
*/
CLOSING(5, 5),
/**
* <code>CLOSED = 6;</code>
*
* <pre>
* server closed region and updated meta
* </pre>
*/
CLOSED(6, 6),
/**
* <code>SPLITTING = 7;</code>
*
* <pre>
* server started split of a region
* </pre>
*/
SPLITTING(7, 7),
/**
* <code>SPLIT = 8;</code>
*
* <pre>
* server completed split of a region
* </pre>
*/
SPLIT(8, 8),
/**
* <code>FAILED_OPEN = 9;</code>
*
* <pre>
* failed to open, and won't retry any more
* </pre>
*/
FAILED_OPEN(9, 9),
/**
* <code>FAILED_CLOSE = 10;</code>
*
* <pre>
* failed to close, and won't retry any more
* </pre>
*/
FAILED_CLOSE(10, 10),
/**
* <code>MERGING = 11;</code>
*
* <pre>
* server started merge of a region
* </pre>
*/
MERGING(11, 11),
/**
* <code>MERGED = 12;</code>
*
* <pre>
* server completed merge of a region
* </pre>
*/
MERGED(12, 12),
/**
* <code>SPLITTING_NEW = 13;</code>
*
* <pre>
* new region to be created when RS splits a parent
* region, but hasn't been created yet, or master doesn't
* know it's already created
* </pre>
*/
SPLITTING_NEW(13, 13),
/**
* <code>MERGING_NEW = 14;</code>
*
* <pre>
* new region to be created when RS merges two daughter
* regions, but hasn't been created yet, or master doesn't
* know it's already created
* </pre>
*/
MERGING_NEW(14, 14),
;
/**
* <code>OFFLINE = 0;</code>
*
* <pre>
* region is in an offline state
* </pre>
*/
public static final int OFFLINE_VALUE = 0;
/**
* <code>PENDING_OPEN = 1;</code>
*
* <pre>
* sent rpc to server to open but has not begun
* </pre>
*/
public static final int PENDING_OPEN_VALUE = 1;
/**
* <code>OPENING = 2;</code>
*
* <pre>
* server has begun to open but not yet done
* </pre>
*/
public static final int OPENING_VALUE = 2;
/**
* <code>OPEN = 3;</code>
*
* <pre>
* server opened region and updated meta
* </pre>
*/
public static final int OPEN_VALUE = 3;
/**
* <code>PENDING_CLOSE = 4;</code>
*
* <pre>
* sent rpc to server to close but has not begun
* </pre>
*/
public static final int PENDING_CLOSE_VALUE = 4;
/**
* <code>CLOSING = 5;</code>
*
* <pre>
* server has begun to close but not yet done
* </pre>
*/
public static final int CLOSING_VALUE = 5;
/**
* <code>CLOSED = 6;</code>
*
* <pre>
* server closed region and updated meta
* </pre>
*/
public static final int CLOSED_VALUE = 6;
/**
* <code>SPLITTING = 7;</code>
*
* <pre>
* server started split of a region
* </pre>
*/
public static final int SPLITTING_VALUE = 7;
/**
* <code>SPLIT = 8;</code>
*
* <pre>
* server completed split of a region
* </pre>
*/
public static final int SPLIT_VALUE = 8;
/**
* <code>FAILED_OPEN = 9;</code>
*
* <pre>
* failed to open, and won't retry any more
* </pre>
*/
public static final int FAILED_OPEN_VALUE = 9;
/**
* <code>FAILED_CLOSE = 10;</code>
*
* <pre>
* failed to close, and won't retry any more
* </pre>
*/
public static final int FAILED_CLOSE_VALUE = 10;
/**
* <code>MERGING = 11;</code>
*
* <pre>
* server started merge of a region
* </pre>
*/
public static final int MERGING_VALUE = 11;
/**
* <code>MERGED = 12;</code>
*
* <pre>
* server completed merge of a region
* </pre>
*/
public static final int MERGED_VALUE = 12;
/**
* <code>SPLITTING_NEW = 13;</code>
*
* <pre>
* new region to be created when RS splits a parent
* region, but hasn't been created yet, or master doesn't
* know it's already created
* </pre>
*/
public static final int SPLITTING_NEW_VALUE = 13;
/**
* <code>MERGING_NEW = 14;</code>
*
* <pre>
* new region to be created when RS merges two daughter
* regions, but hasn't been created yet, or master doesn't
* know it's already created
* </pre>
*/
public static final int MERGING_NEW_VALUE = 14;
public final int getNumber() { return value; }
public static State valueOf(int value) {
switch (value) {
case 0: return OFFLINE;
case 1: return PENDING_OPEN;
case 2: return OPENING;
case 3: return OPEN;
case 4: return PENDING_CLOSE;
case 5: return CLOSING;
case 6: return CLOSED;
case 7: return SPLITTING;
case 8: return SPLIT;
case 9: return FAILED_OPEN;
case 10: return FAILED_CLOSE;
case 11: return MERGING;
case 12: return MERGED;
case 13: return SPLITTING_NEW;
case 14: return MERGING_NEW;
default: return null;
}
}
public static com.google.protobuf.Internal.EnumLiteMap<State>
internalGetValueMap() {
return internalValueMap;
}
private static com.google.protobuf.Internal.EnumLiteMap<State>
internalValueMap =
new com.google.protobuf.Internal.EnumLiteMap<State>() {
public State findValueByNumber(int number) {
return State.valueOf(number);
}
};
public final com.google.protobuf.Descriptors.EnumValueDescriptor
getValueDescriptor() {
return getDescriptor().getValues().get(index);
}
public final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptorForType() {
return getDescriptor();
}
public static final com.google.protobuf.Descriptors.EnumDescriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDescriptor().getEnumTypes().get(0);
}
private static final State[] VALUES = values();
public static State valueOf(
com.google.protobuf.Descriptors.EnumValueDescriptor desc) {
if (desc.getType() != getDescriptor()) {
throw new java.lang.IllegalArgumentException(
"EnumValueDescriptor is not for this type.");
}
return VALUES[desc.getIndex()];
}
private final int index;
private final int value;
private State(int index, int value) {
this.index = index;
this.value = value;
}
// @@protoc_insertion_point(enum_scope:RegionState.State)
}
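// Illustrative sketch, not generated code: State.valueOf(int) returns null
// for numbers this enum does not define, which is how the parsing
// constructor above shunts unrecognized values into the unknown-field set.
// The helper name is hypothetical.
static State stateForNumber(int rawValue, State fallback) {
  State parsed = State.valueOf(rawValue);
  return parsed == null ? fallback : parsed;
}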
private int bitField0_;
// required .RegionInfo region_info = 1;
public static final int REGION_INFO_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_;
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
public boolean hasRegionInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
return regionInfo_;
}
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
return regionInfo_;
}
// required .RegionState.State state = 2;
public static final int STATE_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State state_;
/**
* <code>required .RegionState.State state = 2;</code>
*/
public boolean hasState() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required .RegionState.State state = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState() {
return state_;
}
// optional uint64 stamp = 3;
public static final int STAMP_FIELD_NUMBER = 3;
private long stamp_;
/**
* <code>optional uint64 stamp = 3;</code>
*/
public boolean hasStamp() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional uint64 stamp = 3;</code>
*/
public long getStamp() {
return stamp_;
}
private void initFields() {
regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
stamp_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegionInfo()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasState()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegionInfo().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, regionInfo_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeEnum(2, state_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt64(3, stamp_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, regionInfo_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeEnumSize(2, state_.getNumber());
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(3, stamp_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState) obj;
boolean result = true;
result = result && (hasRegionInfo() == other.hasRegionInfo());
if (hasRegionInfo()) {
result = result && getRegionInfo()
.equals(other.getRegionInfo());
}
result = result && (hasState() == other.hasState());
if (hasState()) {
result = result &&
(getState() == other.getState());
}
result = result && (hasStamp() == other.hasStamp());
if (hasStamp()) {
result = result && (getStamp()
== other.getStamp());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegionInfo()) {
hash = (37 * hash) + REGION_INFO_FIELD_NUMBER;
hash = (53 * hash) + getRegionInfo().hashCode();
}
if (hasState()) {
hash = (37 * hash) + STATE_FIELD_NUMBER;
hash = (53 * hash) + hashEnum(getState());
}
if (hasStamp()) {
hash = (37 * hash) + STAMP_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getStamp());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
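// Illustrative sketch, not generated code: a serialize/parse round trip
// through the byte[] overload declared above. The helper name is
// hypothetical.
static RegionState reparse(RegionState original)
    throws com.google.protobuf.InvalidProtocolBufferException {
  return parseFrom(original.toByteArray());
}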
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code RegionState}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionState_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionState_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegionInfoFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (regionInfoBuilder_ == null) {
regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
} else {
regionInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
bitField0_ = (bitField0_ & ~0x00000002);
stamp_ = 0L;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionState_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState build() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (regionInfoBuilder_ == null) {
result.regionInfo_ = regionInfo_;
} else {
result.regionInfo_ = regionInfoBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.state_ = state_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.stamp_ = stamp_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance()) return this;
if (other.hasRegionInfo()) {
mergeRegionInfo(other.getRegionInfo());
}
if (other.hasState()) {
setState(other.getState());
}
if (other.hasStamp()) {
setStamp(other.getStamp());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasRegionInfo()) {
return false;
}
if (!hasState()) {
return false;
}
if (!getRegionInfo().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .RegionInfo region_info = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder> regionInfoBuilder_;
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
public boolean hasRegionInfo() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo getRegionInfo() {
if (regionInfoBuilder_ == null) {
return regionInfo_;
} else {
return regionInfoBuilder_.getMessage();
}
}
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
public Builder setRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
if (regionInfoBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
regionInfo_ = value;
onChanged();
} else {
regionInfoBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
public Builder setRegionInfo(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder builderForValue) {
if (regionInfoBuilder_ == null) {
regionInfo_ = builderForValue.build();
onChanged();
} else {
regionInfoBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
public Builder mergeRegionInfo(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo value) {
if (regionInfoBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
regionInfo_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance()) {
regionInfo_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.newBuilder(regionInfo_).mergeFrom(value).buildPartial();
} else {
regionInfo_ = value;
}
onChanged();
} else {
regionInfoBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
public Builder clearRegionInfo() {
if (regionInfoBuilder_ == null) {
regionInfo_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.getDefaultInstance();
onChanged();
} else {
regionInfoBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder getRegionInfoBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegionInfoFieldBuilder().getBuilder();
}
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder getRegionInfoOrBuilder() {
if (regionInfoBuilder_ != null) {
return regionInfoBuilder_.getMessageOrBuilder();
} else {
return regionInfo_;
}
}
/**
* <code>required .RegionInfo region_info = 1;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>
getRegionInfoFieldBuilder() {
if (regionInfoBuilder_ == null) {
regionInfoBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfoOrBuilder>(
regionInfo_,
getParentForChildren(),
isClean());
regionInfo_ = null;
}
return regionInfoBuilder_;
}
// required .RegionState.State state = 2;
private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
/**
* <code>required .RegionState.State state = 2;</code>
*/
public boolean hasState() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required .RegionState.State state = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State getState() {
return state_;
}
/**
* <code>required .RegionState.State state = 2;</code>
*/
public Builder setState(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000002;
state_ = value;
onChanged();
return this;
}
/**
* <code>required .RegionState.State state = 2;</code>
*/
public Builder clearState() {
bitField0_ = (bitField0_ & ~0x00000002);
state_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.State.OFFLINE;
onChanged();
return this;
}
// optional uint64 stamp = 3;
private long stamp_ ;
/**
* <code>optional uint64 stamp = 3;</code>
*/
public boolean hasStamp() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional uint64 stamp = 3;</code>
*/
public long getStamp() {
return stamp_;
}
/**
* <code>optional uint64 stamp = 3;</code>
*/
public Builder setStamp(long value) {
bitField0_ |= 0x00000004;
stamp_ = value;
onChanged();
return this;
}
/**
* <code>optional uint64 stamp = 3;</code>
*/
public Builder clearStamp() {
bitField0_ = (bitField0_ & ~0x00000004);
stamp_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:RegionState)
}
static {
defaultInstance = new RegionState(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:RegionState)
}
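// Illustrative sketch, not generated code: building a RegionState. Both
// region_info and state are required, so build() throws an
// UninitializedMessageException if either is unset; stamp is optional. The
// RegionInfo argument and the helper name are assumptions for the example.
static RegionState exampleRegionState(
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionInfo info,
    long stampMillis) {
  return RegionState.newBuilder()
      .setRegionInfo(info)               // required message field
      .setState(RegionState.State.OPEN)  // required enum field
      .setStamp(stampMillis)             // optional uint64
      .build();
}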
public interface RegionInTransitionOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .RegionSpecifier spec = 1;
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
boolean hasSpec();
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getSpec();
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getSpecOrBuilder();
// required .RegionState region_state = 2;
/**
* <code>required .RegionState region_state = 2;</code>
*/
boolean hasRegionState();
/**
* <code>required .RegionState region_state = 2;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState getRegionState();
/**
* <code>required .RegionState region_state = 2;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder getRegionStateOrBuilder();
}
/**
* Protobuf type {@code RegionInTransition}
*/
public static final class RegionInTransition extends
com.google.protobuf.GeneratedMessage
implements RegionInTransitionOrBuilder {
// Use RegionInTransition.newBuilder() to construct.
private RegionInTransition(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RegionInTransition(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RegionInTransition defaultInstance;
public static RegionInTransition getDefaultInstance() {
return defaultInstance;
}
public RegionInTransition getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RegionInTransition(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = spec_.toBuilder();
}
spec_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(spec_);
spec_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = regionState_.toBuilder();
}
regionState_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(regionState_);
regionState_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionInTransition_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionInTransition_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder.class);
}
public static com.google.protobuf.Parser<RegionInTransition> PARSER =
new com.google.protobuf.AbstractParser<RegionInTransition>() {
public RegionInTransition parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new RegionInTransition(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<RegionInTransition> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .RegionSpecifier spec = 1;
public static final int SPEC_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier spec_;
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
public boolean hasSpec() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getSpec() {
return spec_;
}
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getSpecOrBuilder() {
return spec_;
}
// required .RegionState region_state = 2;
public static final int REGION_STATE_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState regionState_;
/**
* <code>required .RegionState region_state = 2;</code>
*/
public boolean hasRegionState() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required .RegionState region_state = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState getRegionState() {
return regionState_;
}
/**
* <code>required .RegionState region_state = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder getRegionStateOrBuilder() {
return regionState_;
}
private void initFields() {
spec_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
regionState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasSpec()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasRegionState()) {
memoizedIsInitialized = 0;
return false;
}
if (!getSpec().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegionState().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, spec_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, regionState_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, spec_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, regionState_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition) obj;
boolean result = true;
result = result && (hasSpec() == other.hasSpec());
if (hasSpec()) {
result = result && getSpec()
.equals(other.getSpec());
}
result = result && (hasRegionState() == other.hasRegionState());
if (hasRegionState()) {
result = result && getRegionState()
.equals(other.getRegionState());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasSpec()) {
hash = (37 * hash) + SPEC_FIELD_NUMBER;
hash = (53 * hash) + getSpec().hashCode();
}
if (hasRegionState()) {
hash = (37 * hash) + REGION_STATE_FIELD_NUMBER;
hash = (53 * hash) + getRegionState().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
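// Illustrative sketch, not generated code: the delimited overloads above
// pair with writeDelimitedTo(...) to frame several messages on one stream
// with a varint length prefix. The helper name is hypothetical.
static RegionInTransition echoDelimited(RegionInTransition msg)
    throws java.io.IOException {
  java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
  msg.writeDelimitedTo(out);  // length prefix, then the message body
  return parseDelimitedFrom(
      new java.io.ByteArrayInputStream(out.toByteArray()));
}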
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code RegionInTransition}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionInTransition_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionInTransition_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getSpecFieldBuilder();
getRegionStateFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (specBuilder_ == null) {
spec_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
} else {
specBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (regionStateBuilder_ == null) {
regionState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
} else {
regionStateBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionInTransition_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition build() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (specBuilder_ == null) {
result.spec_ = spec_;
} else {
result.spec_ = specBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (regionStateBuilder_ == null) {
result.regionState_ = regionState_;
} else {
result.regionState_ = regionStateBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.getDefaultInstance()) return this;
if (other.hasSpec()) {
mergeSpec(other.getSpec());
}
if (other.hasRegionState()) {
mergeRegionState(other.getRegionState());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasSpec()) {
return false;
}
if (!hasRegionState()) {
return false;
}
if (!getSpec().isInitialized()) {
return false;
}
if (!getRegionState().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .RegionSpecifier spec = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier spec_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> specBuilder_;
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
public boolean hasSpec() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getSpec() {
if (specBuilder_ == null) {
return spec_;
} else {
return specBuilder_.getMessage();
}
}
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
public Builder setSpec(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
if (specBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
spec_ = value;
onChanged();
} else {
specBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
public Builder setSpec(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
if (specBuilder_ == null) {
spec_ = builderForValue.build();
onChanged();
} else {
specBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
public Builder mergeSpec(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
if (specBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
spec_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
spec_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(spec_).mergeFrom(value).buildPartial();
} else {
spec_ = value;
}
onChanged();
} else {
specBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
public Builder clearSpec() {
if (specBuilder_ == null) {
spec_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
onChanged();
} else {
specBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getSpecBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getSpecFieldBuilder().getBuilder();
}
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getSpecOrBuilder() {
if (specBuilder_ != null) {
return specBuilder_.getMessageOrBuilder();
} else {
return spec_;
}
}
/**
* <code>required .RegionSpecifier spec = 1;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
getSpecFieldBuilder() {
if (specBuilder_ == null) {
specBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
spec_,
getParentForChildren(),
isClean());
spec_ = null;
}
return specBuilder_;
}
// required .RegionState region_state = 2;
private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState regionState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder> regionStateBuilder_;
/**
* <code>required .RegionState region_state = 2;</code>
*/
public boolean hasRegionState() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required .RegionState region_state = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState getRegionState() {
if (regionStateBuilder_ == null) {
return regionState_;
} else {
return regionStateBuilder_.getMessage();
}
}
/**
* <code>required .RegionState region_state = 2;</code>
*/
public Builder setRegionState(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState value) {
if (regionStateBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
regionState_ = value;
onChanged();
} else {
regionStateBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* <code>required .RegionState region_state = 2;</code>
*/
public Builder setRegionState(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder builderForValue) {
if (regionStateBuilder_ == null) {
regionState_ = builderForValue.build();
onChanged();
} else {
regionStateBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* <code>required .RegionState region_state = 2;</code>
*/
public Builder mergeRegionState(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState value) {
if (regionStateBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
regionState_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance()) {
regionState_ =
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.newBuilder(regionState_).mergeFrom(value).buildPartial();
} else {
regionState_ = value;
}
onChanged();
} else {
regionStateBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* <code>required .RegionState region_state = 2;</code>
*/
public Builder clearRegionState() {
if (regionStateBuilder_ == null) {
regionState_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.getDefaultInstance();
onChanged();
} else {
regionStateBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* <code>required .RegionState region_state = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder getRegionStateBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getRegionStateFieldBuilder().getBuilder();
}
/**
* <code>required .RegionState region_state = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder getRegionStateOrBuilder() {
if (regionStateBuilder_ != null) {
return regionStateBuilder_.getMessageOrBuilder();
} else {
return regionState_;
}
}
/**
* <code>required .RegionState region_state = 2;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder>
getRegionStateFieldBuilder() {
if (regionStateBuilder_ == null) {
regionStateBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionState.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStateOrBuilder>(
regionState_,
getParentForChildren(),
isClean());
regionState_ = null;
}
return regionStateBuilder_;
}
// @@protoc_insertion_point(builder_scope:RegionInTransition)
}
static {
defaultInstance = new RegionInTransition(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:RegionInTransition)
}
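// Illustrative sketch, not generated code: composing a RegionInTransition
// from a caller-supplied RegionSpecifier and a RegionState; both fields are
// required. The helper name is hypothetical.
static RegionInTransition exampleTransition(
    org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier spec,
    RegionState state) {
  return RegionInTransition.newBuilder()
      .setSpec(spec)          // required
      .setRegionState(state)  // required
      .build();
}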
public interface StoreSequenceIdOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required bytes family_name = 1;
/**
* <code>required bytes family_name = 1;</code>
*/
boolean hasFamilyName();
/**
* <code>required bytes family_name = 1;</code>
*/
com.google.protobuf.ByteString getFamilyName();
// required uint64 sequence_id = 2;
/**
* <code>required uint64 sequence_id = 2;</code>
*/
boolean hasSequenceId();
/**
* <code>required uint64 sequence_id = 2;</code>
*/
long getSequenceId();
}
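// Illustrative sketch, not generated code: family_name travels as raw bytes,
// so a UTF-8 column family name is wrapped in a ByteString; sequence_id is a
// plain uint64. Assumes the standard generated Builder for StoreSequenceId;
// the helper name is hypothetical.
static StoreSequenceId exampleStoreSequenceId(String family, long seqId) {
  return StoreSequenceId.newBuilder()
      .setFamilyName(com.google.protobuf.ByteString.copyFromUtf8(family))
      .setSequenceId(seqId)
      .build();
}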
/**
* Protobuf type {@code StoreSequenceId}
*
* <pre>
**
* sequence Id of a store
* </pre>
*/
public static final class StoreSequenceId extends
com.google.protobuf.GeneratedMessage
implements StoreSequenceIdOrBuilder {
// Use StoreSequenceId.newBuilder() to construct.
private StoreSequenceId(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private StoreSequenceId(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final StoreSequenceId defaultInstance;
public static StoreSequenceId getDefaultInstance() {
return defaultInstance;
}
public StoreSequenceId getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private StoreSequenceId(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
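// Each tag packs (field_number << 3) | wire_type: tag 10 is field 1 with
// wire type 2 (length-delimited bytes) and tag 16 is field 2 with wire
// type 0 (varint). A tag of 0 marks the end of the stream.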
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
familyName_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
sequenceId_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder.class);
}
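// PARSER delegates to the stream-parsing constructor above; the static
// parseFrom(...) overloads further down all route through it.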
public static com.google.protobuf.Parser<StoreSequenceId> PARSER =
new com.google.protobuf.AbstractParser<StoreSequenceId>() {
public StoreSequenceId parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new StoreSequenceId(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<StoreSequenceId> getParserForType() {
return PARSER;
}
private int bitField0_;
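// Each bit of bitField0_ records whether the corresponding field has been
// explicitly set (proto2 field-presence tracking): bit 0 for family_name,
// bit 1 for sequence_id.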
// required bytes family_name = 1;
public static final int FAMILY_NAME_FIELD_NUMBER = 1;
private com.google.protobuf.ByteString familyName_;
/**
* <code>required bytes family_name = 1;</code>
*/
public boolean hasFamilyName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required bytes family_name = 1;</code>
*/
public com.google.protobuf.ByteString getFamilyName() {
return familyName_;
}
// required uint64 sequence_id = 2;
public static final int SEQUENCE_ID_FIELD_NUMBER = 2;
private long sequenceId_;
/**
* <code>required uint64 sequence_id = 2;</code>
*/
public boolean hasSequenceId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required uint64 sequence_id = 2;</code>
*/
public long getSequenceId() {
return sequenceId_;
}
private void initFields() {
familyName_ = com.google.protobuf.ByteString.EMPTY;
sequenceId_ = 0L;
}
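// Memoized result of isInitialized(): -1 = not yet computed, 0 = a required
// field is missing, 1 = all required fields are present.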
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasFamilyName()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSequenceId()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, familyName_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, sequenceId_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, familyName_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, sequenceId_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId) obj;
boolean result = true;
result = result && (hasFamilyName() == other.hasFamilyName());
if (hasFamilyName()) {
result = result && getFamilyName()
.equals(other.getFamilyName());
}
result = result && (hasSequenceId() == other.hasSequenceId());
if (hasSequenceId()) {
result = result && (getSequenceId()
== other.getSequenceId());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
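// 0 doubles as the "not yet computed" sentinel; a real hash that lands on 0
// is simply recomputed on each call.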
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasFamilyName()) {
hash = (37 * hash) + FAMILY_NAME_FIELD_NUMBER;
hash = (53 * hash) + getFamilyName().hashCode();
}
if (hasSequenceId()) {
hash = (37 * hash) + SEQUENCE_ID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getSequenceId());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code StoreSequenceId}
*
* <pre>
**
* sequence Id of a store
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
familyName_ = com.google.protobuf.ByteString.EMPTY;
bitField0_ = (bitField0_ & ~0x00000001);
sequenceId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_StoreSequenceId_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId build() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.familyName_ = familyName_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.sequenceId_ = sequenceId_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance()) return this;
if (other.hasFamilyName()) {
setFamilyName(other.getFamilyName());
}
if (other.hasSequenceId()) {
setSequenceId(other.getSequenceId());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasFamilyName()) {
return false;
}
if (!hasSequenceId()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required bytes family_name = 1;
private com.google.protobuf.ByteString familyName_ = com.google.protobuf.ByteString.EMPTY;
/**
* <code>required bytes family_name = 1;</code>
*/
public boolean hasFamilyName() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required bytes family_name = 1;</code>
*/
public com.google.protobuf.ByteString getFamilyName() {
return familyName_;
}
/**
* <code>required bytes family_name = 1;</code>
*/
public Builder setFamilyName(com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
familyName_ = value;
onChanged();
return this;
}
/**
* <code>required bytes family_name = 1;</code>
*/
public Builder clearFamilyName() {
bitField0_ = (bitField0_ & ~0x00000001);
familyName_ = getDefaultInstance().getFamilyName();
onChanged();
return this;
}
// required uint64 sequence_id = 2;
private long sequenceId_;
/**
* <code>required uint64 sequence_id = 2;</code>
*/
public boolean hasSequenceId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required uint64 sequence_id = 2;</code>
*/
public long getSequenceId() {
return sequenceId_;
}
/**
* <code>required uint64 sequence_id = 2;</code>
*/
public Builder setSequenceId(long value) {
bitField0_ |= 0x00000002;
sequenceId_ = value;
onChanged();
return this;
}
/**
* <code>required uint64 sequence_id = 2;</code>
*/
public Builder clearSequenceId() {
bitField0_ = (bitField0_ & ~0x00000002);
sequenceId_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:StoreSequenceId)
}
static {
defaultInstance = new StoreSequenceId(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:StoreSequenceId)
}
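// Illustrative sketch (not part of the generated output): round-tripping a
// StoreSequenceId; "cf" and 42L are placeholder values.
//
//   StoreSequenceId id = StoreSequenceId.newBuilder()
//       .setFamilyName(com.google.protobuf.ByteString.copyFromUtf8("cf"))
//       .setSequenceId(42L)
//       .build();                     // throws if a required field is unset
//   byte[] bytes = id.toByteArray();  // inherited serialization helper
//   StoreSequenceId copy = StoreSequenceId.parseFrom(bytes);
//   assert copy.equals(id) && copy.hashCode() == id.hashCode();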
public interface RegionStoreSequenceIdsOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint64 last_flushed_sequence_id = 1;
/**
* <code>required uint64 last_flushed_sequence_id = 1;</code>
*/
boolean hasLastFlushedSequenceId();
/**
* <code>required uint64 last_flushed_sequence_id = 1;</code>
*/
long getLastFlushedSequenceId();
// repeated .StoreSequenceId store_sequence_id = 2;
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId>
getStoreSequenceIdList();
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreSequenceId(int index);
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
int getStoreSequenceIdCount();
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>
getStoreSequenceIdOrBuilderList();
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder(
int index);
}
/**
* Protobuf type {@code RegionStoreSequenceIds}
*
* <pre>
**
* Contains the last flushed sequence id of a region, which should be the
* minimum of its store sequence ids, and the list of sequence ids of the
* region's stores.
* </pre>
*/
public static final class RegionStoreSequenceIds extends
com.google.protobuf.GeneratedMessage
implements RegionStoreSequenceIdsOrBuilder {
// Use RegionStoreSequenceIds.newBuilder() to construct.
private RegionStoreSequenceIds(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RegionStoreSequenceIds(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RegionStoreSequenceIds defaultInstance;
public static RegionStoreSequenceIds getDefaultInstance() {
return defaultInstance;
}
public RegionStoreSequenceIds getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RegionStoreSequenceIds(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
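// Tag 8 is field 1 with wire type 0 (varint) and tag 18 is field 2 with
// wire type 2 (a length-delimited StoreSequenceId message); the repeated
// field is accumulated in a mutable list and frozen in the finally block.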
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
lastFlushedSequenceId_ = input.readUInt64();
break;
}
case 18: {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
storeSequenceId_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId>();
mutable_bitField0_ |= 0x00000002;
}
storeSequenceId_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
storeSequenceId_ = java.util.Collections.unmodifiableList(storeSequenceId_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.Builder.class);
}
public static com.google.protobuf.Parser<RegionStoreSequenceIds> PARSER =
new com.google.protobuf.AbstractParser<RegionStoreSequenceIds>() {
public RegionStoreSequenceIds parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new RegionStoreSequenceIds(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<RegionStoreSequenceIds> getParserForType() {
return PARSER;
}
private int bitField0_;
// required uint64 last_flushed_sequence_id = 1;
public static final int LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER = 1;
private long lastFlushedSequenceId_;
/**
* <code>required uint64 last_flushed_sequence_id = 1;</code>
*/
public boolean hasLastFlushedSequenceId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required uint64 last_flushed_sequence_id = 1;</code>
*/
public long getLastFlushedSequenceId() {
return lastFlushedSequenceId_;
}
// repeated .StoreSequenceId store_sequence_id = 2;
public static final int STORE_SEQUENCE_ID_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> storeSequenceId_;
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> getStoreSequenceIdList() {
return storeSequenceId_;
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>
getStoreSequenceIdOrBuilderList() {
return storeSequenceId_;
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public int getStoreSequenceIdCount() {
return storeSequenceId_.size();
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreSequenceId(int index) {
return storeSequenceId_.get(index);
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder(
int index) {
return storeSequenceId_.get(index);
}
private void initFields() {
lastFlushedSequenceId_ = 0L;
storeSequenceId_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasLastFlushedSequenceId()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getStoreSequenceIdCount(); i++) {
if (!getStoreSequenceId(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, lastFlushedSequenceId_);
}
for (int i = 0; i < storeSequenceId_.size(); i++) {
output.writeMessage(2, storeSequenceId_.get(i));
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, lastFlushedSequenceId_);
}
for (int i = 0; i < storeSequenceId_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, storeSequenceId_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) obj;
boolean result = true;
result = result && (hasLastFlushedSequenceId() == other.hasLastFlushedSequenceId());
if (hasLastFlushedSequenceId()) {
result = result && (getLastFlushedSequenceId()
== other.getLastFlushedSequenceId());
}
result = result && getStoreSequenceIdList()
.equals(other.getStoreSequenceIdList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasLastFlushedSequenceId()) {
hash = (37 * hash) + LAST_FLUSHED_SEQUENCE_ID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLastFlushedSequenceId());
}
if (getStoreSequenceIdCount() > 0) {
hash = (37 * hash) + STORE_SEQUENCE_ID_FIELD_NUMBER;
hash = (53 * hash) + getStoreSequenceIdList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code RegionStoreSequenceIds}
*
* <pre>
**
* Contains the last flushed sequence id of a region, which should be the
* minimum of its store sequence ids, and the list of sequence ids of the
* region's stores.
* </pre>
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIdsOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getStoreSequenceIdFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
lastFlushedSequenceId_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
if (storeSequenceIdBuilder_ == null) {
storeSequenceId_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
} else {
storeSequenceIdBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionStoreSequenceIds_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds build() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.lastFlushedSequenceId_ = lastFlushedSequenceId_;
if (storeSequenceIdBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
storeSequenceId_ = java.util.Collections.unmodifiableList(storeSequenceId_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.storeSequenceId_ = storeSequenceId_;
} else {
result.storeSequenceId_ = storeSequenceIdBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds.getDefaultInstance()) return this;
if (other.hasLastFlushedSequenceId()) {
setLastFlushedSequenceId(other.getLastFlushedSequenceId());
}
if (storeSequenceIdBuilder_ == null) {
if (!other.storeSequenceId_.isEmpty()) {
if (storeSequenceId_.isEmpty()) {
storeSequenceId_ = other.storeSequenceId_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureStoreSequenceIdIsMutable();
storeSequenceId_.addAll(other.storeSequenceId_);
}
onChanged();
}
} else {
if (!other.storeSequenceId_.isEmpty()) {
if (storeSequenceIdBuilder_.isEmpty()) {
storeSequenceIdBuilder_.dispose();
storeSequenceIdBuilder_ = null;
storeSequenceId_ = other.storeSequenceId_;
bitField0_ = (bitField0_ & ~0x00000002);
storeSequenceIdBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getStoreSequenceIdFieldBuilder() : null;
} else {
storeSequenceIdBuilder_.addAllMessages(other.storeSequenceId_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasLastFlushedSequenceId()) {
return false;
}
for (int i = 0; i < getStoreSequenceIdCount(); i++) {
if (!getStoreSequenceId(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint64 last_flushed_sequence_id = 1;
private long lastFlushedSequenceId_;
/**
* <code>required uint64 last_flushed_sequence_id = 1;</code>
*/
public boolean hasLastFlushedSequenceId() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required uint64 last_flushed_sequence_id = 1;</code>
*/
public long getLastFlushedSequenceId() {
return lastFlushedSequenceId_;
}
/**
* <code>required uint64 last_flushed_sequence_id = 1;</code>
*/
public Builder setLastFlushedSequenceId(long value) {
bitField0_ |= 0x00000001;
lastFlushedSequenceId_ = value;
onChanged();
return this;
}
/**
* <code>required uint64 last_flushed_sequence_id = 1;</code>
*/
public Builder clearLastFlushedSequenceId() {
bitField0_ = (bitField0_ & ~0x00000001);
lastFlushedSequenceId_ = 0L;
onChanged();
return this;
}
// repeated .StoreSequenceId store_sequence_id = 2;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> storeSequenceId_ =
java.util.Collections.emptyList();
private void ensureStoreSequenceIdIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
storeSequenceId_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId>(storeSequenceId_);
bitField0_ |= 0x00000002;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> storeSequenceIdBuilder_;
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> getStoreSequenceIdList() {
if (storeSequenceIdBuilder_ == null) {
return java.util.Collections.unmodifiableList(storeSequenceId_);
} else {
return storeSequenceIdBuilder_.getMessageList();
}
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public int getStoreSequenceIdCount() {
if (storeSequenceIdBuilder_ == null) {
return storeSequenceId_.size();
} else {
return storeSequenceIdBuilder_.getCount();
}
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreSequenceId(int index) {
if (storeSequenceIdBuilder_ == null) {
return storeSequenceId_.get(index);
} else {
return storeSequenceIdBuilder_.getMessage(index);
}
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public Builder setStoreSequenceId(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) {
if (storeSequenceIdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureStoreSequenceIdIsMutable();
storeSequenceId_.set(index, value);
onChanged();
} else {
storeSequenceIdBuilder_.setMessage(index, value);
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public Builder setStoreSequenceId(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) {
if (storeSequenceIdBuilder_ == null) {
ensureStoreSequenceIdIsMutable();
storeSequenceId_.set(index, builderForValue.build());
onChanged();
} else {
storeSequenceIdBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public Builder addStoreSequenceId(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) {
if (storeSequenceIdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureStoreSequenceIdIsMutable();
storeSequenceId_.add(value);
onChanged();
} else {
storeSequenceIdBuilder_.addMessage(value);
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public Builder addStoreSequenceId(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) {
if (storeSequenceIdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureStoreSequenceIdIsMutable();
storeSequenceId_.add(index, value);
onChanged();
} else {
storeSequenceIdBuilder_.addMessage(index, value);
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public Builder addStoreSequenceId(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) {
if (storeSequenceIdBuilder_ == null) {
ensureStoreSequenceIdIsMutable();
storeSequenceId_.add(builderForValue.build());
onChanged();
} else {
storeSequenceIdBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public Builder addStoreSequenceId(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) {
if (storeSequenceIdBuilder_ == null) {
ensureStoreSequenceIdIsMutable();
storeSequenceId_.add(index, builderForValue.build());
onChanged();
} else {
storeSequenceIdBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public Builder addAllStoreSequenceId(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> values) {
if (storeSequenceIdBuilder_ == null) {
ensureStoreSequenceIdIsMutable();
super.addAll(values, storeSequenceId_);
onChanged();
} else {
storeSequenceIdBuilder_.addAllMessages(values);
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public Builder clearStoreSequenceId() {
if (storeSequenceIdBuilder_ == null) {
storeSequenceId_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
storeSequenceIdBuilder_.clear();
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public Builder removeStoreSequenceId(int index) {
if (storeSequenceIdBuilder_ == null) {
ensureStoreSequenceIdIsMutable();
storeSequenceId_.remove(index);
onChanged();
} else {
storeSequenceIdBuilder_.remove(index);
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder getStoreSequenceIdBuilder(
int index) {
return getStoreSequenceIdFieldBuilder().getBuilder(index);
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreSequenceIdOrBuilder(
int index) {
if (storeSequenceIdBuilder_ == null) {
return storeSequenceId_.get(index);
} else {
return storeSequenceIdBuilder_.getMessageOrBuilder(index);
}
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>
getStoreSequenceIdOrBuilderList() {
if (storeSequenceIdBuilder_ != null) {
return storeSequenceIdBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(storeSequenceId_);
}
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder() {
return getStoreSequenceIdFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance());
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreSequenceIdBuilder(
int index) {
return getStoreSequenceIdFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance());
}
/**
* <code>repeated .StoreSequenceId store_sequence_id = 2;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder>
getStoreSequenceIdBuilderList() {
return getStoreSequenceIdFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>
getStoreSequenceIdFieldBuilder() {
if (storeSequenceIdBuilder_ == null) {
storeSequenceIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>(
storeSequenceId_,
((bitField0_ & 0x00000002) == 0x00000002),
getParentForChildren(),
isClean());
storeSequenceId_ = null;
}
return storeSequenceIdBuilder_;
}
// @@protoc_insertion_point(builder_scope:RegionStoreSequenceIds)
}
static {
defaultInstance = new RegionStoreSequenceIds(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:RegionStoreSequenceIds)
}
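// Illustrative sketch (not part of the generated output): populating the
// repeated store_sequence_id field; the family names and ids are
// placeholders. Per the message doc, last_flushed_sequence_id should be the
// minimum of the per-store ids.
//
//   RegionStoreSequenceIds ids = RegionStoreSequenceIds.newBuilder()
//       .setLastFlushedSequenceId(100L)
//       .addStoreSequenceId(StoreSequenceId.newBuilder()
//           .setFamilyName(com.google.protobuf.ByteString.copyFromUtf8("cf1"))
//           .setSequenceId(100L))
//       .addStoreSequenceId(StoreSequenceId.newBuilder()
//           .setFamilyName(com.google.protobuf.ByteString.copyFromUtf8("cf2"))
//           .setSequenceId(120L))
//       .build();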
public interface RegionLoadOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .RegionSpecifier region_specifier = 1;
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
boolean hasRegionSpecifier();
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionSpecifier();
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionSpecifierOrBuilder();
// optional uint32 stores = 2;
/**
* <code>optional uint32 stores = 2;</code>
*
* <pre>
** the number of stores for the region
* </pre>
*/
boolean hasStores();
/**
* <code>optional uint32 stores = 2;</code>
*
* <pre>
** the number of stores for the region
* </pre>
*/
int getStores();
// optional uint32 storefiles = 3;
/**
* <code>optional uint32 storefiles = 3;</code>
*
* <pre>
** the number of storefiles for the region
* </pre>
*/
boolean hasStorefiles();
/**
* <code>optional uint32 storefiles = 3;</code>
*
* <pre>
** the number of storefiles for the region
* </pre>
*/
int getStorefiles();
// optional uint32 store_uncompressed_size_MB = 4;
/**
* <code>optional uint32 store_uncompressed_size_MB = 4;</code>
*
* <pre>
** the total size of the store files for the region, uncompressed, in MB
* </pre>
*/
boolean hasStoreUncompressedSizeMB();
/**
* <code>optional uint32 store_uncompressed_size_MB = 4;</code>
*
* <pre>
** the total size of the store files for the region, uncompressed, in MB
* </pre>
*/
int getStoreUncompressedSizeMB();
// optional uint32 storefile_size_MB = 5;
/**
* <code>optional uint32 storefile_size_MB = 5;</code>
*
* <pre>
** the current total size of the store files for the region, in MB
* </pre>
*/
boolean hasStorefileSizeMB();
/**
* <code>optional uint32 storefile_size_MB = 5;</code>
*
* <pre>
** the current total size of the store files for the region, in MB
* </pre>
*/
int getStorefileSizeMB();
// optional uint32 memstore_size_MB = 6;
/**
* <code>optional uint32 memstore_size_MB = 6;</code>
*
* <pre>
** the current size of the memstore for the region, in MB
* </pre>
*/
boolean hasMemstoreSizeMB();
/**
* <code>optional uint32 memstore_size_MB = 6;</code>
*
* <pre>
** the current size of the memstore for the region, in MB
* </pre>
*/
int getMemstoreSizeMB();
// optional uint32 storefile_index_size_MB = 7;
/**
* <code>optional uint32 storefile_index_size_MB = 7;</code>
*
* <pre>
**
* The current total size of root-level store file indexes for the region,
* in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
* </pre>
*/
boolean hasStorefileIndexSizeMB();
/**
* <code>optional uint32 storefile_index_size_MB = 7;</code>
*
* <pre>
**
* The current total size of root-level store file indexes for the region,
* in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
* </pre>
*/
int getStorefileIndexSizeMB();
// optional uint64 read_requests_count = 8;
/**
* <code>optional uint64 read_requests_count = 8;</code>
*
* <pre>
** the current total number of read requests made to the region
* </pre>
*/
boolean hasReadRequestsCount();
/**
* <code>optional uint64 read_requests_count = 8;</code>
*
* <pre>
** the current total number of read requests made to the region
* </pre>
*/
long getReadRequestsCount();
// optional uint64 write_requests_count = 9;
/**
* <code>optional uint64 write_requests_count = 9;</code>
*
* <pre>
** the current total number of write requests made to the region
* </pre>
*/
boolean hasWriteRequestsCount();
/**
* <code>optional uint64 write_requests_count = 9;</code>
*
* <pre>
** the current total number of write requests made to the region
* </pre>
*/
long getWriteRequestsCount();
// optional uint64 total_compacting_KVs = 10;
/**
* <code>optional uint64 total_compacting_KVs = 10;</code>
*
* <pre>
** the total number of compacting key values in the currently running compaction
* </pre>
*/
boolean hasTotalCompactingKVs();
/**
* <code>optional uint64 total_compacting_KVs = 10;</code>
*
* <pre>
** the total number of compacting key values in the currently running compaction
* </pre>
*/
long getTotalCompactingKVs();
// optional uint64 current_compacted_KVs = 11;
/**
* <code>optional uint64 current_compacted_KVs = 11;</code>
*
* <pre>
** the completed count of key values in the currently running compaction
* </pre>
*/
boolean hasCurrentCompactedKVs();
/**
* <code>optional uint64 current_compacted_KVs = 11;</code>
*
* <pre>
** the completed count of key values in the currently running compaction
* </pre>
*/
long getCurrentCompactedKVs();
// optional uint32 root_index_size_KB = 12;
/**
* <code>optional uint32 root_index_size_KB = 12;</code>
*
* <pre>
** The current total size of root-level indexes for the region, in KB.
* </pre>
*/
boolean hasRootIndexSizeKB();
/**
* <code>optional uint32 root_index_size_KB = 12;</code>
*
* <pre>
** The current total size of root-level indexes for the region, in KB.
* </pre>
*/
int getRootIndexSizeKB();
// optional uint32 total_static_index_size_KB = 13;
/**
* <code>optional uint32 total_static_index_size_KB = 13;</code>
*
* <pre>
** The total size of all index blocks, not just the root level, in KB.
* </pre>
*/
boolean hasTotalStaticIndexSizeKB();
/**
* <code>optional uint32 total_static_index_size_KB = 13;</code>
*
* <pre>
** The total size of all index blocks, not just the root level, in KB.
* </pre>
*/
int getTotalStaticIndexSizeKB();
// optional uint32 total_static_bloom_size_KB = 14;
/**
* <code>optional uint32 total_static_bloom_size_KB = 14;</code>
*
* <pre>
**
* The total size of all Bloom filter blocks, not just loaded into the
* block cache, in KB.
* </pre>
*/
boolean hasTotalStaticBloomSizeKB();
/**
* <code>optional uint32 total_static_bloom_size_KB = 14;</code>
*
* <pre>
**
* The total size of all Bloom filter blocks, not just loaded into the
* block cache, in KB.
* </pre>
*/
int getTotalStaticBloomSizeKB();
// optional uint64 complete_sequence_id = 15;
/**
* <code>optional uint64 complete_sequence_id = 15;</code>
*
* <pre>
** the most recent sequence Id from a cache flush
* </pre>
*/
boolean hasCompleteSequenceId();
/**
* <code>optional uint64 complete_sequence_id = 15;</code>
*
* <pre>
** the most recent sequence Id from a cache flush
* </pre>
*/
long getCompleteSequenceId();
// optional float data_locality = 16;
/**
* <code>optional float data_locality = 16;</code>
*
* <pre>
** The current data locality for the region in the regionserver
* </pre>
*/
boolean hasDataLocality();
/**
* <code>optional float data_locality = 16;</code>
*
* <pre>
** The current data locality for the region in the regionserver
* </pre>
*/
float getDataLocality();
// optional uint64 last_major_compaction_ts = 17 [default = 0];
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
boolean hasLastMajorCompactionTs();
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
long getLastMajorCompactionTs();
// repeated .StoreSequenceId store_complete_sequence_id = 18;
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** the most recent sequence Id of each store from a cache flush
* </pre>
*/
java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId>
getStoreCompleteSequenceIdList();
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** the most recent sequence Id of each store from a cache flush
* </pre>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreCompleteSequenceId(int index);
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** the most recent sequence Id of each store from a cache flush
* </pre>
*/
int getStoreCompleteSequenceIdCount();
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** the most recent sequence Id of each store from a cache flush
* </pre>
*/
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>
getStoreCompleteSequenceIdOrBuilderList();
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** the most recent sequence Id of each store from a cache flush
* </pre>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreCompleteSequenceIdOrBuilder(
int index);
}
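// Illustrative sketch (not part of the generated output): reading RegionLoad
// metrics through the OrBuilder view; "load" is a placeholder reference,
// e.g. one element of a server's region-load list.
//
//   RegionLoadOrBuilder load = ...;
//   if (load.hasStorefileSizeMB() && load.hasMemstoreSizeMB()) {
//     int onDiskMB = load.getStorefileSizeMB();
//     int inMemoryMB = load.getMemstoreSizeMB();
//   }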
/**
* Protobuf type {@code RegionLoad}
*/
public static final class RegionLoad extends
com.google.protobuf.GeneratedMessage
implements RegionLoadOrBuilder {
// Use RegionLoad.newBuilder() to construct.
private RegionLoad(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private RegionLoad(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final RegionLoad defaultInstance;
public static RegionLoad getDefaultInstance() {
return defaultInstance;
}
public RegionLoad getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private RegionLoad(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
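// Tags follow (field_number << 3) | wire_type: the varint fields use wire
// type 0 (e.g. tag 16 = field 2), the float data_locality uses wire type 5
// (tag 133 = field 16, 32-bit fixed), and the repeated message field uses
// wire type 2 (tag 146 = field 18).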
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = regionSpecifier_.toBuilder();
}
regionSpecifier_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(regionSpecifier_);
regionSpecifier_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 16: {
bitField0_ |= 0x00000002;
stores_ = input.readUInt32();
break;
}
case 24: {
bitField0_ |= 0x00000004;
storefiles_ = input.readUInt32();
break;
}
case 32: {
bitField0_ |= 0x00000008;
storeUncompressedSizeMB_ = input.readUInt32();
break;
}
case 40: {
bitField0_ |= 0x00000010;
storefileSizeMB_ = input.readUInt32();
break;
}
case 48: {
bitField0_ |= 0x00000020;
memstoreSizeMB_ = input.readUInt32();
break;
}
case 56: {
bitField0_ |= 0x00000040;
storefileIndexSizeMB_ = input.readUInt32();
break;
}
case 64: {
bitField0_ |= 0x00000080;
readRequestsCount_ = input.readUInt64();
break;
}
case 72: {
bitField0_ |= 0x00000100;
writeRequestsCount_ = input.readUInt64();
break;
}
case 80: {
bitField0_ |= 0x00000200;
totalCompactingKVs_ = input.readUInt64();
break;
}
case 88: {
bitField0_ |= 0x00000400;
currentCompactedKVs_ = input.readUInt64();
break;
}
case 96: {
bitField0_ |= 0x00000800;
rootIndexSizeKB_ = input.readUInt32();
break;
}
case 104: {
bitField0_ |= 0x00001000;
totalStaticIndexSizeKB_ = input.readUInt32();
break;
}
case 112: {
bitField0_ |= 0x00002000;
totalStaticBloomSizeKB_ = input.readUInt32();
break;
}
case 120: {
bitField0_ |= 0x00004000;
completeSequenceId_ = input.readUInt64();
break;
}
case 133: {
bitField0_ |= 0x00008000;
dataLocality_ = input.readFloat();
break;
}
case 136: {
bitField0_ |= 0x00010000;
lastMajorCompactionTs_ = input.readUInt64();
break;
}
case 146: {
if (!((mutable_bitField0_ & 0x00020000) == 0x00020000)) {
storeCompleteSequenceId_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId>();
mutable_bitField0_ |= 0x00020000;
}
storeCompleteSequenceId_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.PARSER, extensionRegistry));
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00020000) == 0x00020000)) {
storeCompleteSequenceId_ = java.util.Collections.unmodifiableList(storeCompleteSequenceId_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionLoad_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionLoad_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder.class);
}
public static com.google.protobuf.Parser<RegionLoad> PARSER =
new com.google.protobuf.AbstractParser<RegionLoad>() {
public RegionLoad parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new RegionLoad(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<RegionLoad> getParserForType() {
return PARSER;
}
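// The static PARSER funnels every parse call into the stream constructor
// above; getParserForType() exposes it so generic protobuf code can parse a
// RegionLoad without knowing the concrete class. A minimal sketch (assumes a
// CodedInputStream named 'in' is available):
//   RegionLoad rl = RegionLoad.PARSER.parsePartialFrom(in,
//       com.google.protobuf.ExtensionRegistryLite.getEmptyRegistry());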
private int bitField0_;
// required .RegionSpecifier region_specifier = 1;
public static final int REGION_SPECIFIER_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier regionSpecifier_;
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
public boolean hasRegionSpecifier() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionSpecifier() {
return regionSpecifier_;
}
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionSpecifierOrBuilder() {
return regionSpecifier_;
}
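// Presence tracking: bit i of bitField0_ records whether field i+1 was set, so
// hasRegionSpecifier() tests mask 0x00000001, hasStores() tests 0x00000002,
// and so on through 0x00010000 for last_major_compaction_ts. The getters below
// return the field's default value when the bit is clear, so callers should
// check hasXxx() before trusting getXxx() for optional fields.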
// optional uint32 stores = 2;
public static final int STORES_FIELD_NUMBER = 2;
private int stores_;
/**
* <code>optional uint32 stores = 2;</code>
*
* <pre>
** the number of stores for the region
* </pre>
*/
public boolean hasStores() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>optional uint32 stores = 2;</code>
*
* <pre>
** the number of stores for the region
* </pre>
*/
public int getStores() {
return stores_;
}
// optional uint32 storefiles = 3;
public static final int STOREFILES_FIELD_NUMBER = 3;
private int storefiles_;
/**
* <code>optional uint32 storefiles = 3;</code>
*
* <pre>
** the number of storefiles for the region
* </pre>
*/
public boolean hasStorefiles() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional uint32 storefiles = 3;</code>
*
* <pre>
** the number of storefiles for the region
* </pre>
*/
public int getStorefiles() {
return storefiles_;
}
// optional uint32 store_uncompressed_size_MB = 4;
public static final int STORE_UNCOMPRESSED_SIZE_MB_FIELD_NUMBER = 4;
private int storeUncompressedSizeMB_;
/**
* <code>optional uint32 store_uncompressed_size_MB = 4;</code>
*
* <pre>
** the total size of the store files for the region, uncompressed, in MB
* </pre>
*/
public boolean hasStoreUncompressedSizeMB() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* <code>optional uint32 store_uncompressed_size_MB = 4;</code>
*
* <pre>
** the total size of the store files for the region, uncompressed, in MB
* </pre>
*/
public int getStoreUncompressedSizeMB() {
return storeUncompressedSizeMB_;
}
// optional uint32 storefile_size_MB = 5;
public static final int STOREFILE_SIZE_MB_FIELD_NUMBER = 5;
private int storefileSizeMB_;
/**
* <code>optional uint32 storefile_size_MB = 5;</code>
*
* <pre>
** the current total size of the store files for the region, in MB
* </pre>
*/
public boolean hasStorefileSizeMB() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* <code>optional uint32 storefile_size_MB = 5;</code>
*
* <pre>
** the current total size of the store files for the region, in MB
* </pre>
*/
public int getStorefileSizeMB() {
return storefileSizeMB_;
}
// optional uint32 memstore_size_MB = 6;
public static final int MEMSTORE_SIZE_MB_FIELD_NUMBER = 6;
private int memstoreSizeMB_;
/**
* <code>optional uint32 memstore_size_MB = 6;</code>
*
* <pre>
** the current size of the memstore for the region, in MB
* </pre>
*/
public boolean hasMemstoreSizeMB() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* <code>optional uint32 memstore_size_MB = 6;</code>
*
* <pre>
** the current size of the memstore for the region, in MB
* </pre>
*/
public int getMemstoreSizeMB() {
return memstoreSizeMB_;
}
// optional uint32 storefile_index_size_MB = 7;
public static final int STOREFILE_INDEX_SIZE_MB_FIELD_NUMBER = 7;
private int storefileIndexSizeMB_;
/**
* <code>optional uint32 storefile_index_size_MB = 7;</code>
*
* <pre>
**
* The current total size of root-level store file indexes for the region,
* in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
* </pre>
*/
public boolean hasStorefileIndexSizeMB() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* <code>optional uint32 storefile_index_size_MB = 7;</code>
*
* <pre>
**
* The current total size of root-level store file indexes for the region,
* in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
* </pre>
*/
public int getStorefileIndexSizeMB() {
return storefileIndexSizeMB_;
}
// optional uint64 read_requests_count = 8;
public static final int READ_REQUESTS_COUNT_FIELD_NUMBER = 8;
private long readRequestsCount_;
/**
* <code>optional uint64 read_requests_count = 8;</code>
*
* <pre>
** the current total read requests made to region
* </pre>
*/
public boolean hasReadRequestsCount() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* <code>optional uint64 read_requests_count = 8;</code>
*
* <pre>
** the current total read requests made to region
* </pre>
*/
public long getReadRequestsCount() {
return readRequestsCount_;
}
// optional uint64 write_requests_count = 9;
public static final int WRITE_REQUESTS_COUNT_FIELD_NUMBER = 9;
private long writeRequestsCount_;
/**
* <code>optional uint64 write_requests_count = 9;</code>
*
* <pre>
** the current total write requests made to region
* </pre>
*/
public boolean hasWriteRequestsCount() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* <code>optional uint64 write_requests_count = 9;</code>
*
* <pre>
** the current total write requests made to region
* </pre>
*/
public long getWriteRequestsCount() {
return writeRequestsCount_;
}
// optional uint64 total_compacting_KVs = 10;
public static final int TOTAL_COMPACTING_KVS_FIELD_NUMBER = 10;
private long totalCompactingKVs_;
/**
* <code>optional uint64 total_compacting_KVs = 10;</code>
*
* <pre>
** the total compacting key values in currently running compaction
* </pre>
*/
public boolean hasTotalCompactingKVs() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* <code>optional uint64 total_compacting_KVs = 10;</code>
*
* <pre>
** the total compacting key values in currently running compaction
* </pre>
*/
public long getTotalCompactingKVs() {
return totalCompactingKVs_;
}
// optional uint64 current_compacted_KVs = 11;
public static final int CURRENT_COMPACTED_KVS_FIELD_NUMBER = 11;
private long currentCompactedKVs_;
/**
* <code>optional uint64 current_compacted_KVs = 11;</code>
*
* <pre>
** the completed count of key values in currently running compaction
* </pre>
*/
public boolean hasCurrentCompactedKVs() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* <code>optional uint64 current_compacted_KVs = 11;</code>
*
* <pre>
** the completed count of key values in currently running compaction
* </pre>
*/
public long getCurrentCompactedKVs() {
return currentCompactedKVs_;
}
// optional uint32 root_index_size_KB = 12;
public static final int ROOT_INDEX_SIZE_KB_FIELD_NUMBER = 12;
private int rootIndexSizeKB_;
/**
* <code>optional uint32 root_index_size_KB = 12;</code>
*
* <pre>
** The current total size of root-level indexes for the region, in KB.
* </pre>
*/
public boolean hasRootIndexSizeKB() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* <code>optional uint32 root_index_size_KB = 12;</code>
*
* <pre>
** The current total size of root-level indexes for the region, in KB.
* </pre>
*/
public int getRootIndexSizeKB() {
return rootIndexSizeKB_;
}
// optional uint32 total_static_index_size_KB = 13;
public static final int TOTAL_STATIC_INDEX_SIZE_KB_FIELD_NUMBER = 13;
private int totalStaticIndexSizeKB_;
/**
* <code>optional uint32 total_static_index_size_KB = 13;</code>
*
* <pre>
** The total size of all index blocks, not just the root level, in KB.
* </pre>
*/
public boolean hasTotalStaticIndexSizeKB() {
return ((bitField0_ & 0x00001000) == 0x00001000);
}
/**
* <code>optional uint32 total_static_index_size_KB = 13;</code>
*
* <pre>
** The total size of all index blocks, not just the root level, in KB.
* </pre>
*/
public int getTotalStaticIndexSizeKB() {
return totalStaticIndexSizeKB_;
}
// optional uint32 total_static_bloom_size_KB = 14;
public static final int TOTAL_STATIC_BLOOM_SIZE_KB_FIELD_NUMBER = 14;
private int totalStaticBloomSizeKB_;
/**
* <code>optional uint32 total_static_bloom_size_KB = 14;</code>
*
* <pre>
**
* The total size of all Bloom filter blocks, not just loaded into the
* block cache, in KB.
* </pre>
*/
public boolean hasTotalStaticBloomSizeKB() {
return ((bitField0_ & 0x00002000) == 0x00002000);
}
/**
* <code>optional uint32 total_static_bloom_size_KB = 14;</code>
*
* <pre>
**
* The total size of all Bloom filter blocks, not just loaded into the
* block cache, in KB.
* </pre>
*/
public int getTotalStaticBloomSizeKB() {
return totalStaticBloomSizeKB_;
}
// optional uint64 complete_sequence_id = 15;
public static final int COMPLETE_SEQUENCE_ID_FIELD_NUMBER = 15;
private long completeSequenceId_;
/**
* <code>optional uint64 complete_sequence_id = 15;</code>
*
* <pre>
** the most recent sequence Id from cache flush
* </pre>
*/
public boolean hasCompleteSequenceId() {
return ((bitField0_ & 0x00004000) == 0x00004000);
}
/**
* <code>optional uint64 complete_sequence_id = 15;</code>
*
* <pre>
** the most recent sequence Id from cache flush
* </pre>
*/
public long getCompleteSequenceId() {
return completeSequenceId_;
}
// optional float data_locality = 16;
public static final int DATA_LOCALITY_FIELD_NUMBER = 16;
private float dataLocality_;
/**
* <code>optional float data_locality = 16;</code>
*
* <pre>
** The current data locality for region in the regionserver
* </pre>
*/
public boolean hasDataLocality() {
return ((bitField0_ & 0x00008000) == 0x00008000);
}
/**
* <code>optional float data_locality = 16;</code>
*
* <pre>
** The current data locality for region in the regionserver
* </pre>
*/
public float getDataLocality() {
return dataLocality_;
}
// optional uint64 last_major_compaction_ts = 17 [default = 0];
public static final int LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER = 17;
private long lastMajorCompactionTs_;
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public boolean hasLastMajorCompactionTs() {
return ((bitField0_ & 0x00010000) == 0x00010000);
}
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public long getLastMajorCompactionTs() {
return lastMajorCompactionTs_;
}
// repeated .StoreSequenceId store_complete_sequence_id = 18;
public static final int STORE_COMPLETE_SEQUENCE_ID_FIELD_NUMBER = 18;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> storeCompleteSequenceId_;
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** the most recent sequence Id of store from cache flush
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> getStoreCompleteSequenceIdList() {
return storeCompleteSequenceId_;
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** the most recent sequence Id of store from cache flush
* </pre>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>
getStoreCompleteSequenceIdOrBuilderList() {
return storeCompleteSequenceId_;
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** the most recent sequence Id of store from cache flush
* </pre>
*/
public int getStoreCompleteSequenceIdCount() {
return storeCompleteSequenceId_.size();
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** the most recent sequence Id of store from cache flush
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreCompleteSequenceId(int index) {
return storeCompleteSequenceId_.get(index);
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** the most recent sequence Id of store from cache flush
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreCompleteSequenceIdOrBuilder(
int index) {
return storeCompleteSequenceId_.get(index);
}
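// The repeated store_complete_sequence_id field is exposed both as a List view
// and by index; after parsing, the backing list has been wrapped in
// Collections.unmodifiableList (see the finally block of the parsing
// constructor), so the returned views are effectively immutable.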
private void initFields() {
regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
stores_ = 0;
storefiles_ = 0;
storeUncompressedSizeMB_ = 0;
storefileSizeMB_ = 0;
memstoreSizeMB_ = 0;
storefileIndexSizeMB_ = 0;
readRequestsCount_ = 0L;
writeRequestsCount_ = 0L;
totalCompactingKVs_ = 0L;
currentCompactedKVs_ = 0L;
rootIndexSizeKB_ = 0;
totalStaticIndexSizeKB_ = 0;
totalStaticBloomSizeKB_ = 0;
completeSequenceId_ = 0L;
dataLocality_ = 0F;
lastMajorCompactionTs_ = 0L;
storeCompleteSequenceId_ = java.util.Collections.emptyList();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasRegionSpecifier()) {
memoizedIsInitialized = 0;
return false;
}
if (!getRegionSpecifier().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
for (int i = 0; i < getStoreCompleteSequenceIdCount(); i++) {
if (!getStoreCompleteSequenceId(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
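// isInitialized() caches its answer in memoizedIsInitialized (-1 = not yet
// computed, 0 = false, 1 = true). Only the required region_specifier and the
// elements of the repeated StoreSequenceId field can make a RegionLoad
// uninitialized; all the scalar fields are optional.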
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, regionSpecifier_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt32(2, stores_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(3, storefiles_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt32(4, storeUncompressedSizeMB_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt32(5, storefileSizeMB_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt32(6, memstoreSizeMB_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeUInt32(7, storefileIndexSizeMB_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeUInt64(8, readRequestsCount_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
output.writeUInt64(9, writeRequestsCount_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
output.writeUInt64(10, totalCompactingKVs_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
output.writeUInt64(11, currentCompactedKVs_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
output.writeUInt32(12, rootIndexSizeKB_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
output.writeUInt32(13, totalStaticIndexSizeKB_);
}
if (((bitField0_ & 0x00002000) == 0x00002000)) {
output.writeUInt32(14, totalStaticBloomSizeKB_);
}
if (((bitField0_ & 0x00004000) == 0x00004000)) {
output.writeUInt64(15, completeSequenceId_);
}
if (((bitField0_ & 0x00008000) == 0x00008000)) {
output.writeFloat(16, dataLocality_);
}
if (((bitField0_ & 0x00010000) == 0x00010000)) {
output.writeUInt64(17, lastMajorCompactionTs_);
}
for (int i = 0; i < storeCompleteSequenceId_.size(); i++) {
output.writeMessage(18, storeCompleteSequenceId_.get(i));
}
getUnknownFields().writeTo(output);
}
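// writeTo() starts with getSerializedSize() so that message sizes are computed
// and memoized before any length-delimited field is written, then emits fields
// in ascending field-number order and finishes with whatever unknown fields
// were captured during parsing.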
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, regionSpecifier_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(2, stores_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(3, storefiles_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(4, storeUncompressedSizeMB_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(5, storefileSizeMB_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(6, memstoreSizeMB_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(7, storefileIndexSizeMB_);
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(8, readRequestsCount_);
}
if (((bitField0_ & 0x00000100) == 0x00000100)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(9, writeRequestsCount_);
}
if (((bitField0_ & 0x00000200) == 0x00000200)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(10, totalCompactingKVs_);
}
if (((bitField0_ & 0x00000400) == 0x00000400)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(11, currentCompactedKVs_);
}
if (((bitField0_ & 0x00000800) == 0x00000800)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(12, rootIndexSizeKB_);
}
if (((bitField0_ & 0x00001000) == 0x00001000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(13, totalStaticIndexSizeKB_);
}
if (((bitField0_ & 0x00002000) == 0x00002000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(14, totalStaticBloomSizeKB_);
}
if (((bitField0_ & 0x00004000) == 0x00004000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(15, completeSequenceId_);
}
if (((bitField0_ & 0x00008000) == 0x00008000)) {
size += com.google.protobuf.CodedOutputStream
.computeFloatSize(16, dataLocality_);
}
if (((bitField0_ & 0x00010000) == 0x00010000)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(17, lastMajorCompactionTs_);
}
for (int i = 0; i < storeCompleteSequenceId_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(18, storeCompleteSequenceId_.get(i));
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
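// getSerializedSize() is memoized in memoizedSerializedSize (-1 = not yet
// computed), which is safe because the message is immutable once built. The
// per-field compute*Size helpers mirror the write* calls above one for one.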
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad) obj;
boolean result = true;
result = result && (hasRegionSpecifier() == other.hasRegionSpecifier());
if (hasRegionSpecifier()) {
result = result && getRegionSpecifier()
.equals(other.getRegionSpecifier());
}
result = result && (hasStores() == other.hasStores());
if (hasStores()) {
result = result && (getStores()
== other.getStores());
}
result = result && (hasStorefiles() == other.hasStorefiles());
if (hasStorefiles()) {
result = result && (getStorefiles()
== other.getStorefiles());
}
result = result && (hasStoreUncompressedSizeMB() == other.hasStoreUncompressedSizeMB());
if (hasStoreUncompressedSizeMB()) {
result = result && (getStoreUncompressedSizeMB()
== other.getStoreUncompressedSizeMB());
}
result = result && (hasStorefileSizeMB() == other.hasStorefileSizeMB());
if (hasStorefileSizeMB()) {
result = result && (getStorefileSizeMB()
== other.getStorefileSizeMB());
}
result = result && (hasMemstoreSizeMB() == other.hasMemstoreSizeMB());
if (hasMemstoreSizeMB()) {
result = result && (getMemstoreSizeMB()
== other.getMemstoreSizeMB());
}
result = result && (hasStorefileIndexSizeMB() == other.hasStorefileIndexSizeMB());
if (hasStorefileIndexSizeMB()) {
result = result && (getStorefileIndexSizeMB()
== other.getStorefileIndexSizeMB());
}
result = result && (hasReadRequestsCount() == other.hasReadRequestsCount());
if (hasReadRequestsCount()) {
result = result && (getReadRequestsCount()
== other.getReadRequestsCount());
}
result = result && (hasWriteRequestsCount() == other.hasWriteRequestsCount());
if (hasWriteRequestsCount()) {
result = result && (getWriteRequestsCount()
== other.getWriteRequestsCount());
}
result = result && (hasTotalCompactingKVs() == other.hasTotalCompactingKVs());
if (hasTotalCompactingKVs()) {
result = result && (getTotalCompactingKVs()
== other.getTotalCompactingKVs());
}
result = result && (hasCurrentCompactedKVs() == other.hasCurrentCompactedKVs());
if (hasCurrentCompactedKVs()) {
result = result && (getCurrentCompactedKVs()
== other.getCurrentCompactedKVs());
}
result = result && (hasRootIndexSizeKB() == other.hasRootIndexSizeKB());
if (hasRootIndexSizeKB()) {
result = result && (getRootIndexSizeKB()
== other.getRootIndexSizeKB());
}
result = result && (hasTotalStaticIndexSizeKB() == other.hasTotalStaticIndexSizeKB());
if (hasTotalStaticIndexSizeKB()) {
result = result && (getTotalStaticIndexSizeKB()
== other.getTotalStaticIndexSizeKB());
}
result = result && (hasTotalStaticBloomSizeKB() == other.hasTotalStaticBloomSizeKB());
if (hasTotalStaticBloomSizeKB()) {
result = result && (getTotalStaticBloomSizeKB()
== other.getTotalStaticBloomSizeKB());
}
result = result && (hasCompleteSequenceId() == other.hasCompleteSequenceId());
if (hasCompleteSequenceId()) {
result = result && (getCompleteSequenceId()
== other.getCompleteSequenceId());
}
result = result && (hasDataLocality() == other.hasDataLocality());
if (hasDataLocality()) {
result = result && (Float.floatToIntBits(getDataLocality()) == Float.floatToIntBits(other.getDataLocality()));
}
result = result && (hasLastMajorCompactionTs() == other.hasLastMajorCompactionTs());
if (hasLastMajorCompactionTs()) {
result = result && (getLastMajorCompactionTs()
== other.getLastMajorCompactionTs());
}
result = result && getStoreCompleteSequenceIdList()
.equals(other.getStoreCompleteSequenceIdList());
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
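// equals() compares presence first and value second for every field. Note the
// float field: data_locality is compared via Float.floatToIntBits so that NaN
// values compare equal to each other and +0.0f / -0.0f are distinguished,
// matching the contract of Float.equals rather than the == operator.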
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasRegionSpecifier()) {
hash = (37 * hash) + REGION_SPECIFIER_FIELD_NUMBER;
hash = (53 * hash) + getRegionSpecifier().hashCode();
}
if (hasStores()) {
hash = (37 * hash) + STORES_FIELD_NUMBER;
hash = (53 * hash) + getStores();
}
if (hasStorefiles()) {
hash = (37 * hash) + STOREFILES_FIELD_NUMBER;
hash = (53 * hash) + getStorefiles();
}
if (hasStoreUncompressedSizeMB()) {
hash = (37 * hash) + STORE_UNCOMPRESSED_SIZE_MB_FIELD_NUMBER;
hash = (53 * hash) + getStoreUncompressedSizeMB();
}
if (hasStorefileSizeMB()) {
hash = (37 * hash) + STOREFILE_SIZE_MB_FIELD_NUMBER;
hash = (53 * hash) + getStorefileSizeMB();
}
if (hasMemstoreSizeMB()) {
hash = (37 * hash) + MEMSTORE_SIZE_MB_FIELD_NUMBER;
hash = (53 * hash) + getMemstoreSizeMB();
}
if (hasStorefileIndexSizeMB()) {
hash = (37 * hash) + STOREFILE_INDEX_SIZE_MB_FIELD_NUMBER;
hash = (53 * hash) + getStorefileIndexSizeMB();
}
if (hasReadRequestsCount()) {
hash = (37 * hash) + READ_REQUESTS_COUNT_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getReadRequestsCount());
}
if (hasWriteRequestsCount()) {
hash = (37 * hash) + WRITE_REQUESTS_COUNT_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getWriteRequestsCount());
}
if (hasTotalCompactingKVs()) {
hash = (37 * hash) + TOTAL_COMPACTING_KVS_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getTotalCompactingKVs());
}
if (hasCurrentCompactedKVs()) {
hash = (37 * hash) + CURRENT_COMPACTED_KVS_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCurrentCompactedKVs());
}
if (hasRootIndexSizeKB()) {
hash = (37 * hash) + ROOT_INDEX_SIZE_KB_FIELD_NUMBER;
hash = (53 * hash) + getRootIndexSizeKB();
}
if (hasTotalStaticIndexSizeKB()) {
hash = (37 * hash) + TOTAL_STATIC_INDEX_SIZE_KB_FIELD_NUMBER;
hash = (53 * hash) + getTotalStaticIndexSizeKB();
}
if (hasTotalStaticBloomSizeKB()) {
hash = (37 * hash) + TOTAL_STATIC_BLOOM_SIZE_KB_FIELD_NUMBER;
hash = (53 * hash) + getTotalStaticBloomSizeKB();
}
if (hasCompleteSequenceId()) {
hash = (37 * hash) + COMPLETE_SEQUENCE_ID_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getCompleteSequenceId());
}
if (hasDataLocality()) {
hash = (37 * hash) + DATA_LOCALITY_FIELD_NUMBER;
hash = (53 * hash) + Float.floatToIntBits(
getDataLocality());
}
if (hasLastMajorCompactionTs()) {
hash = (37 * hash) + LAST_MAJOR_COMPACTION_TS_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getLastMajorCompactionTs());
}
if (getStoreCompleteSequenceIdCount() > 0) {
hash = (37 * hash) + STORE_COMPLETE_SEQUENCE_ID_FIELD_NUMBER;
hash = (53 * hash) + getStoreCompleteSequenceIdList().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
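// hashCode() folds each present field in as (37 * hash) + field_number followed
// by (53 * hash) + value, hashing longs through the generated hashLong helper
// and the float through Float.floatToIntBits, which keeps it consistent with
// equals(). The result is memoized with 0 as the "not yet computed" sentinel,
// so a message that legitimately hashes to 0 is simply recomputed on each
// call, which is harmless.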
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
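// All parseFrom/parseDelimitedFrom overloads delegate to PARSER. A minimal
// decode sketch (assumes 'bytes' holds a serialized RegionLoad):
//   RegionLoad load = RegionLoad.parseFrom(bytes);
//   if (load.hasMemstoreSizeMB()) {
//     int mb = load.getMemstoreSizeMB();
//   }
// parseDelimitedFrom additionally expects a varint length prefix, which is
// what writeDelimitedTo produces for framing several messages on one stream.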
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
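// Typical construction goes through the Builder. An illustrative sketch
// ('spec' stands in for a real RegionSpecifier):
//   RegionLoad load = RegionLoad.newBuilder()
//       .setRegionSpecifier(spec)
//       .setStores(3)
//       .setReadRequestsCount(1024L)
//       .build();   // throws UninitializedMessageException if the required
//                   // region_specifier was never set
// toBuilder() round-trips an existing message back into a mutable Builder.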
/**
* Protobuf type {@code RegionLoad}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionLoad_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionLoad_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegionSpecifierFieldBuilder();
getStoreCompleteSequenceIdFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (regionSpecifierBuilder_ == null) {
regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
} else {
regionSpecifierBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
stores_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
storefiles_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
storeUncompressedSizeMB_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
storefileSizeMB_ = 0;
bitField0_ = (bitField0_ & ~0x00000010);
memstoreSizeMB_ = 0;
bitField0_ = (bitField0_ & ~0x00000020);
storefileIndexSizeMB_ = 0;
bitField0_ = (bitField0_ & ~0x00000040);
readRequestsCount_ = 0L;
bitField0_ = (bitField0_ & ~0x00000080);
writeRequestsCount_ = 0L;
bitField0_ = (bitField0_ & ~0x00000100);
totalCompactingKVs_ = 0L;
bitField0_ = (bitField0_ & ~0x00000200);
currentCompactedKVs_ = 0L;
bitField0_ = (bitField0_ & ~0x00000400);
rootIndexSizeKB_ = 0;
bitField0_ = (bitField0_ & ~0x00000800);
totalStaticIndexSizeKB_ = 0;
bitField0_ = (bitField0_ & ~0x00001000);
totalStaticBloomSizeKB_ = 0;
bitField0_ = (bitField0_ & ~0x00002000);
completeSequenceId_ = 0L;
bitField0_ = (bitField0_ & ~0x00004000);
dataLocality_ = 0F;
bitField0_ = (bitField0_ & ~0x00008000);
lastMajorCompactionTs_ = 0L;
bitField0_ = (bitField0_ & ~0x00010000);
if (storeCompleteSequenceIdBuilder_ == null) {
storeCompleteSequenceId_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00020000);
} else {
storeCompleteSequenceIdBuilder_.clear();
}
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_RegionLoad_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad build() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (regionSpecifierBuilder_ == null) {
result.regionSpecifier_ = regionSpecifier_;
} else {
result.regionSpecifier_ = regionSpecifierBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.stores_ = stores_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.storefiles_ = storefiles_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.storeUncompressedSizeMB_ = storeUncompressedSizeMB_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.storefileSizeMB_ = storefileSizeMB_;
if (((from_bitField0_ & 0x00000020) == 0x00000020)) {
to_bitField0_ |= 0x00000020;
}
result.memstoreSizeMB_ = memstoreSizeMB_;
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000040;
}
result.storefileIndexSizeMB_ = storefileIndexSizeMB_;
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000080;
}
result.readRequestsCount_ = readRequestsCount_;
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000100;
}
result.writeRequestsCount_ = writeRequestsCount_;
if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
to_bitField0_ |= 0x00000200;
}
result.totalCompactingKVs_ = totalCompactingKVs_;
if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
to_bitField0_ |= 0x00000400;
}
result.currentCompactedKVs_ = currentCompactedKVs_;
if (((from_bitField0_ & 0x00000800) == 0x00000800)) {
to_bitField0_ |= 0x00000800;
}
result.rootIndexSizeKB_ = rootIndexSizeKB_;
if (((from_bitField0_ & 0x00001000) == 0x00001000)) {
to_bitField0_ |= 0x00001000;
}
result.totalStaticIndexSizeKB_ = totalStaticIndexSizeKB_;
if (((from_bitField0_ & 0x00002000) == 0x00002000)) {
to_bitField0_ |= 0x00002000;
}
result.totalStaticBloomSizeKB_ = totalStaticBloomSizeKB_;
if (((from_bitField0_ & 0x00004000) == 0x00004000)) {
to_bitField0_ |= 0x00004000;
}
result.completeSequenceId_ = completeSequenceId_;
if (((from_bitField0_ & 0x00008000) == 0x00008000)) {
to_bitField0_ |= 0x00008000;
}
result.dataLocality_ = dataLocality_;
if (((from_bitField0_ & 0x00010000) == 0x00010000)) {
to_bitField0_ |= 0x00010000;
}
result.lastMajorCompactionTs_ = lastMajorCompactionTs_;
if (storeCompleteSequenceIdBuilder_ == null) {
if (((bitField0_ & 0x00020000) == 0x00020000)) {
storeCompleteSequenceId_ = java.util.Collections.unmodifiableList(storeCompleteSequenceId_);
bitField0_ = (bitField0_ & ~0x00020000);
}
result.storeCompleteSequenceId_ = storeCompleteSequenceId_;
} else {
result.storeCompleteSequenceId_ = storeCompleteSequenceIdBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
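// buildPartial() copies every field from the Builder into the new message and
// reassembles the message's bitField0_ from the Builder's bits. The repeated
// list is frozen with Collections.unmodifiableList and its builder-side bit is
// cleared, so the Builder and the built message never share a mutable list.
// build() is buildPartial() plus the required-field check.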
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance()) return this;
if (other.hasRegionSpecifier()) {
mergeRegionSpecifier(other.getRegionSpecifier());
}
if (other.hasStores()) {
setStores(other.getStores());
}
if (other.hasStorefiles()) {
setStorefiles(other.getStorefiles());
}
if (other.hasStoreUncompressedSizeMB()) {
setStoreUncompressedSizeMB(other.getStoreUncompressedSizeMB());
}
if (other.hasStorefileSizeMB()) {
setStorefileSizeMB(other.getStorefileSizeMB());
}
if (other.hasMemstoreSizeMB()) {
setMemstoreSizeMB(other.getMemstoreSizeMB());
}
if (other.hasStorefileIndexSizeMB()) {
setStorefileIndexSizeMB(other.getStorefileIndexSizeMB());
}
if (other.hasReadRequestsCount()) {
setReadRequestsCount(other.getReadRequestsCount());
}
if (other.hasWriteRequestsCount()) {
setWriteRequestsCount(other.getWriteRequestsCount());
}
if (other.hasTotalCompactingKVs()) {
setTotalCompactingKVs(other.getTotalCompactingKVs());
}
if (other.hasCurrentCompactedKVs()) {
setCurrentCompactedKVs(other.getCurrentCompactedKVs());
}
if (other.hasRootIndexSizeKB()) {
setRootIndexSizeKB(other.getRootIndexSizeKB());
}
if (other.hasTotalStaticIndexSizeKB()) {
setTotalStaticIndexSizeKB(other.getTotalStaticIndexSizeKB());
}
if (other.hasTotalStaticBloomSizeKB()) {
setTotalStaticBloomSizeKB(other.getTotalStaticBloomSizeKB());
}
if (other.hasCompleteSequenceId()) {
setCompleteSequenceId(other.getCompleteSequenceId());
}
if (other.hasDataLocality()) {
setDataLocality(other.getDataLocality());
}
if (other.hasLastMajorCompactionTs()) {
setLastMajorCompactionTs(other.getLastMajorCompactionTs());
}
if (storeCompleteSequenceIdBuilder_ == null) {
if (!other.storeCompleteSequenceId_.isEmpty()) {
if (storeCompleteSequenceId_.isEmpty()) {
storeCompleteSequenceId_ = other.storeCompleteSequenceId_;
bitField0_ = (bitField0_ & ~0x00020000);
} else {
ensureStoreCompleteSequenceIdIsMutable();
storeCompleteSequenceId_.addAll(other.storeCompleteSequenceId_);
}
onChanged();
}
} else {
if (!other.storeCompleteSequenceId_.isEmpty()) {
if (storeCompleteSequenceIdBuilder_.isEmpty()) {
storeCompleteSequenceIdBuilder_.dispose();
storeCompleteSequenceIdBuilder_ = null;
storeCompleteSequenceId_ = other.storeCompleteSequenceId_;
bitField0_ = (bitField0_ & ~0x00020000);
storeCompleteSequenceIdBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getStoreCompleteSequenceIdFieldBuilder() : null;
} else {
storeCompleteSequenceIdBuilder_.addAllMessages(other.storeCompleteSequenceId_);
}
}
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
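// Merge semantics: scalar fields present in 'other' overwrite this Builder's
// values, the singular region_specifier message is merged field by field via
// mergeRegionSpecifier, and the repeated StoreSequenceId entries are appended
// rather than replaced. Unknown fields from 'other' are merged in as well.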
public final boolean isInitialized() {
if (!hasRegionSpecifier()) {
return false;
}
if (!getRegionSpecifier().isInitialized()) {
return false;
}
for (int i = 0; i < getStoreCompleteSequenceIdCount(); i++) {
if (!getStoreCompleteSequenceId(i).isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
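// This overload keeps whatever was successfully read even on failure: when
// parsePartialFrom throws, the partially populated message is recovered via
// e.getUnfinishedMessage() and merged into this Builder in the finally block
// before the exception is rethrown.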
private int bitField0_;
// required .RegionSpecifier region_specifier = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder> regionSpecifierBuilder_;
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
public boolean hasRegionSpecifier() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier getRegionSpecifier() {
if (regionSpecifierBuilder_ == null) {
return regionSpecifier_;
} else {
return regionSpecifierBuilder_.getMessage();
}
}
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
public Builder setRegionSpecifier(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
if (regionSpecifierBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
regionSpecifier_ = value;
onChanged();
} else {
regionSpecifierBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
public Builder setRegionSpecifier(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder builderForValue) {
if (regionSpecifierBuilder_ == null) {
regionSpecifier_ = builderForValue.build();
onChanged();
} else {
regionSpecifierBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
public Builder mergeRegionSpecifier(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier value) {
if (regionSpecifierBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
regionSpecifier_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance()) {
regionSpecifier_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.newBuilder(regionSpecifier_).mergeFrom(value).buildPartial();
} else {
regionSpecifier_ = value;
}
onChanged();
} else {
regionSpecifierBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
public Builder clearRegionSpecifier() {
if (regionSpecifierBuilder_ == null) {
regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
onChanged();
} else {
regionSpecifierBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder getRegionSpecifierBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getRegionSpecifierFieldBuilder().getBuilder();
}
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder getRegionSpecifierOrBuilder() {
if (regionSpecifierBuilder_ != null) {
return regionSpecifierBuilder_.getMessageOrBuilder();
} else {
return regionSpecifier_;
}
}
/**
* <code>required .RegionSpecifier region_specifier = 1;</code>
*
* <pre>
** the region specifier
* </pre>
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>
getRegionSpecifierFieldBuilder() {
if (regionSpecifierBuilder_ == null) {
regionSpecifierBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifierOrBuilder>(
regionSpecifier_,
getParentForChildren(),
isClean());
regionSpecifier_ = null;
}
return regionSpecifierBuilder_;
}
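// The SingleFieldBuilder for region_specifier is created lazily: until
// getRegionSpecifierFieldBuilder() is first called (for example via
// getRegionSpecifierBuilder()), the plain regionSpecifier_ message is used
// directly. Once the field builder exists it owns the state, which is why
// regionSpecifier_ is nulled out here and every accessor above branches on
// whether regionSpecifierBuilder_ is null.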
// optional uint32 stores = 2;
private int stores_ ;
/**
* <code>optional uint32 stores = 2;</code>
*
* <pre>
** the number of stores for the region
* </pre>
*/
public boolean hasStores() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>optional uint32 stores = 2;</code>
*
* <pre>
** the number of stores for the region
* </pre>
*/
public int getStores() {
return stores_;
}
/**
* <code>optional uint32 stores = 2;</code>
*
* <pre>
** the number of stores for the region
* </pre>
*/
public Builder setStores(int value) {
bitField0_ |= 0x00000002;
stores_ = value;
onChanged();
return this;
}
/**
* <code>optional uint32 stores = 2;</code>
*
* <pre>
** the number of stores for the region
* </pre>
*/
public Builder clearStores() {
bitField0_ = (bitField0_ & ~0x00000002);
stores_ = 0;
onChanged();
return this;
}
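// The stores field above shows the pattern every optional scalar field in this
// Builder follows: setXxx() stores the value and sets the presence bit,
// clearXxx() restores the proto default and clears the bit, and both call
// onChanged() so any parent builder is notified. The remaining fields
// (storefiles through last_major_compaction_ts) repeat this shape with their
// own masks.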
// optional uint32 storefiles = 3;
private int storefiles_ ;
/**
* <code>optional uint32 storefiles = 3;</code>
*
* <pre>
** the number of storefiles for the region
* </pre>
*/
public boolean hasStorefiles() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional uint32 storefiles = 3;</code>
*
* <pre>
** the number of storefiles for the region
* </pre>
*/
public int getStorefiles() {
return storefiles_;
}
/**
* <code>optional uint32 storefiles = 3;</code>
*
* <pre>
** the number of storefiles for the region
* </pre>
*/
public Builder setStorefiles(int value) {
bitField0_ |= 0x00000004;
storefiles_ = value;
onChanged();
return this;
}
/**
* <code>optional uint32 storefiles = 3;</code>
*
* <pre>
** the number of storefiles for the region
* </pre>
*/
public Builder clearStorefiles() {
bitField0_ = (bitField0_ & ~0x00000004);
storefiles_ = 0;
onChanged();
return this;
}
// optional uint32 store_uncompressed_size_MB = 4;
private int storeUncompressedSizeMB_ ;
/**
* <code>optional uint32 store_uncompressed_size_MB = 4;</code>
*
* <pre>
** the total size of the store files for the region, uncompressed, in MB
* </pre>
*/
public boolean hasStoreUncompressedSizeMB() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* <code>optional uint32 store_uncompressed_size_MB = 4;</code>
*
* <pre>
** the total size of the store files for the region, uncompressed, in MB
* </pre>
*/
public int getStoreUncompressedSizeMB() {
return storeUncompressedSizeMB_;
}
/**
* <code>optional uint32 store_uncompressed_size_MB = 4;</code>
*
* <pre>
** the total size of the store files for the region, uncompressed, in MB
* </pre>
*/
public Builder setStoreUncompressedSizeMB(int value) {
bitField0_ |= 0x00000008;
storeUncompressedSizeMB_ = value;
onChanged();
return this;
}
/**
* <code>optional uint32 store_uncompressed_size_MB = 4;</code>
*
* <pre>
** the total size of the store files for the region, uncompressed, in MB
* </pre>
*/
public Builder clearStoreUncompressedSizeMB() {
bitField0_ = (bitField0_ & ~0x00000008);
storeUncompressedSizeMB_ = 0;
onChanged();
return this;
}
// optional uint32 storefile_size_MB = 5;
private int storefileSizeMB_ ;
/**
* <code>optional uint32 storefile_size_MB = 5;</code>
*
* <pre>
** the current total size of the store files for the region, in MB
* </pre>
*/
public boolean hasStorefileSizeMB() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* <code>optional uint32 storefile_size_MB = 5;</code>
*
* <pre>
** the current total size of the store files for the region, in MB
* </pre>
*/
public int getStorefileSizeMB() {
return storefileSizeMB_;
}
/**
* <code>optional uint32 storefile_size_MB = 5;</code>
*
* <pre>
** the current total size of the store files for the region, in MB
* </pre>
*/
public Builder setStorefileSizeMB(int value) {
bitField0_ |= 0x00000010;
storefileSizeMB_ = value;
onChanged();
return this;
}
/**
* <code>optional uint32 storefile_size_MB = 5;</code>
*
* <pre>
** the current total size of the store files for the region, in MB
* </pre>
*/
public Builder clearStorefileSizeMB() {
bitField0_ = (bitField0_ & ~0x00000010);
storefileSizeMB_ = 0;
onChanged();
return this;
}
// optional uint32 memstore_size_MB = 6;
private int memstoreSizeMB_ ;
/**
* <code>optional uint32 memstore_size_MB = 6;</code>
*
* <pre>
** the current size of the memstore for the region, in MB
* </pre>
*/
public boolean hasMemstoreSizeMB() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* <code>optional uint32 memstore_size_MB = 6;</code>
*
* <pre>
** the current size of the memstore for the region, in MB
* </pre>
*/
public int getMemstoreSizeMB() {
return memstoreSizeMB_;
}
/**
* <code>optional uint32 memstore_size_MB = 6;</code>
*
* <pre>
** the current size of the memstore for the region, in MB
* </pre>
*/
public Builder setMemstoreSizeMB(int value) {
bitField0_ |= 0x00000020;
memstoreSizeMB_ = value;
onChanged();
return this;
}
/**
* <code>optional uint32 memstore_size_MB = 6;</code>
*
* <pre>
** the current size of the memstore for the region, in MB
* </pre>
*/
public Builder clearMemstoreSizeMB() {
bitField0_ = (bitField0_ & ~0x00000020);
memstoreSizeMB_ = 0;
onChanged();
return this;
}
// optional uint32 storefile_index_size_MB = 7;
private int storefileIndexSizeMB_ ;
/**
* <code>optional uint32 storefile_index_size_MB = 7;</code>
*
* <pre>
**
* The current total size of root-level store file indexes for the region,
* in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
* </pre>
*/
public boolean hasStorefileIndexSizeMB() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* <code>optional uint32 storefile_index_size_MB = 7;</code>
*
* <pre>
**
* The current total size of root-level store file indexes for the region,
* in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
* </pre>
*/
public int getStorefileIndexSizeMB() {
return storefileIndexSizeMB_;
}
/**
* <code>optional uint32 storefile_index_size_MB = 7;</code>
*
* <pre>
**
* The current total size of root-level store file indexes for the region,
* in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
* </pre>
*/
public Builder setStorefileIndexSizeMB(int value) {
bitField0_ |= 0x00000040;
storefileIndexSizeMB_ = value;
onChanged();
return this;
}
/**
* <code>optional uint32 storefile_index_size_MB = 7;</code>
*
* <pre>
**
* The current total size of root-level store file indexes for the region,
* in MB. The same as {&#64;link #rootIndexSizeKB} but in MB.
* </pre>
*/
public Builder clearStorefileIndexSizeMB() {
bitField0_ = (bitField0_ & ~0x00000040);
storefileIndexSizeMB_ = 0;
onChanged();
return this;
}
// optional uint64 read_requests_count = 8;
private long readRequestsCount_ ;
/**
* <code>optional uint64 read_requests_count = 8;</code>
*
* <pre>
** the current total read requests made to region
* </pre>
*/
public boolean hasReadRequestsCount() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* <code>optional uint64 read_requests_count = 8;</code>
*
* <pre>
** the current total read requests made to region
* </pre>
*/
public long getReadRequestsCount() {
return readRequestsCount_;
}
/**
* <code>optional uint64 read_requests_count = 8;</code>
*
* <pre>
** the current total read requests made to region
* </pre>
*/
public Builder setReadRequestsCount(long value) {
bitField0_ |= 0x00000080;
readRequestsCount_ = value;
onChanged();
return this;
}
/**
* <code>optional uint64 read_requests_count = 8;</code>
*
* <pre>
** the current total read requests made to region
* </pre>
*/
public Builder clearReadRequestsCount() {
bitField0_ = (bitField0_ & ~0x00000080);
readRequestsCount_ = 0L;
onChanged();
return this;
}
// optional uint64 write_requests_count = 9;
private long writeRequestsCount_ ;
/**
* <code>optional uint64 write_requests_count = 9;</code>
*
* <pre>
** the current total write requests made to region
* </pre>
*/
public boolean hasWriteRequestsCount() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* <code>optional uint64 write_requests_count = 9;</code>
*
* <pre>
** the current total write requests made to region
* </pre>
*/
public long getWriteRequestsCount() {
return writeRequestsCount_;
}
/**
* <code>optional uint64 write_requests_count = 9;</code>
*
* <pre>
** the current total write requests made to region
* </pre>
*/
public Builder setWriteRequestsCount(long value) {
bitField0_ |= 0x00000100;
writeRequestsCount_ = value;
onChanged();
return this;
}
/**
* <code>optional uint64 write_requests_count = 9;</code>
*
* <pre>
** the current total write requests made to region
* </pre>
*/
public Builder clearWriteRequestsCount() {
bitField0_ = (bitField0_ & ~0x00000100);
writeRequestsCount_ = 0L;
onChanged();
return this;
}
// optional uint64 total_compacting_KVs = 10;
private long totalCompactingKVs_ ;
/**
* <code>optional uint64 total_compacting_KVs = 10;</code>
*
* <pre>
** the total compacting key values in currently running compaction
* </pre>
*/
public boolean hasTotalCompactingKVs() {
return ((bitField0_ & 0x00000200) == 0x00000200);
}
/**
* <code>optional uint64 total_compacting_KVs = 10;</code>
*
* <pre>
** the total compacting key values in currently running compaction
* </pre>
*/
public long getTotalCompactingKVs() {
return totalCompactingKVs_;
}
/**
* <code>optional uint64 total_compacting_KVs = 10;</code>
*
* <pre>
** the total compacting key values in currently running compaction
* </pre>
*/
public Builder setTotalCompactingKVs(long value) {
bitField0_ |= 0x00000200;
totalCompactingKVs_ = value;
onChanged();
return this;
}
/**
* <code>optional uint64 total_compacting_KVs = 10;</code>
*
* <pre>
** the total compacting key values in currently running compaction
* </pre>
*/
public Builder clearTotalCompactingKVs() {
bitField0_ = (bitField0_ & ~0x00000200);
totalCompactingKVs_ = 0L;
onChanged();
return this;
}
// optional uint64 current_compacted_KVs = 11;
private long currentCompactedKVs_ ;
/**
* <code>optional uint64 current_compacted_KVs = 11;</code>
*
* <pre>
** the completed count of key values in currently running compaction
* </pre>
*/
public boolean hasCurrentCompactedKVs() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* <code>optional uint64 current_compacted_KVs = 11;</code>
*
* <pre>
** the completed count of key values in currently running compaction
* </pre>
*/
public long getCurrentCompactedKVs() {
return currentCompactedKVs_;
}
/**
* <code>optional uint64 current_compacted_KVs = 11;</code>
*
* <pre>
** the completed count of key values in currently running compaction
* </pre>
*/
public Builder setCurrentCompactedKVs(long value) {
bitField0_ |= 0x00000400;
currentCompactedKVs_ = value;
onChanged();
return this;
}
/**
* <code>optional uint64 current_compacted_KVs = 11;</code>
*
* <pre>
** the completed count of key values in currently running compaction
* </pre>
*/
public Builder clearCurrentCompactedKVs() {
bitField0_ = (bitField0_ & ~0x00000400);
currentCompactedKVs_ = 0L;
onChanged();
return this;
}
// optional uint32 root_index_size_KB = 12;
private int rootIndexSizeKB_ ;
/**
* <code>optional uint32 root_index_size_KB = 12;</code>
*
* <pre>
** The current total size of root-level indexes for the region, in KB.
* </pre>
*/
public boolean hasRootIndexSizeKB() {
return ((bitField0_ & 0x00000800) == 0x00000800);
}
/**
* <code>optional uint32 root_index_size_KB = 12;</code>
*
* <pre>
** The current total size of root-level indexes for the region, in KB.
* </pre>
*/
public int getRootIndexSizeKB() {
return rootIndexSizeKB_;
}
/**
* <code>optional uint32 root_index_size_KB = 12;</code>
*
* <pre>
** The current total size of root-level indexes for the region, in KB.
* </pre>
*/
public Builder setRootIndexSizeKB(int value) {
bitField0_ |= 0x00000800;
rootIndexSizeKB_ = value;
onChanged();
return this;
}
/**
* <code>optional uint32 root_index_size_KB = 12;</code>
*
* <pre>
** The current total size of root-level indexes for the region, in KB.
* </pre>
*/
public Builder clearRootIndexSizeKB() {
bitField0_ = (bitField0_ & ~0x00000800);
rootIndexSizeKB_ = 0;
onChanged();
return this;
}
// optional uint32 total_static_index_size_KB = 13;
private int totalStaticIndexSizeKB_ ;
/**
* <code>optional uint32 total_static_index_size_KB = 13;</code>
*
* <pre>
** The total size of all index blocks, not just the root level, in KB.
* </pre>
*/
public boolean hasTotalStaticIndexSizeKB() {
return ((bitField0_ & 0x00001000) == 0x00001000);
}
/**
* <code>optional uint32 total_static_index_size_KB = 13;</code>
*
* <pre>
** The total size of all index blocks, not just the root level, in KB.
* </pre>
*/
public int getTotalStaticIndexSizeKB() {
return totalStaticIndexSizeKB_;
}
/**
* <code>optional uint32 total_static_index_size_KB = 13;</code>
*
* <pre>
** The total size of all index blocks, not just the root level, in KB.
* </pre>
*/
public Builder setTotalStaticIndexSizeKB(int value) {
bitField0_ |= 0x00001000;
totalStaticIndexSizeKB_ = value;
onChanged();
return this;
}
/**
* <code>optional uint32 total_static_index_size_KB = 13;</code>
*
* <pre>
** The total size of all index blocks, not just the root level, in KB.
* </pre>
*/
public Builder clearTotalStaticIndexSizeKB() {
bitField0_ = (bitField0_ & ~0x00001000);
totalStaticIndexSizeKB_ = 0;
onChanged();
return this;
}
// optional uint32 total_static_bloom_size_KB = 14;
private int totalStaticBloomSizeKB_ ;
/**
* <code>optional uint32 total_static_bloom_size_KB = 14;</code>
*
* <pre>
**
* The total size of all Bloom filter blocks, not just those loaded into the
* block cache, in KB.
* </pre>
*/
public boolean hasTotalStaticBloomSizeKB() {
return ((bitField0_ & 0x00002000) == 0x00002000);
}
/**
* <code>optional uint32 total_static_bloom_size_KB = 14;</code>
*
* <pre>
**
* The total size of all Bloom filter blocks, not just those loaded into the
* block cache, in KB.
* </pre>
*/
public int getTotalStaticBloomSizeKB() {
return totalStaticBloomSizeKB_;
}
/**
* <code>optional uint32 total_static_bloom_size_KB = 14;</code>
*
* <pre>
**
* The total size of all Bloom filter blocks, not just those loaded into the
* block cache, in KB.
* </pre>
*/
public Builder setTotalStaticBloomSizeKB(int value) {
bitField0_ |= 0x00002000;
totalStaticBloomSizeKB_ = value;
onChanged();
return this;
}
/**
* <code>optional uint32 total_static_bloom_size_KB = 14;</code>
*
* <pre>
**
* The total size of all Bloom filter blocks, not just those loaded into the
* block cache, in KB.
* </pre>
*/
public Builder clearTotalStaticBloomSizeKB() {
bitField0_ = (bitField0_ & ~0x00002000);
totalStaticBloomSizeKB_ = 0;
onChanged();
return this;
}
// optional uint64 complete_sequence_id = 15;
private long completeSequenceId_ ;
/**
* <code>optional uint64 complete_sequence_id = 15;</code>
*
* <pre>
** The most recent sequence id from a cache flush
* </pre>
*/
public boolean hasCompleteSequenceId() {
return ((bitField0_ & 0x00004000) == 0x00004000);
}
/**
* <code>optional uint64 complete_sequence_id = 15;</code>
*
* <pre>
** The most recent sequence id from a cache flush
* </pre>
*/
public long getCompleteSequenceId() {
return completeSequenceId_;
}
/**
* <code>optional uint64 complete_sequence_id = 15;</code>
*
* <pre>
** The most recent sequence id from a cache flush
* </pre>
*/
public Builder setCompleteSequenceId(long value) {
bitField0_ |= 0x00004000;
completeSequenceId_ = value;
onChanged();
return this;
}
/**
* <code>optional uint64 complete_sequence_id = 15;</code>
*
* <pre>
** The most recent sequence id from a cache flush
* </pre>
*/
public Builder clearCompleteSequenceId() {
bitField0_ = (bitField0_ & ~0x00004000);
completeSequenceId_ = 0L;
onChanged();
return this;
}
// optional float data_locality = 16;
private float dataLocality_ ;
/**
* <code>optional float data_locality = 16;</code>
*
* <pre>
** The current data locality for the region in the regionserver
* </pre>
*/
public boolean hasDataLocality() {
return ((bitField0_ & 0x00008000) == 0x00008000);
}
/**
* <code>optional float data_locality = 16;</code>
*
* <pre>
** The current data locality for the region in the regionserver
* </pre>
*/
public float getDataLocality() {
return dataLocality_;
}
/**
* <code>optional float data_locality = 16;</code>
*
* <pre>
** The current data locality for the region in the regionserver
* </pre>
*/
public Builder setDataLocality(float value) {
bitField0_ |= 0x00008000;
dataLocality_ = value;
onChanged();
return this;
}
/**
* <code>optional float data_locality = 16;</code>
*
* <pre>
** The current data locality for the region in the regionserver
* </pre>
*/
public Builder clearDataLocality() {
bitField0_ = (bitField0_ & ~0x00008000);
dataLocality_ = 0F;
onChanged();
return this;
}
// optional uint64 last_major_compaction_ts = 17 [default = 0];
private long lastMajorCompactionTs_ ;
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public boolean hasLastMajorCompactionTs() {
return ((bitField0_ & 0x00010000) == 0x00010000);
}
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public long getLastMajorCompactionTs() {
return lastMajorCompactionTs_;
}
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public Builder setLastMajorCompactionTs(long value) {
bitField0_ |= 0x00010000;
lastMajorCompactionTs_ = value;
onChanged();
return this;
}
/**
* <code>optional uint64 last_major_compaction_ts = 17 [default = 0];</code>
*/
public Builder clearLastMajorCompactionTs() {
bitField0_ = (bitField0_ & ~0x00010000);
lastMajorCompactionTs_ = 0L;
onChanged();
return this;
}
// repeated .StoreSequenceId store_complete_sequence_id = 18;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> storeCompleteSequenceId_ =
java.util.Collections.emptyList();
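// Copy-on-write guard (editorial comment): bit 0x00020000 of bitField0_
// records whether storeCompleteSequenceId_ is already a private mutable copy;
// until that bit is set, the field aliases an immutable, possibly shared, list.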
private void ensureStoreCompleteSequenceIdIsMutable() {
if (!((bitField0_ & 0x00020000) == 0x00020000)) {
storeCompleteSequenceId_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId>(storeCompleteSequenceId_);
bitField0_ |= 0x00020000;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder> storeCompleteSequenceIdBuilder_;
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> getStoreCompleteSequenceIdList() {
if (storeCompleteSequenceIdBuilder_ == null) {
return java.util.Collections.unmodifiableList(storeCompleteSequenceId_);
} else {
return storeCompleteSequenceIdBuilder_.getMessageList();
}
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public int getStoreCompleteSequenceIdCount() {
if (storeCompleteSequenceIdBuilder_ == null) {
return storeCompleteSequenceId_.size();
} else {
return storeCompleteSequenceIdBuilder_.getCount();
}
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId getStoreCompleteSequenceId(int index) {
if (storeCompleteSequenceIdBuilder_ == null) {
return storeCompleteSequenceId_.get(index);
} else {
return storeCompleteSequenceIdBuilder_.getMessage(index);
}
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public Builder setStoreCompleteSequenceId(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) {
if (storeCompleteSequenceIdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureStoreCompleteSequenceIdIsMutable();
storeCompleteSequenceId_.set(index, value);
onChanged();
} else {
storeCompleteSequenceIdBuilder_.setMessage(index, value);
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public Builder setStoreCompleteSequenceId(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) {
if (storeCompleteSequenceIdBuilder_ == null) {
ensureStoreCompleteSequenceIdIsMutable();
storeCompleteSequenceId_.set(index, builderForValue.build());
onChanged();
} else {
storeCompleteSequenceIdBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public Builder addStoreCompleteSequenceId(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) {
if (storeCompleteSequenceIdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureStoreCompleteSequenceIdIsMutable();
storeCompleteSequenceId_.add(value);
onChanged();
} else {
storeCompleteSequenceIdBuilder_.addMessage(value);
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public Builder addStoreCompleteSequenceId(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId value) {
if (storeCompleteSequenceIdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureStoreCompleteSequenceIdIsMutable();
storeCompleteSequenceId_.add(index, value);
onChanged();
} else {
storeCompleteSequenceIdBuilder_.addMessage(index, value);
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public Builder addStoreCompleteSequenceId(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) {
if (storeCompleteSequenceIdBuilder_ == null) {
ensureStoreCompleteSequenceIdIsMutable();
storeCompleteSequenceId_.add(builderForValue.build());
onChanged();
} else {
storeCompleteSequenceIdBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public Builder addStoreCompleteSequenceId(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder builderForValue) {
if (storeCompleteSequenceIdBuilder_ == null) {
ensureStoreCompleteSequenceIdIsMutable();
storeCompleteSequenceId_.add(index, builderForValue.build());
onChanged();
} else {
storeCompleteSequenceIdBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public Builder addAllStoreCompleteSequenceId(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId> values) {
if (storeCompleteSequenceIdBuilder_ == null) {
ensureStoreCompleteSequenceIdIsMutable();
super.addAll(values, storeCompleteSequenceId_);
onChanged();
} else {
storeCompleteSequenceIdBuilder_.addAllMessages(values);
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public Builder clearStoreCompleteSequenceId() {
if (storeCompleteSequenceIdBuilder_ == null) {
storeCompleteSequenceId_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00020000);
onChanged();
} else {
storeCompleteSequenceIdBuilder_.clear();
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public Builder removeStoreCompleteSequenceId(int index) {
if (storeCompleteSequenceIdBuilder_ == null) {
ensureStoreCompleteSequenceIdIsMutable();
storeCompleteSequenceId_.remove(index);
onChanged();
} else {
storeCompleteSequenceIdBuilder_.remove(index);
}
return this;
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder getStoreCompleteSequenceIdBuilder(
int index) {
return getStoreCompleteSequenceIdFieldBuilder().getBuilder(index);
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreCompleteSequenceIdOrBuilder(
int index) {
if (storeCompleteSequenceIdBuilder_ == null) {
return storeCompleteSequenceId_.get(index);
} else {
return storeCompleteSequenceIdBuilder_.getMessageOrBuilder(index);
}
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>
getStoreCompleteSequenceIdOrBuilderList() {
if (storeCompleteSequenceIdBuilder_ != null) {
return storeCompleteSequenceIdBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(storeCompleteSequenceId_);
}
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreCompleteSequenceIdBuilder() {
return getStoreCompleteSequenceIdFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance());
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder addStoreCompleteSequenceIdBuilder(
int index) {
return getStoreCompleteSequenceIdFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.getDefaultInstance());
}
/**
* <code>repeated .StoreSequenceId store_complete_sequence_id = 18;</code>
*
* <pre>
** The most recent sequence id of each store from a cache flush
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder>
getStoreCompleteSequenceIdBuilderList() {
return getStoreCompleteSequenceIdFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>
getStoreCompleteSequenceIdFieldBuilder() {
if (storeCompleteSequenceIdBuilder_ == null) {
storeCompleteSequenceIdBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder>(
storeCompleteSequenceId_,
((bitField0_ & 0x00020000) == 0x00020000),
getParentForChildren(),
isClean());
storeCompleteSequenceId_ = null;
}
return storeCompleteSequenceIdBuilder_;
}
// @@protoc_insertion_point(builder_scope:RegionLoad)
}
static {
defaultInstance = new RegionLoad(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:RegionLoad)
}
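// NOTE (editorial, not compiler-generated): a minimal sketch of the RegionLoad
// builder API above. buildPartial() is used so the sketch does not depend on
// the message's required field(s), which are set elsewhere in this class; a
// real caller would populate those and call build(). All values are
// hypothetical examples.
private static RegionLoad sketchRegionLoadMetrics() {
return RegionLoad.newBuilder()
.setCurrentCompactedKVs(8192L) // KVs already compacted in the running compaction
.setRootIndexSizeKB(64) // root-level index size, KB
.setTotalStaticIndexSizeKB(512) // all index blocks, KB
.setTotalStaticBloomSizeKB(256) // all Bloom filter blocks, KB
.setCompleteSequenceId(123456L) // last sequence id from a cache flush
.setDataLocality(0.98F) // fraction of region data local to this server
.addStoreCompleteSequenceId( // repeated per-store flush sequence id
StoreSequenceId.newBuilder().buildPartial())
.buildPartial(); // skips required-field validation
}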
public interface ReplicationLoadSinkOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required uint64 ageOfLastAppliedOp = 1;
/**
* <code>required uint64 ageOfLastAppliedOp = 1;</code>
*/
boolean hasAgeOfLastAppliedOp();
/**
* <code>required uint64 ageOfLastAppliedOp = 1;</code>
*/
long getAgeOfLastAppliedOp();
// required uint64 timeStampsOfLastAppliedOp = 2;
/**
* <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
*/
boolean hasTimeStampsOfLastAppliedOp();
/**
* <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
*/
long getTimeStampsOfLastAppliedOp();
}
/**
* Protobuf type {@code ReplicationLoadSink}
*/
public static final class ReplicationLoadSink extends
com.google.protobuf.GeneratedMessage
implements ReplicationLoadSinkOrBuilder {
// Use ReplicationLoadSink.newBuilder() to construct.
private ReplicationLoadSink(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ReplicationLoadSink(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ReplicationLoadSink defaultInstance;
public static ReplicationLoadSink getDefaultInstance() {
return defaultInstance;
}
public ReplicationLoadSink getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ReplicationLoadSink(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
ageOfLastAppliedOp_ = input.readUInt64();
break;
}
case 16: {
bitField0_ |= 0x00000002;
timeStampsOfLastAppliedOp_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder.class);
}
public static com.google.protobuf.Parser<ReplicationLoadSink> PARSER =
new com.google.protobuf.AbstractParser<ReplicationLoadSink>() {
public ReplicationLoadSink parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ReplicationLoadSink(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ReplicationLoadSink> getParserForType() {
return PARSER;
}
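// bitField0_ packs one presence bit per field: 0x00000001 for
// ageOfLastAppliedOp, 0x00000002 for timeStampsOfLastAppliedOp.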
private int bitField0_;
// required uint64 ageOfLastAppliedOp = 1;
public static final int AGEOFLASTAPPLIEDOP_FIELD_NUMBER = 1;
private long ageOfLastAppliedOp_;
/**
* <code>required uint64 ageOfLastAppliedOp = 1;</code>
*/
public boolean hasAgeOfLastAppliedOp() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required uint64 ageOfLastAppliedOp = 1;</code>
*/
public long getAgeOfLastAppliedOp() {
return ageOfLastAppliedOp_;
}
// required uint64 timeStampsOfLastAppliedOp = 2;
public static final int TIMESTAMPSOFLASTAPPLIEDOP_FIELD_NUMBER = 2;
private long timeStampsOfLastAppliedOp_;
/**
* <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
*/
public boolean hasTimeStampsOfLastAppliedOp() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
*/
public long getTimeStampsOfLastAppliedOp() {
return timeStampsOfLastAppliedOp_;
}
private void initFields() {
ageOfLastAppliedOp_ = 0L;
timeStampsOfLastAppliedOp_ = 0L;
}
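// isInitialized() is memoized: -1 = not yet computed, 0 = a required field
// is missing, 1 = fully initialized.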
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasAgeOfLastAppliedOp()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasTimeStampsOfLastAppliedOp()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
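// writeTo() calls getSerializedSize() first so the memoized size is computed
// before serialization begins.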
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, ageOfLastAppliedOp_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, timeStampsOfLastAppliedOp_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, ageOfLastAppliedOp_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, timeStampsOfLastAppliedOp_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) obj;
boolean result = true;
result = result && (hasAgeOfLastAppliedOp() == other.hasAgeOfLastAppliedOp());
if (hasAgeOfLastAppliedOp()) {
result = result && (getAgeOfLastAppliedOp()
== other.getAgeOfLastAppliedOp());
}
result = result && (hasTimeStampsOfLastAppliedOp() == other.hasTimeStampsOfLastAppliedOp());
if (hasTimeStampsOfLastAppliedOp()) {
result = result && (getTimeStampsOfLastAppliedOp()
== other.getTimeStampsOfLastAppliedOp());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasAgeOfLastAppliedOp()) {
hash = (37 * hash) + AGEOFLASTAPPLIEDOP_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getAgeOfLastAppliedOp());
}
if (hasTimeStampsOfLastAppliedOp()) {
hash = (37 * hash) + TIMESTAMPSOFLASTAPPLIEDOP_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getTimeStampsOfLastAppliedOp());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code ReplicationLoadSink}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
ageOfLastAppliedOp_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
timeStampsOfLastAppliedOp_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSink_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink build() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.ageOfLastAppliedOp_ = ageOfLastAppliedOp_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.timeStampsOfLastAppliedOp_ = timeStampsOfLastAppliedOp_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance()) return this;
if (other.hasAgeOfLastAppliedOp()) {
setAgeOfLastAppliedOp(other.getAgeOfLastAppliedOp());
}
if (other.hasTimeStampsOfLastAppliedOp()) {
setTimeStampsOfLastAppliedOp(other.getTimeStampsOfLastAppliedOp());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasAgeOfLastAppliedOp()) {
return false;
}
if (!hasTimeStampsOfLastAppliedOp()) {
return false;
}
return true;
}
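// On a parse failure, any partially parsed message is still merged into this
// builder in the finally block before the exception propagates.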
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required uint64 ageOfLastAppliedOp = 1;
private long ageOfLastAppliedOp_ ;
/**
* <code>required uint64 ageOfLastAppliedOp = 1;</code>
*/
public boolean hasAgeOfLastAppliedOp() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required uint64 ageOfLastAppliedOp = 1;</code>
*/
public long getAgeOfLastAppliedOp() {
return ageOfLastAppliedOp_;
}
/**
* <code>required uint64 ageOfLastAppliedOp = 1;</code>
*/
public Builder setAgeOfLastAppliedOp(long value) {
bitField0_ |= 0x00000001;
ageOfLastAppliedOp_ = value;
onChanged();
return this;
}
/**
* <code>required uint64 ageOfLastAppliedOp = 1;</code>
*/
public Builder clearAgeOfLastAppliedOp() {
bitField0_ = (bitField0_ & ~0x00000001);
ageOfLastAppliedOp_ = 0L;
onChanged();
return this;
}
// required uint64 timeStampsOfLastAppliedOp = 2;
private long timeStampsOfLastAppliedOp_ ;
/**
* <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
*/
public boolean hasTimeStampsOfLastAppliedOp() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
*/
public long getTimeStampsOfLastAppliedOp() {
return timeStampsOfLastAppliedOp_;
}
/**
* <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
*/
public Builder setTimeStampsOfLastAppliedOp(long value) {
bitField0_ |= 0x00000002;
timeStampsOfLastAppliedOp_ = value;
onChanged();
return this;
}
/**
* <code>required uint64 timeStampsOfLastAppliedOp = 2;</code>
*/
public Builder clearTimeStampsOfLastAppliedOp() {
bitField0_ = (bitField0_ & ~0x00000002);
timeStampsOfLastAppliedOp_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:ReplicationLoadSink)
}
static {
defaultInstance = new ReplicationLoadSink(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:ReplicationLoadSink)
}
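// NOTE (editorial, not compiler-generated): a minimal usage sketch for the
// ReplicationLoadSink message above. Both fields are declared `required`, so
// build() throws an UninitializedMessageException unless both presence bits
// are set. The values are hypothetical examples.
private static ReplicationLoadSink sketchReplicationLoadSink() {
return ReplicationLoadSink.newBuilder()
.setAgeOfLastAppliedOp(1500L) // age, in ms, of the last applied edit
.setTimeStampsOfLastAppliedOp(System.currentTimeMillis())
.build(); // validates both required fields
}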
public interface ReplicationLoadSourceOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required string peerID = 1;
/**
* <code>required string peerID = 1;</code>
*/
boolean hasPeerID();
/**
* <code>required string peerID = 1;</code>
*/
java.lang.String getPeerID();
/**
* <code>required string peerID = 1;</code>
*/
com.google.protobuf.ByteString
getPeerIDBytes();
// required uint64 ageOfLastShippedOp = 2;
/**
* <code>required uint64 ageOfLastShippedOp = 2;</code>
*/
boolean hasAgeOfLastShippedOp();
/**
* <code>required uint64 ageOfLastShippedOp = 2;</code>
*/
long getAgeOfLastShippedOp();
// required uint32 sizeOfLogQueue = 3;
/**
* <code>required uint32 sizeOfLogQueue = 3;</code>
*/
boolean hasSizeOfLogQueue();
/**
* <code>required uint32 sizeOfLogQueue = 3;</code>
*/
int getSizeOfLogQueue();
// required uint64 timeStampOfLastShippedOp = 4;
/**
* <code>required uint64 timeStampOfLastShippedOp = 4;</code>
*/
boolean hasTimeStampOfLastShippedOp();
/**
* <code>required uint64 timeStampOfLastShippedOp = 4;</code>
*/
long getTimeStampOfLastShippedOp();
// required uint64 replicationLag = 5;
/**
* <code>required uint64 replicationLag = 5;</code>
*/
boolean hasReplicationLag();
/**
* <code>required uint64 replicationLag = 5;</code>
*/
long getReplicationLag();
}
/**
* Protobuf type {@code ReplicationLoadSource}
*/
public static final class ReplicationLoadSource extends
com.google.protobuf.GeneratedMessage
implements ReplicationLoadSourceOrBuilder {
// Use ReplicationLoadSource.newBuilder() to construct.
private ReplicationLoadSource(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ReplicationLoadSource(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ReplicationLoadSource defaultInstance;
public static ReplicationLoadSource getDefaultInstance() {
return defaultInstance;
}
public ReplicationLoadSource getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ReplicationLoadSource(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
bitField0_ |= 0x00000001;
peerID_ = input.readBytes();
break;
}
case 16: {
bitField0_ |= 0x00000002;
ageOfLastShippedOp_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
sizeOfLogQueue_ = input.readUInt32();
break;
}
case 32: {
bitField0_ |= 0x00000008;
timeStampOfLastShippedOp_ = input.readUInt64();
break;
}
case 40: {
bitField0_ |= 0x00000010;
replicationLag_ = input.readUInt64();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder.class);
}
public static com.google.protobuf.Parser<ReplicationLoadSource> PARSER =
new com.google.protobuf.AbstractParser<ReplicationLoadSource>() {
public ReplicationLoadSource parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ReplicationLoadSource(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ReplicationLoadSource> getParserForType() {
return PARSER;
}
private int bitField0_;
// required string peerID = 1;
public static final int PEERID_FIELD_NUMBER = 1;
private java.lang.Object peerID_;
/**
* <code>required string peerID = 1;</code>
*/
public boolean hasPeerID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
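// peerID_ holds either the ByteString parsed from the wire or a decoded
// String; getPeerID() decodes lazily and caches the String only when the
// bytes are valid UTF-8.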
/**
* <code>required string peerID = 1;</code>
*/
public java.lang.String getPeerID() {
java.lang.Object ref = peerID_;
if (ref instanceof java.lang.String) {
return (java.lang.String) ref;
} else {
com.google.protobuf.ByteString bs =
(com.google.protobuf.ByteString) ref;
java.lang.String s = bs.toStringUtf8();
if (bs.isValidUtf8()) {
peerID_ = s;
}
return s;
}
}
/**
* <code>required string peerID = 1;</code>
*/
public com.google.protobuf.ByteString
getPeerIDBytes() {
java.lang.Object ref = peerID_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
peerID_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
// required uint64 ageOfLastShippedOp = 2;
public static final int AGEOFLASTSHIPPEDOP_FIELD_NUMBER = 2;
private long ageOfLastShippedOp_;
/**
* <code>required uint64 ageOfLastShippedOp = 2;</code>
*/
public boolean hasAgeOfLastShippedOp() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required uint64 ageOfLastShippedOp = 2;</code>
*/
public long getAgeOfLastShippedOp() {
return ageOfLastShippedOp_;
}
// required uint32 sizeOfLogQueue = 3;
public static final int SIZEOFLOGQUEUE_FIELD_NUMBER = 3;
private int sizeOfLogQueue_;
/**
* <code>required uint32 sizeOfLogQueue = 3;</code>
*/
public boolean hasSizeOfLogQueue() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>required uint32 sizeOfLogQueue = 3;</code>
*/
public int getSizeOfLogQueue() {
return sizeOfLogQueue_;
}
// required uint64 timeStampOfLastShippedOp = 4;
public static final int TIMESTAMPOFLASTSHIPPEDOP_FIELD_NUMBER = 4;
private long timeStampOfLastShippedOp_;
/**
* <code>required uint64 timeStampOfLastShippedOp = 4;</code>
*/
public boolean hasTimeStampOfLastShippedOp() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* <code>required uint64 timeStampOfLastShippedOp = 4;</code>
*/
public long getTimeStampOfLastShippedOp() {
return timeStampOfLastShippedOp_;
}
// required uint64 replicationLag = 5;
public static final int REPLICATIONLAG_FIELD_NUMBER = 5;
private long replicationLag_;
/**
* <code>required uint64 replicationLag = 5;</code>
*/
public boolean hasReplicationLag() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* <code>required uint64 replicationLag = 5;</code>
*/
public long getReplicationLag() {
return replicationLag_;
}
private void initFields() {
peerID_ = "";
ageOfLastShippedOp_ = 0L;
sizeOfLogQueue_ = 0;
timeStampOfLastShippedOp_ = 0L;
replicationLag_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasPeerID()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasAgeOfLastShippedOp()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasSizeOfLogQueue()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasTimeStampOfLastShippedOp()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasReplicationLag()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeBytes(1, getPeerIDBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, ageOfLastShippedOp_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(3, sizeOfLogQueue_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt64(4, timeStampOfLastShippedOp_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(5, replicationLag_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeBytesSize(1, getPeerIDBytes());
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, ageOfLastShippedOp_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(3, sizeOfLogQueue_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(4, timeStampOfLastShippedOp_);
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(5, replicationLag_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) obj;
boolean result = true;
result = result && (hasPeerID() == other.hasPeerID());
if (hasPeerID()) {
result = result && getPeerID()
.equals(other.getPeerID());
}
result = result && (hasAgeOfLastShippedOp() == other.hasAgeOfLastShippedOp());
if (hasAgeOfLastShippedOp()) {
result = result && (getAgeOfLastShippedOp()
== other.getAgeOfLastShippedOp());
}
result = result && (hasSizeOfLogQueue() == other.hasSizeOfLogQueue());
if (hasSizeOfLogQueue()) {
result = result && (getSizeOfLogQueue()
== other.getSizeOfLogQueue());
}
result = result && (hasTimeStampOfLastShippedOp() == other.hasTimeStampOfLastShippedOp());
if (hasTimeStampOfLastShippedOp()) {
result = result && (getTimeStampOfLastShippedOp()
== other.getTimeStampOfLastShippedOp());
}
result = result && (hasReplicationLag() == other.hasReplicationLag());
if (hasReplicationLag()) {
result = result && (getReplicationLag()
== other.getReplicationLag());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasPeerID()) {
hash = (37 * hash) + PEERID_FIELD_NUMBER;
hash = (53 * hash) + getPeerID().hashCode();
}
if (hasAgeOfLastShippedOp()) {
hash = (37 * hash) + AGEOFLASTSHIPPEDOP_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getAgeOfLastShippedOp());
}
if (hasSizeOfLogQueue()) {
hash = (37 * hash) + SIZEOFLOGQUEUE_FIELD_NUMBER;
hash = (53 * hash) + getSizeOfLogQueue();
}
if (hasTimeStampOfLastShippedOp()) {
hash = (37 * hash) + TIMESTAMPOFLASTSHIPPEDOP_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getTimeStampOfLastShippedOp());
}
if (hasReplicationLag()) {
hash = (37 * hash) + REPLICATIONLAG_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getReplicationLag());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code ReplicationLoadSource}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
peerID_ = "";
bitField0_ = (bitField0_ & ~0x00000001);
ageOfLastShippedOp_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
sizeOfLogQueue_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
timeStampOfLastShippedOp_ = 0L;
bitField0_ = (bitField0_ & ~0x00000008);
replicationLag_ = 0L;
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ReplicationLoadSource_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource build() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.peerID_ = peerID_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.ageOfLastShippedOp_ = ageOfLastShippedOp_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.sizeOfLogQueue_ = sizeOfLogQueue_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.timeStampOfLastShippedOp_ = timeStampOfLastShippedOp_;
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000010;
}
result.replicationLag_ = replicationLag_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance()) return this;
if (other.hasPeerID()) {
bitField0_ |= 0x00000001;
peerID_ = other.peerID_;
onChanged();
}
if (other.hasAgeOfLastShippedOp()) {
setAgeOfLastShippedOp(other.getAgeOfLastShippedOp());
}
if (other.hasSizeOfLogQueue()) {
setSizeOfLogQueue(other.getSizeOfLogQueue());
}
if (other.hasTimeStampOfLastShippedOp()) {
setTimeStampOfLastShippedOp(other.getTimeStampOfLastShippedOp());
}
if (other.hasReplicationLag()) {
setReplicationLag(other.getReplicationLag());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasPeerID()) {
return false;
}
if (!hasAgeOfLastShippedOp()) {
return false;
}
if (!hasSizeOfLogQueue()) {
return false;
}
if (!hasTimeStampOfLastShippedOp()) {
return false;
}
if (!hasReplicationLag()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required string peerID = 1;
private java.lang.Object peerID_ = "";
/**
* <code>required string peerID = 1;</code>
*/
public boolean hasPeerID() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required string peerID = 1;</code>
*/
public java.lang.String getPeerID() {
java.lang.Object ref = peerID_;
if (!(ref instanceof java.lang.String)) {
java.lang.String s = ((com.google.protobuf.ByteString) ref)
.toStringUtf8();
peerID_ = s;
return s;
} else {
return (java.lang.String) ref;
}
}
/**
* <code>required string peerID = 1;</code>
*/
public com.google.protobuf.ByteString
getPeerIDBytes() {
java.lang.Object ref = peerID_;
if (ref instanceof java.lang.String) {
com.google.protobuf.ByteString b =
com.google.protobuf.ByteString.copyFromUtf8(
(java.lang.String) ref);
peerID_ = b;
return b;
} else {
return (com.google.protobuf.ByteString) ref;
}
}
/**
* <code>required string peerID = 1;</code>
*/
public Builder setPeerID(
java.lang.String value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
peerID_ = value;
onChanged();
return this;
}
/**
* <code>required string peerID = 1;</code>
*/
public Builder clearPeerID() {
bitField0_ = (bitField0_ & ~0x00000001);
peerID_ = getDefaultInstance().getPeerID();
onChanged();
return this;
}
/**
* <code>required string peerID = 1;</code>
*/
public Builder setPeerIDBytes(
com.google.protobuf.ByteString value) {
if (value == null) {
throw new NullPointerException();
}
bitField0_ |= 0x00000001;
peerID_ = value;
onChanged();
return this;
}
// required uint64 ageOfLastShippedOp = 2;
private long ageOfLastShippedOp_ ;
/**
* <code>required uint64 ageOfLastShippedOp = 2;</code>
*/
public boolean hasAgeOfLastShippedOp() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required uint64 ageOfLastShippedOp = 2;</code>
*/
public long getAgeOfLastShippedOp() {
return ageOfLastShippedOp_;
}
/**
* <code>required uint64 ageOfLastShippedOp = 2;</code>
*/
public Builder setAgeOfLastShippedOp(long value) {
bitField0_ |= 0x00000002;
ageOfLastShippedOp_ = value;
onChanged();
return this;
}
/**
* <code>required uint64 ageOfLastShippedOp = 2;</code>
*/
public Builder clearAgeOfLastShippedOp() {
bitField0_ = (bitField0_ & ~0x00000002);
ageOfLastShippedOp_ = 0L;
onChanged();
return this;
}
// required uint32 sizeOfLogQueue = 3;
private int sizeOfLogQueue_ ;
/**
* <code>required uint32 sizeOfLogQueue = 3;</code>
*/
public boolean hasSizeOfLogQueue() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>required uint32 sizeOfLogQueue = 3;</code>
*/
public int getSizeOfLogQueue() {
return sizeOfLogQueue_;
}
/**
* <code>required uint32 sizeOfLogQueue = 3;</code>
*/
public Builder setSizeOfLogQueue(int value) {
bitField0_ |= 0x00000004;
sizeOfLogQueue_ = value;
onChanged();
return this;
}
/**
* <code>required uint32 sizeOfLogQueue = 3;</code>
*/
public Builder clearSizeOfLogQueue() {
bitField0_ = (bitField0_ & ~0x00000004);
sizeOfLogQueue_ = 0;
onChanged();
return this;
}
// required uint64 timeStampOfLastShippedOp = 4;
private long timeStampOfLastShippedOp_ ;
/**
* <code>required uint64 timeStampOfLastShippedOp = 4;</code>
*/
public boolean hasTimeStampOfLastShippedOp() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* <code>required uint64 timeStampOfLastShippedOp = 4;</code>
*/
public long getTimeStampOfLastShippedOp() {
return timeStampOfLastShippedOp_;
}
/**
* <code>required uint64 timeStampOfLastShippedOp = 4;</code>
*/
public Builder setTimeStampOfLastShippedOp(long value) {
bitField0_ |= 0x00000008;
timeStampOfLastShippedOp_ = value;
onChanged();
return this;
}
/**
* <code>required uint64 timeStampOfLastShippedOp = 4;</code>
*/
public Builder clearTimeStampOfLastShippedOp() {
bitField0_ = (bitField0_ & ~0x00000008);
timeStampOfLastShippedOp_ = 0L;
onChanged();
return this;
}
// required uint64 replicationLag = 5;
private long replicationLag_ ;
/**
* <code>required uint64 replicationLag = 5;</code>
*/
public boolean hasReplicationLag() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* <code>required uint64 replicationLag = 5;</code>
*/
public long getReplicationLag() {
return replicationLag_;
}
/**
* <code>required uint64 replicationLag = 5;</code>
*/
public Builder setReplicationLag(long value) {
bitField0_ |= 0x00000010;
replicationLag_ = value;
onChanged();
return this;
}
/**
* <code>required uint64 replicationLag = 5;</code>
*/
public Builder clearReplicationLag() {
bitField0_ = (bitField0_ & ~0x00000010);
replicationLag_ = 0L;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:ReplicationLoadSource)
}
static {
defaultInstance = new ReplicationLoadSource(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:ReplicationLoadSource)
}
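// Usage sketch (editorial addition, not generated code): building a
// ReplicationLoadSource with its Builder, assuming the standard generated
// newBuilder() factory. All five fields are declared "required", so build()
// throws an UninitializedMessageException if any is left unset; the values
// below are hypothetical.
//
//   ClusterStatusProtos.ReplicationLoadSource source =
//       ClusterStatusProtos.ReplicationLoadSource.newBuilder()
//           .setPeerID("peer1")
//           .setAgeOfLastShippedOp(1200L)
//           .setSizeOfLogQueue(3)
//           .setTimeStampOfLastShippedOp(1400000000000L)
//           .setReplicationLag(1200L)
//           .build();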
public interface ServerLoadOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional uint64 number_of_requests = 1;
/**
* <code>optional uint64 number_of_requests = 1;</code>
*
* <pre>
** Number of requests since last report.
* </pre>
*/
boolean hasNumberOfRequests();
/**
* <code>optional uint64 number_of_requests = 1;</code>
*
* <pre>
** Number of requests since last report.
* </pre>
*/
long getNumberOfRequests();
// optional uint64 total_number_of_requests = 2;
/**
* <code>optional uint64 total_number_of_requests = 2;</code>
*
* <pre>
** Total number of requests since the start of the region server.
* </pre>
*/
boolean hasTotalNumberOfRequests();
/**
* <code>optional uint64 total_number_of_requests = 2;</code>
*
* <pre>
** Total number of requests since the start of the region server.
* </pre>
*/
long getTotalNumberOfRequests();
// optional uint32 used_heap_MB = 3;
/**
* <code>optional uint32 used_heap_MB = 3;</code>
*
* <pre>
** The amount of used heap, in MB.
* </pre>
*/
boolean hasUsedHeapMB();
/**
* <code>optional uint32 used_heap_MB = 3;</code>
*
* <pre>
** The amount of used heap, in MB.
* </pre>
*/
int getUsedHeapMB();
// optional uint32 max_heap_MB = 4;
/**
* <code>optional uint32 max_heap_MB = 4;</code>
*
* <pre>
** The maximum allowable size of the heap, in MB.
* </pre>
*/
boolean hasMaxHeapMB();
/**
* <code>optional uint32 max_heap_MB = 4;</code>
*
* <pre>
** The maximum allowable size of the heap, in MB.
* </pre>
*/
int getMaxHeapMB();
// repeated .RegionLoad region_loads = 5;
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad>
getRegionLoadsList();
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index);
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
int getRegionLoadsCount();
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder>
getRegionLoadsOrBuilderList();
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder(
int index);
// repeated .Coprocessor coprocessors = 6;
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor>
getCoprocessorsList();
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index);
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
int getCoprocessorsCount();
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>
getCoprocessorsOrBuilderList();
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder(
int index);
// optional uint64 report_start_time = 7;
/**
* <code>optional uint64 report_start_time = 7;</code>
*
* <pre>
**
* Time when incremental (non-total) counts began being calculated (e.g. number_of_requests).
* The time is measured as the difference, in milliseconds, between that time
* and midnight, January 1, 1970 UTC.
* </pre>
*/
boolean hasReportStartTime();
/**
* <code>optional uint64 report_start_time = 7;</code>
*
* <pre>
**
* Time when incremental (non-total) counts began being calculated (e.g. number_of_requests).
* The time is measured as the difference, in milliseconds, between that time
* and midnight, January 1, 1970 UTC.
* </pre>
*/
long getReportStartTime();
// optional uint64 report_end_time = 8;
/**
* <code>optional uint64 report_end_time = 8;</code>
*
* <pre>
**
* Time when the report was generated.
* The time is measured as the difference, in milliseconds, between that time
* and midnight, January 1, 1970 UTC.
* </pre>
*/
boolean hasReportEndTime();
/**
* <code>optional uint64 report_end_time = 8;</code>
*
* <pre>
**
* Time when the report was generated.
* The time is measured as the difference, in milliseconds, between that time
* and midnight, January 1, 1970 UTC.
* </pre>
*/
long getReportEndTime();
// optional uint32 info_server_port = 9;
/**
* <code>optional uint32 info_server_port = 9;</code>
*
* <pre>
**
* The port number that this region server is hosting an info server on.
* </pre>
*/
boolean hasInfoServerPort();
/**
* <code>optional uint32 info_server_port = 9;</code>
*
* <pre>
**
* The port number that this region server is hosting an info server on.
* </pre>
*/
int getInfoServerPort();
// repeated .ReplicationLoadSource replLoadSource = 10;
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The ReplicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource>
getReplLoadSourceList();
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The ReplicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index);
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The ReplicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
int getReplLoadSourceCount();
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The ReplicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder>
getReplLoadSourceOrBuilderList();
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The ReplicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder(
int index);
// optional .ReplicationLoadSink replLoadSink = 11;
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The ReplicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
boolean hasReplLoadSink();
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The ReplicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink();
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The ReplicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder();
}
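// Usage sketch (editorial addition, not generated code): reading a
// ServerLoad through the accessors declared above. Optional scalar fields
// pair a has*() presence check with a get*(); repeated fields expose an
// indexed getter, a count, and a *List() view. The printLoad method name
// is illustrative only.
//
//   void printLoad(ClusterStatusProtos.ServerLoad load) {
//     if (load.hasNumberOfRequests()) {
//       System.out.println("requests: " + load.getNumberOfRequests());
//     }
//     for (ClusterStatusProtos.RegionLoad rl : load.getRegionLoadsList()) {
//       // inspect per-region load here
//     }
//   }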
/**
* Protobuf type {@code ServerLoad}
*/
public static final class ServerLoad extends
com.google.protobuf.GeneratedMessage
implements ServerLoadOrBuilder {
// Use ServerLoad.newBuilder() to construct.
private ServerLoad(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ServerLoad(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ServerLoad defaultInstance;
public static ServerLoad getDefaultInstance() {
return defaultInstance;
}
public ServerLoad getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ServerLoad(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 8: {
bitField0_ |= 0x00000001;
numberOfRequests_ = input.readUInt64();
break;
}
case 16: {
bitField0_ |= 0x00000002;
totalNumberOfRequests_ = input.readUInt64();
break;
}
case 24: {
bitField0_ |= 0x00000004;
usedHeapMB_ = input.readUInt32();
break;
}
case 32: {
bitField0_ |= 0x00000008;
maxHeapMB_ = input.readUInt32();
break;
}
case 42: {
if (!((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
regionLoads_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad>();
mutable_bitField0_ |= 0x00000010;
}
regionLoads_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.PARSER, extensionRegistry));
break;
}
case 50: {
if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
coprocessors_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor>();
mutable_bitField0_ |= 0x00000020;
}
coprocessors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.PARSER, extensionRegistry));
break;
}
case 56: {
bitField0_ |= 0x00000010;
reportStartTime_ = input.readUInt64();
break;
}
case 64: {
bitField0_ |= 0x00000020;
reportEndTime_ = input.readUInt64();
break;
}
case 72: {
bitField0_ |= 0x00000040;
infoServerPort_ = input.readUInt32();
break;
}
case 82: {
if (!((mutable_bitField0_ & 0x00000200) == 0x00000200)) {
replLoadSource_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource>();
mutable_bitField0_ |= 0x00000200;
}
replLoadSource_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.PARSER, extensionRegistry));
break;
}
case 90: {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder subBuilder = null;
if (((bitField0_ & 0x00000080) == 0x00000080)) {
subBuilder = replLoadSink_.toBuilder();
}
replLoadSink_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(replLoadSink_);
replLoadSink_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000080;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000010) == 0x00000010)) {
regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_);
}
if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
coprocessors_ = java.util.Collections.unmodifiableList(coprocessors_);
}
if (((mutable_bitField0_ & 0x00000200) == 0x00000200)) {
replLoadSource_ = java.util.Collections.unmodifiableList(replLoadSource_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
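// Note on the switch above: each case label is the protobuf wire tag,
// computed as (field_number << 3) | wire_type. For example, case 8 is
// field 1 as a varint ((1 << 3) | 0), case 42 is field 5 as a
// length-delimited message ((5 << 3) | 2), and case 90 is field 11
// ((11 << 3) | 2). A tag of 0 marks end of input.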
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder.class);
}
public static com.google.protobuf.Parser<ServerLoad> PARSER =
new com.google.protobuf.AbstractParser<ServerLoad>() {
public ServerLoad parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ServerLoad(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ServerLoad> getParserForType() {
return PARSER;
}
private int bitField0_;
// optional uint64 number_of_requests = 1;
public static final int NUMBER_OF_REQUESTS_FIELD_NUMBER = 1;
private long numberOfRequests_;
/**
* <code>optional uint64 number_of_requests = 1;</code>
*
* <pre>
** Number of requests since last report.
* </pre>
*/
public boolean hasNumberOfRequests() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>optional uint64 number_of_requests = 1;</code>
*
* <pre>
** Number of requests since last report.
* </pre>
*/
public long getNumberOfRequests() {
return numberOfRequests_;
}
// optional uint64 total_number_of_requests = 2;
public static final int TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER = 2;
private long totalNumberOfRequests_;
/**
* <code>optional uint64 total_number_of_requests = 2;</code>
*
* <pre>
** Total number of requests since the start of the region server.
* </pre>
*/
public boolean hasTotalNumberOfRequests() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>optional uint64 total_number_of_requests = 2;</code>
*
* <pre>
** Total number of requests since the start of the region server.
* </pre>
*/
public long getTotalNumberOfRequests() {
return totalNumberOfRequests_;
}
// optional uint32 used_heap_MB = 3;
public static final int USED_HEAP_MB_FIELD_NUMBER = 3;
private int usedHeapMB_;
/**
* <code>optional uint32 used_heap_MB = 3;</code>
*
* <pre>
** The amount of used heap, in MB.
* </pre>
*/
public boolean hasUsedHeapMB() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional uint32 used_heap_MB = 3;</code>
*
* <pre>
** The amount of used heap, in MB.
* </pre>
*/
public int getUsedHeapMB() {
return usedHeapMB_;
}
// optional uint32 max_heap_MB = 4;
public static final int MAX_HEAP_MB_FIELD_NUMBER = 4;
private int maxHeapMB_;
/**
* <code>optional uint32 max_heap_MB = 4;</code>
*
* <pre>
** The maximum allowable size of the heap, in MB.
* </pre>
*/
public boolean hasMaxHeapMB() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* <code>optional uint32 max_heap_MB = 4;</code>
*
* <pre>
** The maximum allowable size of the heap, in MB.
* </pre>
*/
public int getMaxHeapMB() {
return maxHeapMB_;
}
// repeated .RegionLoad region_loads = 5;
public static final int REGION_LOADS_FIELD_NUMBER = 5;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> regionLoads_;
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> getRegionLoadsList() {
return regionLoads_;
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder>
getRegionLoadsOrBuilderList() {
return regionLoads_;
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public int getRegionLoadsCount() {
return regionLoads_.size();
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index) {
return regionLoads_.get(index);
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder(
int index) {
return regionLoads_.get(index);
}
// repeated .Coprocessor coprocessors = 6;
public static final int COPROCESSORS_FIELD_NUMBER = 6;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> coprocessors_;
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> getCoprocessorsList() {
return coprocessors_;
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>
getCoprocessorsOrBuilderList() {
return coprocessors_;
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public int getCoprocessorsCount() {
return coprocessors_.size();
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index) {
return coprocessors_.get(index);
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder(
int index) {
return coprocessors_.get(index);
}
// optional uint64 report_start_time = 7;
public static final int REPORT_START_TIME_FIELD_NUMBER = 7;
private long reportStartTime_;
/**
* <code>optional uint64 report_start_time = 7;</code>
*
* <pre>
**
* Time when incremental (non-total) counts began being calculated (e.g. number_of_requests).
* The time is measured as the difference, in milliseconds, between that time
* and midnight, January 1, 1970 UTC.
* </pre>
*/
public boolean hasReportStartTime() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* <code>optional uint64 report_start_time = 7;</code>
*
* <pre>
**
* Time when incremental (non-total) counts began being calculated (e.g. number_of_requests).
* The time is measured as the difference, in milliseconds, between that time
* and midnight, January 1, 1970 UTC.
* </pre>
*/
public long getReportStartTime() {
return reportStartTime_;
}
// optional uint64 report_end_time = 8;
public static final int REPORT_END_TIME_FIELD_NUMBER = 8;
private long reportEndTime_;
/**
* <code>optional uint64 report_end_time = 8;</code>
*
* <pre>
**
* Time when the report was generated.
* The time is measured as the difference, in milliseconds, between that time
* and midnight, January 1, 1970 UTC.
* </pre>
*/
public boolean hasReportEndTime() {
return ((bitField0_ & 0x00000020) == 0x00000020);
}
/**
* <code>optional uint64 report_end_time = 8;</code>
*
* <pre>
**
* Time when the report was generated.
* The time is measured as the difference, in milliseconds, between that time
* and midnight, January 1, 1970 UTC.
* </pre>
*/
public long getReportEndTime() {
return reportEndTime_;
}
// optional uint32 info_server_port = 9;
public static final int INFO_SERVER_PORT_FIELD_NUMBER = 9;
private int infoServerPort_;
/**
* <code>optional uint32 info_server_port = 9;</code>
*
* <pre>
**
* The port number that this region server is hosting an info server on.
* </pre>
*/
public boolean hasInfoServerPort() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* <code>optional uint32 info_server_port = 9;</code>
*
* <pre>
**
* The port number that this region server is hosting an info server on.
* </pre>
*/
public int getInfoServerPort() {
return infoServerPort_;
}
// repeated .ReplicationLoadSource replLoadSource = 10;
public static final int REPLLOADSOURCE_FIELD_NUMBER = 10;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource> replLoadSource_;
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The ReplicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource> getReplLoadSourceList() {
return replLoadSource_;
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The ReplicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder>
getReplLoadSourceOrBuilderList() {
return replLoadSource_;
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The ReplicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public int getReplLoadSourceCount() {
return replLoadSource_.size();
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The ReplicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index) {
return replLoadSource_.get(index);
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The ReplicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder(
int index) {
return replLoadSource_.get(index);
}
// optional .ReplicationLoadSink replLoadSink = 11;
public static final int REPLLOADSINK_FIELD_NUMBER = 11;
private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink replLoadSink_;
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The ReplicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
public boolean hasReplLoadSink() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The ReplicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink() {
return replLoadSink_;
}
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The ReplicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder() {
return replLoadSink_;
}
private void initFields() {
numberOfRequests_ = 0L;
totalNumberOfRequests_ = 0L;
usedHeapMB_ = 0;
maxHeapMB_ = 0;
regionLoads_ = java.util.Collections.emptyList();
coprocessors_ = java.util.Collections.emptyList();
reportStartTime_ = 0L;
reportEndTime_ = 0L;
infoServerPort_ = 0;
replLoadSource_ = java.util.Collections.emptyList();
replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance();
}
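// memoizedIsInitialized caches the result of isInitialized() below:
// -1 means not yet computed, 0 means known-uninitialized, and 1 means
// known-initialized.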
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
for (int i = 0; i < getRegionLoadsCount(); i++) {
if (!getRegionLoads(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
for (int i = 0; i < getCoprocessorsCount(); i++) {
if (!getCoprocessors(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
for (int i = 0; i < getReplLoadSourceCount(); i++) {
if (!getReplLoadSource(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasReplLoadSink()) {
if (!getReplLoadSink().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeUInt64(1, numberOfRequests_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeUInt64(2, totalNumberOfRequests_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeUInt32(3, usedHeapMB_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeUInt32(4, maxHeapMB_);
}
for (int i = 0; i < regionLoads_.size(); i++) {
output.writeMessage(5, regionLoads_.get(i));
}
for (int i = 0; i < coprocessors_.size(); i++) {
output.writeMessage(6, coprocessors_.get(i));
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
output.writeUInt64(7, reportStartTime_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
output.writeUInt64(8, reportEndTime_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
output.writeUInt32(9, infoServerPort_);
}
for (int i = 0; i < replLoadSource_.size(); i++) {
output.writeMessage(10, replLoadSource_.get(i));
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
output.writeMessage(11, replLoadSink_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(1, numberOfRequests_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(2, totalNumberOfRequests_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(3, usedHeapMB_);
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(4, maxHeapMB_);
}
for (int i = 0; i < regionLoads_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, regionLoads_.get(i));
}
for (int i = 0; i < coprocessors_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, coprocessors_.get(i));
}
if (((bitField0_ & 0x00000010) == 0x00000010)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(7, reportStartTime_);
}
if (((bitField0_ & 0x00000020) == 0x00000020)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt64Size(8, reportEndTime_);
}
if (((bitField0_ & 0x00000040) == 0x00000040)) {
size += com.google.protobuf.CodedOutputStream
.computeUInt32Size(9, infoServerPort_);
}
for (int i = 0; i < replLoadSource_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(10, replLoadSource_.get(i));
}
if (((bitField0_ & 0x00000080) == 0x00000080)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(11, replLoadSink_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad) obj;
boolean result = true;
result = result && (hasNumberOfRequests() == other.hasNumberOfRequests());
if (hasNumberOfRequests()) {
result = result && (getNumberOfRequests()
== other.getNumberOfRequests());
}
result = result && (hasTotalNumberOfRequests() == other.hasTotalNumberOfRequests());
if (hasTotalNumberOfRequests()) {
result = result && (getTotalNumberOfRequests()
== other.getTotalNumberOfRequests());
}
result = result && (hasUsedHeapMB() == other.hasUsedHeapMB());
if (hasUsedHeapMB()) {
result = result && (getUsedHeapMB()
== other.getUsedHeapMB());
}
result = result && (hasMaxHeapMB() == other.hasMaxHeapMB());
if (hasMaxHeapMB()) {
result = result && (getMaxHeapMB()
== other.getMaxHeapMB());
}
result = result && getRegionLoadsList()
.equals(other.getRegionLoadsList());
result = result && getCoprocessorsList()
.equals(other.getCoprocessorsList());
result = result && (hasReportStartTime() == other.hasReportStartTime());
if (hasReportStartTime()) {
result = result && (getReportStartTime()
== other.getReportStartTime());
}
result = result && (hasReportEndTime() == other.hasReportEndTime());
if (hasReportEndTime()) {
result = result && (getReportEndTime()
== other.getReportEndTime());
}
result = result && (hasInfoServerPort() == other.hasInfoServerPort());
if (hasInfoServerPort()) {
result = result && (getInfoServerPort()
== other.getInfoServerPort());
}
result = result && getReplLoadSourceList()
.equals(other.getReplLoadSourceList());
result = result && (hasReplLoadSink() == other.hasReplLoadSink());
if (hasReplLoadSink()) {
result = result && getReplLoadSink()
.equals(other.getReplLoadSink());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasNumberOfRequests()) {
hash = (37 * hash) + NUMBER_OF_REQUESTS_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getNumberOfRequests());
}
if (hasTotalNumberOfRequests()) {
hash = (37 * hash) + TOTAL_NUMBER_OF_REQUESTS_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getTotalNumberOfRequests());
}
if (hasUsedHeapMB()) {
hash = (37 * hash) + USED_HEAP_MB_FIELD_NUMBER;
hash = (53 * hash) + getUsedHeapMB();
}
if (hasMaxHeapMB()) {
hash = (37 * hash) + MAX_HEAP_MB_FIELD_NUMBER;
hash = (53 * hash) + getMaxHeapMB();
}
if (getRegionLoadsCount() > 0) {
hash = (37 * hash) + REGION_LOADS_FIELD_NUMBER;
hash = (53 * hash) + getRegionLoadsList().hashCode();
}
if (getCoprocessorsCount() > 0) {
hash = (37 * hash) + COPROCESSORS_FIELD_NUMBER;
hash = (53 * hash) + getCoprocessorsList().hashCode();
}
if (hasReportStartTime()) {
hash = (37 * hash) + REPORT_START_TIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getReportStartTime());
}
if (hasReportEndTime()) {
hash = (37 * hash) + REPORT_END_TIME_FIELD_NUMBER;
hash = (53 * hash) + hashLong(getReportEndTime());
}
if (hasInfoServerPort()) {
hash = (37 * hash) + INFO_SERVER_PORT_FIELD_NUMBER;
hash = (53 * hash) + getInfoServerPort();
}
if (getReplLoadSourceCount() > 0) {
hash = (37 * hash) + REPLLOADSOURCE_FIELD_NUMBER;
hash = (53 * hash) + getReplLoadSourceList().hashCode();
}
if (hasReplLoadSink()) {
hash = (37 * hash) + REPLLOADSINK_FIELD_NUMBER;
hash = (53 * hash) + getReplLoadSink().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
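// Usage sketch (editorial addition, not generated code): a serialize/parse
// round trip through the static parsers above. toByteArray() is inherited
// from the protobuf runtime's message base class.
//
//   byte[] bytes = load.toByteArray();
//   ClusterStatusProtos.ServerLoad copy =
//       ClusterStatusProtos.ServerLoad.parseFrom(bytes);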
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code ServerLoad}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getRegionLoadsFieldBuilder();
getCoprocessorsFieldBuilder();
getReplLoadSourceFieldBuilder();
getReplLoadSinkFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
numberOfRequests_ = 0L;
bitField0_ = (bitField0_ & ~0x00000001);
totalNumberOfRequests_ = 0L;
bitField0_ = (bitField0_ & ~0x00000002);
usedHeapMB_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
maxHeapMB_ = 0;
bitField0_ = (bitField0_ & ~0x00000008);
if (regionLoadsBuilder_ == null) {
regionLoads_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000010);
} else {
regionLoadsBuilder_.clear();
}
if (coprocessorsBuilder_ == null) {
coprocessors_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
} else {
coprocessorsBuilder_.clear();
}
reportStartTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000040);
reportEndTime_ = 0L;
bitField0_ = (bitField0_ & ~0x00000080);
infoServerPort_ = 0;
bitField0_ = (bitField0_ & ~0x00000100);
if (replLoadSourceBuilder_ == null) {
replLoadSource_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000200);
} else {
replLoadSourceBuilder_.clear();
}
if (replLoadSinkBuilder_ == null) {
replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance();
} else {
replLoadSinkBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000400);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ServerLoad_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad build() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
result.numberOfRequests_ = numberOfRequests_;
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
result.totalNumberOfRequests_ = totalNumberOfRequests_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.usedHeapMB_ = usedHeapMB_;
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
to_bitField0_ |= 0x00000008;
}
result.maxHeapMB_ = maxHeapMB_;
if (regionLoadsBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010)) {
regionLoads_ = java.util.Collections.unmodifiableList(regionLoads_);
bitField0_ = (bitField0_ & ~0x00000010);
}
result.regionLoads_ = regionLoads_;
} else {
result.regionLoads_ = regionLoadsBuilder_.build();
}
if (coprocessorsBuilder_ == null) {
if (((bitField0_ & 0x00000020) == 0x00000020)) {
coprocessors_ = java.util.Collections.unmodifiableList(coprocessors_);
bitField0_ = (bitField0_ & ~0x00000020);
}
result.coprocessors_ = coprocessors_;
} else {
result.coprocessors_ = coprocessorsBuilder_.build();
}
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000010;
}
result.reportStartTime_ = reportStartTime_;
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
to_bitField0_ |= 0x00000020;
}
result.reportEndTime_ = reportEndTime_;
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000040;
}
result.infoServerPort_ = infoServerPort_;
if (replLoadSourceBuilder_ == null) {
if (((bitField0_ & 0x00000200) == 0x00000200)) {
replLoadSource_ = java.util.Collections.unmodifiableList(replLoadSource_);
bitField0_ = (bitField0_ & ~0x00000200);
}
result.replLoadSource_ = replLoadSource_;
} else {
result.replLoadSource_ = replLoadSourceBuilder_.build();
}
if (((from_bitField0_ & 0x00000400) == 0x00000400)) {
to_bitField0_ |= 0x00000080;
}
if (replLoadSinkBuilder_ == null) {
result.replLoadSink_ = replLoadSink_;
} else {
result.replLoadSink_ = replLoadSinkBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
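// Note: buildPartial() above remaps builder bit positions to message bit
// positions. The builder's bitField0_ reserves bits 0x10, 0x20, and 0x200
// for the repeated fields (region_loads, coprocessors, replLoadSource),
// which get no presence bits in the built message, so e.g. the builder's
// 0x40 (report_start_time) becomes 0x10 in the message.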
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) return this;
if (other.hasNumberOfRequests()) {
setNumberOfRequests(other.getNumberOfRequests());
}
if (other.hasTotalNumberOfRequests()) {
setTotalNumberOfRequests(other.getTotalNumberOfRequests());
}
if (other.hasUsedHeapMB()) {
setUsedHeapMB(other.getUsedHeapMB());
}
if (other.hasMaxHeapMB()) {
setMaxHeapMB(other.getMaxHeapMB());
}
if (regionLoadsBuilder_ == null) {
if (!other.regionLoads_.isEmpty()) {
if (regionLoads_.isEmpty()) {
regionLoads_ = other.regionLoads_;
bitField0_ = (bitField0_ & ~0x00000010);
} else {
ensureRegionLoadsIsMutable();
regionLoads_.addAll(other.regionLoads_);
}
onChanged();
}
} else {
if (!other.regionLoads_.isEmpty()) {
if (regionLoadsBuilder_.isEmpty()) {
regionLoadsBuilder_.dispose();
regionLoadsBuilder_ = null;
regionLoads_ = other.regionLoads_;
bitField0_ = (bitField0_ & ~0x00000010);
regionLoadsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getRegionLoadsFieldBuilder() : null;
} else {
regionLoadsBuilder_.addAllMessages(other.regionLoads_);
}
}
}
if (coprocessorsBuilder_ == null) {
if (!other.coprocessors_.isEmpty()) {
if (coprocessors_.isEmpty()) {
coprocessors_ = other.coprocessors_;
bitField0_ = (bitField0_ & ~0x00000020);
} else {
ensureCoprocessorsIsMutable();
coprocessors_.addAll(other.coprocessors_);
}
onChanged();
}
} else {
if (!other.coprocessors_.isEmpty()) {
if (coprocessorsBuilder_.isEmpty()) {
coprocessorsBuilder_.dispose();
coprocessorsBuilder_ = null;
coprocessors_ = other.coprocessors_;
bitField0_ = (bitField0_ & ~0x00000020);
coprocessorsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getCoprocessorsFieldBuilder() : null;
} else {
coprocessorsBuilder_.addAllMessages(other.coprocessors_);
}
}
}
if (other.hasReportStartTime()) {
setReportStartTime(other.getReportStartTime());
}
if (other.hasReportEndTime()) {
setReportEndTime(other.getReportEndTime());
}
if (other.hasInfoServerPort()) {
setInfoServerPort(other.getInfoServerPort());
}
if (replLoadSourceBuilder_ == null) {
if (!other.replLoadSource_.isEmpty()) {
if (replLoadSource_.isEmpty()) {
replLoadSource_ = other.replLoadSource_;
bitField0_ = (bitField0_ & ~0x00000200);
} else {
ensureReplLoadSourceIsMutable();
replLoadSource_.addAll(other.replLoadSource_);
}
onChanged();
}
} else {
if (!other.replLoadSource_.isEmpty()) {
if (replLoadSourceBuilder_.isEmpty()) {
replLoadSourceBuilder_.dispose();
replLoadSourceBuilder_ = null;
replLoadSource_ = other.replLoadSource_;
bitField0_ = (bitField0_ & ~0x00000200);
replLoadSourceBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getReplLoadSourceFieldBuilder() : null;
} else {
replLoadSourceBuilder_.addAllMessages(other.replLoadSource_);
}
}
}
if (other.hasReplLoadSink()) {
mergeReplLoadSink(other.getReplLoadSink());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
for (int i = 0; i < getRegionLoadsCount(); i++) {
if (!getRegionLoads(i).isInitialized()) {
return false;
}
}
for (int i = 0; i < getCoprocessorsCount(); i++) {
if (!getCoprocessors(i).isInitialized()) {
return false;
}
}
for (int i = 0; i < getReplLoadSourceCount(); i++) {
if (!getReplLoadSource(i).isInitialized()) {
return false;
}
}
if (hasReplLoadSink()) {
if (!getReplLoadSink().isInitialized()) {
return false;
}
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional uint64 number_of_requests = 1;
private long numberOfRequests_ ;
/**
* <code>optional uint64 number_of_requests = 1;</code>
*
* <pre>
** Number of requests since last report.
* </pre>
*/
public boolean hasNumberOfRequests() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>optional uint64 number_of_requests = 1;</code>
*
* <pre>
** Number of requests since last report.
* </pre>
*/
public long getNumberOfRequests() {
return numberOfRequests_;
}
/**
* <code>optional uint64 number_of_requests = 1;</code>
*
* <pre>
** Number of requests since last report.
* </pre>
*/
public Builder setNumberOfRequests(long value) {
bitField0_ |= 0x00000001;
numberOfRequests_ = value;
onChanged();
return this;
}
/**
* <code>optional uint64 number_of_requests = 1;</code>
*
* <pre>
** Number of requests since last report.
* </pre>
*/
public Builder clearNumberOfRequests() {
bitField0_ = (bitField0_ & ~0x00000001);
numberOfRequests_ = 0L;
onChanged();
return this;
}
// optional uint64 total_number_of_requests = 2;
private long totalNumberOfRequests_ ;
/**
* <code>optional uint64 total_number_of_requests = 2;</code>
*
* <pre>
** Total Number of requests from the start of the region server.
* </pre>
*/
public boolean hasTotalNumberOfRequests() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>optional uint64 total_number_of_requests = 2;</code>
*
* <pre>
** Total Number of requests from the start of the region server.
* </pre>
*/
public long getTotalNumberOfRequests() {
return totalNumberOfRequests_;
}
/**
* <code>optional uint64 total_number_of_requests = 2;</code>
*
* <pre>
** Total Number of requests from the start of the region server.
* </pre>
*/
public Builder setTotalNumberOfRequests(long value) {
bitField0_ |= 0x00000002;
totalNumberOfRequests_ = value;
onChanged();
return this;
}
/**
* <code>optional uint64 total_number_of_requests = 2;</code>
*
* <pre>
** Total Number of requests from the start of the region server.
* </pre>
*/
public Builder clearTotalNumberOfRequests() {
bitField0_ = (bitField0_ & ~0x00000002);
totalNumberOfRequests_ = 0L;
onChanged();
return this;
}
// optional uint32 used_heap_MB = 3;
private int usedHeapMB_ ;
/**
* <code>optional uint32 used_heap_MB = 3;</code>
*
* <pre>
** the amount of used heap, in MB.
* </pre>
*/
public boolean hasUsedHeapMB() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional uint32 used_heap_MB = 3;</code>
*
* <pre>
** the amount of used heap, in MB.
* </pre>
*/
public int getUsedHeapMB() {
return usedHeapMB_;
}
/**
* <code>optional uint32 used_heap_MB = 3;</code>
*
* <pre>
** the amount of used heap, in MB.
* </pre>
*/
public Builder setUsedHeapMB(int value) {
bitField0_ |= 0x00000004;
usedHeapMB_ = value;
onChanged();
return this;
}
/**
* <code>optional uint32 used_heap_MB = 3;</code>
*
* <pre>
** the amount of used heap, in MB.
* </pre>
*/
public Builder clearUsedHeapMB() {
bitField0_ = (bitField0_ & ~0x00000004);
usedHeapMB_ = 0;
onChanged();
return this;
}
// optional uint32 max_heap_MB = 4;
private int maxHeapMB_ ;
/**
* <code>optional uint32 max_heap_MB = 4;</code>
*
* <pre>
** the maximum allowable size of the heap, in MB.
* </pre>
*/
public boolean hasMaxHeapMB() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* <code>optional uint32 max_heap_MB = 4;</code>
*
* <pre>
** the maximum allowable size of the heap, in MB.
* </pre>
*/
public int getMaxHeapMB() {
return maxHeapMB_;
}
/**
* <code>optional uint32 max_heap_MB = 4;</code>
*
* <pre>
** the maximum allowable size of the heap, in MB.
* </pre>
*/
public Builder setMaxHeapMB(int value) {
bitField0_ |= 0x00000008;
maxHeapMB_ = value;
onChanged();
return this;
}
/**
* <code>optional uint32 max_heap_MB = 4;</code>
*
* <pre>
** the maximum allowable size of the heap, in MB.
* </pre>
*/
public Builder clearMaxHeapMB() {
bitField0_ = (bitField0_ & ~0x00000008);
maxHeapMB_ = 0;
onChanged();
return this;
}
// repeated .RegionLoad region_loads = 5;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> regionLoads_ =
java.util.Collections.emptyList();
private void ensureRegionLoadsIsMutable() {
if (!((bitField0_ & 0x00000010) == 0x00000010)) {
regionLoads_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad>(regionLoads_);
bitField0_ |= 0x00000010;
}
}
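// ensureRegionLoadsIsMutable() gives the list copy-on-write semantics:
// mergeFrom() may alias another message's immutable regionLoads_ list
// directly, so the list is copied into a fresh ArrayList (and bit 0x10
// set) before the first local mutation.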
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder> regionLoadsBuilder_;
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> getRegionLoadsList() {
if (regionLoadsBuilder_ == null) {
return java.util.Collections.unmodifiableList(regionLoads_);
} else {
return regionLoadsBuilder_.getMessageList();
}
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public int getRegionLoadsCount() {
if (regionLoadsBuilder_ == null) {
return regionLoads_.size();
} else {
return regionLoadsBuilder_.getCount();
}
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad getRegionLoads(int index) {
if (regionLoadsBuilder_ == null) {
return regionLoads_.get(index);
} else {
return regionLoadsBuilder_.getMessage(index);
}
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public Builder setRegionLoads(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) {
if (regionLoadsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureRegionLoadsIsMutable();
regionLoads_.set(index, value);
onChanged();
} else {
regionLoadsBuilder_.setMessage(index, value);
}
return this;
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public Builder setRegionLoads(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
if (regionLoadsBuilder_ == null) {
ensureRegionLoadsIsMutable();
regionLoads_.set(index, builderForValue.build());
onChanged();
} else {
regionLoadsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public Builder addRegionLoads(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) {
if (regionLoadsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureRegionLoadsIsMutable();
regionLoads_.add(value);
onChanged();
} else {
regionLoadsBuilder_.addMessage(value);
}
return this;
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public Builder addRegionLoads(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad value) {
if (regionLoadsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureRegionLoadsIsMutable();
regionLoads_.add(index, value);
onChanged();
} else {
regionLoadsBuilder_.addMessage(index, value);
}
return this;
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public Builder addRegionLoads(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
if (regionLoadsBuilder_ == null) {
ensureRegionLoadsIsMutable();
regionLoads_.add(builderForValue.build());
onChanged();
} else {
regionLoadsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public Builder addRegionLoads(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder builderForValue) {
if (regionLoadsBuilder_ == null) {
ensureRegionLoadsIsMutable();
regionLoads_.add(index, builderForValue.build());
onChanged();
} else {
regionLoadsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public Builder addAllRegionLoads(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad> values) {
if (regionLoadsBuilder_ == null) {
ensureRegionLoadsIsMutable();
super.addAll(values, regionLoads_);
onChanged();
} else {
regionLoadsBuilder_.addAllMessages(values);
}
return this;
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public Builder clearRegionLoads() {
if (regionLoadsBuilder_ == null) {
regionLoads_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000010);
onChanged();
} else {
regionLoadsBuilder_.clear();
}
return this;
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public Builder removeRegionLoads(int index) {
if (regionLoadsBuilder_ == null) {
ensureRegionLoadsIsMutable();
regionLoads_.remove(index);
onChanged();
} else {
regionLoadsBuilder_.remove(index);
}
return this;
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder getRegionLoadsBuilder(
int index) {
return getRegionLoadsFieldBuilder().getBuilder(index);
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder getRegionLoadsOrBuilder(
int index) {
if (regionLoadsBuilder_ == null) {
return regionLoads_.get(index);
} else {
return regionLoadsBuilder_.getMessageOrBuilder(index);
}
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder>
getRegionLoadsOrBuilderList() {
if (regionLoadsBuilder_ != null) {
return regionLoadsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(regionLoads_);
}
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder addRegionLoadsBuilder() {
return getRegionLoadsFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance());
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder addRegionLoadsBuilder(
int index) {
return getRegionLoadsFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance());
}
/**
* <code>repeated .RegionLoad region_loads = 5;</code>
*
* <pre>
** Information on the load of individual regions.
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder>
getRegionLoadsBuilderList() {
return getRegionLoadsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder>
getRegionLoadsFieldBuilder() {
if (regionLoadsBuilder_ == null) {
regionLoadsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoadOrBuilder>(
regionLoads_,
((bitField0_ & 0x00000010) == 0x00000010),
getParentForChildren(),
isClean());
regionLoads_ = null;
}
return regionLoadsBuilder_;
}
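// --- Illustrative usage sketch (editor-added, not emitted by protoc) ---
// A minimal example of driving the repeated region_loads accessors above:
// append a default RegionLoad, replace it through the Builder overload, then
// drop it again. Real callers would populate RegionLoad's required fields
// before calling build() on the enclosing ServerLoad.
private void exampleRegionLoadsUsage() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad load =
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionLoad.getDefaultInstance();
addRegionLoads(load);                         // append one entry
setRegionLoads(0, load.toBuilder());          // replace entry 0 via a sub-builder
removeRegionLoads(getRegionLoadsCount() - 1); // remove the last entry again
}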
// repeated .Coprocessor coprocessors = 6;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> coprocessors_ =
java.util.Collections.emptyList();
private void ensureCoprocessorsIsMutable() {
if (!((bitField0_ & 0x00000020) == 0x00000020)) {
coprocessors_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor>(coprocessors_);
bitField0_ |= 0x00000020;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> coprocessorsBuilder_;
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> getCoprocessorsList() {
if (coprocessorsBuilder_ == null) {
return java.util.Collections.unmodifiableList(coprocessors_);
} else {
return coprocessorsBuilder_.getMessageList();
}
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public int getCoprocessorsCount() {
if (coprocessorsBuilder_ == null) {
return coprocessors_.size();
} else {
return coprocessorsBuilder_.getCount();
}
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getCoprocessors(int index) {
if (coprocessorsBuilder_ == null) {
return coprocessors_.get(index);
} else {
return coprocessorsBuilder_.getMessage(index);
}
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public Builder setCoprocessors(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) {
if (coprocessorsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCoprocessorsIsMutable();
coprocessors_.set(index, value);
onChanged();
} else {
coprocessorsBuilder_.setMessage(index, value);
}
return this;
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public Builder setCoprocessors(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) {
if (coprocessorsBuilder_ == null) {
ensureCoprocessorsIsMutable();
coprocessors_.set(index, builderForValue.build());
onChanged();
} else {
coprocessorsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public Builder addCoprocessors(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) {
if (coprocessorsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCoprocessorsIsMutable();
coprocessors_.add(value);
onChanged();
} else {
coprocessorsBuilder_.addMessage(value);
}
return this;
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public Builder addCoprocessors(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) {
if (coprocessorsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureCoprocessorsIsMutable();
coprocessors_.add(index, value);
onChanged();
} else {
coprocessorsBuilder_.addMessage(index, value);
}
return this;
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public Builder addCoprocessors(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) {
if (coprocessorsBuilder_ == null) {
ensureCoprocessorsIsMutable();
coprocessors_.add(builderForValue.build());
onChanged();
} else {
coprocessorsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public Builder addCoprocessors(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) {
if (coprocessorsBuilder_ == null) {
ensureCoprocessorsIsMutable();
coprocessors_.add(index, builderForValue.build());
onChanged();
} else {
coprocessorsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public Builder addAllCoprocessors(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> values) {
if (coprocessorsBuilder_ == null) {
ensureCoprocessorsIsMutable();
super.addAll(values, coprocessors_);
onChanged();
} else {
coprocessorsBuilder_.addAllMessages(values);
}
return this;
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public Builder clearCoprocessors() {
if (coprocessorsBuilder_ == null) {
coprocessors_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
onChanged();
} else {
coprocessorsBuilder_.clear();
}
return this;
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public Builder removeCoprocessors(int index) {
if (coprocessorsBuilder_ == null) {
ensureCoprocessorsIsMutable();
coprocessors_.remove(index);
onChanged();
} else {
coprocessorsBuilder_.remove(index);
}
return this;
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder getCoprocessorsBuilder(
int index) {
return getCoprocessorsFieldBuilder().getBuilder(index);
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getCoprocessorsOrBuilder(
int index) {
if (coprocessorsBuilder_ == null) {
return coprocessors_.get(index);
} else {
return coprocessorsBuilder_.getMessageOrBuilder(index);
}
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>
getCoprocessorsOrBuilderList() {
if (coprocessorsBuilder_ != null) {
return coprocessorsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(coprocessors_);
}
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addCoprocessorsBuilder() {
return getCoprocessorsFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance());
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addCoprocessorsBuilder(
int index) {
return getCoprocessorsFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance());
}
/**
* <code>repeated .Coprocessor coprocessors = 6;</code>
*
* <pre>
**
* Regionserver-level coprocessors, e.g., WALObserver implementations.
* Region-level coprocessors, on the other hand, are stored inside RegionLoad
* objects.
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder>
getCoprocessorsBuilderList() {
return getCoprocessorsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>
getCoprocessorsFieldBuilder() {
if (coprocessorsBuilder_ == null) {
coprocessorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>(
coprocessors_,
((bitField0_ & 0x00000020) == 0x00000020),
getParentForChildren(),
isClean());
coprocessors_ = null;
}
return coprocessorsBuilder_;
}
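// --- Illustrative usage sketch (editor-added, not emitted by protoc) ---
// Registering a server-level coprocessor entry through the accessors above.
// Coprocessor carries a required "name"; the class name used here is a
// hypothetical placeholder.
private void exampleCoprocessorsUsage() {
addCoprocessors(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.newBuilder()
.setName("org.example.MyWALObserver"));
}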
// optional uint64 report_start_time = 7;
private long reportStartTime_ ;
/**
* <code>optional uint64 report_start_time = 7;</code>
*
* <pre>
**
* Time when incremental (non-total) counts began being calculated (e.g. number_of_requests).
* Time is measured in milliseconds since midnight, January 1, 1970 UTC.
* </pre>
*/
public boolean hasReportStartTime() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* <code>optional uint64 report_start_time = 7;</code>
*
* <pre>
**
* Time when incremental (non-total) counts began being calculated (e.g. number_of_requests).
* Time is measured in milliseconds since midnight, January 1, 1970 UTC.
* </pre>
*/
public long getReportStartTime() {
return reportStartTime_;
}
/**
* <code>optional uint64 report_start_time = 7;</code>
*
* <pre>
**
* Time when incremental (non-total) counts began being calculated (e.g. number_of_requests).
* Time is measured in milliseconds since midnight, January 1, 1970 UTC.
* </pre>
*/
public Builder setReportStartTime(long value) {
bitField0_ |= 0x00000040;
reportStartTime_ = value;
onChanged();
return this;
}
/**
* <code>optional uint64 report_start_time = 7;</code>
*
* <pre>
**
* Time when incremental (non-total) counts began being calculated (e.g. number_of_requests).
* Time is measured in milliseconds since midnight, January 1, 1970 UTC.
* </pre>
*/
public Builder clearReportStartTime() {
bitField0_ = (bitField0_ & ~0x00000040);
reportStartTime_ = 0L;
onChanged();
return this;
}
// optional uint64 report_end_time = 8;
private long reportEndTime_ ;
/**
* <code>optional uint64 report_end_time = 8;</code>
*
* <pre>
**
* Time when the report was generated.
* Time is measured in milliseconds since midnight, January 1, 1970 UTC.
* </pre>
*/
public boolean hasReportEndTime() {
return ((bitField0_ & 0x00000080) == 0x00000080);
}
/**
* <code>optional uint64 report_end_time = 8;</code>
*
* <pre>
**
* Time when the report was generated.
* Time is measured in milliseconds since midnight, January 1, 1970 UTC.
* </pre>
*/
public long getReportEndTime() {
return reportEndTime_;
}
/**
* <code>optional uint64 report_end_time = 8;</code>
*
* <pre>
**
* Time when the report was generated.
* Time is measured in milliseconds since midnight, January 1, 1970 UTC.
* </pre>
*/
public Builder setReportEndTime(long value) {
bitField0_ |= 0x00000080;
reportEndTime_ = value;
onChanged();
return this;
}
/**
* <code>optional uint64 report_end_time = 8;</code>
*
* <pre>
**
* Time when the report was generated.
* Time is measured in milliseconds since midnight, January 1, 1970 UTC.
* </pre>
*/
public Builder clearReportEndTime() {
bitField0_ = (bitField0_ & ~0x00000080);
reportEndTime_ = 0L;
onChanged();
return this;
}
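// --- Illustrative usage sketch (editor-added, not emitted by protoc) ---
// Both report timestamps are plain epoch milliseconds, so a reporting
// window is recorded as two calls; the 3-second window here is hypothetical.
private void exampleReportWindowUsage() {
long now = System.currentTimeMillis();
setReportStartTime(now - 3000L); // when incremental counters were reset
setReportEndTime(now);           // when this report was generated
}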
// optional uint32 info_server_port = 9;
private int infoServerPort_ ;
/**
* <code>optional uint32 info_server_port = 9;</code>
*
* <pre>
**
* The port number that this region server is hosting an info server on.
* </pre>
*/
public boolean hasInfoServerPort() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* <code>optional uint32 info_server_port = 9;</code>
*
* <pre>
**
* The port number that this region server is hosting an info server on.
* </pre>
*/
public int getInfoServerPort() {
return infoServerPort_;
}
/**
* <code>optional uint32 info_server_port = 9;</code>
*
* <pre>
**
* The port number that this region server is hosting an info server on.
* </pre>
*/
public Builder setInfoServerPort(int value) {
bitField0_ |= 0x00000100;
infoServerPort_ = value;
onChanged();
return this;
}
/**
* <code>optional uint32 info_server_port = 9;</code>
*
* <pre>
**
* The port number that this region server is hosting an info server on.
* </pre>
*/
public Builder clearInfoServerPort() {
bitField0_ = (bitField0_ & ~0x00000100);
infoServerPort_ = 0;
onChanged();
return this;
}
// repeated .ReplicationLoadSource replLoadSource = 10;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource> replLoadSource_ =
java.util.Collections.emptyList();
private void ensureReplLoadSourceIsMutable() {
if (!((bitField0_ & 0x00000200) == 0x00000200)) {
replLoadSource_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource>(replLoadSource_);
bitField0_ |= 0x00000200;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder> replLoadSourceBuilder_;
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource> getReplLoadSourceList() {
if (replLoadSourceBuilder_ == null) {
return java.util.Collections.unmodifiableList(replLoadSource_);
} else {
return replLoadSourceBuilder_.getMessageList();
}
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public int getReplLoadSourceCount() {
if (replLoadSourceBuilder_ == null) {
return replLoadSource_.size();
} else {
return replLoadSourceBuilder_.getCount();
}
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource getReplLoadSource(int index) {
if (replLoadSourceBuilder_ == null) {
return replLoadSource_.get(index);
} else {
return replLoadSourceBuilder_.getMessage(index);
}
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public Builder setReplLoadSource(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) {
if (replLoadSourceBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReplLoadSourceIsMutable();
replLoadSource_.set(index, value);
onChanged();
} else {
replLoadSourceBuilder_.setMessage(index, value);
}
return this;
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public Builder setReplLoadSource(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) {
if (replLoadSourceBuilder_ == null) {
ensureReplLoadSourceIsMutable();
replLoadSource_.set(index, builderForValue.build());
onChanged();
} else {
replLoadSourceBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public Builder addReplLoadSource(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) {
if (replLoadSourceBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReplLoadSourceIsMutable();
replLoadSource_.add(value);
onChanged();
} else {
replLoadSourceBuilder_.addMessage(value);
}
return this;
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public Builder addReplLoadSource(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource value) {
if (replLoadSourceBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureReplLoadSourceIsMutable();
replLoadSource_.add(index, value);
onChanged();
} else {
replLoadSourceBuilder_.addMessage(index, value);
}
return this;
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public Builder addReplLoadSource(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) {
if (replLoadSourceBuilder_ == null) {
ensureReplLoadSourceIsMutable();
replLoadSource_.add(builderForValue.build());
onChanged();
} else {
replLoadSourceBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public Builder addReplLoadSource(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder builderForValue) {
if (replLoadSourceBuilder_ == null) {
ensureReplLoadSourceIsMutable();
replLoadSource_.add(index, builderForValue.build());
onChanged();
} else {
replLoadSourceBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public Builder addAllReplLoadSource(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource> values) {
if (replLoadSourceBuilder_ == null) {
ensureReplLoadSourceIsMutable();
super.addAll(values, replLoadSource_);
onChanged();
} else {
replLoadSourceBuilder_.addAllMessages(values);
}
return this;
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public Builder clearReplLoadSource() {
if (replLoadSourceBuilder_ == null) {
replLoadSource_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000200);
onChanged();
} else {
replLoadSourceBuilder_.clear();
}
return this;
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public Builder removeReplLoadSource(int index) {
if (replLoadSourceBuilder_ == null) {
ensureReplLoadSourceIsMutable();
replLoadSource_.remove(index);
onChanged();
} else {
replLoadSourceBuilder_.remove(index);
}
return this;
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder getReplLoadSourceBuilder(
int index) {
return getReplLoadSourceFieldBuilder().getBuilder(index);
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder getReplLoadSourceOrBuilder(
int index) {
if (replLoadSourceBuilder_ == null) {
return replLoadSource_.get(index);
} else {
return replLoadSourceBuilder_.getMessageOrBuilder(index);
}
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder>
getReplLoadSourceOrBuilderList() {
if (replLoadSourceBuilder_ != null) {
return replLoadSourceBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(replLoadSource_);
}
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder addReplLoadSourceBuilder() {
return getReplLoadSourceFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance());
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder addReplLoadSourceBuilder(
int index) {
return getReplLoadSourceFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.getDefaultInstance());
}
/**
* <code>repeated .ReplicationLoadSource replLoadSource = 10;</code>
*
* <pre>
**
* The replicationLoadSource entries describing the replication source status of this region server.
* </pre>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder>
getReplLoadSourceBuilderList() {
return getReplLoadSourceFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder>
getReplLoadSourceFieldBuilder() {
if (replLoadSourceBuilder_ == null) {
replLoadSourceBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSourceOrBuilder>(
replLoadSource_,
((bitField0_ & 0x00000200) == 0x00000200),
getParentForChildren(),
isClean());
replLoadSource_ = null;
}
return replLoadSourceBuilder_;
}
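// --- Illustrative usage sketch (editor-added, not emitted by protoc) ---
// Appending a replication source entry in place: addReplLoadSourceBuilder()
// returns a sub-builder backed by this builder, so mutations on it flow into
// the repeated field without an explicit set call. Field setters on
// ReplicationLoadSource are elided here since their names come from the .proto.
private void exampleReplLoadSourceUsage() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSource.Builder src =
addReplLoadSourceBuilder();
// ... populate src, typically one entry per replication peer ...
}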
// optional .ReplicationLoadSink replLoadSink = 11;
private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder> replLoadSinkBuilder_;
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The replicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
public boolean hasReplLoadSink() {
return ((bitField0_ & 0x00000400) == 0x00000400);
}
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The replicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink getReplLoadSink() {
if (replLoadSinkBuilder_ == null) {
return replLoadSink_;
} else {
return replLoadSinkBuilder_.getMessage();
}
}
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The replicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
public Builder setReplLoadSink(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink value) {
if (replLoadSinkBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
replLoadSink_ = value;
onChanged();
} else {
replLoadSinkBuilder_.setMessage(value);
}
bitField0_ |= 0x00000400;
return this;
}
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The replicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
public Builder setReplLoadSink(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder builderForValue) {
if (replLoadSinkBuilder_ == null) {
replLoadSink_ = builderForValue.build();
onChanged();
} else {
replLoadSinkBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000400;
return this;
}
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The replicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
public Builder mergeReplLoadSink(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink value) {
if (replLoadSinkBuilder_ == null) {
if (((bitField0_ & 0x00000400) == 0x00000400) &&
replLoadSink_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance()) {
replLoadSink_ =
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.newBuilder(replLoadSink_).mergeFrom(value).buildPartial();
} else {
replLoadSink_ = value;
}
onChanged();
} else {
replLoadSinkBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000400;
return this;
}
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The replicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
public Builder clearReplLoadSink() {
if (replLoadSinkBuilder_ == null) {
replLoadSink_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.getDefaultInstance();
onChanged();
} else {
replLoadSinkBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000400);
return this;
}
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The replicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder getReplLoadSinkBuilder() {
bitField0_ |= 0x00000400;
onChanged();
return getReplLoadSinkFieldBuilder().getBuilder();
}
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The replicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder getReplLoadSinkOrBuilder() {
if (replLoadSinkBuilder_ != null) {
return replLoadSinkBuilder_.getMessageOrBuilder();
} else {
return replLoadSink_;
}
}
/**
* <code>optional .ReplicationLoadSink replLoadSink = 11;</code>
*
* <pre>
**
* The replicationLoadSink describing the replication sink status of this region server.
* </pre>
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder>
getReplLoadSinkFieldBuilder() {
if (replLoadSinkBuilder_ == null) {
replLoadSinkBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSinkOrBuilder>(
replLoadSink_,
getParentForChildren(),
isClean());
replLoadSink_ = null;
}
return replLoadSinkBuilder_;
}
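// --- Illustrative usage sketch (editor-added, not emitted by protoc) ---
// Contrast of the two writers for the optional sink field: setReplLoadSink()
// overwrites any existing value, while mergeReplLoadSink() folds the new
// value in field by field, per protobuf merge semantics.
private void exampleReplLoadSinkUsage(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink first,
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ReplicationLoadSink update) {
setReplLoadSink(first);    // replace whatever was there
mergeReplLoadSink(update); // field-by-field merge into the existing value
}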
// @@protoc_insertion_point(builder_scope:ServerLoad)
}
static {
defaultInstance = new ServerLoad(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:ServerLoad)
}
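// --- Illustrative usage sketch (editor-added, not emitted by protoc) ---
// Assembling a minimal ServerLoad report. Every top-level field of ServerLoad
// is optional or repeated, so a sparsely populated message still builds; the
// port and window values below are hypothetical.
private static ServerLoad exampleServerLoad() {
long now = System.currentTimeMillis();
return ServerLoad.newBuilder()
.setInfoServerPort(16030)        // hypothetical info-server port
.setReportStartTime(now - 3000L) // hypothetical 3-second report window
.setReportEndTime(now)
.build();
}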
public interface LiveServerInfoOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// required .ServerName server = 1;
/**
* <code>required .ServerName server = 1;</code>
*/
boolean hasServer();
/**
* <code>required .ServerName server = 1;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer();
/**
* <code>required .ServerName server = 1;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder();
// required .ServerLoad server_load = 2;
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
boolean hasServerLoad();
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getServerLoad();
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getServerLoadOrBuilder();
}
/**
* Protobuf type {@code LiveServerInfo}
*/
public static final class LiveServerInfo extends
com.google.protobuf.GeneratedMessage
implements LiveServerInfoOrBuilder {
// Use LiveServerInfo.newBuilder() to construct.
private LiveServerInfo(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private LiveServerInfo(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final LiveServerInfo defaultInstance;
public static LiveServerInfo getDefaultInstance() {
return defaultInstance;
}
public LiveServerInfo getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private LiveServerInfo(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
case 10: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = server_.toBuilder();
}
server_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(server_);
server_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = serverLoad_.toBuilder();
}
serverLoad_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(serverLoad_);
serverLoad_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_LiveServerInfo_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_LiveServerInfo_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder.class);
}
public static com.google.protobuf.Parser<LiveServerInfo> PARSER =
new com.google.protobuf.AbstractParser<LiveServerInfo>() {
public LiveServerInfo parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new LiveServerInfo(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<LiveServerInfo> getParserForType() {
return PARSER;
}
private int bitField0_;
// required .ServerName server = 1;
public static final int SERVER_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_;
/**
* <code>required .ServerName server = 1;</code>
*/
public boolean hasServer() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required .ServerName server = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() {
return server_;
}
/**
* <code>required .ServerName server = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() {
return server_;
}
// required .ServerLoad server_load = 2;
public static final int SERVER_LOAD_FIELD_NUMBER = 2;
private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad serverLoad_;
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
public boolean hasServerLoad() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getServerLoad() {
return serverLoad_;
}
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getServerLoadOrBuilder() {
return serverLoad_;
}
private void initFields() {
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
serverLoad_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (!hasServer()) {
memoizedIsInitialized = 0;
return false;
}
if (!hasServerLoad()) {
memoizedIsInitialized = 0;
return false;
}
if (!getServer().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
if (!getServerLoad().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
memoizedIsInitialized = 1;
return true;
}
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, server_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(2, serverLoad_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, server_);
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, serverLoad_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo) obj;
boolean result = true;
result = result && (hasServer() == other.hasServer());
if (hasServer()) {
result = result && getServer()
.equals(other.getServer());
}
result = result && (hasServerLoad() == other.hasServerLoad());
if (hasServerLoad()) {
result = result && getServerLoad()
.equals(other.getServerLoad());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasServer()) {
hash = (37 * hash) + SERVER_FIELD_NUMBER;
hash = (53 * hash) + getServer().hashCode();
}
if (hasServerLoad()) {
hash = (37 * hash) + SERVER_LOAD_FIELD_NUMBER;
hash = (53 * hash) + getServerLoad().hashCode();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
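// --- Illustrative usage sketch (editor-added, not emitted by protoc) ---
// Deserializing a LiveServerInfo from raw wire bytes via the static parser
// entry points above; the byte[] source is hypothetical.
private static LiveServerInfo exampleParse(byte[] wireBytes)
throws com.google.protobuf.InvalidProtocolBufferException {
return parseFrom(wireBytes);
}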
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code LiveServerInfo}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_LiveServerInfo_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_LiveServerInfo_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getServerFieldBuilder();
getServerLoadFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (serverBuilder_ == null) {
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
} else {
serverBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (serverLoadBuilder_ == null) {
serverLoad_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
} else {
serverLoadBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_LiveServerInfo_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo build() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (serverBuilder_ == null) {
result.server_ = server_;
} else {
result.server_ = serverBuilder_.build();
}
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
to_bitField0_ |= 0x00000002;
}
if (serverLoadBuilder_ == null) {
result.serverLoad_ = serverLoad_;
} else {
result.serverLoad_ = serverLoadBuilder_.build();
}
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.getDefaultInstance()) return this;
if (other.hasServer()) {
mergeServer(other.getServer());
}
if (other.hasServerLoad()) {
mergeServerLoad(other.getServerLoad());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (!hasServer()) {
return false;
}
if (!hasServerLoad()) {
return false;
}
if (!getServer().isInitialized()) {
return false;
}
if (!getServerLoad().isInitialized()) {
return false;
}
return true;
}
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// required .ServerName server = 1;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> serverBuilder_;
/**
* <code>required .ServerName server = 1;</code>
*/
public boolean hasServer() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>required .ServerName server = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getServer() {
if (serverBuilder_ == null) {
return server_;
} else {
return serverBuilder_.getMessage();
}
}
/**
* <code>required .ServerName server = 1;</code>
*/
public Builder setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (serverBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
server_ = value;
onChanged();
} else {
serverBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .ServerName server = 1;</code>
*/
public Builder setServer(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (serverBuilder_ == null) {
server_ = builderForValue.build();
onChanged();
} else {
serverBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .ServerName server = 1;</code>
*/
public Builder mergeServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (serverBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
server_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
server_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(server_).mergeFrom(value).buildPartial();
} else {
server_ = value;
}
onChanged();
} else {
serverBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>required .ServerName server = 1;</code>
*/
public Builder clearServer() {
if (serverBuilder_ == null) {
server_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
onChanged();
} else {
serverBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* <code>required .ServerName server = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getServerBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getServerFieldBuilder().getBuilder();
}
/**
* <code>required .ServerName server = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getServerOrBuilder() {
if (serverBuilder_ != null) {
return serverBuilder_.getMessageOrBuilder();
} else {
return server_;
}
}
/**
* <code>required .ServerName server = 1;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getServerFieldBuilder() {
if (serverBuilder_ == null) {
serverBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
server_,
getParentForChildren(),
isClean());
server_ = null;
}
return serverBuilder_;
}
// required .ServerLoad server_load = 2;
private org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad serverLoad_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder> serverLoadBuilder_;
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
public boolean hasServerLoad() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad getServerLoad() {
if (serverLoadBuilder_ == null) {
return serverLoad_;
} else {
return serverLoadBuilder_.getMessage();
}
}
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
public Builder setServerLoad(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
if (serverLoadBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
serverLoad_ = value;
onChanged();
} else {
serverLoadBuilder_.setMessage(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
public Builder setServerLoad(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder builderForValue) {
if (serverLoadBuilder_ == null) {
serverLoad_ = builderForValue.build();
onChanged();
} else {
serverLoadBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000002;
return this;
}
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
public Builder mergeServerLoad(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad value) {
if (serverLoadBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002) &&
serverLoad_ != org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance()) {
serverLoad_ =
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.newBuilder(serverLoad_).mergeFrom(value).buildPartial();
} else {
serverLoad_ = value;
}
onChanged();
} else {
serverLoadBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000002;
return this;
}
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
public Builder clearServerLoad() {
if (serverLoadBuilder_ == null) {
serverLoad_ = org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.getDefaultInstance();
onChanged();
} else {
serverLoadBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000002);
return this;
}
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder getServerLoadBuilder() {
bitField0_ |= 0x00000002;
onChanged();
return getServerLoadFieldBuilder().getBuilder();
}
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder getServerLoadOrBuilder() {
if (serverLoadBuilder_ != null) {
return serverLoadBuilder_.getMessageOrBuilder();
} else {
return serverLoad_;
}
}
/**
* <code>required .ServerLoad server_load = 2;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder>
getServerLoadFieldBuilder() {
if (serverLoadBuilder_ == null) {
serverLoadBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoad.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ServerLoadOrBuilder>(
serverLoad_,
getParentForChildren(),
isClean());
serverLoad_ = null;
}
return serverLoadBuilder_;
}
// @@protoc_insertion_point(builder_scope:LiveServerInfo)
}
static {
defaultInstance = new LiveServerInfo(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:LiveServerInfo)
}
public interface ClusterStatusOrBuilder
extends com.google.protobuf.MessageOrBuilder {
// optional .HBaseVersionFileContent hbase_version = 1;
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
boolean hasHbaseVersion();
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent getHbaseVersion();
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContentOrBuilder getHbaseVersionOrBuilder();
// repeated .LiveServerInfo live_servers = 2;
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo>
getLiveServersList();
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo getLiveServers(int index);
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
int getLiveServersCount();
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder>
getLiveServersOrBuilderList();
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder getLiveServersOrBuilder(
int index);
// repeated .ServerName dead_servers = 3;
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>
getDeadServersList();
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getDeadServers(int index);
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
int getDeadServersCount();
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getDeadServersOrBuilderList();
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDeadServersOrBuilder(
int index);
// repeated .RegionInTransition regions_in_transition = 4;
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition>
getRegionsInTransitionList();
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition getRegionsInTransition(int index);
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
int getRegionsInTransitionCount();
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder>
getRegionsInTransitionOrBuilderList();
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder getRegionsInTransitionOrBuilder(
int index);
// optional .ClusterId cluster_id = 5;
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
boolean hasClusterId();
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId getClusterId();
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterIdOrBuilder getClusterIdOrBuilder();
// repeated .Coprocessor master_coprocessors = 6;
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor>
getMasterCoprocessorsList();
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getMasterCoprocessors(int index);
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
int getMasterCoprocessorsCount();
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>
getMasterCoprocessorsOrBuilderList();
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getMasterCoprocessorsOrBuilder(
int index);
// optional .ServerName master = 7;
/**
* <code>optional .ServerName master = 7;</code>
*/
boolean hasMaster();
/**
* <code>optional .ServerName master = 7;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster();
/**
* <code>optional .ServerName master = 7;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder();
// repeated .ServerName backup_masters = 8;
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>
getBackupMastersList();
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getBackupMasters(int index);
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
int getBackupMastersCount();
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getBackupMastersOrBuilderList();
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getBackupMastersOrBuilder(
int index);
// optional bool balancer_on = 9;
/**
* <code>optional bool balancer_on = 9;</code>
*/
boolean hasBalancerOn();
/**
* <code>optional bool balancer_on = 9;</code>
*/
boolean getBalancerOn();
}
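// The ClusterStatus message, as reconstructed from the generated accessors
// below (field names, labels, and numbers all appear in the code; this
// sketch is for orientation only):
//
//   message ClusterStatus {
//     optional HBaseVersionFileContent hbase_version = 1;
//     repeated LiveServerInfo live_servers = 2;
//     repeated ServerName dead_servers = 3;
//     repeated RegionInTransition regions_in_transition = 4;
//     optional ClusterId cluster_id = 5;
//     repeated Coprocessor master_coprocessors = 6;
//     optional ServerName master = 7;
//     repeated ServerName backup_masters = 8;
//     optional bool balancer_on = 9;
//   }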
/**
* Protobuf type {@code ClusterStatus}
*/
public static final class ClusterStatus extends
com.google.protobuf.GeneratedMessage
implements ClusterStatusOrBuilder {
// Use ClusterStatus.newBuilder() to construct.
private ClusterStatus(com.google.protobuf.GeneratedMessage.Builder<?> builder) {
super(builder);
this.unknownFields = builder.getUnknownFields();
}
private ClusterStatus(boolean noInit) { this.unknownFields = com.google.protobuf.UnknownFieldSet.getDefaultInstance(); }
private static final ClusterStatus defaultInstance;
public static ClusterStatus getDefaultInstance() {
return defaultInstance;
}
public ClusterStatus getDefaultInstanceForType() {
return defaultInstance;
}
private final com.google.protobuf.UnknownFieldSet unknownFields;
@java.lang.Override
public final com.google.protobuf.UnknownFieldSet
getUnknownFields() {
return this.unknownFields;
}
private ClusterStatus(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
initFields();
int mutable_bitField0_ = 0;
com.google.protobuf.UnknownFieldSet.Builder unknownFields =
com.google.protobuf.UnknownFieldSet.newBuilder();
try {
boolean done = false;
while (!done) {
int tag = input.readTag();
switch (tag) {
case 0:
done = true;
break;
default: {
if (!parseUnknownField(input, unknownFields,
extensionRegistry, tag)) {
done = true;
}
break;
}
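// Each case value is a precomputed wire tag: (field_number << 3) | wire_type.
// 10 = field 1 (hbase_version, length-delimited), 18 = field 2 (live_servers),
// 26 = field 3, 34 = field 4, 42 = field 5, 50 = field 6, 58 = field 7,
// 66 = field 8, and 72 = field 9 (balancer_on, varint-encoded bool).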
case 10: {
org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder subBuilder = null;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
subBuilder = hbaseVersion_.toBuilder();
}
hbaseVersion_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(hbaseVersion_);
hbaseVersion_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000001;
break;
}
case 18: {
if (!((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
liveServers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo>();
mutable_bitField0_ |= 0x00000002;
}
liveServers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.PARSER, extensionRegistry));
break;
}
case 26: {
if (!((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
deadServers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>();
mutable_bitField0_ |= 0x00000004;
}
deadServers_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry));
break;
}
case 34: {
if (!((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
regionsInTransition_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition>();
mutable_bitField0_ |= 0x00000008;
}
regionsInTransition_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.PARSER, extensionRegistry));
break;
}
case 42: {
org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder subBuilder = null;
if (((bitField0_ & 0x00000002) == 0x00000002)) {
subBuilder = clusterId_.toBuilder();
}
clusterId_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(clusterId_);
clusterId_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000002;
break;
}
case 50: {
if (!((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
masterCoprocessors_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor>();
mutable_bitField0_ |= 0x00000020;
}
masterCoprocessors_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.PARSER, extensionRegistry));
break;
}
case 58: {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder subBuilder = null;
if (((bitField0_ & 0x00000004) == 0x00000004)) {
subBuilder = master_.toBuilder();
}
master_ = input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry);
if (subBuilder != null) {
subBuilder.mergeFrom(master_);
master_ = subBuilder.buildPartial();
}
bitField0_ |= 0x00000004;
break;
}
case 66: {
if (!((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
backupMasters_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>();
mutable_bitField0_ |= 0x00000080;
}
backupMasters_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.PARSER, extensionRegistry));
break;
}
case 72: {
bitField0_ |= 0x00000008;
balancerOn_ = input.readBool();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
throw e.setUnfinishedMessage(this);
} catch (java.io.IOException e) {
throw new com.google.protobuf.InvalidProtocolBufferException(
e.getMessage()).setUnfinishedMessage(this);
} finally {
if (((mutable_bitField0_ & 0x00000002) == 0x00000002)) {
liveServers_ = java.util.Collections.unmodifiableList(liveServers_);
}
if (((mutable_bitField0_ & 0x00000004) == 0x00000004)) {
deadServers_ = java.util.Collections.unmodifiableList(deadServers_);
}
if (((mutable_bitField0_ & 0x00000008) == 0x00000008)) {
regionsInTransition_ = java.util.Collections.unmodifiableList(regionsInTransition_);
}
if (((mutable_bitField0_ & 0x00000020) == 0x00000020)) {
masterCoprocessors_ = java.util.Collections.unmodifiableList(masterCoprocessors_);
}
if (((mutable_bitField0_ & 0x00000080) == 0x00000080)) {
backupMasters_ = java.util.Collections.unmodifiableList(backupMasters_);
}
this.unknownFields = unknownFields.build();
makeExtensionsImmutable();
}
}
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ClusterStatus_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ClusterStatus_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder.class);
}
public static com.google.protobuf.Parser<ClusterStatus> PARSER =
new com.google.protobuf.AbstractParser<ClusterStatus>() {
public ClusterStatus parsePartialFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return new ClusterStatus(input, extensionRegistry);
}
};
@java.lang.Override
public com.google.protobuf.Parser<ClusterStatus> getParserForType() {
return PARSER;
}
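// Presence bits for the singular fields only (repeated fields track presence
// by list size): 0x01 = hbase_version, 0x02 = cluster_id, 0x04 = master,
// 0x08 = balancer_on.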
private int bitField0_;
// optional .HBaseVersionFileContent hbase_version = 1;
public static final int HBASE_VERSION_FIELD_NUMBER = 1;
private org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent hbaseVersion_;
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
public boolean hasHbaseVersion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent getHbaseVersion() {
return hbaseVersion_;
}
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContentOrBuilder getHbaseVersionOrBuilder() {
return hbaseVersion_;
}
// repeated .LiveServerInfo live_servers = 2;
public static final int LIVE_SERVERS_FIELD_NUMBER = 2;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo> liveServers_;
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo> getLiveServersList() {
return liveServers_;
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder>
getLiveServersOrBuilderList() {
return liveServers_;
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public int getLiveServersCount() {
return liveServers_.size();
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo getLiveServers(int index) {
return liveServers_.get(index);
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder getLiveServersOrBuilder(
int index) {
return liveServers_.get(index);
}
// repeated .ServerName dead_servers = 3;
public static final int DEAD_SERVERS_FIELD_NUMBER = 3;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> deadServers_;
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getDeadServersList() {
return deadServers_;
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getDeadServersOrBuilderList() {
return deadServers_;
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public int getDeadServersCount() {
return deadServers_.size();
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getDeadServers(int index) {
return deadServers_.get(index);
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDeadServersOrBuilder(
int index) {
return deadServers_.get(index);
}
// repeated .RegionInTransition regions_in_transition = 4;
public static final int REGIONS_IN_TRANSITION_FIELD_NUMBER = 4;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition> regionsInTransition_;
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition> getRegionsInTransitionList() {
return regionsInTransition_;
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder>
getRegionsInTransitionOrBuilderList() {
return regionsInTransition_;
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public int getRegionsInTransitionCount() {
return regionsInTransition_.size();
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition getRegionsInTransition(int index) {
return regionsInTransition_.get(index);
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder getRegionsInTransitionOrBuilder(
int index) {
return regionsInTransition_.get(index);
}
// optional .ClusterId cluster_id = 5;
public static final int CLUSTER_ID_FIELD_NUMBER = 5;
private org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId clusterId_;
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
public boolean hasClusterId() {
return ((bitField0_ & 0x00000002) == 0x00000002);
}
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId getClusterId() {
return clusterId_;
}
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterIdOrBuilder getClusterIdOrBuilder() {
return clusterId_;
}
// repeated .Coprocessor master_coprocessors = 6;
public static final int MASTER_COPROCESSORS_FIELD_NUMBER = 6;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> masterCoprocessors_;
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> getMasterCoprocessorsList() {
return masterCoprocessors_;
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>
getMasterCoprocessorsOrBuilderList() {
return masterCoprocessors_;
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public int getMasterCoprocessorsCount() {
return masterCoprocessors_.size();
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getMasterCoprocessors(int index) {
return masterCoprocessors_.get(index);
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getMasterCoprocessorsOrBuilder(
int index) {
return masterCoprocessors_.get(index);
}
// optional .ServerName master = 7;
public static final int MASTER_FIELD_NUMBER = 7;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName master_;
/**
* <code>optional .ServerName master = 7;</code>
*/
public boolean hasMaster() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional .ServerName master = 7;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster() {
return master_;
}
/**
* <code>optional .ServerName master = 7;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() {
return master_;
}
// repeated .ServerName backup_masters = 8;
public static final int BACKUP_MASTERS_FIELD_NUMBER = 8;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> backupMasters_;
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getBackupMastersList() {
return backupMasters_;
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getBackupMastersOrBuilderList() {
return backupMasters_;
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public int getBackupMastersCount() {
return backupMasters_.size();
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getBackupMasters(int index) {
return backupMasters_.get(index);
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getBackupMastersOrBuilder(
int index) {
return backupMasters_.get(index);
}
// optional bool balancer_on = 9;
public static final int BALANCER_ON_FIELD_NUMBER = 9;
private boolean balancerOn_;
/**
* <code>optional bool balancer_on = 9;</code>
*/
public boolean hasBalancerOn() {
return ((bitField0_ & 0x00000008) == 0x00000008);
}
/**
* <code>optional bool balancer_on = 9;</code>
*/
public boolean getBalancerOn() {
return balancerOn_;
}
private void initFields() {
hbaseVersion_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.getDefaultInstance();
liveServers_ = java.util.Collections.emptyList();
deadServers_ = java.util.Collections.emptyList();
regionsInTransition_ = java.util.Collections.emptyList();
clusterId_ = org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.getDefaultInstance();
masterCoprocessors_ = java.util.Collections.emptyList();
master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
backupMasters_ = java.util.Collections.emptyList();
balancerOn_ = false;
}
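// Tri-state cache for isInitialized(): -1 = not yet computed,
// 0 = known uninitialized, 1 = known initialized.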
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
byte isInitialized = memoizedIsInitialized;
if (isInitialized != -1) return isInitialized == 1;
if (hasHbaseVersion()) {
if (!getHbaseVersion().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
for (int i = 0; i < getLiveServersCount(); i++) {
if (!getLiveServers(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
for (int i = 0; i < getDeadServersCount(); i++) {
if (!getDeadServers(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
for (int i = 0; i < getRegionsInTransitionCount(); i++) {
if (!getRegionsInTransition(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasClusterId()) {
if (!getClusterId().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
for (int i = 0; i < getMasterCoprocessorsCount(); i++) {
if (!getMasterCoprocessors(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
if (hasMaster()) {
if (!getMaster().isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
for (int i = 0; i < getBackupMastersCount(); i++) {
if (!getBackupMasters(i).isInitialized()) {
memoizedIsInitialized = 0;
return false;
}
}
memoizedIsInitialized = 1;
return true;
}
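// writeTo() calls getSerializedSize() first so that the memoized sizes of
// nested messages are populated before writeMessage() emits their
// length-delimited prefixes; fields are then written in ascending
// field-number order.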
public void writeTo(com.google.protobuf.CodedOutputStream output)
throws java.io.IOException {
getSerializedSize();
if (((bitField0_ & 0x00000001) == 0x00000001)) {
output.writeMessage(1, hbaseVersion_);
}
for (int i = 0; i < liveServers_.size(); i++) {
output.writeMessage(2, liveServers_.get(i));
}
for (int i = 0; i < deadServers_.size(); i++) {
output.writeMessage(3, deadServers_.get(i));
}
for (int i = 0; i < regionsInTransition_.size(); i++) {
output.writeMessage(4, regionsInTransition_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeMessage(5, clusterId_);
}
for (int i = 0; i < masterCoprocessors_.size(); i++) {
output.writeMessage(6, masterCoprocessors_.get(i));
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeMessage(7, master_);
}
for (int i = 0; i < backupMasters_.size(); i++) {
output.writeMessage(8, backupMasters_.get(i));
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
output.writeBool(9, balancerOn_);
}
getUnknownFields().writeTo(output);
}
private int memoizedSerializedSize = -1;
public int getSerializedSize() {
int size = memoizedSerializedSize;
if (size != -1) return size;
size = 0;
if (((bitField0_ & 0x00000001) == 0x00000001)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(1, hbaseVersion_);
}
for (int i = 0; i < liveServers_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(2, liveServers_.get(i));
}
for (int i = 0; i < deadServers_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(3, deadServers_.get(i));
}
for (int i = 0; i < regionsInTransition_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(4, regionsInTransition_.get(i));
}
if (((bitField0_ & 0x00000002) == 0x00000002)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(5, clusterId_);
}
for (int i = 0; i < masterCoprocessors_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(6, masterCoprocessors_.get(i));
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(7, master_);
}
for (int i = 0; i < backupMasters_.size(); i++) {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(8, backupMasters_.get(i));
}
if (((bitField0_ & 0x00000008) == 0x00000008)) {
size += com.google.protobuf.CodedOutputStream
.computeBoolSize(9, balancerOn_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
}
private static final long serialVersionUID = 0L;
@java.lang.Override
protected java.lang.Object writeReplace()
throws java.io.ObjectStreamException {
return super.writeReplace();
}
@java.lang.Override
public boolean equals(final java.lang.Object obj) {
if (obj == this) {
return true;
}
if (!(obj instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus)) {
return super.equals(obj);
}
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus other = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus) obj;
boolean result = true;
result = result && (hasHbaseVersion() == other.hasHbaseVersion());
if (hasHbaseVersion()) {
result = result && getHbaseVersion()
.equals(other.getHbaseVersion());
}
result = result && getLiveServersList()
.equals(other.getLiveServersList());
result = result && getDeadServersList()
.equals(other.getDeadServersList());
result = result && getRegionsInTransitionList()
.equals(other.getRegionsInTransitionList());
result = result && (hasClusterId() == other.hasClusterId());
if (hasClusterId()) {
result = result && getClusterId()
.equals(other.getClusterId());
}
result = result && getMasterCoprocessorsList()
.equals(other.getMasterCoprocessorsList());
result = result && (hasMaster() == other.hasMaster());
if (hasMaster()) {
result = result && getMaster()
.equals(other.getMaster());
}
result = result && getBackupMastersList()
.equals(other.getBackupMastersList());
result = result && (hasBalancerOn() == other.hasBalancerOn());
if (hasBalancerOn()) {
result = result && (getBalancerOn()
== other.getBalancerOn());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
}
private int memoizedHashCode = 0;
@java.lang.Override
public int hashCode() {
if (memoizedHashCode != 0) {
return memoizedHashCode;
}
int hash = 41;
hash = (19 * hash) + getDescriptorForType().hashCode();
if (hasHbaseVersion()) {
hash = (37 * hash) + HBASE_VERSION_FIELD_NUMBER;
hash = (53 * hash) + getHbaseVersion().hashCode();
}
if (getLiveServersCount() > 0) {
hash = (37 * hash) + LIVE_SERVERS_FIELD_NUMBER;
hash = (53 * hash) + getLiveServersList().hashCode();
}
if (getDeadServersCount() > 0) {
hash = (37 * hash) + DEAD_SERVERS_FIELD_NUMBER;
hash = (53 * hash) + getDeadServersList().hashCode();
}
if (getRegionsInTransitionCount() > 0) {
hash = (37 * hash) + REGIONS_IN_TRANSITION_FIELD_NUMBER;
hash = (53 * hash) + getRegionsInTransitionList().hashCode();
}
if (hasClusterId()) {
hash = (37 * hash) + CLUSTER_ID_FIELD_NUMBER;
hash = (53 * hash) + getClusterId().hashCode();
}
if (getMasterCoprocessorsCount() > 0) {
hash = (37 * hash) + MASTER_COPROCESSORS_FIELD_NUMBER;
hash = (53 * hash) + getMasterCoprocessorsList().hashCode();
}
if (hasMaster()) {
hash = (37 * hash) + MASTER_FIELD_NUMBER;
hash = (53 * hash) + getMaster().hashCode();
}
if (getBackupMastersCount() > 0) {
hash = (37 * hash) + BACKUP_MASTERS_FIELD_NUMBER;
hash = (53 * hash) + getBackupMastersList().hashCode();
}
if (hasBalancerOn()) {
hash = (37 * hash) + BALANCER_ON_FIELD_NUMBER;
hash = (53 * hash) + hashBoolean(getBalancerOn());
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(
com.google.protobuf.ByteString data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(
com.google.protobuf.ByteString data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(byte[] data)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(
byte[] data,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws com.google.protobuf.InvalidProtocolBufferException {
return PARSER.parseFrom(data, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseDelimitedFrom(java.io.InputStream input)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseDelimitedFrom(
java.io.InputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseDelimitedFrom(input, extensionRegistry);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(
com.google.protobuf.CodedInputStream input)
throws java.io.IOException {
return PARSER.parseFrom(input);
}
public static org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parseFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
return PARSER.parseFrom(input, extensionRegistry);
}
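// Typical deserialization path (illustrative sketch; `bytes` is assumed to
// hold a serialized ClusterStatus):
//
//   ClusterStatusProtos.ClusterStatus status =
//       ClusterStatusProtos.ClusterStatus.parseFrom(bytes);
//   if (status.hasMaster()) {
//     HBaseProtos.ServerName master = status.getMaster();
//   }
//   int liveServers = status.getLiveServersCount();
//
// All of the overloads above delegate to PARSER; parseDelimitedFrom expects
// a varint length prefix before the message body.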
public static Builder newBuilder() { return Builder.create(); }
public Builder newBuilderForType() { return newBuilder(); }
public static Builder newBuilder(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus prototype) {
return newBuilder().mergeFrom(prototype);
}
public Builder toBuilder() { return newBuilder(this); }
@java.lang.Override
protected Builder newBuilderForType(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
Builder builder = new Builder(parent);
return builder;
}
/**
* Protobuf type {@code ClusterStatus}
*/
public static final class Builder extends
com.google.protobuf.GeneratedMessage.Builder<Builder>
implements org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatusOrBuilder {
public static final com.google.protobuf.Descriptors.Descriptor
getDescriptor() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ClusterStatus_descriptor;
}
protected com.google.protobuf.GeneratedMessage.FieldAccessorTable
internalGetFieldAccessorTable() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ClusterStatus_fieldAccessorTable
.ensureFieldAccessorsInitialized(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.class, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.Builder.class);
}
// Construct using org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.newBuilder()
private Builder() {
maybeForceBuilderInitialization();
}
private Builder(
com.google.protobuf.GeneratedMessage.BuilderParent parent) {
super(parent);
maybeForceBuilderInitialization();
}
private void maybeForceBuilderInitialization() {
if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) {
getHbaseVersionFieldBuilder();
getLiveServersFieldBuilder();
getDeadServersFieldBuilder();
getRegionsInTransitionFieldBuilder();
getClusterIdFieldBuilder();
getMasterCoprocessorsFieldBuilder();
getMasterFieldBuilder();
getBackupMastersFieldBuilder();
}
}
private static Builder create() {
return new Builder();
}
public Builder clear() {
super.clear();
if (hbaseVersionBuilder_ == null) {
hbaseVersion_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.getDefaultInstance();
} else {
hbaseVersionBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
if (liveServersBuilder_ == null) {
liveServers_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
} else {
liveServersBuilder_.clear();
}
if (deadServersBuilder_ == null) {
deadServers_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
} else {
deadServersBuilder_.clear();
}
if (regionsInTransitionBuilder_ == null) {
regionsInTransition_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
} else {
regionsInTransitionBuilder_.clear();
}
if (clusterIdBuilder_ == null) {
clusterId_ = org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.getDefaultInstance();
} else {
clusterIdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
if (masterCoprocessorsBuilder_ == null) {
masterCoprocessors_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
} else {
masterCoprocessorsBuilder_.clear();
}
if (masterBuilder_ == null) {
master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
} else {
masterBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
if (backupMastersBuilder_ == null) {
backupMasters_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000080);
} else {
backupMastersBuilder_.clear();
}
balancerOn_ = false;
bitField0_ = (bitField0_ & ~0x00000100);
return this;
}
public Builder clone() {
return create().mergeFrom(buildPartial());
}
public com.google.protobuf.Descriptors.Descriptor
getDescriptorForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.internal_static_ClusterStatus_descriptor;
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus getDefaultInstanceForType() {
return org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance();
}
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus build() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus result = buildPartial();
if (!result.isInitialized()) {
throw newUninitializedMessageException(result);
}
return result;
}
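// buildPartial() remaps the builder's bit layout (one bit per field,
// 0x001 hbase_version ... 0x100 balancer_on) onto the message's presence
// bits, which cover only the singular fields (0x1, 0x2, 0x4, 0x8).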
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus buildPartial() {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus result = new org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus(this);
int from_bitField0_ = bitField0_;
int to_bitField0_ = 0;
if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
to_bitField0_ |= 0x00000001;
}
if (hbaseVersionBuilder_ == null) {
result.hbaseVersion_ = hbaseVersion_;
} else {
result.hbaseVersion_ = hbaseVersionBuilder_.build();
}
if (liveServersBuilder_ == null) {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
liveServers_ = java.util.Collections.unmodifiableList(liveServers_);
bitField0_ = (bitField0_ & ~0x00000002);
}
result.liveServers_ = liveServers_;
} else {
result.liveServers_ = liveServersBuilder_.build();
}
if (deadServersBuilder_ == null) {
if (((bitField0_ & 0x00000004) == 0x00000004)) {
deadServers_ = java.util.Collections.unmodifiableList(deadServers_);
bitField0_ = (bitField0_ & ~0x00000004);
}
result.deadServers_ = deadServers_;
} else {
result.deadServers_ = deadServersBuilder_.build();
}
if (regionsInTransitionBuilder_ == null) {
if (((bitField0_ & 0x00000008) == 0x00000008)) {
regionsInTransition_ = java.util.Collections.unmodifiableList(regionsInTransition_);
bitField0_ = (bitField0_ & ~0x00000008);
}
result.regionsInTransition_ = regionsInTransition_;
} else {
result.regionsInTransition_ = regionsInTransitionBuilder_.build();
}
if (((from_bitField0_ & 0x00000010) == 0x00000010)) {
to_bitField0_ |= 0x00000002;
}
if (clusterIdBuilder_ == null) {
result.clusterId_ = clusterId_;
} else {
result.clusterId_ = clusterIdBuilder_.build();
}
if (masterCoprocessorsBuilder_ == null) {
if (((bitField0_ & 0x00000020) == 0x00000020)) {
masterCoprocessors_ = java.util.Collections.unmodifiableList(masterCoprocessors_);
bitField0_ = (bitField0_ & ~0x00000020);
}
result.masterCoprocessors_ = masterCoprocessors_;
} else {
result.masterCoprocessors_ = masterCoprocessorsBuilder_.build();
}
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
to_bitField0_ |= 0x00000004;
}
if (masterBuilder_ == null) {
result.master_ = master_;
} else {
result.master_ = masterBuilder_.build();
}
if (backupMastersBuilder_ == null) {
if (((bitField0_ & 0x00000080) == 0x00000080)) {
backupMasters_ = java.util.Collections.unmodifiableList(backupMasters_);
bitField0_ = (bitField0_ & ~0x00000080);
}
result.backupMasters_ = backupMasters_;
} else {
result.backupMasters_ = backupMastersBuilder_.build();
}
if (((from_bitField0_ & 0x00000100) == 0x00000100)) {
to_bitField0_ |= 0x00000008;
}
result.balancerOn_ = balancerOn_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
}
public Builder mergeFrom(com.google.protobuf.Message other) {
if (other instanceof org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus) {
return mergeFrom((org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus)other);
} else {
super.mergeFrom(other);
return this;
}
}
public Builder mergeFrom(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus other) {
if (other == org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus.getDefaultInstance()) return this;
if (other.hasHbaseVersion()) {
mergeHbaseVersion(other.getHbaseVersion());
}
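// For each repeated field below: if this builder's list is still empty, the
// other message's immutable list is adopted directly and the ownership bit
// is cleared, so a later mutation forces a defensive copy in
// ensure...IsMutable(); otherwise the elements are appended.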
if (liveServersBuilder_ == null) {
if (!other.liveServers_.isEmpty()) {
if (liveServers_.isEmpty()) {
liveServers_ = other.liveServers_;
bitField0_ = (bitField0_ & ~0x00000002);
} else {
ensureLiveServersIsMutable();
liveServers_.addAll(other.liveServers_);
}
onChanged();
}
} else {
if (!other.liveServers_.isEmpty()) {
if (liveServersBuilder_.isEmpty()) {
liveServersBuilder_.dispose();
liveServersBuilder_ = null;
liveServers_ = other.liveServers_;
bitField0_ = (bitField0_ & ~0x00000002);
liveServersBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getLiveServersFieldBuilder() : null;
} else {
liveServersBuilder_.addAllMessages(other.liveServers_);
}
}
}
if (deadServersBuilder_ == null) {
if (!other.deadServers_.isEmpty()) {
if (deadServers_.isEmpty()) {
deadServers_ = other.deadServers_;
bitField0_ = (bitField0_ & ~0x00000004);
} else {
ensureDeadServersIsMutable();
deadServers_.addAll(other.deadServers_);
}
onChanged();
}
} else {
if (!other.deadServers_.isEmpty()) {
if (deadServersBuilder_.isEmpty()) {
deadServersBuilder_.dispose();
deadServersBuilder_ = null;
deadServers_ = other.deadServers_;
bitField0_ = (bitField0_ & ~0x00000004);
deadServersBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getDeadServersFieldBuilder() : null;
} else {
deadServersBuilder_.addAllMessages(other.deadServers_);
}
}
}
if (regionsInTransitionBuilder_ == null) {
if (!other.regionsInTransition_.isEmpty()) {
if (regionsInTransition_.isEmpty()) {
regionsInTransition_ = other.regionsInTransition_;
bitField0_ = (bitField0_ & ~0x00000008);
} else {
ensureRegionsInTransitionIsMutable();
regionsInTransition_.addAll(other.regionsInTransition_);
}
onChanged();
}
} else {
if (!other.regionsInTransition_.isEmpty()) {
if (regionsInTransitionBuilder_.isEmpty()) {
regionsInTransitionBuilder_.dispose();
regionsInTransitionBuilder_ = null;
regionsInTransition_ = other.regionsInTransition_;
bitField0_ = (bitField0_ & ~0x00000008);
regionsInTransitionBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getRegionsInTransitionFieldBuilder() : null;
} else {
regionsInTransitionBuilder_.addAllMessages(other.regionsInTransition_);
}
}
}
if (other.hasClusterId()) {
mergeClusterId(other.getClusterId());
}
if (masterCoprocessorsBuilder_ == null) {
if (!other.masterCoprocessors_.isEmpty()) {
if (masterCoprocessors_.isEmpty()) {
masterCoprocessors_ = other.masterCoprocessors_;
bitField0_ = (bitField0_ & ~0x00000020);
} else {
ensureMasterCoprocessorsIsMutable();
masterCoprocessors_.addAll(other.masterCoprocessors_);
}
onChanged();
}
} else {
if (!other.masterCoprocessors_.isEmpty()) {
if (masterCoprocessorsBuilder_.isEmpty()) {
masterCoprocessorsBuilder_.dispose();
masterCoprocessorsBuilder_ = null;
masterCoprocessors_ = other.masterCoprocessors_;
bitField0_ = (bitField0_ & ~0x00000020);
masterCoprocessorsBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getMasterCoprocessorsFieldBuilder() : null;
} else {
masterCoprocessorsBuilder_.addAllMessages(other.masterCoprocessors_);
}
}
}
if (other.hasMaster()) {
mergeMaster(other.getMaster());
}
if (backupMastersBuilder_ == null) {
if (!other.backupMasters_.isEmpty()) {
if (backupMasters_.isEmpty()) {
backupMasters_ = other.backupMasters_;
bitField0_ = (bitField0_ & ~0x00000080);
} else {
ensureBackupMastersIsMutable();
backupMasters_.addAll(other.backupMasters_);
}
onChanged();
}
} else {
if (!other.backupMasters_.isEmpty()) {
if (backupMastersBuilder_.isEmpty()) {
backupMastersBuilder_.dispose();
backupMastersBuilder_ = null;
backupMasters_ = other.backupMasters_;
bitField0_ = (bitField0_ & ~0x00000080);
backupMastersBuilder_ =
com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ?
getBackupMastersFieldBuilder() : null;
} else {
backupMastersBuilder_.addAllMessages(other.backupMasters_);
}
}
}
if (other.hasBalancerOn()) {
setBalancerOn(other.getBalancerOn());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
public final boolean isInitialized() {
if (hasHbaseVersion()) {
if (!getHbaseVersion().isInitialized()) {
return false;
}
}
for (int i = 0; i < getLiveServersCount(); i++) {
if (!getLiveServers(i).isInitialized()) {
return false;
}
}
for (int i = 0; i < getDeadServersCount(); i++) {
if (!getDeadServers(i).isInitialized()) {
return false;
}
}
for (int i = 0; i < getRegionsInTransitionCount(); i++) {
if (!getRegionsInTransition(i).isInitialized()) {
return false;
}
}
if (hasClusterId()) {
if (!getClusterId().isInitialized()) {
return false;
}
}
for (int i = 0; i < getMasterCoprocessorsCount(); i++) {
if (!getMasterCoprocessors(i).isInitialized()) {
return false;
}
}
if (hasMaster()) {
if (!getMaster().isInitialized()) {
return false;
}
}
for (int i = 0; i < getBackupMastersCount(); i++) {
if (!getBackupMasters(i).isInitialized()) {
return false;
}
}
return true;
}
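// Parses a fresh message from the stream and merges it in; on an
// InvalidProtocolBufferException the partially parsed message is still
// merged (via the finally block) before the exception is rethrown.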
public Builder mergeFrom(
com.google.protobuf.CodedInputStream input,
com.google.protobuf.ExtensionRegistryLite extensionRegistry)
throws java.io.IOException {
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus parsedMessage = null;
try {
parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
parsedMessage = (org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.ClusterStatus) e.getUnfinishedMessage();
throw e;
} finally {
if (parsedMessage != null) {
mergeFrom(parsedMessage);
}
}
return this;
}
private int bitField0_;
// optional .HBaseVersionFileContent hbase_version = 1;
private org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent hbaseVersion_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent, org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder, org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContentOrBuilder> hbaseVersionBuilder_;
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
public boolean hasHbaseVersion() {
return ((bitField0_ & 0x00000001) == 0x00000001);
}
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent getHbaseVersion() {
if (hbaseVersionBuilder_ == null) {
return hbaseVersion_;
} else {
return hbaseVersionBuilder_.getMessage();
}
}
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
public Builder setHbaseVersion(org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent value) {
if (hbaseVersionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
hbaseVersion_ = value;
onChanged();
} else {
hbaseVersionBuilder_.setMessage(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
public Builder setHbaseVersion(
org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder builderForValue) {
if (hbaseVersionBuilder_ == null) {
hbaseVersion_ = builderForValue.build();
onChanged();
} else {
hbaseVersionBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
public Builder mergeHbaseVersion(org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent value) {
if (hbaseVersionBuilder_ == null) {
if (((bitField0_ & 0x00000001) == 0x00000001) &&
hbaseVersion_ != org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.getDefaultInstance()) {
hbaseVersion_ =
org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.newBuilder(hbaseVersion_).mergeFrom(value).buildPartial();
} else {
hbaseVersion_ = value;
}
onChanged();
} else {
hbaseVersionBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000001;
return this;
}
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
public Builder clearHbaseVersion() {
if (hbaseVersionBuilder_ == null) {
hbaseVersion_ = org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.getDefaultInstance();
onChanged();
} else {
hbaseVersionBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000001);
return this;
}
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder getHbaseVersionBuilder() {
bitField0_ |= 0x00000001;
onChanged();
return getHbaseVersionFieldBuilder().getBuilder();
}
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContentOrBuilder getHbaseVersionOrBuilder() {
if (hbaseVersionBuilder_ != null) {
return hbaseVersionBuilder_.getMessageOrBuilder();
} else {
return hbaseVersion_;
}
}
/**
* <code>optional .HBaseVersionFileContent hbase_version = 1;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent, org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder, org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContentOrBuilder>
getHbaseVersionFieldBuilder() {
if (hbaseVersionBuilder_ == null) {
hbaseVersionBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent, org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContent.Builder, org.apache.hadoop.hbase.protobuf.generated.FSProtos.HBaseVersionFileContentOrBuilder>(
hbaseVersion_,
getParentForChildren(),
isClean());
hbaseVersion_ = null;
}
return hbaseVersionBuilder_;
}
// repeated .LiveServerInfo live_servers = 2;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo> liveServers_ =
java.util.Collections.emptyList();
private void ensureLiveServersIsMutable() {
if (!((bitField0_ & 0x00000002) == 0x00000002)) {
liveServers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo>(liveServers_);
bitField0_ |= 0x00000002;
}
}
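// Copy-on-write guard: the list starts out shared (emptyList() or a list
// adopted from another message); bit 0x02 records that this builder owns a
// private mutable copy, created above on first mutation.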
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder> liveServersBuilder_;
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo> getLiveServersList() {
if (liveServersBuilder_ == null) {
return java.util.Collections.unmodifiableList(liveServers_);
} else {
return liveServersBuilder_.getMessageList();
}
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public int getLiveServersCount() {
if (liveServersBuilder_ == null) {
return liveServers_.size();
} else {
return liveServersBuilder_.getCount();
}
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo getLiveServers(int index) {
if (liveServersBuilder_ == null) {
return liveServers_.get(index);
} else {
return liveServersBuilder_.getMessage(index);
}
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public Builder setLiveServers(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo value) {
if (liveServersBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLiveServersIsMutable();
liveServers_.set(index, value);
onChanged();
} else {
liveServersBuilder_.setMessage(index, value);
}
return this;
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public Builder setLiveServers(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder builderForValue) {
if (liveServersBuilder_ == null) {
ensureLiveServersIsMutable();
liveServers_.set(index, builderForValue.build());
onChanged();
} else {
liveServersBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public Builder addLiveServers(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo value) {
if (liveServersBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLiveServersIsMutable();
liveServers_.add(value);
onChanged();
} else {
liveServersBuilder_.addMessage(value);
}
return this;
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public Builder addLiveServers(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo value) {
if (liveServersBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureLiveServersIsMutable();
liveServers_.add(index, value);
onChanged();
} else {
liveServersBuilder_.addMessage(index, value);
}
return this;
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public Builder addLiveServers(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder builderForValue) {
if (liveServersBuilder_ == null) {
ensureLiveServersIsMutable();
liveServers_.add(builderForValue.build());
onChanged();
} else {
liveServersBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public Builder addLiveServers(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder builderForValue) {
if (liveServersBuilder_ == null) {
ensureLiveServersIsMutable();
liveServers_.add(index, builderForValue.build());
onChanged();
} else {
liveServersBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public Builder addAllLiveServers(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo> values) {
if (liveServersBuilder_ == null) {
ensureLiveServersIsMutable();
super.addAll(values, liveServers_);
onChanged();
} else {
liveServersBuilder_.addAllMessages(values);
}
return this;
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public Builder clearLiveServers() {
if (liveServersBuilder_ == null) {
liveServers_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000002);
onChanged();
} else {
liveServersBuilder_.clear();
}
return this;
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public Builder removeLiveServers(int index) {
if (liveServersBuilder_ == null) {
ensureLiveServersIsMutable();
liveServers_.remove(index);
onChanged();
} else {
liveServersBuilder_.remove(index);
}
return this;
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder getLiveServersBuilder(
int index) {
return getLiveServersFieldBuilder().getBuilder(index);
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder getLiveServersOrBuilder(
int index) {
if (liveServersBuilder_ == null) {
return liveServers_.get(index);
} else {
return liveServersBuilder_.getMessageOrBuilder(index);
}
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder>
getLiveServersOrBuilderList() {
if (liveServersBuilder_ != null) {
return liveServersBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(liveServers_);
}
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder addLiveServersBuilder() {
return getLiveServersFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.getDefaultInstance());
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder addLiveServersBuilder(
int index) {
return getLiveServersFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.getDefaultInstance());
}
/**
* <code>repeated .LiveServerInfo live_servers = 2;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder>
getLiveServersBuilderList() {
return getLiveServersFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder>
getLiveServersFieldBuilder() {
if (liveServersBuilder_ == null) {
liveServersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfo.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.LiveServerInfoOrBuilder>(
liveServers_,
((bitField0_ & 0x00000002) == 0x00000002),
getParentForChildren(),
isClean());
liveServers_ = null;
}
return liveServersBuilder_;
}
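// Editor's note: the sketch below is illustrative and is not emitted by
// protoc; it shows one way a caller could populate the repeated
// live_servers field through this builder. The host name, port, and start
// code are hypothetical values.
//
//   ClusterStatus.Builder status = ClusterStatus.newBuilder();
//   status.addLiveServersBuilder()
//       .setServer(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder()
//           .setHostName("rs1.example.org")  // hypothetical host
//           .setPort(16020)
//           .setStartCode(1L))
//       .setServerLoad(ServerLoad.getDefaultInstance());
//   ClusterStatus built = status.build();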
// repeated .ServerName dead_servers = 3;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> deadServers_ =
java.util.Collections.emptyList();
private void ensureDeadServersIsMutable() {
if (!((bitField0_ & 0x00000004) == 0x00000004)) {
deadServers_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>(deadServers_);
bitField0_ |= 0x00000004;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> deadServersBuilder_;
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getDeadServersList() {
if (deadServersBuilder_ == null) {
return java.util.Collections.unmodifiableList(deadServers_);
} else {
return deadServersBuilder_.getMessageList();
}
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public int getDeadServersCount() {
if (deadServersBuilder_ == null) {
return deadServers_.size();
} else {
return deadServersBuilder_.getCount();
}
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getDeadServers(int index) {
if (deadServersBuilder_ == null) {
return deadServers_.get(index);
} else {
return deadServersBuilder_.getMessage(index);
}
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public Builder setDeadServers(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (deadServersBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureDeadServersIsMutable();
deadServers_.set(index, value);
onChanged();
} else {
deadServersBuilder_.setMessage(index, value);
}
return this;
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public Builder setDeadServers(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (deadServersBuilder_ == null) {
ensureDeadServersIsMutable();
deadServers_.set(index, builderForValue.build());
onChanged();
} else {
deadServersBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public Builder addDeadServers(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (deadServersBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureDeadServersIsMutable();
deadServers_.add(value);
onChanged();
} else {
deadServersBuilder_.addMessage(value);
}
return this;
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public Builder addDeadServers(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (deadServersBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureDeadServersIsMutable();
deadServers_.add(index, value);
onChanged();
} else {
deadServersBuilder_.addMessage(index, value);
}
return this;
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public Builder addDeadServers(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (deadServersBuilder_ == null) {
ensureDeadServersIsMutable();
deadServers_.add(builderForValue.build());
onChanged();
} else {
deadServersBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public Builder addDeadServers(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (deadServersBuilder_ == null) {
ensureDeadServersIsMutable();
deadServers_.add(index, builderForValue.build());
onChanged();
} else {
deadServersBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public Builder addAllDeadServers(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> values) {
if (deadServersBuilder_ == null) {
ensureDeadServersIsMutable();
super.addAll(values, deadServers_);
onChanged();
} else {
deadServersBuilder_.addAllMessages(values);
}
return this;
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public Builder clearDeadServers() {
if (deadServersBuilder_ == null) {
deadServers_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000004);
onChanged();
} else {
deadServersBuilder_.clear();
}
return this;
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public Builder removeDeadServers(int index) {
if (deadServersBuilder_ == null) {
ensureDeadServersIsMutable();
deadServers_.remove(index);
onChanged();
} else {
deadServersBuilder_.remove(index);
}
return this;
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getDeadServersBuilder(
int index) {
return getDeadServersFieldBuilder().getBuilder(index);
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getDeadServersOrBuilder(
int index) {
if (deadServersBuilder_ == null) {
return deadServers_.get(index);
} else {
return deadServersBuilder_.getMessageOrBuilder(index);
}
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getDeadServersOrBuilderList() {
if (deadServersBuilder_ != null) {
return deadServersBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(deadServers_);
}
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addDeadServersBuilder() {
return getDeadServersFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addDeadServersBuilder(
int index) {
return getDeadServersFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
}
/**
* <code>repeated .ServerName dead_servers = 3;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder>
getDeadServersBuilderList() {
return getDeadServersFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getDeadServersFieldBuilder() {
if (deadServersBuilder_ == null) {
deadServersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
deadServers_,
((bitField0_ & 0x00000004) == 0x00000004),
getParentForChildren(),
isClean());
deadServers_ = null;
}
return deadServersBuilder_;
}
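// Editor's note (illustrative, not generated by protoc): addAllDeadServers
// accepts any Iterable, so a pre-built java.util.List can be appended in a
// single call. The server names below are hypothetical.
//
//   java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> dead =
//       java.util.Arrays.asList(
//           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder()
//               .setHostName("rs2.example.org").build(),
//           org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder()
//               .setHostName("rs3.example.org").build());
//   clusterStatusBuilder.addAllDeadServers(dead);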
// repeated .RegionInTransition regions_in_transition = 4;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition> regionsInTransition_ =
java.util.Collections.emptyList();
private void ensureRegionsInTransitionIsMutable() {
if (!((bitField0_ & 0x00000008) == 0x00000008)) {
regionsInTransition_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition>(regionsInTransition_);
bitField0_ |= 0x00000008;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder> regionsInTransitionBuilder_;
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition> getRegionsInTransitionList() {
if (regionsInTransitionBuilder_ == null) {
return java.util.Collections.unmodifiableList(regionsInTransition_);
} else {
return regionsInTransitionBuilder_.getMessageList();
}
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public int getRegionsInTransitionCount() {
if (regionsInTransitionBuilder_ == null) {
return regionsInTransition_.size();
} else {
return regionsInTransitionBuilder_.getCount();
}
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition getRegionsInTransition(int index) {
if (regionsInTransitionBuilder_ == null) {
return regionsInTransition_.get(index);
} else {
return regionsInTransitionBuilder_.getMessage(index);
}
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public Builder setRegionsInTransition(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition value) {
if (regionsInTransitionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureRegionsInTransitionIsMutable();
regionsInTransition_.set(index, value);
onChanged();
} else {
regionsInTransitionBuilder_.setMessage(index, value);
}
return this;
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public Builder setRegionsInTransition(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder builderForValue) {
if (regionsInTransitionBuilder_ == null) {
ensureRegionsInTransitionIsMutable();
regionsInTransition_.set(index, builderForValue.build());
onChanged();
} else {
regionsInTransitionBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public Builder addRegionsInTransition(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition value) {
if (regionsInTransitionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureRegionsInTransitionIsMutable();
regionsInTransition_.add(value);
onChanged();
} else {
regionsInTransitionBuilder_.addMessage(value);
}
return this;
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public Builder addRegionsInTransition(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition value) {
if (regionsInTransitionBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureRegionsInTransitionIsMutable();
regionsInTransition_.add(index, value);
onChanged();
} else {
regionsInTransitionBuilder_.addMessage(index, value);
}
return this;
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public Builder addRegionsInTransition(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder builderForValue) {
if (regionsInTransitionBuilder_ == null) {
ensureRegionsInTransitionIsMutable();
regionsInTransition_.add(builderForValue.build());
onChanged();
} else {
regionsInTransitionBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public Builder addRegionsInTransition(
int index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder builderForValue) {
if (regionsInTransitionBuilder_ == null) {
ensureRegionsInTransitionIsMutable();
regionsInTransition_.add(index, builderForValue.build());
onChanged();
} else {
regionsInTransitionBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public Builder addAllRegionsInTransition(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition> values) {
if (regionsInTransitionBuilder_ == null) {
ensureRegionsInTransitionIsMutable();
super.addAll(values, regionsInTransition_);
onChanged();
} else {
regionsInTransitionBuilder_.addAllMessages(values);
}
return this;
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public Builder clearRegionsInTransition() {
if (regionsInTransitionBuilder_ == null) {
regionsInTransition_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000008);
onChanged();
} else {
regionsInTransitionBuilder_.clear();
}
return this;
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public Builder removeRegionsInTransition(int index) {
if (regionsInTransitionBuilder_ == null) {
ensureRegionsInTransitionIsMutable();
regionsInTransition_.remove(index);
onChanged();
} else {
regionsInTransitionBuilder_.remove(index);
}
return this;
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder getRegionsInTransitionBuilder(
int index) {
return getRegionsInTransitionFieldBuilder().getBuilder(index);
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder getRegionsInTransitionOrBuilder(
int index) {
if (regionsInTransitionBuilder_ == null) {
return regionsInTransition_.get(index);
} else {
return regionsInTransitionBuilder_.getMessageOrBuilder(index);
}
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder>
getRegionsInTransitionOrBuilderList() {
if (regionsInTransitionBuilder_ != null) {
return regionsInTransitionBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(regionsInTransition_);
}
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder addRegionsInTransitionBuilder() {
return getRegionsInTransitionFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.getDefaultInstance());
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder addRegionsInTransitionBuilder(
int index) {
return getRegionsInTransitionFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.getDefaultInstance());
}
/**
* <code>repeated .RegionInTransition regions_in_transition = 4;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder>
getRegionsInTransitionBuilderList() {
return getRegionsInTransitionFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder>
getRegionsInTransitionFieldBuilder() {
if (regionsInTransitionBuilder_ == null) {
regionsInTransitionBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransition.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionInTransitionOrBuilder>(
regionsInTransition_,
((bitField0_ & 0x00000008) == 0x00000008),
getParentForChildren(),
isClean());
regionsInTransition_ = null;
}
return regionsInTransitionBuilder_;
}
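// Editor's note (illustrative, not generated by protoc):
// getRegionsInTransitionBuilder(i) returns a live nested builder, so an
// existing element can be edited in place; the change propagates to this
// builder through onChanged(). The index and timestamp are hypothetical,
// and getRegionStateBuilder() is assumed from the standard generated API
// for the required region_state message field.
//
//   clusterStatusBuilder.getRegionsInTransitionBuilder(0)
//       .getRegionStateBuilder()
//       .setStamp(System.currentTimeMillis());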
// optional .ClusterId cluster_id = 5;
private org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId clusterId_ = org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId, org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterIdOrBuilder> clusterIdBuilder_;
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
public boolean hasClusterId() {
return ((bitField0_ & 0x00000010) == 0x00000010);
}
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId getClusterId() {
if (clusterIdBuilder_ == null) {
return clusterId_;
} else {
return clusterIdBuilder_.getMessage();
}
}
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
public Builder setClusterId(org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId value) {
if (clusterIdBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
clusterId_ = value;
onChanged();
} else {
clusterIdBuilder_.setMessage(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
public Builder setClusterId(
org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder builderForValue) {
if (clusterIdBuilder_ == null) {
clusterId_ = builderForValue.build();
onChanged();
} else {
clusterIdBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000010;
return this;
}
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
public Builder mergeClusterId(org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId value) {
if (clusterIdBuilder_ == null) {
if (((bitField0_ & 0x00000010) == 0x00000010) &&
clusterId_ != org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.getDefaultInstance()) {
clusterId_ =
org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.newBuilder(clusterId_).mergeFrom(value).buildPartial();
} else {
clusterId_ = value;
}
onChanged();
} else {
clusterIdBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000010;
return this;
}
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
public Builder clearClusterId() {
if (clusterIdBuilder_ == null) {
clusterId_ = org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.getDefaultInstance();
onChanged();
} else {
clusterIdBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000010);
return this;
}
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder getClusterIdBuilder() {
bitField0_ |= 0x00000010;
onChanged();
return getClusterIdFieldBuilder().getBuilder();
}
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterIdOrBuilder getClusterIdOrBuilder() {
if (clusterIdBuilder_ != null) {
return clusterIdBuilder_.getMessageOrBuilder();
} else {
return clusterId_;
}
}
/**
* <code>optional .ClusterId cluster_id = 5;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId, org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterIdOrBuilder>
getClusterIdFieldBuilder() {
if (clusterIdBuilder_ == null) {
clusterIdBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId, org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterId.Builder, org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.ClusterIdOrBuilder>(
clusterId_,
getParentForChildren(),
isClean());
clusterId_ = null;
}
return clusterIdBuilder_;
}
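// Editor's note (illustrative, not generated by protoc): for the optional
// cluster_id message field, setClusterId replaces any existing value,
// while mergeClusterId field-merges into a previously set value and falls
// back to a plain set when the field is unset or still the default
// instance (see mergeClusterId above). The id string is hypothetical.
//
//   clusterStatusBuilder.mergeClusterId(
//       ClusterIdProtos.ClusterId.newBuilder()
//           .setClusterId("example-cluster-id")  // hypothetical id
//           .build());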
// repeated .Coprocessor master_coprocessors = 6;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> masterCoprocessors_ =
java.util.Collections.emptyList();
private void ensureMasterCoprocessorsIsMutable() {
if (!((bitField0_ & 0x00000020) == 0x00000020)) {
masterCoprocessors_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor>(masterCoprocessors_);
bitField0_ |= 0x00000020;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder> masterCoprocessorsBuilder_;
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> getMasterCoprocessorsList() {
if (masterCoprocessorsBuilder_ == null) {
return java.util.Collections.unmodifiableList(masterCoprocessors_);
} else {
return masterCoprocessorsBuilder_.getMessageList();
}
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public int getMasterCoprocessorsCount() {
if (masterCoprocessorsBuilder_ == null) {
return masterCoprocessors_.size();
} else {
return masterCoprocessorsBuilder_.getCount();
}
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor getMasterCoprocessors(int index) {
if (masterCoprocessorsBuilder_ == null) {
return masterCoprocessors_.get(index);
} else {
return masterCoprocessorsBuilder_.getMessage(index);
}
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public Builder setMasterCoprocessors(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) {
if (masterCoprocessorsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureMasterCoprocessorsIsMutable();
masterCoprocessors_.set(index, value);
onChanged();
} else {
masterCoprocessorsBuilder_.setMessage(index, value);
}
return this;
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public Builder setMasterCoprocessors(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) {
if (masterCoprocessorsBuilder_ == null) {
ensureMasterCoprocessorsIsMutable();
masterCoprocessors_.set(index, builderForValue.build());
onChanged();
} else {
masterCoprocessorsBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public Builder addMasterCoprocessors(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) {
if (masterCoprocessorsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureMasterCoprocessorsIsMutable();
masterCoprocessors_.add(value);
onChanged();
} else {
masterCoprocessorsBuilder_.addMessage(value);
}
return this;
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public Builder addMasterCoprocessors(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor value) {
if (masterCoprocessorsBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureMasterCoprocessorsIsMutable();
masterCoprocessors_.add(index, value);
onChanged();
} else {
masterCoprocessorsBuilder_.addMessage(index, value);
}
return this;
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public Builder addMasterCoprocessors(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) {
if (masterCoprocessorsBuilder_ == null) {
ensureMasterCoprocessorsIsMutable();
masterCoprocessors_.add(builderForValue.build());
onChanged();
} else {
masterCoprocessorsBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public Builder addMasterCoprocessors(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder builderForValue) {
if (masterCoprocessorsBuilder_ == null) {
ensureMasterCoprocessorsIsMutable();
masterCoprocessors_.add(index, builderForValue.build());
onChanged();
} else {
masterCoprocessorsBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public Builder addAllMasterCoprocessors(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor> values) {
if (masterCoprocessorsBuilder_ == null) {
ensureMasterCoprocessorsIsMutable();
super.addAll(values, masterCoprocessors_);
onChanged();
} else {
masterCoprocessorsBuilder_.addAllMessages(values);
}
return this;
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public Builder clearMasterCoprocessors() {
if (masterCoprocessorsBuilder_ == null) {
masterCoprocessors_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000020);
onChanged();
} else {
masterCoprocessorsBuilder_.clear();
}
return this;
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public Builder removeMasterCoprocessors(int index) {
if (masterCoprocessorsBuilder_ == null) {
ensureMasterCoprocessorsIsMutable();
masterCoprocessors_.remove(index);
onChanged();
} else {
masterCoprocessorsBuilder_.remove(index);
}
return this;
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder getMasterCoprocessorsBuilder(
int index) {
return getMasterCoprocessorsFieldBuilder().getBuilder(index);
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder getMasterCoprocessorsOrBuilder(
int index) {
if (masterCoprocessorsBuilder_ == null) {
return masterCoprocessors_.get(index);
} else {
return masterCoprocessorsBuilder_.getMessageOrBuilder(index);
}
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>
getMasterCoprocessorsOrBuilderList() {
if (masterCoprocessorsBuilder_ != null) {
return masterCoprocessorsBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(masterCoprocessors_);
}
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addMasterCoprocessorsBuilder() {
return getMasterCoprocessorsFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance());
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder addMasterCoprocessorsBuilder(
int index) {
return getMasterCoprocessorsFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.getDefaultInstance());
}
/**
* <code>repeated .Coprocessor master_coprocessors = 6;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder>
getMasterCoprocessorsBuilderList() {
return getMasterCoprocessorsFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>
getMasterCoprocessorsFieldBuilder() {
if (masterCoprocessorsBuilder_ == null) {
masterCoprocessorsBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.CoprocessorOrBuilder>(
masterCoprocessors_,
((bitField0_ & 0x00000020) == 0x00000020),
getParentForChildren(),
isClean());
masterCoprocessors_ = null;
}
return masterCoprocessorsBuilder_;
}
// optional .ServerName master = 7;
private org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> masterBuilder_;
/**
* <code>optional .ServerName master = 7;</code>
*/
public boolean hasMaster() {
return ((bitField0_ & 0x00000040) == 0x00000040);
}
/**
* <code>optional .ServerName master = 7;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getMaster() {
if (masterBuilder_ == null) {
return master_;
} else {
return masterBuilder_.getMessage();
}
}
/**
* <code>optional .ServerName master = 7;</code>
*/
public Builder setMaster(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (masterBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
master_ = value;
onChanged();
} else {
masterBuilder_.setMessage(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* <code>optional .ServerName master = 7;</code>
*/
public Builder setMaster(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (masterBuilder_ == null) {
master_ = builderForValue.build();
onChanged();
} else {
masterBuilder_.setMessage(builderForValue.build());
}
bitField0_ |= 0x00000040;
return this;
}
/**
* <code>optional .ServerName master = 7;</code>
*/
public Builder mergeMaster(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (masterBuilder_ == null) {
if (((bitField0_ & 0x00000040) == 0x00000040) &&
master_ != org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance()) {
master_ =
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder(master_).mergeFrom(value).buildPartial();
} else {
master_ = value;
}
onChanged();
} else {
masterBuilder_.mergeFrom(value);
}
bitField0_ |= 0x00000040;
return this;
}
/**
* <code>optional .ServerName master = 7;</code>
*/
public Builder clearMaster() {
if (masterBuilder_ == null) {
master_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance();
onChanged();
} else {
masterBuilder_.clear();
}
bitField0_ = (bitField0_ & ~0x00000040);
return this;
}
/**
* <code>optional .ServerName master = 7;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getMasterBuilder() {
bitField0_ |= 0x00000040;
onChanged();
return getMasterFieldBuilder().getBuilder();
}
/**
* <code>optional .ServerName master = 7;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getMasterOrBuilder() {
if (masterBuilder_ != null) {
return masterBuilder_.getMessageOrBuilder();
} else {
return master_;
}
}
/**
* <code>optional .ServerName master = 7;</code>
*/
private com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getMasterFieldBuilder() {
if (masterBuilder_ == null) {
masterBuilder_ = new com.google.protobuf.SingleFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
master_,
getParentForChildren(),
isClean());
master_ = null;
}
return masterBuilder_;
}
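// Editor's note (illustrative, not generated by protoc): because master is
// an optional field, presence is tracked separately from the value;
// hasMaster() reports whether it was explicitly set, and clearMaster()
// restores the default instance and clears the presence bit. The host name
// below is hypothetical.
//
//   if (!clusterStatusBuilder.hasMaster()) {
//     clusterStatusBuilder.setMaster(
//         org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.newBuilder()
//             .setHostName("master.example.org"));
//   }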
// repeated .ServerName backup_masters = 8;
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> backupMasters_ =
java.util.Collections.emptyList();
private void ensureBackupMastersIsMutable() {
if (!((bitField0_ & 0x00000080) == 0x00000080)) {
backupMasters_ = new java.util.ArrayList<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName>(backupMasters_);
bitField0_ |= 0x00000080;
}
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder> backupMastersBuilder_;
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> getBackupMastersList() {
if (backupMastersBuilder_ == null) {
return java.util.Collections.unmodifiableList(backupMasters_);
} else {
return backupMastersBuilder_.getMessageList();
}
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public int getBackupMastersCount() {
if (backupMastersBuilder_ == null) {
return backupMasters_.size();
} else {
return backupMastersBuilder_.getCount();
}
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName getBackupMasters(int index) {
if (backupMastersBuilder_ == null) {
return backupMasters_.get(index);
} else {
return backupMastersBuilder_.getMessage(index);
}
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public Builder setBackupMasters(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (backupMastersBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBackupMastersIsMutable();
backupMasters_.set(index, value);
onChanged();
} else {
backupMastersBuilder_.setMessage(index, value);
}
return this;
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public Builder setBackupMasters(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (backupMastersBuilder_ == null) {
ensureBackupMastersIsMutable();
backupMasters_.set(index, builderForValue.build());
onChanged();
} else {
backupMastersBuilder_.setMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public Builder addBackupMasters(org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (backupMastersBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBackupMastersIsMutable();
backupMasters_.add(value);
onChanged();
} else {
backupMastersBuilder_.addMessage(value);
}
return this;
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public Builder addBackupMasters(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName value) {
if (backupMastersBuilder_ == null) {
if (value == null) {
throw new NullPointerException();
}
ensureBackupMastersIsMutable();
backupMasters_.add(index, value);
onChanged();
} else {
backupMastersBuilder_.addMessage(index, value);
}
return this;
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public Builder addBackupMasters(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (backupMastersBuilder_ == null) {
ensureBackupMastersIsMutable();
backupMasters_.add(builderForValue.build());
onChanged();
} else {
backupMastersBuilder_.addMessage(builderForValue.build());
}
return this;
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public Builder addBackupMasters(
int index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder builderForValue) {
if (backupMastersBuilder_ == null) {
ensureBackupMastersIsMutable();
backupMasters_.add(index, builderForValue.build());
onChanged();
} else {
backupMastersBuilder_.addMessage(index, builderForValue.build());
}
return this;
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public Builder addAllBackupMasters(
java.lang.Iterable<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName> values) {
if (backupMastersBuilder_ == null) {
ensureBackupMastersIsMutable();
super.addAll(values, backupMasters_);
onChanged();
} else {
backupMastersBuilder_.addAllMessages(values);
}
return this;
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public Builder clearBackupMasters() {
if (backupMastersBuilder_ == null) {
backupMasters_ = java.util.Collections.emptyList();
bitField0_ = (bitField0_ & ~0x00000080);
onChanged();
} else {
backupMastersBuilder_.clear();
}
return this;
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public Builder removeBackupMasters(int index) {
if (backupMastersBuilder_ == null) {
ensureBackupMastersIsMutable();
backupMasters_.remove(index);
onChanged();
} else {
backupMastersBuilder_.remove(index);
}
return this;
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder getBackupMastersBuilder(
int index) {
return getBackupMastersFieldBuilder().getBuilder(index);
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder getBackupMastersOrBuilder(
int index) {
if (backupMastersBuilder_ == null) {
return backupMasters_.get(index);
} else {
return backupMastersBuilder_.getMessageOrBuilder(index);
}
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public java.util.List<? extends org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getBackupMastersOrBuilderList() {
if (backupMastersBuilder_ != null) {
return backupMastersBuilder_.getMessageOrBuilderList();
} else {
return java.util.Collections.unmodifiableList(backupMasters_);
}
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addBackupMastersBuilder() {
return getBackupMastersFieldBuilder().addBuilder(
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder addBackupMastersBuilder(
int index) {
return getBackupMastersFieldBuilder().addBuilder(
index, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.getDefaultInstance());
}
/**
* <code>repeated .ServerName backup_masters = 8;</code>
*/
public java.util.List<org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder>
getBackupMastersBuilderList() {
return getBackupMastersFieldBuilder().getBuilderList();
}
private com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>
getBackupMastersFieldBuilder() {
if (backupMastersBuilder_ == null) {
backupMastersBuilder_ = new com.google.protobuf.RepeatedFieldBuilder<
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerName.Builder, org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder>(
backupMasters_,
((bitField0_ & 0x00000080) == 0x00000080),
getParentForChildren(),
isClean());
backupMasters_ = null;
}
return backupMastersBuilder_;
}
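// Editor's note (illustrative, not generated by protoc):
// getBackupMastersOrBuilderList() exposes both committed messages and
// pending builders through the read-only ServerNameOrBuilder view, so the
// list can be inspected without forcing builders to be built.
//
//   for (org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.ServerNameOrBuilder sn :
//       clusterStatusBuilder.getBackupMastersOrBuilderList()) {
//     System.out.println(sn.getHostName());
//   }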
// optional bool balancer_on = 9;
private boolean balancerOn_;
/**
* <code>optional bool balancer_on = 9;</code>
*/
public boolean hasBalancerOn() {
return ((bitField0_ & 0x00000100) == 0x00000100);
}
/**
* <code>optional bool balancer_on = 9;</code>
*/
public boolean getBalancerOn() {
return balancerOn_;
}
/**
* <code>optional bool balancer_on = 9;</code>
*/
public Builder setBalancerOn(boolean value) {
bitField0_ |= 0x00000100;
balancerOn_ = value;
onChanged();
return this;
}
/**
* <code>optional bool balancer_on = 9;</code>
*/
public Builder clearBalancerOn() {
bitField0_ = (bitField0_ & ~0x00000100);
balancerOn_ = false;
onChanged();
return this;
}
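// Editor's note (illustrative, not generated by protoc): balancer_on is an
// optional scalar, so getBalancerOn() returns false both when the flag was
// cleared and when it was never set; callers should consult hasBalancerOn()
// before trusting the value.
//
//   clusterStatusBuilder.setBalancerOn(true);
//   boolean on = clusterStatusBuilder.hasBalancerOn()
//       && clusterStatusBuilder.getBalancerOn();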
// @@protoc_insertion_point(builder_scope:ClusterStatus)
}
static {
defaultInstance = new ClusterStatus(true);
defaultInstance.initFields();
}
// @@protoc_insertion_point(class_scope:ClusterStatus)
}
private static com.google.protobuf.Descriptors.Descriptor
internal_static_RegionState_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_RegionState_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_RegionInTransition_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_RegionInTransition_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_StoreSequenceId_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_StoreSequenceId_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_RegionStoreSequenceIds_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_RegionStoreSequenceIds_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_RegionLoad_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_RegionLoad_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_ReplicationLoadSink_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ReplicationLoadSink_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_ReplicationLoadSource_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ReplicationLoadSource_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_ServerLoad_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ServerLoad_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_LiveServerInfo_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_LiveServerInfo_fieldAccessorTable;
private static com.google.protobuf.Descriptors.Descriptor
internal_static_ClusterStatus_descriptor;
private static
com.google.protobuf.GeneratedMessage.FieldAccessorTable
internal_static_ClusterStatus_fieldAccessorTable;
public static com.google.protobuf.Descriptors.FileDescriptor
getDescriptor() {
return descriptor;
}
private static com.google.protobuf.Descriptors.FileDescriptor
descriptor;
static {
java.lang.String[] descriptorData = {
"\n\023ClusterStatus.proto\032\013HBase.proto\032\017Clus" +
"terId.proto\032\010FS.proto\"\307\002\n\013RegionState\022 \n" +
"\013region_info\030\001 \002(\0132\013.RegionInfo\022!\n\005state" +
"\030\002 \002(\0162\022.RegionState.State\022\r\n\005stamp\030\003 \001(" +
"\004\"\343\001\n\005State\022\013\n\007OFFLINE\020\000\022\020\n\014PENDING_OPEN" +
"\020\001\022\013\n\007OPENING\020\002\022\010\n\004OPEN\020\003\022\021\n\rPENDING_CLO" +
"SE\020\004\022\013\n\007CLOSING\020\005\022\n\n\006CLOSED\020\006\022\r\n\tSPLITTI" +
"NG\020\007\022\t\n\005SPLIT\020\010\022\017\n\013FAILED_OPEN\020\t\022\020\n\014FAIL" +
"ED_CLOSE\020\n\022\013\n\007MERGING\020\013\022\n\n\006MERGED\020\014\022\021\n\rS" +
"PLITTING_NEW\020\r\022\017\n\013MERGING_NEW\020\016\"X\n\022Regio",
"nInTransition\022\036\n\004spec\030\001 \002(\0132\020.RegionSpec" +
"ifier\022\"\n\014region_state\030\002 \002(\0132\014.RegionStat" +
"e\";\n\017StoreSequenceId\022\023\n\013family_name\030\001 \002(" +
"\014\022\023\n\013sequence_id\030\002 \002(\004\"g\n\026RegionStoreSeq" +
"uenceIds\022 \n\030last_flushed_sequence_id\030\001 \002" +
"(\004\022+\n\021store_sequence_id\030\002 \003(\0132\020.StoreSeq" +
"uenceId\"\302\004\n\nRegionLoad\022*\n\020region_specifi" +
"er\030\001 \002(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001" +
"(\r\022\022\n\nstorefiles\030\003 \001(\r\022\"\n\032store_uncompre" +
"ssed_size_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030",
"\005 \001(\r\022\030\n\020memstore_size_MB\030\006 \001(\r\022\037\n\027store" +
"file_index_size_MB\030\007 \001(\r\022\033\n\023read_request" +
"s_count\030\010 \001(\004\022\034\n\024write_requests_count\030\t " +
"\001(\004\022\034\n\024total_compacting_KVs\030\n \001(\004\022\035\n\025cur" +
"rent_compacted_KVs\030\013 \001(\004\022\032\n\022root_index_s" +
"ize_KB\030\014 \001(\r\022\"\n\032total_static_index_size_" +
"KB\030\r \001(\r\022\"\n\032total_static_bloom_size_KB\030\016" +
" \001(\r\022\034\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rda" +
"ta_locality\030\020 \001(\002\022#\n\030last_major_compacti" +
"on_ts\030\021 \001(\004:\0010\0224\n\032store_complete_sequenc",
"e_id\030\022 \003(\0132\020.StoreSequenceId\"T\n\023Replicat" +
"ionLoadSink\022\032\n\022ageOfLastAppliedOp\030\001 \002(\004\022" +
"!\n\031timeStampsOfLastAppliedOp\030\002 \002(\004\"\225\001\n\025R" +
"eplicationLoadSource\022\016\n\006peerID\030\001 \002(\t\022\032\n\022" +
"ageOfLastShippedOp\030\002 \002(\004\022\026\n\016sizeOfLogQue" +
"ue\030\003 \002(\r\022 \n\030timeStampOfLastShippedOp\030\004 \002" +
"(\004\022\026\n\016replicationLag\030\005 \002(\004\"\346\002\n\nServerLoa" +
"d\022\032\n\022number_of_requests\030\001 \001(\004\022 \n\030total_n" +
"umber_of_requests\030\002 \001(\004\022\024\n\014used_heap_MB\030" +
"\003 \001(\r\022\023\n\013max_heap_MB\030\004 \001(\r\022!\n\014region_loa",
"ds\030\005 \003(\0132\013.RegionLoad\022\"\n\014coprocessors\030\006 " +
"\003(\0132\014.Coprocessor\022\031\n\021report_start_time\030\007" +
" \001(\004\022\027\n\017report_end_time\030\010 \001(\004\022\030\n\020info_se" +
"rver_port\030\t \001(\r\022.\n\016replLoadSource\030\n \003(\0132" +
"\026.ReplicationLoadSource\022*\n\014replLoadSink\030" +
"\013 \001(\0132\024.ReplicationLoadSink\"O\n\016LiveServe" +
"rInfo\022\033\n\006server\030\001 \002(\0132\013.ServerName\022 \n\013se" +
"rver_load\030\002 \002(\0132\013.ServerLoad\"\340\002\n\rCluster" +
"Status\022/\n\rhbase_version\030\001 \001(\0132\030.HBaseVer" +
"sionFileContent\022%\n\014live_servers\030\002 \003(\0132\017.",
"LiveServerInfo\022!\n\014dead_servers\030\003 \003(\0132\013.S" +
"erverName\0222\n\025regions_in_transition\030\004 \003(\013" +
"2\023.RegionInTransition\022\036\n\ncluster_id\030\005 \001(" +
"\0132\n.ClusterId\022)\n\023master_coprocessors\030\006 \003" +
"(\0132\014.Coprocessor\022\033\n\006master\030\007 \001(\0132\013.Serve" +
"rName\022#\n\016backup_masters\030\010 \003(\0132\013.ServerNa" +
"me\022\023\n\013balancer_on\030\t \001(\010BF\n*org.apache.ha" +
"doop.hbase.protobuf.generatedB\023ClusterSt" +
"atusProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
public com.google.protobuf.ExtensionRegistry assignDescriptors(
com.google.protobuf.Descriptors.FileDescriptor root) {
descriptor = root;
internal_static_RegionState_descriptor =
getDescriptor().getMessageTypes().get(0);
internal_static_RegionState_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionState_descriptor,
new java.lang.String[] { "RegionInfo", "State", "Stamp", });
internal_static_RegionInTransition_descriptor =
getDescriptor().getMessageTypes().get(1);
internal_static_RegionInTransition_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionInTransition_descriptor,
new java.lang.String[] { "Spec", "RegionState", });
internal_static_StoreSequenceId_descriptor =
getDescriptor().getMessageTypes().get(2);
internal_static_StoreSequenceId_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_StoreSequenceId_descriptor,
new java.lang.String[] { "FamilyName", "SequenceId", });
internal_static_RegionStoreSequenceIds_descriptor =
getDescriptor().getMessageTypes().get(3);
internal_static_RegionStoreSequenceIds_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionStoreSequenceIds_descriptor,
new java.lang.String[] { "LastFlushedSequenceId", "StoreSequenceId", });
internal_static_RegionLoad_descriptor =
getDescriptor().getMessageTypes().get(4);
internal_static_RegionLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_RegionLoad_descriptor,
new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "LastMajorCompactionTs", "StoreCompleteSequenceId", });
internal_static_ReplicationLoadSink_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_ReplicationLoadSink_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ReplicationLoadSink_descriptor,
new java.lang.String[] { "AgeOfLastAppliedOp", "TimeStampsOfLastAppliedOp", });
internal_static_ReplicationLoadSource_descriptor =
getDescriptor().getMessageTypes().get(6);
internal_static_ReplicationLoadSource_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ReplicationLoadSource_descriptor,
new java.lang.String[] { "PeerID", "AgeOfLastShippedOp", "SizeOfLogQueue", "TimeStampOfLastShippedOp", "ReplicationLag", });
internal_static_ServerLoad_descriptor =
getDescriptor().getMessageTypes().get(7);
internal_static_ServerLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ServerLoad_descriptor,
new java.lang.String[] { "NumberOfRequests", "TotalNumberOfRequests", "UsedHeapMB", "MaxHeapMB", "RegionLoads", "Coprocessors", "ReportStartTime", "ReportEndTime", "InfoServerPort", "ReplLoadSource", "ReplLoadSink", });
internal_static_LiveServerInfo_descriptor =
getDescriptor().getMessageTypes().get(8);
internal_static_LiveServerInfo_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_LiveServerInfo_descriptor,
new java.lang.String[] { "Server", "ServerLoad", });
internal_static_ClusterStatus_descriptor =
getDescriptor().getMessageTypes().get(9);
internal_static_ClusterStatus_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_ClusterStatus_descriptor,
new java.lang.String[] { "HbaseVersion", "LiveServers", "DeadServers", "RegionsInTransition", "ClusterId", "MasterCoprocessors", "Master", "BackupMasters", "BalancerOn", });
return null;
}
};
com.google.protobuf.Descriptors.FileDescriptor
.internalBuildGeneratedFileFrom(descriptorData,
new com.google.protobuf.Descriptors.FileDescriptor[] {
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.getDescriptor(),
org.apache.hadoop.hbase.protobuf.generated.ClusterIdProtos.getDescriptor(),
org.apache.hadoop.hbase.protobuf.generated.FSProtos.getDescriptor(),
}, assigner);
}
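// Editor's note (illustrative, not generated by protoc): the static block
// above parses the serialized ClusterStatus.proto file descriptor and wires
// up the per-message descriptors and reflective field-accessor tables. The
// parsed metadata can then be inspected at runtime, for example:
//
//   for (com.google.protobuf.Descriptors.Descriptor d :
//       ClusterStatusProtos.getDescriptor().getMessageTypes()) {
//     System.out.println(d.getFullName());  // RegionState, RegionLoad, ...
//   }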
// @@protoc_insertion_point(outer_class_scope)
}