/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// File generated by hadoop record compiler. Do not edit.
package org.apache.hadoop.chukwa;
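/**
 * Key for entries in Chukwa's archive SequenceFiles. A chunk of collected
 * data is keyed by its time partition, data type, stream name, and sequence
 * id; the nested {@link Comparator} sorts serialized keys without
 * deserializing them.
 *
 * <p>A minimal usage sketch (the field values are illustrative, and it
 * assumes the caller already has an open {@code java.io.DataOutput} named
 * {@code out}):
 *
 * <pre>
 * ChukwaArchiveKey key = new ChukwaArchiveKey(3600000L, "SysLog",
 *     "host1/var/log/messages", 42L);
 * key.serialize(org.apache.hadoop.record.BinaryRecordOutput.get(out), "key");
 * </pre>
 */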
public class ChukwaArchiveKey extends org.apache.hadoop.record.Record {
private static final org.apache.hadoop.record.meta.RecordTypeInfo _rio_recTypeInfo;
private static org.apache.hadoop.record.meta.RecordTypeInfo _rio_rtiFilter;
private static int[] _rio_rtiFilterFields;
static {
_rio_recTypeInfo = new org.apache.hadoop.record.meta.RecordTypeInfo(
"ChukwaArchiveKey");
_rio_recTypeInfo.addField("timePartition",
org.apache.hadoop.record.meta.TypeID.LongTypeID);
_rio_recTypeInfo.addField("dataType",
org.apache.hadoop.record.meta.TypeID.StringTypeID);
_rio_recTypeInfo.addField("streamName",
org.apache.hadoop.record.meta.TypeID.StringTypeID);
_rio_recTypeInfo.addField("seqId",
org.apache.hadoop.record.meta.TypeID.LongTypeID);
}
private long timePartition;
private String dataType;
private String streamName;
private long seqId;
public ChukwaArchiveKey() {
}
public ChukwaArchiveKey(final long timePartition, final String dataType,
final String streamName, final long seqId) {
this.timePartition = timePartition;
this.dataType = dataType;
this.streamName = streamName;
this.seqId = seqId;
}
public static org.apache.hadoop.record.meta.RecordTypeInfo getTypeInfo() {
return _rio_recTypeInfo;
}
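  /**
   * Installs the writer's {@code RecordTypeInfo} as a filter; a subsequent
   * {@link #deserialize} then reads fields in the writer's layout, skipping
   * any this version does not know. Clears the cached field map so it is
   * rebuilt on next use.
   */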
public static void setTypeFilter(
org.apache.hadoop.record.meta.RecordTypeInfo rti) {
if (null == rti)
return;
_rio_rtiFilter = rti;
_rio_rtiFilterFields = null;
}
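  /**
   * Builds {@code _rio_rtiFilterFields}: for each field in the filter, the
   * 1-based index of the matching field in this record's own type info, or 0
   * if this version of the record has no such field.
   */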
private static void setupRtiFields() {
if (null == _rio_rtiFilter)
return;
// we may already have done this
if (null != _rio_rtiFilterFields)
return;
int _rio_i, _rio_j;
_rio_rtiFilterFields = new int[_rio_rtiFilter.getFieldTypeInfos().size()];
for (_rio_i = 0; _rio_i < _rio_rtiFilterFields.length; _rio_i++) {
_rio_rtiFilterFields[_rio_i] = 0;
}
java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_itFilter = _rio_rtiFilter
.getFieldTypeInfos().iterator();
_rio_i = 0;
while (_rio_itFilter.hasNext()) {
org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfoFilter = _rio_itFilter
.next();
java.util.Iterator<org.apache.hadoop.record.meta.FieldTypeInfo> _rio_it = _rio_recTypeInfo
.getFieldTypeInfos().iterator();
_rio_j = 1;
while (_rio_it.hasNext()) {
org.apache.hadoop.record.meta.FieldTypeInfo _rio_tInfo = _rio_it.next();
if (_rio_tInfo.equals(_rio_tInfoFilter)) {
_rio_rtiFilterFields[_rio_i] = _rio_j;
break;
}
_rio_j++;
}
_rio_i++;
}
}
public long getTimePartition() {
return timePartition;
}
public void setTimePartition(final long timePartition) {
this.timePartition = timePartition;
}
public String getDataType() {
return dataType;
}
public void setDataType(final String dataType) {
this.dataType = dataType;
}
public String getStreamName() {
return streamName;
}
public void setStreamName(final String streamName) {
this.streamName = streamName;
}
public long getSeqId() {
return seqId;
}
public void setSeqId(final long seqId) {
this.seqId = seqId;
}
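  /** Writes the four fields in declaration order under {@code _rio_tag}. */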
public void serialize(final org.apache.hadoop.record.RecordOutput _rio_a,
final String _rio_tag) throws java.io.IOException {
_rio_a.startRecord(this, _rio_tag);
_rio_a.writeLong(timePartition, "timePartition");
_rio_a.writeString(dataType, "dataType");
_rio_a.writeString(streamName, "streamName");
_rio_a.writeLong(seqId, "seqId");
_rio_a.endRecord(this, _rio_tag);
}
private void deserializeWithoutFilter(
final org.apache.hadoop.record.RecordInput _rio_a, final String _rio_tag)
throws java.io.IOException {
_rio_a.startRecord(_rio_tag);
timePartition = _rio_a.readLong("timePartition");
dataType = _rio_a.readString("dataType");
streamName = _rio_a.readString("streamName");
seqId = _rio_a.readLong("seqId");
_rio_a.endRecord(_rio_tag);
}
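  /**
   * Reads the record directly when no type filter is set; otherwise follows
   * the writer's field layout, skipping fields unknown to this version.
   */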
public void deserialize(final org.apache.hadoop.record.RecordInput _rio_a,
final String _rio_tag) throws java.io.IOException {
if (null == _rio_rtiFilter) {
deserializeWithoutFilter(_rio_a, _rio_tag);
return;
}
// if we're here, we need to read based on version info
_rio_a.startRecord(_rio_tag);
setupRtiFields();
for (int _rio_i = 0; _rio_i < _rio_rtiFilter.getFieldTypeInfos().size(); _rio_i++) {
if (1 == _rio_rtiFilterFields[_rio_i]) {
timePartition = _rio_a.readLong("timePartition");
} else if (2 == _rio_rtiFilterFields[_rio_i]) {
dataType = _rio_a.readString("dataType");
} else if (3 == _rio_rtiFilterFields[_rio_i]) {
streamName = _rio_a.readString("streamName");
} else if (4 == _rio_rtiFilterFields[_rio_i]) {
seqId = _rio_a.readLong("seqId");
} else {
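        // Field exists only in the writer's version: skip its bytes so the
        // stream stays aligned.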
java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo> typeInfos = (java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo>) (_rio_rtiFilter
.getFieldTypeInfos());
org.apache.hadoop.record.meta.Utils.skip(_rio_a, typeInfos.get(_rio_i)
.getFieldID(), typeInfos.get(_rio_i).getTypeID());
}
}
_rio_a.endRecord(_rio_tag);
}
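  /** Orders by timePartition, then dataType, then streamName, then seqId. */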
public int compareTo(final Object _rio_peer_) throws ClassCastException {
if (!(_rio_peer_ instanceof ChukwaArchiveKey)) {
throw new ClassCastException("Comparing different types of records.");
}
ChukwaArchiveKey _rio_peer = (ChukwaArchiveKey) _rio_peer_;
int _rio_ret = 0;
_rio_ret = (timePartition == _rio_peer.timePartition) ? 0
: ((timePartition < _rio_peer.timePartition) ? -1 : 1);
if (_rio_ret != 0)
return _rio_ret;
_rio_ret = dataType.compareTo(_rio_peer.dataType);
if (_rio_ret != 0)
return _rio_ret;
_rio_ret = streamName.compareTo(_rio_peer.streamName);
if (_rio_ret != 0)
return _rio_ret;
    _rio_ret = (seqId == _rio_peer.seqId) ? 0 : ((seqId < _rio_peer.seqId) ? -1
        : 1);
    return _rio_ret;
}
public boolean equals(final Object _rio_peer_) {
if (!(_rio_peer_ instanceof ChukwaArchiveKey)) {
return false;
}
if (_rio_peer_ == this) {
return true;
}
ChukwaArchiveKey _rio_peer = (ChukwaArchiveKey) _rio_peer_;
boolean _rio_ret = false;
_rio_ret = (timePartition == _rio_peer.timePartition);
if (!_rio_ret)
return _rio_ret;
_rio_ret = dataType.equals(_rio_peer.dataType);
if (!_rio_ret)
return _rio_ret;
_rio_ret = streamName.equals(_rio_peer.streamName);
if (!_rio_ret)
return _rio_ret;
    return seqId == _rio_peer.seqId;
}
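  /** Field-by-field copy; the String fields are immutable, so sharing them is safe. */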
public Object clone() throws CloneNotSupportedException {
super.clone();
ChukwaArchiveKey _rio_other = new ChukwaArchiveKey();
_rio_other.timePartition = this.timePartition;
_rio_other.dataType = this.dataType;
_rio_other.streamName = this.streamName;
_rio_other.seqId = this.seqId;
return _rio_other;
}
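  /** Standard 17/37 accumulation over the same four fields equals() checks. */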
public int hashCode() {
int _rio_result = 17;
int _rio_ret;
_rio_ret = (int) (timePartition ^ (timePartition >>> 32));
_rio_result = 37 * _rio_result + _rio_ret;
_rio_ret = dataType.hashCode();
_rio_result = 37 * _rio_result + _rio_ret;
_rio_ret = streamName.hashCode();
_rio_result = 37 * _rio_result + _rio_ret;
_rio_ret = (int) (seqId ^ (seqId >>> 32));
_rio_result = 37 * _rio_result + _rio_ret;
return _rio_result;
}
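  /**
   * Record DDL signature: one letter per field in declaration order,
   * {@code l} for long and {@code s} for string.
   */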
public static String signature() {
return "LChukwaArchiveKey(lssl)";
}
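  /**
   * Raw bytewise comparator, registered below so the framework can sort
   * serialized keys without instantiating them.
   */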
public static class Comparator extends
org.apache.hadoop.record.RecordComparator {
public Comparator() {
super(ChukwaArchiveKey.class);
}
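    /**
     * Walks one serialized record in {@code b} starting at {@code s} and
     * returns the negative of the number of bytes it occupies.
     */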
static public int slurpRaw(byte[] b, int s, int l) {
try {
int os = s;
{
long i = org.apache.hadoop.record.Utils.readVLong(b, s);
int z = org.apache.hadoop.record.Utils.getVIntSize(i);
s += z;
l -= z;
}
{
int i = org.apache.hadoop.record.Utils.readVInt(b, s);
int z = org.apache.hadoop.record.Utils.getVIntSize(i);
s += (z + i);
l -= (z + i);
}
{
int i = org.apache.hadoop.record.Utils.readVInt(b, s);
int z = org.apache.hadoop.record.Utils.getVIntSize(i);
s += (z + i);
l -= (z + i);
}
{
long i = org.apache.hadoop.record.Utils.readVLong(b, s);
int z = org.apache.hadoop.record.Utils.getVIntSize(i);
s += z;
l -= z;
}
return (os - s);
} catch (java.io.IOException e) {
throw new RuntimeException(e);
}
}
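    /**
     * Field-by-field comparison of two serialized records. Returns -1 if the
     * first is smaller, 0 if it is larger, and the negative of the bytes
     * consumed (at most -4, so never -1) if they are equal; compare() maps
     * this encoding back to the usual convention.
     */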
static public int compareRaw(byte[] b1, int s1, int l1, byte[] b2, int s2,
int l2) {
try {
int os1 = s1;
{
long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);
long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);
if (i1 != i2) {
          return (i1 < i2) ? -1 : 0; // direct compare avoids overflow in i1 - i2
}
int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
s1 += z1;
s2 += z2;
l1 -= z1;
l2 -= z2;
}
{
int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
s1 += z1;
s2 += z2;
l1 -= z1;
l2 -= z2;
int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2,
s2, i2);
if (r1 != 0) {
return (r1 < 0) ? -1 : 0;
}
s1 += i1;
s2 += i2;
l1 -= i1;
        l2 -= i2;
}
{
int i1 = org.apache.hadoop.record.Utils.readVInt(b1, s1);
int i2 = org.apache.hadoop.record.Utils.readVInt(b2, s2);
int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
s1 += z1;
s2 += z2;
l1 -= z1;
l2 -= z2;
int r1 = org.apache.hadoop.record.Utils.compareBytes(b1, s1, i1, b2,
s2, i2);
if (r1 != 0) {
return (r1 < 0) ? -1 : 0;
}
s1 += i1;
s2 += i2;
l1 -= i1;
        l2 -= i2;
}
{
long i1 = org.apache.hadoop.record.Utils.readVLong(b1, s1);
long i2 = org.apache.hadoop.record.Utils.readVLong(b2, s2);
if (i1 != i2) {
          return (i1 < i2) ? -1 : 0; // direct compare avoids overflow in i1 - i2
}
int z1 = org.apache.hadoop.record.Utils.getVIntSize(i1);
int z2 = org.apache.hadoop.record.Utils.getVIntSize(i2);
s1 += z1;
s2 += z2;
l1 -= z1;
l2 -= z2;
}
return (os1 - s1);
} catch (java.io.IOException e) {
throw new RuntimeException(e);
}
}
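    /** Maps compareRaw's encoding back to the standard -1 (less), 0 (equal), 1 (greater). */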
public int compare(byte[] b1, int s1, int l1, byte[] b2, int s2, int l2) {
int ret = compareRaw(b1, s1, l1, b2, s2, l2);
return (ret == -1) ? -1 : ((ret == 0) ? 1 : 0);
}
}
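  // Register the raw comparator for this record type.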
static {
org.apache.hadoop.record.RecordComparator.define(ChukwaArchiveKey.class,
new Comparator());
}
}