// Avro-generated Builder copy logic: for each schema field present in 'other',
// deep-copy the value into this builder and mark the field as explicitly set.
// NOTE(review): this fragment appears truncated — each if-block opened here is
// never closed within this chunk (it cuts off at the 'hostname' check). Verify
// against the complete generated source before editing.
if (isValidValue(fields()[0], other.taskid)) { this.taskid = data().deepCopy(fields()[0].schema(), other.taskid); fieldSetFlags()[0] = true; if (isValidValue(fields()[1], other.attemptId)) { this.attemptId = data().deepCopy(fields()[1].schema(), other.attemptId); fieldSetFlags()[1] = true; if (isValidValue(fields()[2], other.taskType)) { this.taskType = data().deepCopy(fields()[2].schema(), other.taskType); fieldSetFlags()[2] = true; if (isValidValue(fields()[3], other.taskStatus)) { this.taskStatus = data().deepCopy(fields()[3].schema(), other.taskStatus); fieldSetFlags()[3] = true; if (isValidValue(fields()[4], other.shuffleFinishTime)) { this.shuffleFinishTime = data().deepCopy(fields()[4].schema(), other.shuffleFinishTime); fieldSetFlags()[4] = true; if (isValidValue(fields()[5], other.sortFinishTime)) { this.sortFinishTime = data().deepCopy(fields()[5].schema(), other.sortFinishTime); fieldSetFlags()[5] = true; if (isValidValue(fields()[6], other.finishTime)) { this.finishTime = data().deepCopy(fields()[6].schema(), other.finishTime); fieldSetFlags()[6] = true; if (isValidValue(fields()[7], other.hostname)) {
@Override public ReduceAttemptFinished build() { try { ReduceAttemptFinished record = new ReduceAttemptFinished(); record.taskid = fieldSetFlags()[0] ? this.taskid : (java.lang.CharSequence) defaultValue(fields()[0]); record.attemptId = fieldSetFlags()[1] ? this.attemptId : (java.lang.CharSequence) defaultValue(fields()[1]); record.taskType = fieldSetFlags()[2] ? this.taskType : (java.lang.CharSequence) defaultValue(fields()[2]); record.taskStatus = fieldSetFlags()[3] ? this.taskStatus : (java.lang.CharSequence) defaultValue(fields()[3]); record.shuffleFinishTime = fieldSetFlags()[4] ? this.shuffleFinishTime : (java.lang.Long) defaultValue(fields()[4]); record.sortFinishTime = fieldSetFlags()[5] ? this.sortFinishTime : (java.lang.Long) defaultValue(fields()[5]); record.finishTime = fieldSetFlags()[6] ? this.finishTime : (java.lang.Long) defaultValue(fields()[6]); record.hostname = fieldSetFlags()[7] ? this.hostname : (java.lang.CharSequence) defaultValue(fields()[7]); record.port = fieldSetFlags()[8] ? this.port : (java.lang.Integer) defaultValue(fields()[8]); record.rackname = fieldSetFlags()[9] ? this.rackname : (java.lang.CharSequence) defaultValue(fields()[9]); record.state = fieldSetFlags()[10] ? this.state : (java.lang.CharSequence) defaultValue(fields()[10]); record.counters = fieldSetFlags()[11] ? this.counters : (org.apache.hadoop.mapreduce.jobhistory.JhCounters) defaultValue(fields()[11]); record.clockSplits = fieldSetFlags()[12] ? this.clockSplits : (java.util.List<java.lang.Integer>) defaultValue(fields()[12]); record.cpuUsages = fieldSetFlags()[13] ? this.cpuUsages : (java.util.List<java.lang.Integer>) defaultValue(fields()[13]); record.vMemKbytes = fieldSetFlags()[14] ? this.vMemKbytes : (java.util.List<java.lang.Integer>) defaultValue(fields()[14]); record.physMemKbytes = fieldSetFlags()[15] ? 
this.physMemKbytes : (java.util.List<java.lang.Integer>) defaultValue(fields()[15]); return record; } catch (Exception e) { throw new org.apache.avro.AvroRuntimeException(e); } } }
// Duplicate of the Avro-generated Builder copy logic: deep-copies each field
// that is valid in 'other' and flags it as set.
// NOTE(review): truncated fragment — the nested if-blocks opened on this line
// are not closed within this chunk; confirm against the full generated file.
if (isValidValue(fields()[0], other.taskid)) { this.taskid = data().deepCopy(fields()[0].schema(), other.taskid); fieldSetFlags()[0] = true; if (isValidValue(fields()[1], other.attemptId)) { this.attemptId = data().deepCopy(fields()[1].schema(), other.attemptId); fieldSetFlags()[1] = true; if (isValidValue(fields()[2], other.taskType)) { this.taskType = data().deepCopy(fields()[2].schema(), other.taskType); fieldSetFlags()[2] = true; if (isValidValue(fields()[3], other.taskStatus)) { this.taskStatus = data().deepCopy(fields()[3].schema(), other.taskStatus); fieldSetFlags()[3] = true; if (isValidValue(fields()[4], other.shuffleFinishTime)) { this.shuffleFinishTime = data().deepCopy(fields()[4].schema(), other.shuffleFinishTime); fieldSetFlags()[4] = true; if (isValidValue(fields()[5], other.sortFinishTime)) { this.sortFinishTime = data().deepCopy(fields()[5].schema(), other.sortFinishTime); fieldSetFlags()[5] = true; if (isValidValue(fields()[6], other.finishTime)) { this.finishTime = data().deepCopy(fields()[6].schema(), other.finishTime); fieldSetFlags()[6] = true; if (isValidValue(fields()[7], other.hostname)) {
// Duplicate of the Avro-generated Builder copy logic (fields 0-6 plus the
// start of the 'hostname' check).
// NOTE(review): truncated fragment — braces do not balance within this chunk.
if (isValidValue(fields()[0], other.taskid)) { this.taskid = data().deepCopy(fields()[0].schema(), other.taskid); fieldSetFlags()[0] = true; if (isValidValue(fields()[1], other.attemptId)) { this.attemptId = data().deepCopy(fields()[1].schema(), other.attemptId); fieldSetFlags()[1] = true; if (isValidValue(fields()[2], other.taskType)) { this.taskType = data().deepCopy(fields()[2].schema(), other.taskType); fieldSetFlags()[2] = true; if (isValidValue(fields()[3], other.taskStatus)) { this.taskStatus = data().deepCopy(fields()[3].schema(), other.taskStatus); fieldSetFlags()[3] = true; if (isValidValue(fields()[4], other.shuffleFinishTime)) { this.shuffleFinishTime = data().deepCopy(fields()[4].schema(), other.shuffleFinishTime); fieldSetFlags()[4] = true; if (isValidValue(fields()[5], other.sortFinishTime)) { this.sortFinishTime = data().deepCopy(fields()[5].schema(), other.sortFinishTime); fieldSetFlags()[5] = true; if (isValidValue(fields()[6], other.finishTime)) { this.finishTime = data().deepCopy(fields()[6].schema(), other.finishTime); fieldSetFlags()[6] = true; if (isValidValue(fields()[7], other.hostname)) {
// Duplicate of the Avro-generated Builder copy logic (fields 0-6 plus the
// start of the 'hostname' check).
// NOTE(review): truncated fragment — braces do not balance within this chunk.
if (isValidValue(fields()[0], other.taskid)) { this.taskid = data().deepCopy(fields()[0].schema(), other.taskid); fieldSetFlags()[0] = true; if (isValidValue(fields()[1], other.attemptId)) { this.attemptId = data().deepCopy(fields()[1].schema(), other.attemptId); fieldSetFlags()[1] = true; if (isValidValue(fields()[2], other.taskType)) { this.taskType = data().deepCopy(fields()[2].schema(), other.taskType); fieldSetFlags()[2] = true; if (isValidValue(fields()[3], other.taskStatus)) { this.taskStatus = data().deepCopy(fields()[3].schema(), other.taskStatus); fieldSetFlags()[3] = true; if (isValidValue(fields()[4], other.shuffleFinishTime)) { this.shuffleFinishTime = data().deepCopy(fields()[4].schema(), other.shuffleFinishTime); fieldSetFlags()[4] = true; if (isValidValue(fields()[5], other.sortFinishTime)) { this.sortFinishTime = data().deepCopy(fields()[5].schema(), other.sortFinishTime); fieldSetFlags()[5] = true; if (isValidValue(fields()[6], other.finishTime)) { this.finishTime = data().deepCopy(fields()[6].schema(), other.finishTime); fieldSetFlags()[6] = true; if (isValidValue(fields()[7], other.hostname)) {
@Override public ReduceAttemptFinished build() { try { ReduceAttemptFinished record = new ReduceAttemptFinished(); record.taskid = fieldSetFlags()[0] ? this.taskid : (java.lang.CharSequence) defaultValue(fields()[0]); record.attemptId = fieldSetFlags()[1] ? this.attemptId : (java.lang.CharSequence) defaultValue(fields()[1]); record.taskType = fieldSetFlags()[2] ? this.taskType : (java.lang.CharSequence) defaultValue(fields()[2]); record.taskStatus = fieldSetFlags()[3] ? this.taskStatus : (java.lang.CharSequence) defaultValue(fields()[3]); record.shuffleFinishTime = fieldSetFlags()[4] ? this.shuffleFinishTime : (java.lang.Long) defaultValue(fields()[4]); record.sortFinishTime = fieldSetFlags()[5] ? this.sortFinishTime : (java.lang.Long) defaultValue(fields()[5]); record.finishTime = fieldSetFlags()[6] ? this.finishTime : (java.lang.Long) defaultValue(fields()[6]); record.hostname = fieldSetFlags()[7] ? this.hostname : (java.lang.CharSequence) defaultValue(fields()[7]); record.port = fieldSetFlags()[8] ? this.port : (java.lang.Integer) defaultValue(fields()[8]); record.rackname = fieldSetFlags()[9] ? this.rackname : (java.lang.CharSequence) defaultValue(fields()[9]); record.state = fieldSetFlags()[10] ? this.state : (java.lang.CharSequence) defaultValue(fields()[10]); record.counters = fieldSetFlags()[11] ? this.counters : (org.apache.hadoop.mapreduce.jobhistory.JhCounters) defaultValue(fields()[11]); record.clockSplits = fieldSetFlags()[12] ? this.clockSplits : (java.util.List<java.lang.Integer>) defaultValue(fields()[12]); record.cpuUsages = fieldSetFlags()[13] ? this.cpuUsages : (java.util.List<java.lang.Integer>) defaultValue(fields()[13]); record.vMemKbytes = fieldSetFlags()[14] ? this.vMemKbytes : (java.util.List<java.lang.Integer>) defaultValue(fields()[14]); record.physMemKbytes = fieldSetFlags()[15] ? 
this.physMemKbytes : (java.util.List<java.lang.Integer>) defaultValue(fields()[15]); return record; } catch (Exception e) { throw new io.prestosql.hadoop.$internal.org.apache.avro.AvroRuntimeException(e); } } }
/**
 * Builds a {@link ReduceAttemptFinished} record from this builder's state.
 * For each of the 16 schema fields, the explicitly-set builder value is used
 * when fieldSetFlags() marks it as set; otherwise the schema default from
 * defaultValue(fields()[i]) is used. Any failure is rethrown wrapped in
 * AvroRuntimeException.
 */
@Override public ReduceAttemptFinished build() { try { ReduceAttemptFinished record = new ReduceAttemptFinished(); record.taskid = fieldSetFlags()[0] ? this.taskid : (java.lang.CharSequence) defaultValue(fields()[0]); record.attemptId = fieldSetFlags()[1] ? this.attemptId : (java.lang.CharSequence) defaultValue(fields()[1]); record.taskType = fieldSetFlags()[2] ? this.taskType : (java.lang.CharSequence) defaultValue(fields()[2]); record.taskStatus = fieldSetFlags()[3] ? this.taskStatus : (java.lang.CharSequence) defaultValue(fields()[3]); record.shuffleFinishTime = fieldSetFlags()[4] ? this.shuffleFinishTime : (java.lang.Long) defaultValue(fields()[4]); record.sortFinishTime = fieldSetFlags()[5] ? this.sortFinishTime : (java.lang.Long) defaultValue(fields()[5]); record.finishTime = fieldSetFlags()[6] ? this.finishTime : (java.lang.Long) defaultValue(fields()[6]); record.hostname = fieldSetFlags()[7] ? this.hostname : (java.lang.CharSequence) defaultValue(fields()[7]); record.port = fieldSetFlags()[8] ? this.port : (java.lang.Integer) defaultValue(fields()[8]); record.rackname = fieldSetFlags()[9] ? this.rackname : (java.lang.CharSequence) defaultValue(fields()[9]); record.state = fieldSetFlags()[10] ? this.state : (java.lang.CharSequence) defaultValue(fields()[10]); record.counters = fieldSetFlags()[11] ? this.counters : (org.apache.hadoop.mapreduce.jobhistory.JhCounters) defaultValue(fields()[11]); record.clockSplits = fieldSetFlags()[12] ? this.clockSplits : (java.util.List<java.lang.Integer>) defaultValue(fields()[12]); record.cpuUsages = fieldSetFlags()[13] ? this.cpuUsages : (java.util.List<java.lang.Integer>) defaultValue(fields()[13]); record.vMemKbytes = fieldSetFlags()[14] ? this.vMemKbytes : (java.util.List<java.lang.Integer>) defaultValue(fields()[14]); record.physMemKbytes = fieldSetFlags()[15] ? 
this.physMemKbytes : (java.util.List<java.lang.Integer>) defaultValue(fields()[15]); return record; } catch (Exception e) { throw new org.apache.avro.AvroRuntimeException(e); } } }
@Override public ReduceAttemptFinished build() { try { ReduceAttemptFinished record = new ReduceAttemptFinished(); record.taskid = fieldSetFlags()[0] ? this.taskid : (java.lang.CharSequence) defaultValue(fields()[0]); record.attemptId = fieldSetFlags()[1] ? this.attemptId : (java.lang.CharSequence) defaultValue(fields()[1]); record.taskType = fieldSetFlags()[2] ? this.taskType : (java.lang.CharSequence) defaultValue(fields()[2]); record.taskStatus = fieldSetFlags()[3] ? this.taskStatus : (java.lang.CharSequence) defaultValue(fields()[3]); record.shuffleFinishTime = fieldSetFlags()[4] ? this.shuffleFinishTime : (java.lang.Long) defaultValue(fields()[4]); record.sortFinishTime = fieldSetFlags()[5] ? this.sortFinishTime : (java.lang.Long) defaultValue(fields()[5]); record.finishTime = fieldSetFlags()[6] ? this.finishTime : (java.lang.Long) defaultValue(fields()[6]); record.hostname = fieldSetFlags()[7] ? this.hostname : (java.lang.CharSequence) defaultValue(fields()[7]); record.port = fieldSetFlags()[8] ? this.port : (java.lang.Integer) defaultValue(fields()[8]); record.rackname = fieldSetFlags()[9] ? this.rackname : (java.lang.CharSequence) defaultValue(fields()[9]); record.state = fieldSetFlags()[10] ? this.state : (java.lang.CharSequence) defaultValue(fields()[10]); record.counters = fieldSetFlags()[11] ? this.counters : (org.apache.hadoop.mapreduce.jobhistory.JhCounters) defaultValue(fields()[11]); record.clockSplits = fieldSetFlags()[12] ? this.clockSplits : (java.util.List<java.lang.Integer>) defaultValue(fields()[12]); record.cpuUsages = fieldSetFlags()[13] ? this.cpuUsages : (java.util.List<java.lang.Integer>) defaultValue(fields()[13]); record.vMemKbytes = fieldSetFlags()[14] ? this.vMemKbytes : (java.util.List<java.lang.Integer>) defaultValue(fields()[14]); record.physMemKbytes = fieldSetFlags()[15] ? 
this.physMemKbytes : (java.util.List<java.lang.Integer>) defaultValue(fields()[15]); return record; } catch (Exception e) { throw new org.apache.avro.AvroRuntimeException(e); } } }
/**
 * Creates a new ReduceAttemptFinished RecordBuilder by copying an existing Builder.
 *
 * @param other the builder to copy state from
 * @return a fresh Builder initialized from {@code other}
 */
public static org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder newBuilder(
    org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder other) {
  return new org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder(other);
}
/**
 * Creates a new ReduceAttemptFinished RecordBuilder with no fields set.
 *
 * @return an empty Builder
 */
public static org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder newBuilder() {
  return new org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder();
}
/**
 * Creates a new ReduceAttemptFinished RecordBuilder seeded from another Builder.
 *
 * @param other the source builder whose state is copied
 * @return a new Builder mirroring {@code other}
 */
public static org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder newBuilder(
    org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder other) {
  return new org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder(other);
}
/**
 * Creates a new ReduceAttemptFinished RecordBuilder by copying an existing
 * ReduceAttemptFinished record instance.
 *
 * @param other the record whose field values seed the new builder
 * @return a new Builder initialized from {@code other}
 */
public static org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder newBuilder(
    org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished other) {
  return new org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder(other);
}
/**
 * Returns a new Builder that duplicates the state of an existing Builder.
 *
 * @param other the builder to duplicate
 * @return a new, independent Builder copy
 */
public static org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder newBuilder(
    org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder other) {
  return new org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder(other);
}
/**
 * Returns a brand-new, empty ReduceAttemptFinished Builder.
 *
 * @return a Builder with no fields set
 */
public static org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder newBuilder() {
  return new org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder();
}
/**
 * Returns a new Builder pre-populated from an existing ReduceAttemptFinished
 * record.
 *
 * @param other the record to copy field values from
 * @return a Builder seeded with {@code other}'s values
 */
public static org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder newBuilder(
    org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished other) {
  return new org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder(other);
}
/**
 * Factory for an empty ReduceAttemptFinished Builder.
 *
 * @return a newly allocated Builder with default (unset) state
 */
public static org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder newBuilder() {
  return new org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder();
}
/**
 * Creates an empty RecordBuilder for ReduceAttemptFinished.
 *
 * @return a fresh Builder instance
 */
public static org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder newBuilder() {
  return new org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder();
}
/**
 * Creates a RecordBuilder whose initial state is copied from an existing
 * ReduceAttemptFinished instance.
 *
 * @param other the source record
 * @return a Builder initialized from {@code other}
 */
public static org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder newBuilder(
    org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished other) {
  return new org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder(other);
}
/**
 * Sets the value of the 'physMemKbytes' field (schema field index 15).
 *
 * <p>The value is validated against the field's schema before being stored,
 * and the field is marked as explicitly set.
 *
 * @param value the list to assign to physMemKbytes
 * @return this builder, to allow call chaining
 */
public org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder setPhysMemKbytes(java.util.List<java.lang.Integer> value) {
  validate(fields()[15], value);
  this.physMemKbytes = value;
  fieldSetFlags()[15] = true;
  return this;
}
/**
 * Sets the value of the 'vMemKbytes' field (schema field index 14).
 *
 * <p>Validates {@code value} against the field schema, stores it, and marks
 * the field as set.
 *
 * @param value the list to assign to vMemKbytes
 * @return this builder, to allow call chaining
 */
public org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinished.Builder setVMemKbytes(java.util.List<java.lang.Integer> value) {
  validate(fields()[14], value);
  this.vMemKbytes = value;
  fieldSetFlags()[14] = true;
  return this;
}