/**
 * Fills {@code buffers} with freshly constructed {@code RecBuffer} records,
 * each carrying a random payload of 0 to 31 bytes (exclusive upper bound
 * {@code BUFLEN}) drawn from the shared {@code rand} source.
 *
 * @param buffers array to populate; every slot is overwritten
 */
private static void initBuffers(Record[] buffers) {
  // Exclusive upper bound on the random payload length.
  final int BUFLEN = 32;
  for (int i = 0; i < buffers.length; i++) {
    RecBuffer rec = new RecBuffer();
    byte[] payload = new byte[rand.nextInt(BUFLEN)];
    rand.nextBytes(payload);
    rec.setData(new Buffer(payload));
    buffers[i] = rec;
  }
}
/**
 * Returns a deep copy of this record: the backing
 * {@link org.apache.hadoop.record.Buffer} is cloned, not shared, so the
 * copy's payload is independent of this instance.
 *
 * @return a new {@code RecBuffer} with a cloned data buffer
 * @throws CloneNotSupportedException propagated from the buffer clone
 */
public Object clone() throws CloneNotSupportedException {
  final RecBuffer copy = new RecBuffer();
  copy.data = (org.apache.hadoop.record.Buffer) data.clone();
  return copy;
}
public int hashCode() {
/**
 * Reads this record from {@code _rio_a}. When no record-type-info filter is
 * installed, delegates to the plain field-order reader; otherwise reads
 * field-by-field according to the filter's type infos, consuming the "data"
 * buffer where the filter marks it present and skipping unknown fields so
 * that records written by a different schema version can still be parsed.
 *
 * @param _rio_a   input stream to read from
 * @param _rio_tag tag bracketing this record in the stream
 * @throws java.io.IOException on any read failure
 */
public void deserialize(final org.apache.hadoop.record.RecordInput _rio_a, final String _rio_tag) throws java.io.IOException {
  // Fast path: no filter installed, read fields in the generated order.
  if (null == _rio_rtiFilter) {
    deserializeWithoutFilter(_rio_a, _rio_tag);
    return;
  }
  // if we're here, we need to read based on version info
  _rio_a.startRecord(_rio_tag);
  // Populates _rio_rtiFilterFields: nonzero marks a filter field this
  // record knows how to read.
  setupRtiFields();
  for (int _rio_i=0; _rio_i<_rio_rtiFilter.getFieldTypeInfos().size(); _rio_i++) {
    if (1 == _rio_rtiFilterFields[_rio_i]) {
      // Known field at this position: it is the "data" buffer.
      data=_rio_a.readBuffer("data");
    } else {
      // Unknown field (from another schema version): skip it using the
      // filter's type info so the stream stays aligned.
      java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo> typeInfos = (java.util.ArrayList<org.apache.hadoop.record.meta.FieldTypeInfo>)(_rio_rtiFilter.getFieldTypeInfos());
      org.apache.hadoop.record.meta.Utils.skip(_rio_a, typeInfos.get(_rio_i).getFieldID(), typeInfos.get(_rio_i).getTypeID());
    }
  }
  _rio_a.endRecord(_rio_tag);
}
public int compareTo (final Object _rio_peer_) throws ClassCastException {
// NOTE(review): this span looks like extracted fragments of test code — it
// starts mid-method (random, writer, key are declared out of view) and ends
// mid-statement, so surrounding context is missing; verify against the full
// file before editing.

// Build a RecBuffer carrying a random payload of 0-9 bytes and append it
// to the sequence-file writer under the current key.
byte[] data = new byte[random.nextInt(10)];
random.nextBytes(data);
RecBuffer value = new RecBuffer();
value.setData(new Buffer(data));
writer.append(key, value);

// Presumably the start of a separate test method: key and value are
// re-declared here, so a method boundary likely falls between these
// fragments — TODO confirm. Constructs the input format and fresh
// key/value records, then iterates over candidate split counts.
new SequenceFileInputFormat<RecInt, RecBuffer>();
RecInt key = new RecInt();
RecBuffer value = new RecBuffer();
for (int i = 0; i < 3; i++) {
  int numSplits =