// Fragment — the enclosing method's header is outside this chunk.
// Serializes the delegate record, copies it into one freshly created buffer of
// `bufferSize` bytes, and finishes the buffer.
// NOTE(review): the SerializationResult of copyToBufferBuilder is ignored here —
// presumably the record is known to fit within `bufferSize`; confirm against the caller.
recordSerializer.serializeRecord(delegate); BufferBuilder bufferBuilder = createBufferBuilder(bufferSize); recordSerializer.copyToBufferBuilder(bufferBuilder); bufferBuilder.finish();
/**
 * Resets this serializer's state by delegating to the wrapped sub-serializer.
 */
@Override
public void reset() {
    subSerializer.reset();
}
/**
 * Copies the previously serialized record into buffers of the given target partition,
 * finishing full buffers and requesting new buffer builders until the whole record is
 * written.
 *
 * @param partition the target partition index
 * @throws IOException if finishing or obtaining a buffer builder fails
 * @throws InterruptedException if waiting for a buffer builder is interrupted
 */
private void copyToTargetFile(int partition) throws IOException, InterruptedException { // Reset the read position so the same serialized data can be copied again for this target.
recordSerializer.reset(); BufferBuilder bufferBuilder = getCurrentBufferBuilder(partition); RecordSerializer.SerializationResult result = recordSerializer.copyToBufferBuilder(bufferBuilder); while (result.isFullBuffer()) { // Current buffer is full: finish it so it can be handed downstream.
tryFinishCurrentBufferBuilder(partition); if (result.isFullRecord()) { // The record ended exactly at the buffer boundary; do not request another
// buffer that would otherwise stay empty.
break; } bufferBuilder = getCurrentBufferBuilder(partition); result = recordSerializer.copyToBufferBuilder(bufferBuilder); } // Invariant: a record is always written out completely in one call.
checkState(!recordSerializer.hasSerializedData(), "All data should be written at once"); }
private void emit(T record, int[] targetChannels) throws IOException, InterruptedException { serializer.serializeRecord(record); boolean pruneAfterCopying = false; for (int channel : targetChannels) { if (copyFromSerializerToTargetChannel(channel)) { pruneAfterCopying = true; } } // Make sure we don't hold onto the large intermediate serialization buffer for too long if (pruneAfterCopying) { serializer.prune(); } }
// NOTE(review): truncated fragment — the opening/closing braces are unbalanced in this chunk,
// so the surrounding control flow is not fully visible; do not edit in isolation.
// Handles the PARTIAL_RECORD_MEMORY_SEGMENT_FULL case by continuing the copy (or a flushing
// copy when needFlush is set) into the target buffer. When that yields
// FULL_RECORD_MEMORY_SEGMENT_FULL and the internal buffer still has headroom, it appears to
// pre-serialize the next record into an internal buffer, falling back to a plain copy to the
// target buffer if that serialization throws an IOException — confirm against the full method.
if (subSerializationResult == SerializationResult.PARTIAL_RECORD_MEMORY_SEGMENT_FULL) { subSerializationResult = !needFlush ? subSerializer.copyToBufferBuilder(targetBuffer) : subSerializer.flushToBufferBuilder(targetBuffer); if (subSerializationResult == SerializationResult.FULL_RECORD_MEMORY_SEGMENT_FULL) { if (subSerializationBuffer == null && serializationBuffer.length() < currentInternalBufferLimit) { subSerializationDelegate.setBuffer(nextBufferForInternalSer); try { subSerializer.serializeRecord(subSerializationDelegate); } catch (IOException e) { subSerializer.reset(); subSerializationResult = subSerializer.copyToBufferBuilder(targetBuffer);
/**
 * Serializes the record once and writes the serialized form to each target partition.
 *
 * @param record the record to write
 * @param targetPartitions the partitions to copy the serialized record to
 * @throws IOException if copying to a partition fails
 * @throws InterruptedException if waiting for buffer space is interrupted
 */
@Override
public void add(T record, int[] targetPartitions) throws IOException, InterruptedException {
    serializationDelegate.setInstance(record);
    recordSerializer.serializeRecord(serializationDelegate);
    for (int i = 0; i < targetPartitions.length; i++) {
        copyToTargetFile(targetPartitions[i]);
    }
}
/**
 * Copies the previously serialized record into the current buffer builder, finishing full
 * buffers and requesting new ones until the whole record has been written.
 *
 * @throws IOException if finishing or obtaining a buffer builder fails
 */
private void copyToFile() throws IOException {
    // Reset the read position so the serialized data is copied from the start.
    recordSerializer.reset();
    BufferBuilder bufferBuilder = getCurrentBufferBuilder();
    RecordSerializer.SerializationResult result = recordSerializer.copyToBufferBuilder(bufferBuilder);
    while (result.isFullBuffer()) {
        // Current buffer is full: finish it so it can be handed downstream.
        tryFinishCurrentBufferBuilder();
        if (result.isFullRecord()) {
            // The record ended exactly at the buffer boundary; do not request another
            // buffer that would otherwise stay empty.
            break;
        }
        bufferBuilder = getCurrentBufferBuilder();
        result = recordSerializer.copyToBufferBuilder(bufferBuilder);
    }
    // Consistency fix: sibling implementations (e.g. copyToTargetFile) verify that a record
    // is always written out completely in one call; enforce the same invariant here.
    if (recordSerializer.hasSerializedData()) {
        throw new IllegalStateException("All data should be written at once");
    }
}
/**
 * Returns whether any serialized data is still pending: bytes left in the serialization
 * buffer, a pending sub-serialization buffer, or data held by the sub-serializer.
 *
 * @return {@code true} if serialized data remains to be written, {@code false} otherwise
 */
@Override
public boolean hasSerializedData() {
    if (serializationBuffer.length() > 0) {
        return true;
    }
    return subSerializationBuffer != null || subSerializer.hasSerializedData();
}
}
/**
 * Prunes the intermediate serialization buffer and forwards the prune request to the
 * wrapped sub-serializer.
 */
@Override
public void prune() {
    serializationBuffer.pruneBuffer();
    subSerializer.prune();
}
/**
 * Adds a record to the per-channel serializer and writes out full buffers, requesting a new
 * buffer from the target partition's buffer provider whenever the serializer has none.
 * The per-channel serializer is locked for the duration of the operation.
 *
 * @param record the record to send
 * @param targetChannel the channel whose serializer and partition the record goes to
 * @throws IOException if writing a buffer to the partition fails
 * @throws InterruptedException if blocking on a buffer request is interrupted
 */
private void sendToTarget(T record, int targetChannel) throws IOException, InterruptedException { RecordSerializer<T> serializer = serializers[targetChannel]; synchronized (serializer) { SerializationResult result = serializer.addRecord(record); while (result.isFullBuffer()) { Buffer buffer = serializer.getCurrentBuffer(); if (buffer != null) { // Account the written bytes, then hand the buffer to the partition and
// clear it from the serializer.
numBytesOut.inc(buffer.getSize()); writeAndClearBuffer(buffer, targetChannel, serializer); // If this was a full record, we are done. Not breaking // out of the loop at this point will lead to another // buffer request before breaking out (that would not be // a problem per se, but it can lead to stalls in the // pipeline). if (result.isFullRecord()) { break; } } else { // No current buffer: block until one is available, hand it to the
// serializer, and retry copying the remaining record data.
buffer = targetPartition.getBufferProvider().requestBufferBlocking(); result = serializer.setNextBuffer(buffer); } } } }
/**
 * Recycles any buffer currently held by each per-channel serializer and clears the
 * serializers. Each serializer is cleared even if recycling its buffer throws.
 */
public void clearBuffers() {
    for (RecordSerializer<?> channelSerializer : serializers) {
        synchronized (channelSerializer) {
            try {
                Buffer pending = channelSerializer.getCurrentBuffer();
                if (pending != null) {
                    pending.recycle();
                }
            } finally {
                // Always clear the serializer, even on failure above.
                channelSerializer.clear();
            }
        }
    }
}
// NOTE(review): truncated fragment — braces are unbalanced in this chunk (the while loop and
// enclosing method are not closed); do not edit in isolation.
// Resets the serializer's read position, then copies serialized data into the buffer builder,
// finishing each full buffer (accounting the written bytes via numBytesOut) and continuing
// the copy. The trailing checkState enforces the "all data written at once" invariant seen in
// the sibling implementations — confirm its placement against the full method.
serializer.reset(); SerializationResult result = serializer.copyToBufferBuilder(bufferBuilder); while (result.isFullBuffer()) { numBytesOut.inc(bufferBuilder.finish()); result = serializer.copyToBufferBuilder(bufferBuilder); checkState(!serializer.hasSerializedData(), "All data should be written at once");
private void emit(T record, int[] targetChannels) throws IOException, InterruptedException { serializer.serializeRecord(record); boolean pruneAfterCopying = false; for (int channel : targetChannels) { if (copyFromSerializerToTargetChannel(channel)) { pruneAfterCopying = true; } } // Make sure we don't hold onto the large intermediate serialization buffer for too long if (pruneAfterCopying) { serializer.prune(); } }
/**
 * Serializes the given record via the serialization delegate and copies the result to the
 * output file.
 *
 * @param record the record to write
 * @throws IOException if serialization or the copy to file fails
 */
@Override
public void writeRecord(T record) throws IOException {
    serializationDelegate.setInstance(record);
    recordSerializer.serializeRecord(serializationDelegate);
    copyToFile();
}
/**
 * Flushes any serialized-but-unwritten data through the internal serializer, verifies that
 * nothing remains, and finishes the current buffer builder.
 *
 * @throws IOException if flushing or finishing the buffer builder fails
 */
public void flush() throws IOException {
    boolean hasPendingData = recordSerializer.hasSerializedData();
    if (hasPendingData) {
        flushInternalSerializer();
    }
    // Invariant: flushing must leave no serialized data behind.
    checkState(!recordSerializer.hasSerializedData(), "All data should be written at once");
    tryFinishCurrentBufferBuilder();
}
/**
 * Flushes every channel: writes out any partially filled buffer held by the channel's
 * serializer (accounting the bytes in {@code numBytesOut}) and clears the serializer,
 * even if the write fails.
 *
 * @throws IOException if writing a buffer to the target partition fails
 */
public void flush() throws IOException {
    for (int channel = 0; channel < numChannels; channel++) {
        RecordSerializer<T> channelSerializer = serializers[channel];
        synchronized (channelSerializer) {
            try {
                Buffer pending = channelSerializer.getCurrentBuffer();
                if (pending != null) {
                    numBytesOut.inc(pending.getSize());
                    targetPartition.writeBuffer(pending, channel);
                }
            } finally {
                // Always clear the serializer, even if the write above throws.
                channelSerializer.clear();
            }
        }
    }
}
/**
 * Copies the previously serialized record to the given target channel (or the broadcast
 * buffers), finishing full buffers and requesting new builders until the whole record is
 * written, then optionally flushes.
 *
 * @param targetChannel the channel to copy to (ignored for the broadcast path's buffer
 *     selection — see getBufferBuilder/requestNewBufferBuilder)
 * @param isBroadcast whether this copy targets all channels via broadcast buffers
 * @param flushAlways whether to flush (all channels for broadcast, else the target channel)
 *     after the copy completes
 * @return {@code true} if the caller should prune the intermediate serialization buffer
 *     (set when a record filled at least one buffer completely)
 * @throws IOException if obtaining a buffer builder or flushing fails
 * @throws InterruptedException if waiting for a buffer builder is interrupted
 */
private boolean copyFromSerializerToTargetChannel(int targetChannel, boolean isBroadcast, boolean flushAlways) throws IOException, InterruptedException { // We should reset the initial position of the intermediate serialization data buffer before // copying, so the serialization results can be copied to many different target buffers. serializer.reset(); boolean pruneTriggered = false; BufferBuilder bufferBuilder = getBufferBuilder(targetChannel, isBroadcast); RecordSerializer.SerializationResult result = serializer.copyToBufferBuilder(bufferBuilder); while (result.isFullBuffer()) { // A buffer was filled completely: account it in the metrics and, if the record is
// done, request pruning of the (large) intermediate buffer.
updateMetrics(bufferBuilder, isBroadcast); // If this was a full record, we are done. Not breaking // out of the loop at this point will lead to another // buffer request before breaking out (that would not be // a problem per se, but it can lead to stalls in the pipeline). if (result.isFullRecord()) { pruneTriggered = true; break; } bufferBuilder = requestNewBufferBuilder(targetChannel, isBroadcast); result = serializer.copyToBufferBuilder(bufferBuilder); } // Invariant: a record is always written out completely in one call.
checkState(!serializer.hasSerializedData(), "All data should be written at once"); if (flushAlways) { if (isBroadcast) { flushAll(); } else { flush(targetChannel); } } return pruneTriggered; }
/**
 * Sends a record (used for LatencyMarks) to one randomly chosen target channel, pruning the
 * intermediate serialization buffer afterwards if the copy requested it.
 *
 * @param record the record to emit
 * @throws IOException if copying to the channel fails
 * @throws InterruptedException if waiting for buffer space is interrupted
 */
public void randomEmit(T record) throws IOException, InterruptedException {
    serializer.serializeRecord(record);
    int targetChannel = rng.nextInt(numChannels);
    if (copyFromSerializerToTargetChannel(targetChannel)) {
        serializer.prune();
    }
}
/**
 * Serializes the record via the serialization delegate and writes it to the single given
 * target partition.
 *
 * @param record the record to write
 * @param targetPartition the partition to copy the serialized record to
 * @throws IOException if copying to the partition fails
 * @throws InterruptedException if waiting for buffer space is interrupted
 */
@Override
public void add(T record, int targetPartition) throws IOException, InterruptedException {
    serializationDelegate.setInstance(record);
    recordSerializer.serializeRecord(serializationDelegate);
    copyToTargetFile(targetPartition);
}
/**
 * Finishes writing: flushes any remaining serialized data, verifies nothing is left,
 * finishes the open buffer builder, and closes the underlying stream file writer.
 * Subsequent calls are no-ops.
 *
 * @throws IOException if flushing or closing the file writer fails
 */
@Override
public void finishWriting() throws IOException {
    if (isWritingFinished) {
        // Already finished; nothing to do.
        return;
    }
    if (recordSerializer.hasSerializedData()) {
        flushInternalSerializer();
    }
    // Invariant: all serialized data must have been written out by now.
    checkState(!recordSerializer.hasSerializedData(), "All data should be written at once");
    tryFinishCurrentBufferBuilder();
    streamFileWriter.close();
    isWritingFinished = true;
}