/**
 * {@inheritDoc}
 * @see parquet.column.primitive.PrimitiveColumnWriter#getBufferedSize()
 */
@Override
public long getBufferedSize() {
  return out.size();
}
@Override
public long getBufferedSize() {
  return arrayOut.size();
}

@Override
public long size() {
  return arrayOut.size();
}
/**
 * {@inheritDoc}
 * @see parquet.column.values.ValuesWriter#getBufferedSize()
 */
@Override
public long getBufferedSize() {
  return out.size();
}
public int getMemSize() {
  // baos reference:                    8 bytes
  // currentByte + currentBytePosition: 8 bytes
  // the size of baos:
  //   count: 4 bytes (rounded to 8)
  //   buf:   12 bytes (8 ptr + 4 length), should technically be rounded to 8
  //          depending on buffer size
  return 32 + baos.size();
}

public int getMemSize() {
  // baos reference:                    8 bytes
  // currentByte + currentBytePosition: 8 bytes
  // the size of baos:
  //   count: 4 bytes (rounded to 8)
  //   buf:   12 bytes (8 ptr + 4 length), should technically be rounded to 8
  //          depending on buffer size
  return 32 + (int) baos.size();
}
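// A minimal, self-contained sketch of the accounting above, under invented
// names (BitWriterMemSizeSketch and its fields are illustrative, not
// parquet's actual writer). It shows the shape of the estimate: a fixed
// per-object overhead plus the number of bytes currently buffered.
import java.io.ByteArrayOutputStream;

class BitWriterMemSizeSketch {
  private final ByteArrayOutputStream baos = new ByteArrayOutputStream();
  private int currentByte;         // bits accumulated, not yet pushed to baos
  private int currentBytePosition; // next free bit in currentByte

  // 32 = the fixed overhead (references, cursor fields, rounded headers)
  // estimated in the comments above.
  public int getMemSize() {
    return 32 + baos.size();
  }
}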
@Override
public long size() {
  return arrayOut.size();
}

@Override
public long getMemSize() {
  return buf.size();
}

@Override
public long getBufferedSize() {
  return arrayOut.size();
}

@Override
public long getBufferedSize() {
  return out.size();
}

@Override
public long getBufferedSize() {
  return baos.size();
}

@Override
public long getBufferedSize() {
  return arrayOut.size();
}

public long getBufferedSize() {
  return baos.size();
}
@Override
public long getBufferedSize() {
  // Values are buffered in two places: the length stream and the content stream.
  return lengthWriter.getBufferedSize() + arrayOut.size();
}
@Override
public BytesInput getBytes() {
  try {
    // Flush the last, possibly partial, byte held by the bit packer.
    bitPackingWriter.finish();
  } catch (IOException e) {
    throw new ParquetEncodingException("could not write page", e);
  }
  if (Log.DEBUG) LOG.debug("writing a buffer of size " + out.size());
  return BytesInput.from(out);
}
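// Why finish() must run before the buffer is handed out: a bit-packing
// writer keeps a partially filled byte in memory, and only finalization pads
// and flushes it. A tiny illustrative packer (all names invented, not
// parquet's actual BitPackingWriter):
import java.io.ByteArrayOutputStream;

class TinyBitPacker {
  private final ByteArrayOutputStream out = new ByteArrayOutputStream();
  private int current;    // bits accumulated so far, MSB-first
  private int filledBits; // how many of the 8 bits are used

  void writeBit(boolean bit) {
    current = (current << 1) | (bit ? 1 : 0);
    if (++filledBits == 8) { // a full byte: push it to the buffer
      out.write(current);
      current = 0;
      filledBits = 0;
    }
  }

  // Pads the trailing partial byte with zeros and flushes it; without this,
  // up to 7 trailing bits would be silently dropped.
  void finish() {
    if (filledBits > 0) {
      out.write(current << (8 - filledBits));
      current = 0;
      filledBits = 0;
    }
  }

  byte[] toByteArray() {
    return out.toByteArray();
  }
}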
@Override
public BytesInput getBytes() {
  try {
    out.flush();
  } catch (IOException e) {
    throw new ParquetEncodingException("could not write page", e);
  }
  if (Log.DEBUG) LOG.debug("writing a buffer of size " + arrayOut.size());
  return BytesInput.from(arrayOut);
}

@Override
public BytesInput getBytes() {
  try {
    out.flush();
  } catch (IOException e) {
    throw new ParquetEncodingException("could not write page", e);
  }
  if (Log.DEBUG) LOG.debug("writing a buffer of size " + arrayOut.size());
  return BytesInput.from(arrayOut);
}

@Override
public BytesInput getBytes() {
  try {
    out.flush();
  } catch (IOException e) {
    throw new ParquetEncodingException("could not write page", e);
  }
  if (Log.DEBUG) LOG.debug("writing a buffer of size " + arrayOut.size());
  return BytesInput.from(arrayOut);
}
public void writeToFileWriter(ParquetFileWriter writer) throws IOException {
  writer.startColumn(path, totalValueCount, compressor.getCodecName());
  if (dictionaryPage != null) {
    writer.writeDictionaryPage(dictionaryPage);
    encodings.add(dictionaryPage.getEncoding());
  }
  writer.writeDataPages(BytesInput.from(buf), uncompressedLength, compressedLength,
      totalStatistics, new ArrayList<Encoding>(encodings));
  writer.endColumn();
  if (INFO) {
    LOG.info(String.format(
        "written %,dB for %s: %,d values, %,dB raw, %,dB comp, %d pages, encodings: %s",
        buf.size(), path, totalValueCount, uncompressedLength, compressedLength, pageCount, encodings)
        + (dictionaryPage != null
            ? String.format(", dic { %,d entries, %,dB raw, %,dB comp}",
                dictionaryPage.getDictionarySize(),
                dictionaryPage.getUncompressedSize(),
                dictionaryPage.getBytes().size()) // compressed dictionary bytes, not the entry count again
            : ""));
  }
  encodings.clear();
  pageCount = 0;
}
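// Hypothetical call sequence around writeToFileWriter() above, sketching
// where it sits in the file-writing lifecycle. start(), startBlock(),
// endBlock(), and end() are real ParquetFileWriter methods; ColumnWriteSource
// and flushOneRowGroup are invented for the sketch.
import java.io.IOException;
import java.util.Map;
import parquet.hadoop.ParquetFileWriter;

interface ColumnWriteSource {
  void writeToFileWriter(ParquetFileWriter writer) throws IOException;
}

class RowGroupFlushSketch {
  static void flushOneRowGroup(ParquetFileWriter writer, ColumnWriteSource columns,
      long recordCount, Map<String, String> extraMetaData) throws IOException {
    writer.start();                    // writes the magic bytes
    writer.startBlock(recordCount);    // opens one row group
    columns.writeToFileWriter(writer); // the method above: one column chunk per column
    writer.endBlock();                 // closes the row group
    writer.end(extraMetaData);         // writes the footer
  }
}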
@Override
public BytesInput getBytes() {
  try {
    out.flush();
  } catch (IOException e) {
    throw new ParquetEncodingException("could not write page", e);
  }
  if (Log.DEBUG) LOG.debug("writing a buffer of size " + arrayOut.size());
  // The page starts with the encoded lengths, followed by the content bytes.
  return BytesInput.concat(lengthWriter.getBytes(), BytesInput.from(arrayOut));
}
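// Hypothetical page-flush loop showing how the two accessors above work
// together: getBufferedSize() is the cheap estimate consulted per value,
// while getBytes() materializes the page (lengths first, then the content).
// PageSink, writePage, and maybeFlush are invented; ValuesWriter's
// getBufferedSize()/getBytes()/reset() are its real methods.
import java.io.IOException;
import parquet.bytes.BytesInput;
import parquet.column.values.ValuesWriter;

class PageFlushSketch {
  interface PageSink { // invented stand-in for the page store
    void writePage(BytesInput page) throws IOException;
  }

  static void maybeFlush(ValuesWriter valuesWriter, PageSink sink, long threshold)
      throws IOException {
    if (valuesWriter.getBufferedSize() >= threshold) {
      BytesInput page = valuesWriter.getBytes(); // for the writer above: lengths, then content
      sink.writePage(page);
      valuesWriter.reset(); // clears both buffers for the next page
    }
  }
}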