/**
 * Resets the <code>count</code> field of this byte array output
 * stream to zero, so that all currently accumulated output in the
 * output stream is discarded. The output stream can be used again,
 * reusing the already allocated buffer space.
 *
 * @see java.io.ByteArrayInputStream#count
 */
public void reset() {
    data.reset();
}
/**
 * Clears this pool: marks every slot as free and resets each pooled
 * {@link BytesArray} so its buffer space can be reused.
 */
public void reset() {
    inUse = 0;
    for (BytesArray slot : pool) {
        slot.reset();
    }
}
/**
 * Empties this container: discards all entries, resets the backing data
 * buffer and zeroes the bookkeeping counters.
 */
public void reset() {
    entries.clear();
    data.reset();
    maxEntries = 0;
    size = 0;
}
/**
 * Serializes the given object to JSON ahead of the main write phase and runs the
 * configured extractors over the produced bytes.
 *
 * @param object  the document to serialize; must not be null
 * @param storage scratch buffer owned by the caller; for upserts the JSON is written
 *                directly into it, otherwise a reusable internal scratch pad is used
 * @return the storage buffer passed in (returned regardless of which buffer was written)
 */
@Override
protected Object preProcess(Object object, BytesArray storage) {
    // serialize the json early on and copy it to storage
    Assert.notNull(object, "Empty/null JSON document given...");
    BytesArray ba = null;
    // upserts serialize straight into the caller-supplied storage; every other
    // operation reuses the internal scratch pad (reset before each use)
    if (ConfigurationOptions.ES_OPERATION_UPSERT.equals(settings.getOperation())) {
        ba = storage;
    }
    else {
        scratchPad.reset();
        ba = scratchPad;
    }
    // write the doc to a temporary space
    jsonWriter.convert(object, ba);
    if (log.isTraceEnabled()) {
        log.trace(String.format("About to extract information from [%s]", ba));
    }
    // pull field values (ids, routing, etc.) out of the serialized JSON
    jsonExtractors.process(ba);
    return storage;
}
}
/**
 * Opens this in-memory resource for writing: marks it as existing, discards any
 * previously accumulated bytes and returns a stream that appends into
 * {@code fileBytes}.
 *
 * @return a fresh output stream backed by this object's byte buffer
 * @throws IOException declared by the overridden contract; not thrown by this body
 */
@Override
protected OutputStream openWrite() throws IOException {
    this.exists = true;
    this.fileBytes.reset();
    return new FastByteArrayOutputStream(fileBytes);
}
/**
 * Resets the shared byte buffer and initializes {@code data} with an equivalent
 * test document: either a JSON string or a map with the same fields, depending
 * on the {@code jsonInput} flag.
 */
@Before
public void prepare() {
    ba.reset();
    if (!jsonInput) {
        // parameterized types instead of raw Map/LinkedHashMap;
        // LinkedHashMap keeps field order stable across runs
        Map<String, Object> map = new LinkedHashMap<>();
        map.put("n", 1);
        map.put("s", "v");
        data = map;
    }
    else {
        data = "{\"n\":1,\"s\":\"v\"}";
    }
}
/**
 * Serializes a Hive row directly to JSON bytes and exposes them through the
 * reusable {@code result} Writable, skipping the intermediate Writable
 * conversion that a typical SerDe would perform.
 *
 * @param data          the Hive row object to serialize
 * @param objInspector  inspector describing the row's structure
 * @return the reusable result Writable whose content is the serialized bytes
 * @throws SerDeException declared by the SerDe contract
 */
@Override
public Writable serialize(Object data, ObjectInspector objInspector) throws SerDeException {
    lazyInitializeWrite();
    // serialize the type directly to json (to avoid converting to Writable and then serializing)
    scratchPad.reset();
    hiveType.setObjectInspector(objInspector);
    hiveType.setObject(data);
    // We use the command directly instead of the bulk entry writer since there is no close() method on SerDes.
    // See FileSinkOperator#process() for more info of how this is used with the output format.
    command.write(hiveType).copyTo(scratchPad);
    result.setContent(scratchPad);
    return result;
}
/**
 * Serializes the given object into the reusable {@code ref}, surrounding the
 * serialized payload with the before/after templates. Statement order matters:
 * the before-template, the object bytes and the after-template are appended to
 * {@code ref} in sequence.
 *
 * @param object the object to serialize
 * @return the reusable BytesRef aggregating template and object bytes
 */
@Override
public BytesRef write(Object object) {
    ref.reset();
    scratchPad.reset();
    Object processed = preProcess(object, scratchPad);
    // write before object
    writeTemplate(beforeObject, processed);
    // write object
    doWriteObject(processed, scratchPad, valueWriter);
    ref.add(scratchPad);
    // write after object
    writeTemplate(afterObject, processed);
    return ref;
}
// Drain the iterator: the reusable entry buffer is reset before each key/value pair.
// NOTE(review): fragment — the loop body continues beyond this excerpt; the
// @SuppressWarnings presumably guards an unchecked cast of kv elements — confirm.
while (sq.hasNext()) {
    hasData = true;
    entry.reset();
    Object[] kv = sq.next();
    @SuppressWarnings("unchecked")
/**
 * Resets the <code>count</code> field of this byte array output
 * stream to zero, so that all currently accumulated output in the
 * output stream is discarded. The output stream can be used again,
 * reusing the already allocated buffer space.
 *
 * @see java.io.ByteArrayInputStream#count
 */
public void reset() {
    data.reset();
}
/**
 * Clears this pool: marks every slot as free and resets each pooled
 * {@link BytesArray} so its buffer space can be reused.
 */
public void reset() {
    inUse = 0;
    for (BytesArray slot : pool) {
        slot.reset();
    }
}
/**
 * Resets the <code>count</code> field of this byte array output
 * stream to zero, so that all currently accumulated output in the
 * output stream is discarded. The output stream can be used again,
 * reusing the already allocated buffer space.
 *
 * @see java.io.ByteArrayInputStream#count
 */
public void reset() {
    data.reset();
}
/**
 * Resets the <code>count</code> field of this byte array output
 * stream to zero, so that all currently accumulated output in the
 * output stream is discarded. The output stream can be used again,
 * reusing the already allocated buffer space.
 *
 * @see java.io.ByteArrayInputStream#count
 */
public void reset() {
    data.reset();
}
/**
 * Clears this pool: marks every slot as free and resets each pooled
 * {@link BytesArray} so its buffer space can be reused.
 */
public void reset() {
    inUse = 0;
    for (BytesArray slot : pool) {
        slot.reset();
    }
}
/**
 * Clears this pool: marks every slot as free and resets each pooled
 * {@link BytesArray} so its buffer space can be reused.
 */
public void reset() {
    inUse = 0;
    for (BytesArray slot : pool) {
        slot.reset();
    }
}
/**
 * Empties this container: discards all entries, resets the backing data
 * buffer and zeroes the bookkeeping counters.
 */
public void reset() {
    entries.clear();
    data.reset();
    maxEntries = 0;
    size = 0;
}
/**
 * Empties this container: discards all entries, resets the backing data
 * buffer and zeroes the bookkeeping counters.
 */
public void reset() {
    entries.clear();
    data.reset();
    maxEntries = 0;
    size = 0;
}
/**
 * Empties this container: discards all entries, resets the backing data
 * buffer and zeroes the bookkeeping counters.
 */
public void reset() {
    entries.clear();
    data.reset();
    maxEntries = 0;
    size = 0;
}
/**
 * Empties this container: discards all entries, resets the backing data
 * buffer and zeroes the bookkeeping counters.
 */
public void reset() {
    entries.clear();
    data.reset();
    maxEntries = 0;
    size = 0;
}