/**
 * Serializes the given member into a version-aware temporary stream and
 * writes the result to {@code dos} as a byte array.
 *
 * <p>Fix: the temporary stream is now closed in a {@code finally} block so it
 * is released even when serialization throws (previously {@code close()} was
 * skipped on any exception).
 *
 * @param member the member to serialize
 * @param dos the destination stream; if it is a {@link VersionedDataStream},
 *        its version is used for on-the-wire compatibility
 * @throws IOException if serialization or the write fails
 */
protected static void writeServerMember(DistributedMember member, DataOutputStream dos) throws IOException {
  Version v = Version.CURRENT_GFE;
  if (dos instanceof VersionedDataStream) {
    // honor the peer's version for backward-compatible serialization
    v = ((VersionedDataStream) dos).getVersion();
  }
  HeapDataOutputStream hdos = new HeapDataOutputStream(v);
  try {
    DataSerializer.writeObject(member, hdos);
    DataSerializer.writeByteArray(hdos.toByteArray(), dos);
  } finally {
    hdos.close();
  }
}
/**
 * Writes the given object to this stream as a byte array.
 * The byte array is produced by serializing v. The serialization
 * is done by calling DataSerializer.writeObject.
 */
public void writeAsSerializedByteArray(Object v) throws IOException {
  if (this.ignoreWrites) return; // writes are being discarded; nothing to do
  checkIfWritable();
  // Reserve worst-case room for the array-length header:
  // 1 marker byte (INT_ARRAY_LEN) + a 4-byte int length.
  ensureCapacity(5);
  if (v instanceof HeapDataOutputStream) {
    // Fast path: the payload is already serialized bytes. Write the exact
    // length prefix, then copy the other stream's contents directly.
    HeapDataOutputStream other = (HeapDataOutputStream)v;
    InternalDataSerializer.writeArrayLength(other.size(), this);
    other.sendTo((OutputStream)this);
    other.rewind(); // leave the source stream re-readable for its owner
  } else {
    // Slow path: the length is unknown until after serialization. Skip 5
    // bytes, serialize the object in place, then backpatch the header at
    // the saved position using absolute puts (ensureCapacity(5) above
    // guarantees those 5 bytes live in this buffer).
    ByteBuffer sizeBuf = this.buffer;
    int sizePos = sizeBuf.position();
    sizeBuf.position(sizePos+5);
    final int preArraySize = size();
    DataSerializer.writeObject(v, this);
    // serialized size = growth of this stream during writeObject
    int arraySize = size() - preArraySize;
    sizeBuf.put(sizePos, InternalDataSerializer.INT_ARRAY_LEN);
    sizeBuf.putInt(sizePos+1, arraySize);
  }
}
/**
 * Verifies that writing a byte sequence as two ByteBuffer slices
 * reproduces the original bytes exactly.
 */
public void testWriteByteBuffer() {
  HeapDataOutputStream stream = new HeapDataOutputStream(32, Version.CURRENT);
  byte[] expected = "1234567890qwertyuiopasdfghjklzxcvbnm,./;'".getBytes();
  // split the input across two wrapped slices
  stream.write(ByteBuffer.wrap(expected, 0, 2));
  stream.write(ByteBuffer.wrap(expected, 2, expected.length - 2));
  byte[] produced = stream.toByteArray();
  assertEquals(new String(expected), new String(produced));
}
/**
 * Writes the entire buffered contents of this stream to the given channel,
 * staging each chunk through {@code out}. No-op when the stream is empty.
 */
public final void sendTo(SocketChannel chan, ByteBuffer out) throws IOException {
  finishWriting();
  if (size() == 0) {
    return; // nothing buffered
  }
  if (this.chunks != null) {
    // drain the filled chunks first, in order
    for (ByteBuffer chunk : this.chunks) {
      sendChunkTo(chunk, chan, out);
    }
  }
  // the current (possibly partial) buffer goes last
  sendChunkTo(this.buffer, chan, out);
}
/**
 * Write the contents of this stream to the specified stream using
 * outBuf if a buffer is needed.
 */
public final void sendTo(OutputStream out, ByteBuffer outBuf) throws IOException {
  finishWriting();
  if (this.chunks != null) {
    // drain the filled chunks first, in order
    for (ByteBuffer chunk : this.chunks) {
      sendTo(out, outBuf, chunk);
    }
  }
  // the current (possibly partial) buffer goes last
  sendTo(out, outBuf, this.buffer);
  flushStream(out, outBuf);
}
private byte[] encryptId(long id, ServerConnection servConn) throws Exception { // deserialize this using handshake keys HeapDataOutputStream hdos = null; try { hdos = new HeapDataOutputStream(Version.CURRENT_GFE); hdos.writeLong(id); return ((HandShake)this.handshake).encryptBytes(hdos.toByteArray()); } finally { hdos.close(); } }
/**
 * Serializes this object's wire form into a fresh stream and
 * returns it as a byte array.
 */
public byte[] toBytes() throws IOException {
  HeapDataOutputStream out = new HeapDataOutputStream(16, Version.CURRENT);
  sendTo(out);
  return out.toByteArray();
}
/**
 * Appends an ADD_CANONICAL_MEMBER_ID record (id + serialized object) to the
 * init file. An IOException is converted to a DiskAccessException; the disk
 * store is notified unless a compaction is in progress.
 */
private void writeCanonicalId(int id, Object object) {
  try {
    HeapDataOutputStream record = new HeapDataOutputStream(32, Version.CURRENT);
    record.write(IFREC_ADD_CANONICAL_MEMBER_ID);
    record.writeInt(id);
    DataSerializer.writeObject(object, record);
    record.write(END_OF_RECORD_ID);
    writeIFRecord(record, true);
  } catch (IOException ex) {
    DiskAccessException dae = new DiskAccessException(
        LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex),
        this.parent);
    if (!this.compactInProgress) {
      this.parent.handleDiskAccessException(dae, true);
    }
    throw dae;
  }
}
private static OffHeapByteSource prepareValueForDelta(Delta delta, MemoryAllocator ma) { HeapDataOutputStream hdos = new HeapDataOutputStream(); final boolean isListOfDeltas; try { if (delta instanceof ListOfDeltas) { List<Delta> deltas = ((ListOfDeltas) delta).getDeltas(); isListOfDeltas = true; // TODO:Asif : use compact int hdos.writeInt(deltas.size()); for (Delta aDelta : deltas) { SerializableDelta sd = (SerializableDelta) aDelta; InternalDataSerializer.invokeToData(sd, hdos); } } else { isListOfDeltas = false; InternalDataSerializer.invokeToData((SerializableDelta)delta, hdos); } } catch (IOException ioe) { throw new GemFireXDRuntimeException(ioe); } byte[] data = hdos.toByteArray(); OffHeapByteSource chunk = (OffHeapByteSource) ma.allocate(data.length, isListOfDeltas ? OffHeapDeltas.TYPE : OffHeapDelta.TYPE); chunk.writeBytes(0, data); return chunk; }
/**
 * Serializes this value directly into {@code outBytes} starting at
 * {@code offset}, without allowing the backing buffer to grow.
 *
 * @return the number of bytes written
 */
@Override
public int writeBytes(byte[] outBytes, int offset, DataTypeDescriptor dtd) {
  if (SanityManager.DEBUG) {
    SanityManager.ASSERT(!isNull(),
        "writeBytes() is not supposed to be called for null values.");
  }
  // wrap the caller's array; expansion beyond it is a hard error
  HeapDataOutputStream target =
      new HeapDataOutputStream(outBytes, offset, outBytes.length - offset);
  target.disallowExpansion(DISALLOW_EXPANSION);
  try {
    toDataForOptimizedResultHolder(target);
  } catch (IOException ioe) {
    throw GemFireXDRuntimeException.newRuntimeException(
        "Exception in UserType.writeBytes.", ioe);
  }
  return target.size();
}
/**
 * Round-trips a BigHolder (BigInteger + BigDecimal) through serialization
 * and asserts the deserialized copy equals the original.
 */
private void doExtensible(String msg) throws IOException, ClassNotFoundException {
  BigInteger intPart = new BigInteger("12345678901234567890");
  BigDecimal decPart = new BigDecimal("1234567890.1234567890");
  BigHolder holder = new BigHolder(intPart, decPart);
  HeapDataOutputStream out = new HeapDataOutputStream(Version.CURRENT);
  DataSerializer.writeObject(holder, out);
  System.out.println(msg + " out.size=" + out.size());
  DataInputStream in =
      new DataInputStream(new ByteArrayInputStream(out.toByteArray()));
  Object roundTripped = DataSerializer.readObject(in);
  assertEquals(holder, roundTripped);
}
private void writeRegionConfig(DiskRegionView drv) { try { int len = estimateByteSize(drv.getPartitionName()); HeapDataOutputStream bb = new HeapDataOutputStream(1+DR_ID_MAX_BYTES +1+1+4+4+4+4+1+1+1+1+8+4+len+4+1+6+1+1, Version.CURRENT); bb.write(IFREC_REGION_CONFIG_ID_75); writeDiskRegionID(bb, drv.getId()); bb.write(drv.getLruAlgorithm()); bb.write(drv.getLruAction()); bb.writeInt(drv.getLruLimit()); bb.writeInt(drv.getConcurrencyLevel()); bb.writeInt(drv.getInitialCapacity()); bb.writeFloat(drv.getLoadFactor()); bb.write((byte)(drv.getStatisticsEnabled()?1:0)); bb.write((byte)(drv.isBucket()?1:0)); final EnumSet<DiskRegionFlag> flags = drv.getFlags(); bb.writeBoolean(flags.contains(DiskRegionFlag.HAS_REDUNDANT_COPY)); bb.writeBoolean(flags.contains(DiskRegionFlag.DEFER_RECOVERY)); bb.writeLong(drv.getUUID()); bb.writeUTF(drv.getPartitionName()); bb.writeInt(drv.getStartingBucketId()); bb.writeBoolean(false); // griddb flag, preserve for backwards compatibility bb.writeUTF(drv.getCompressorClassName() == null ? "" : drv.getCompressorClassName()); bb.writeBoolean(drv.getEnableOffHeapMemory()); bb.writeBoolean(flags.contains(DiskRegionFlag.IS_WITH_VERSIONING)); bb.write(END_OF_RECORD_ID); writeIFRecord(bb, false); // don't do stats for these small records } catch (IOException ex) {
boolean receiverCacheClosed = false; this.outStream = new HeapDataOutputStream(chunkSize, getSender().getVersionObject()); outStream.disallowExpansion(CHUNK_FULL); // sets the mark where rollback occurs on CHUNK_FULL break; this.outStream.reset(); // ready for reuse, assumes sendReply
/**
 * Appends a PR_CREATE record (region name, bucket count, colocation target)
 * to the init file, pre-sizing the stream from the estimated string lengths.
 * An IOException is converted to a DiskAccessException; the disk store is
 * notified unless a compaction is in progress.
 */
private void writePRCreate(String name, PRPersistentConfig config) {
  try {
    int nameLength = estimateByteSize(name);
    String colocatedWith = config.getColocatedWith();
    if (colocatedWith == null) {
      colocatedWith = "";
    }
    int colocatedLength = estimateByteSize(colocatedWith);
    // record id + name + bucket count + colocation target + end marker
    HeapDataOutputStream record =
        new HeapDataOutputStream(1 + nameLength + 4 + colocatedLength + 1, Version.CURRENT);
    record.write(IFREC_PR_CREATE);
    record.writeUTF(name);
    record.writeInt(config.getTotalNumBuckets());
    record.writeUTF(colocatedWith);
    record.write(END_OF_RECORD_ID);
    writeIFRecord(record, false);
  } catch (IOException ex) {
    DiskAccessException dae = new DiskAccessException(
        LocalizedStrings.DiskInitFile_FAILED_INIT_FILE_WRITE_BECAUSE_0.toLocalizedString(ex),
        this.parent);
    if (!this.compactInProgress) {
      this.parent.handleDiskAccessException(dae, true);
    }
    throw dae;
  }
}
HeapDataOutputStream out = new HeapDataOutputStream(); while (bufferIter.hasNext()) { if (abortFlush) { byte[] valueBytes = out.toByteArray(); writer.append(item.getRawKey(), valueBytes); out.clearForReuse();
/**
 * A blob is a serialized Object. This method serializes the object into a
 * blob and returns the byte array that contains the blob.
 */
public static byte[] serializeToBlob(Object obj, HeapDataOutputStream hdos)
    throws IOException {
  final long begin = startSerialization();
  hdos.clearForReuse(); // the caller's stream may hold stale bytes
  DataSerializer.writeObject(obj, hdos);
  final byte[] blob = hdos.toByteArray();
  endSerialization(begin, blob.length);
  return blob;
}
/** * @exception StandardException thrown on failure to convert */ public byte[] getBytes() throws StandardException { // GemStone changes BEGIN if (!isNull()) { HeapDataOutputStream hdos = new HeapDataOutputStream(); try { toDataForOptimizedResultHolder(hdos); } catch (IOException ioe) { throw GemFireXDRuntimeException.newRuntimeException( "Exception in UserType.getBytes.", ioe); } byte[] buffer = new byte[hdos.size()]; hdos.sendTo(buffer, 0); return buffer; } return null; /* (original code) if (! isNull()) if (value instanceof byte[]) return ((byte[])value); return super.getBytes(); */ // GemStone changes END }
// Delegates a byte-range write to the wrapped HeapDataOutputStream.
public void write(byte[] source, int offset, int len) {
  this.hdos.write(source, offset, len);
}
} // closes the enclosing class (declared above this view)
/**
 * Serializes this collection: the element type goes directly to {@code out},
 * followed by a reserved element count and then the elements themselves.
 *
 * <p>Fix: in the struct branch the field arrays were written to {@code out}
 * instead of {@code hdos}, so element data landed on the wire BEFORE the
 * element count reserved in {@code hdos} — breaking the read-side ordering.
 * Struct fields are now written to {@code hdos} like non-struct elements.
 *
 * @param out the destination stream
 * @throws IOException if serialization fails
 */
@Override
public void toData(DataOutput out) throws IOException {
  boolean isStruct = this.collectionType.getElementType().isStructType();
  DataSerializer.writeObject(this.collectionType.getElementType(), out);
  HeapDataOutputStream hdos = new HeapDataOutputStream(1024, null);
  // placeholder for the element count, backpatched after the loop
  LongUpdater lu = hdos.reserveLong();
  Iterator<E> iter = this.iterator();
  int numElements = 0;
  while (iter.hasNext()) {
    E data = iter.next();
    if (isStruct) {
      Object[] fields = ((Struct) data).getFieldValues();
      DataSerializer.writeObjectArray(fields, hdos);
    } else {
      DataSerializer.writeObject(data, hdos);
    }
    ++numElements;
  }
  lu.update(numElements);
  hdos.sendTo(out);
}
/**
 * Returns the encoded size of the current timestamp plus the zig-zag
 * encoded dsid, as variable-length values; returns 0 if encoding fails.
 */
public static int calculateBytesForTSandDSID(int dsid) {
  HeapDataOutputStream scratch = new HeapDataOutputStream(4 + 8, Version.CURRENT);
  final long now = System.currentTimeMillis();
  try {
    writeUnsignedVL(now, scratch);
    writeUnsignedVL(InternalDataSerializer.encodeZigZag64(dsid), scratch);
  } catch (IOException e) {
    // best-effort sizing: report zero on failure
    return 0;
  }
  return scratch.size();
}