/**
 * Writes the V1 global-dictionary index file into {@code dir}.
 *
 * Layout: baseId, maxId, maxValueLength, nValues, bytes-converter class
 * name, slice count, then one slice key per slice.
 *
 * NOTE(review): unlike the V2 writer, only the slice keys are persisted —
 * presumably the V1 slice file names can be derived from the keys by the
 * matching V1 reader; confirm against that reader before changing this.
 */
@Override
public void writeIndexFile(Path dir, GlobalDictMetadata metadata) throws IOException {
    Path indexFile = new Path(dir, V1_INDEX_NAME);
    // Overwrite any existing index file (second create() arg is true).
    try (FSDataOutputStream out = fs.create(indexFile, true)) {
        out.writeInt(metadata.baseId);
        out.writeInt(metadata.maxId);
        out.writeInt(metadata.maxValueLength);
        out.writeInt(metadata.nValues);
        out.writeUTF(metadata.bytesConverter.getClass().getName());
        out.writeInt(metadata.sliceFileMap.size());
        for (Map.Entry<AppendDictSliceKey, String> entry : metadata.sliceFileMap.entrySet()) {
            entry.getKey().write(out);
        }
    }
}
/**
 * Writes the V2 global-dictionary index file into {@code dir}: a
 * minor-version byte, the dictionary header fields, then each slice key
 * paired with its slice file name.
 */
@Override
public void writeIndexFile(Path dir, GlobalDictMetadata metadata) throws IOException {
    final Path target = new Path(dir, V2_INDEX_NAME);
    try (FSDataOutputStream out = fs.create(target, true)) {
        // Version marker first so readers can dispatch on the format.
        out.writeByte(MINOR_VERSION_V1);
        // Dictionary-wide header fields.
        out.writeInt(metadata.baseId);
        out.writeInt(metadata.maxId);
        out.writeInt(metadata.maxValueLength);
        out.writeInt(metadata.nValues);
        out.writeUTF(metadata.bytesConverter.getClass().getName());
        // Slice table: count, then (key, file name) pairs.
        out.writeInt(metadata.sliceFileMap.size());
        for (Map.Entry<AppendDictSliceKey, String> slice : metadata.sliceFileMap.entrySet()) {
            slice.getKey().write(out);
            out.writeUTF(slice.getValue());
        }
    }
}
public synchronized void appendRaw(byte[] keyData, int keyOffset, int keyLength, ValueBytes val) throws IOException { if (keyLength < 0) throw new IOException("negative length keys not allowed: " + keyLength); int valLength = val.getSize(); checkAndWriteSync(); out.writeInt(keyLength+valLength); // total record length out.writeInt(keyLength); // key portion length out.write(keyData, keyOffset, keyLength); // key val.writeUncompressedBytes(out); // value }
/** Append a key/value pair. */ @Override public synchronized void appendRaw(byte[] keyData, int keyOffset, int keyLength, ValueBytes val) throws IOException { if (keyLength < 0) throw new IOException("negative length keys not allowed: " + keyLength); int valLength = val.getSize(); checkAndWriteSync(); // sync out.writeInt(keyLength+valLength); // total record length out.writeInt(keyLength); // key portion length out.write(keyData, keyOffset, keyLength); // 'key' data val.writeCompressedBytes(out); // 'value' data }
/** create a sync point. */ public void sync() throws IOException { if (sync != null && lastSyncPos != out.getPos()) { out.writeInt(SYNC_ESCAPE); // mark the start of the sync out.write(sync); // write sync lastSyncPos = out.getPos(); // update lastSyncPos } }
/**
 * Creates a sync point in the output stream.
 * Skipped when sync markers are disabled ({@code sync == null}) or a
 * marker was already written at the current stream position.
 */
public void sync() throws IOException {
    if (sync != null && lastSyncPos != out.getPos()) {
        out.writeInt(SYNC_ESCAPE);  // mark the start of the sync
        out.write(sync);            // write the sync marker bytes
        lastSyncPos = out.getPos(); // remember the position of this sync
    }
}
/** create a sync point. */ public void sync() throws IOException { if (sync != null && lastSyncPos != out.getPos()) { out.writeInt(SYNC_ESCAPE); // mark the start of the sync out.write(sync); // write sync lastSyncPos = out.getPos(); // update lastSyncPos } }
/**
 * Writes the WAL trailer, then its serialized size, then the trailing
 * magic bytes, and returns the resulting file position.
 *
 * NOTE(review): the size is written AFTER the trailer — presumably so a
 * reader can seek back from the magic at end-of-file to locate the
 * trailer; confirm against the matching reader before reordering.
 */
@Override
protected long writeWALTrailerAndMagic(WALTrailer trailer, byte[] magic) throws IOException {
    trailer.writeTo(output);
    output.writeInt(trailer.getSerializedSize());
    output.write(magic);
    return output.getPos();
}
} // end of enclosing class
public static void writeMmCommitManifest(List<Path> commitPaths, Path specPath, FileSystem fs, String taskId, Long writeId, int stmtId, String unionSuffix, boolean isInsertOverwrite) throws HiveException { if (commitPaths.isEmpty()) { return; } // We assume one FSOP per task (per specPath), so we create it in specPath. Path manifestPath = getManifestDir(specPath, writeId, stmtId, unionSuffix, isInsertOverwrite); manifestPath = new Path(manifestPath, taskId + MANIFEST_EXTENSION); Utilities.FILE_OP_LOGGER.info("Writing manifest to {} with {}", manifestPath, commitPaths); try { // Don't overwrite the manifest... should fail if we have collisions. try (FSDataOutputStream out = fs.create(manifestPath, false)) { if (out == null) { throw new HiveException("Failed to create manifest at " + manifestPath); } out.writeInt(commitPaths.size()); for (Path path : commitPaths) { out.writeUTF(path.toString()); } } } catch (IOException e) { throw new HiveException(e); } }
/**
 * Append a key/value pair.
 *
 * Serializes the key uncompressed and the value through the deflate
 * filter into a shared buffer, then writes the sync marker (if due), the
 * total and key lengths, and the buffered record bytes in one go.
 *
 * @throws IOException if the key/value classes do not match the writer's
 *         configured classes, or the serialized key length is negative
 */
@Override
@SuppressWarnings("unchecked")
public synchronized void append(Object key, Object val) throws IOException {
    if (key.getClass() != keyClass)
        throw new IOException("wrong key class: "+key.getClass().getName() +" is not "+keyClass);
    if (val.getClass() != valClass)
        throw new IOException("wrong value class: "+val.getClass().getName() +" is not "+valClass);

    buffer.reset();

    // Append the 'key' (uncompressed) into the shared buffer.
    keySerializer.serialize(key);
    int keyLength = buffer.getLength();
    if (keyLength < 0)
        throw new IOException("negative length keys not allowed: " + key);

    // Compress 'value' and append it after the key in the same buffer.
    deflateFilter.resetState();
    compressedValSerializer.serialize(val);
    deflateOut.flush();
    deflateFilter.finish();

    // Write the record out.
    checkAndWriteSync();                                // sync marker, if due
    out.writeInt(buffer.getLength());                   // total record length
    out.writeInt(keyLength);                            // key portion length
    out.write(buffer.getData(), 0, buffer.getLength()); // key + compressed value
}
/**
 * Writes a record's key section: sync marker (if due), total record
 * length, key length, then the key itself — compressed when the writer's
 * codec is enabled, raw otherwise.
 *
 * NOTE(review): in both branches a second length int precedes the key
 * bytes (compressed length, or the plain key length repeated) — the
 * reader presumably depends on this layout; confirm before changing.
 */
private void writeKey(KeyBuffer keyBuffer, int recordLen, int keyLength) throws IOException {
    checkAndWriteSync();      // sync marker, if due
    out.writeInt(recordLen);  // total record length
    out.writeInt(keyLength);  // key portion length
    if(this.isCompressed()) {
        // Compress the key into a temporary buffer before writing it out.
        Compressor compressor = CodecPool.getCompressor(codec);
        NonSyncDataOutputBuffer compressionBuffer = new NonSyncDataOutputBuffer();
        CompressionOutputStream deflateFilter = codec.createOutputStream(compressionBuffer, compressor);
        DataOutputStream deflateOut = new DataOutputStream(deflateFilter);
        //compress key and write key out
        compressionBuffer.reset();
        deflateFilter.resetState();
        keyBuffer.write(deflateOut);
        deflateOut.flush();
        deflateFilter.finish();
        int compressedKeyLen = compressionBuffer.getLength();
        out.writeInt(compressedKeyLen);
        out.write(compressionBuffer.getData(), 0, compressedKeyLen);
        // NOTE(review): the compressor is not returned to the pool if an
        // IOException is thrown above — potential leak; consider try/finally.
        CodecPool.returnCompressor(compressor);
    } else {
        out.writeInt(keyLength);
        keyBuffer.write(out);
    }
}
/** @throws Exception If failed. */ @Test public void testDeleteSuccessfulIfPathIsOpenedToRead() throws Exception { Path fsHome = new Path(primaryFsUri); final Path file = new Path(fsHome, "myFile"); FSDataOutputStream os = fs.create(file, false, 128); final int cnt = 5 * FileSystemConfiguration.DFLT_BLOCK_SIZE; // Write 5 blocks. for (int i = 0; i < cnt; i++) os.writeInt(i); os.close(); final FSDataInputStream is = fs.open(file, -1); for (int i = 0; i < cnt / 2; i++) assertEquals(i, is.readInt()); assert fs.delete(file, false); assert !fs.exists(file); is.close(); }
/**
 * Creates the output summer for a checksummed file: opens the raw data
 * file and its companion checksum file on the underlying raw file system,
 * then writes the checksum file header (version bytes plus the
 * bytes-per-checksum value).
 */
public ChecksumFSOutputSummer(ChecksumFileSystem fs, Path file, boolean overwrite,
    int bufferSize, short replication, long blockSize, Progressable progress,
    FsPermission permission) throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, fs.getBytesPerSum()));
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite, bufferSize,
      replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  // The checksum file is always overwritten (third create() arg is true).
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file), permission, true,
      sumBufferSize, replication, blockSize, null);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
private void writeKey(KeyBuffer keyBuffer, int recordLen, int keyLength) throws IOException { checkAndWriteSync(); // sync out.writeInt(recordLen); // total record length out.writeInt(keyLength); // key portion length if(this.isCompressed()) { Compressor compressor = CodecPool.getCompressor(codec); NonSyncDataOutputBuffer compressionBuffer = new NonSyncDataOutputBuffer(); CompressionOutputStream deflateFilter = codec.createOutputStream(compressionBuffer, compressor); DataOutputStream deflateOut = new DataOutputStream(deflateFilter); //compress key and write key out compressionBuffer.reset(); deflateFilter.resetState(); keyBuffer.write(deflateOut); deflateOut.flush(); deflateFilter.finish(); int compressedKeyLen = compressionBuffer.getLength(); out.writeInt(compressedKeyLen); out.write(compressionBuffer.getData(), 0, compressedKeyLen); CodecPool.returnCompressor(compressor); } else { out.writeInt(keyLength); keyBuffer.write(out); } }
/** @throws Exception If failed. */ @Test public void testRenameIfSrcPathIsAlreadyBeingOpenedToRead() throws Exception { Path fsHome = new Path(primaryFsUri); Path srcFile = new Path(fsHome, "srcFile"); Path dstFile = new Path(fsHome, "dstFile"); FSDataOutputStream os = fs.create(srcFile); int cnt = 1024; for (int i = 0; i < cnt; i++) os.writeInt(i); os.close(); FSDataInputStream is = fs.open(srcFile); for (int i = 0; i < cnt; i++) { if (i == 100) // Rename file during the read process. assertTrue(fs.rename(srcFile, dstFile)); assertEquals(i, is.readInt()); } assertPathDoesNotExist(fs, srcFile); assertPathExists(fs, dstFile); os.close(); is.close(); }
/**
 * Creates the output summer for a checksummed file on a {@code ChecksumFs}:
 * opens the raw data file and its companion checksum file on the
 * underlying raw file system, then writes the checksum file header
 * (version bytes plus the bytes-per-checksum value).
 */
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file,
    final EnumSet<CreateFlag> createFlag, final FsPermission absolutePermission,
    final int bufferSize, final short replication, final long blockSize,
    final Progressable progress, final ChecksumOpt checksumOpt,
    final boolean createParent) throws IOException {
  super(DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, fs.getBytesPerSum()));

  // checksumOpt is passed down to the raw fs. Unless the raw fs implements
  // checksumming internally, checksumOpt will be ignored. If the raw fs
  // does checksum internally, we end up with two layers of checksumming,
  // i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag, absolutePermission,
      bufferSize, replication, blockSize, progress, checksumOpt, createParent);

  // Now create the checksum file; adjust the buffer size accordingly.
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize, file);
  // The checksum file is always created/overwritten.
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE), absolutePermission,
      sumBufferSize, replication, blockSize, progress, checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
/**
 * Writes recovered-edits files whose sequence ids are all below the
 * stores' max sequence id and verifies that replay ignores every one,
 * returning the stores' minimum sequence id unchanged.
 */
@Test
public void testSkipRecoveredEditsReplayAllIgnored() throws Exception {
  byte[] family = Bytes.toBytes("family");
  this.region = initHRegion(tableName, method, CONF, family);
  Path regiondir = region.getRegionFileSystem().getRegionDir();
  FileSystem fs = region.getRegionFileSystem().getFileSystem();

  Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);

  // Edits files named with seq ids 1000, 1010, ..., 1040 — all stale.
  for (int seq = 1000; seq < 1050; seq += 10) {
    Path staleEdits = new Path(recoveredEditsDir, String.format("%019d", seq));
    FSDataOutputStream dos = fs.create(staleEdits);
    dos.writeInt(seq);
    dos.close();
  }

  // Plus an empty edits file named just below minSeqId.
  long minSeqId = 2000;
  Path emptyEdits = new Path(recoveredEditsDir, String.format("%019d", minSeqId - 1));
  FSDataOutputStream dos = fs.create(emptyEdits);
  dos.close();

  Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (HStore store : region.getStores()) {
    maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), minSeqId);
  }

  // Everything on disk is stale, so replay must return minSeqId untouched.
  long seqId = region.replayRecoveredEditsIfAny(maxSeqIdInStores, null, null);
  assertEquals(minSeqId, seqId);
}
/** @throws Exception If failed. */ @Test public void testRenameIfSrcPathIsAlreadyBeingOpenedToRead() throws Exception { Path fsHome = new Path(primaryFsUri); Path srcFile = new Path(fsHome, "srcFile"); Path dstFile = new Path(fsHome, "dstFile"); FSDataOutputStream os = fs.create(srcFile, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault())); int cnt = 1024; for (int i = 0; i < cnt; i++) os.writeInt(i); os.close(); FSDataInputStream is = fs.open(srcFile); for (int i = 0; i < cnt; i++) { if (i == 100) // Rename file during the read process. fs.rename(srcFile, dstFile); assertEquals(i, is.readInt()); } assertPathDoesNotExist(fs, srcFile); assertPathExists(fs, dstFile); os.close(); is.close(); }
/** @throws Exception If failed. */ @Test public void testDeleteSuccessfulIfPathIsOpenedToRead() throws Exception { Path fsHome = new Path(primaryFsUri); final Path file = new Path(fsHome, "myFile"); FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class), Options.CreateOpts.perms(FsPermission.getDefault())); final int cnt = 5 * FileSystemConfiguration.DFLT_BLOCK_SIZE; // Write 5 blocks. for (int i = 0; i < cnt; i++) os.writeInt(i); os.close(); final FSDataInputStream is = fs.open(file, -1); for (int i = 0; i < cnt / 2; i++) assertEquals(i, is.readInt()); assert fs.delete(file, false); GridTestUtils.assertThrows(log, new Callable<Object>() { @Override public Object call() throws Exception { fs.getFileStatus(file); return null; } }, FileNotFoundException.class, null); is.close(); }