/**
 * Opens a stream to append to an existing file.
 *
 * @param f path of the file to append to
 * @param bufferSize size of the buffer wrapped around the raw output stream
 * @param progress progress reporter (part of the FileSystem contract; not used here)
 * @return an output stream positioned at the current end of the file
 * @throws IOException if {@code f} is a directory or the stream cannot be opened
 */
@Override
public FSDataOutputStream append(Path f, int bufferSize, Progressable progress)
    throws IOException {
  FileStatus status = getFileStatus(f);
  if (status.isDirectory()) {
    // Fixed typo in the error message: "diretory" -> "directory".
    throw new IOException("Cannot append to a directory (=" + f + " )");
  }
  // Position the FSDataOutputStream at the current file length so getPos() is correct.
  return new FSDataOutputStream(
      new BufferedOutputStream(createOutputStreamWithMode(f, true, null), bufferSize),
      statistics, status.getLen());
}
/**
 * Opens a buffered output stream over a local file.
 *
 * @param file target file on the local file system
 * @param append whether to append to the file instead of truncating it
 * @param bufSize requested buffer size; values below 32 KiB are raised to 32 KiB
 * @return a buffered {@code FSDataOutputStream} over the file
 * @throws IOException if the file cannot be opened
 */
private FSDataOutputStream out(File file, boolean append, int bufSize) throws IOException {
  // Enforce a 32 KiB floor on the buffer size; Math.max is clearer than the
  // original hand-written ternary.
  int effectiveBufSize = Math.max(bufSize, 32 * 1024);
  return new FSDataOutputStream(
      new BufferedOutputStream(new FileOutputStream(file, append), effectiveBufSize),
      new Statistics(getUri().getScheme()));
}
/**
 * Creates a new file. Data is buffered in a local staging file and uploaded to S3
 * when the returned stream is closed.
 *
 * @param path destination path
 * @param permission requested permission (not enforced by S3)
 * @param overwrite whether an existing file at {@code path} may be replaced
 * @param bufferSize requested buffer size (unused here)
 * @param replication requested replication factor (unused here)
 * @param blockSize requested block size (unused here)
 * @param progress progress reporter (unused here)
 * @return an output stream that stages locally and uploads on close
 * @throws IOException if the file exists and {@code overwrite} is false, or the
 *     staging directory is unusable
 */
@Override
public FSDataOutputStream create(Path path, FsPermission permission, boolean overwrite,
    int bufferSize, short replication, long blockSize, Progressable progress)
    throws IOException {
  if ((!overwrite) && exists(path)) {
    throw new IOException("File already exists:" + path);
  }
  if (!stagingDirectory.exists()) {
    createDirectories(stagingDirectory.toPath());
  }
  if (!stagingDirectory.isDirectory()) {
    throw new IOException("Configured staging path is not a directory: " + stagingDirectory);
  }
  File tempFile = createTempFile(stagingDirectory.toPath(), "presto-s3-", ".tmp").toFile();
  String key = keyFromPath(qualifiedPath(path));
  try {
    return new FSDataOutputStream(
        new PrestoS3OutputStream(s3, getBucketName(uri), key, tempFile, sseEnabled, sseType,
            sseKmsKeyId, multiPartUploadMinFileSize, multiPartUploadMinPartSize, s3AclType),
        statistics);
  }
  catch (IOException | RuntimeException e) {
    // Fix: do not leak the staging file when the stream cannot be constructed.
    if (!tempFile.delete()) {
      e.addSuppressed(new IOException("Could not delete staging file: " + tempFile));
    }
    throw e;
  }
}
/**
 * Creates a file without creating missing parent directories.
 *
 * @param f path of the file to create
 * @param permission permission to apply to the new file
 * @param flags creation flags; {@code OVERWRITE} allows replacing an existing file
 * @param bufferSize size of the buffer wrapped around the raw output stream
 * @param replication requested replication factor (unused here)
 * @param blockSize requested block size (unused here)
 * @param progress progress reporter (unused here)
 * @return a buffered output stream over the new file
 * @throws FileAlreadyExistsException if the file exists and OVERWRITE was not set
 * @throws IOException if the stream cannot be opened
 */
@Override
public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
    EnumSet<CreateFlag> flags, int bufferSize, short replication, long blockSize,
    Progressable progress) throws IOException {
  // Refuse to clobber an existing file unless the caller asked for OVERWRITE.
  boolean mayOverwrite = flags.contains(CreateFlag.OVERWRITE);
  if (exists(f) && !mayOverwrite) {
    throw new FileAlreadyExistsException("File already exists: " + f);
  }
  return new FSDataOutputStream(
      new BufferedOutputStream(createOutputStreamWithMode(f, false, permission), bufferSize),
      statistics);
}
/**
 * Creates a checksummed output stream for the given path.
 *
 * @param f path of the file to create
 * @param createFlag creation flags forwarded to the summer
 * @param absolutePermission permission to apply to the new file
 * @param bufferSize requested buffer size
 * @param replication requested replication factor
 * @param blockSize requested block size
 * @param progress progress reporter
 * @param checksumOpt checksum options
 * @param createParent whether missing parents should be created
 * @return a stream that writes data plus checksums
 * @throws IOException if the underlying stream cannot be created
 */
@Override
public FSDataOutputStream createInternal(Path f, EnumSet<CreateFlag> createFlag,
    FsPermission absolutePermission, int bufferSize, short replication, long blockSize,
    Progressable progress, ChecksumOpt checksumOpt, boolean createParent) throws IOException {
  // All real work happens in ChecksumFSOutputSummer; no statistics object is attached.
  return new FSDataOutputStream(
      new ChecksumFSOutputSummer(this, f, createFlag, absolutePermission, bufferSize,
          replication, blockSize, progress, checksumOpt, createParent),
      null);
}
/**
 * Creates a file replicated across the underlying Nfly mount points.
 *
 * @param f path of the file to create
 * @param permission permission to apply to the new file
 * @param overwrite whether an existing file may be replaced
 * @param bufferSize requested buffer size
 * @param replication requested replication factor
 * @param blockSize requested block size
 * @param progress progress reporter
 * @return an output stream fanning writes out to every mount point
 * @throws IOException if the Nfly stream cannot be created
 */
@Override
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite,
    int bufferSize, short replication, long blockSize, Progressable progress)
    throws IOException {
  NflyOutputStream nflyOut = new NflyOutputStream(f, permission, overwrite, bufferSize,
      replication, blockSize, progress);
  return new FSDataOutputStream(nflyOut, statistics);
}
/**
 * Creates a file, first ensuring its parent directory exists.
 *
 * @param f path of the file to create
 * @param overwrite whether an existing file may be replaced
 * @param createParent unused here; parents are always created via mkdirs
 * @param bufferSize size of the buffer wrapped around the raw output stream
 * @param replication requested replication factor (unused here)
 * @param blockSize requested block size (unused here)
 * @param progress progress reporter (unused here)
 * @param permission permission to apply to the new file
 * @return a buffered output stream over the new file
 * @throws FileAlreadyExistsException if the file exists and overwrite is false
 * @throws IOException if the parent directory cannot be created
 */
private FSDataOutputStream create(Path f, boolean overwrite, boolean createParent,
    int bufferSize, short replication, long blockSize, Progressable progress,
    FsPermission permission) throws IOException {
  if (exists(f) && !overwrite) {
    throw new FileAlreadyExistsException("File already exists: " + f);
  }
  // Ensure the parent directory chain exists before opening the file.
  Path parentDir = f.getParent();
  if (parentDir != null && !mkdirs(parentDir)) {
    throw new IOException("Mkdirs failed to create " + parentDir.toString());
  }
  return new FSDataOutputStream(
      new BufferedOutputStream(createOutputStreamWithMode(f, false, permission), bufferSize),
      statistics);
}
/** {@inheritDoc} */
@Override public FSDataOutputStream append(Path f, int bufSize, Progressable progress)
    throws IOException {
    A.notNull(f, "f");

    // Guard against concurrent close of the file system while the stream is opened.
    enterBusy();

    try {
        IgfsPath path = convert(f);

        if (LOG.isDebugEnabled())
            LOG.debug("Opening output stream in append [thread=" + Thread.currentThread().getName() +
                ", path=" + path + ", bufSize=" + bufSize + ']');

        // Open the remote append stream; second arg presumably means "do not create
        // if missing" — TODO confirm against rmtClient.append's contract.
        HadoopIgfsStreamDelegate stream = rmtClient.append(path, false, null);

        assert stream != null;

        long logId = -1;

        // Record the append in the client operations log, if sampling is enabled.
        if (clientLog.isLogEnabled()) {
            logId = IgfsLogger.nextId();

            clientLog.logAppend(logId, path, bufSize);
        }

        if (LOG.isDebugEnabled())
            LOG.debug("Opened output stream in append [path=" + path + ", delegate=" + stream + ']');

        HadoopIgfsOutputStream igfsOut = new HadoopIgfsOutputStream(stream, LOG, clientLog, logId);

        // Enforce a 64 KiB minimum buffer size.
        bufSize = Math.max(64 * 1024, bufSize);

        BufferedOutputStream out = new BufferedOutputStream(igfsOut, bufSize);

        return new FSDataOutputStream(out, null, 0);
    }
    finally {
        leaveBusy();
    }
}
// Wrap the checksumming summer; the trailing null presumably means no
// statistics tracking on this stream — TODO confirm against the constructor.
out = new FSDataOutputStream(
    new ChecksumFSOutputSummer(this, f, overwrite, bufferSize, replication,
        blockSize, progress, permission),
    null);
// Wrap 'out'; null statistics, and the trailing 0 is presumably the initial
// stream position — TODO confirm against the FSDataOutputStream constructor.
FSDataOutputStream res = new FSDataOutputStream(out, null, 0);
/**
 * "Appends" to a file. Alluxio only supports appending to a path that does not
 * yet exist, in which case a fresh file is created recursively; appending to an
 * existing file is rejected.
 *
 * @param path file to append to
 * @param bufferSize requested buffer size (unused here)
 * @param progress progress reporter (unused here)
 * @return an output stream over the newly created file
 * @throws IOException if the file already exists or Alluxio reports an error
 */
@Override
public FSDataOutputStream append(Path path, int bufferSize, Progressable progress)
    throws IOException {
  LOG.debug("append({}, {}, {})", path, bufferSize, progress);
  if (mStatistics != null) {
    mStatistics.incrementWriteOps(1);
  }
  AlluxioURI fileUri = new AlluxioURI(HadoopUtils.getPathWithoutScheme(path));
  try {
    // Appending to existing content is unsupported; only a brand-new file works.
    if (mFileSystem.exists(fileUri)) {
      throw new IOException(ExceptionMessage.FILE_ALREADY_EXISTS.getMessage(fileUri));
    }
    CreateFilePOptions options = CreateFilePOptions.newBuilder().setRecursive(true).build();
    return new FSDataOutputStream(mFileSystem.createFile(fileUri, options), mStatistics);
  } catch (AlluxioException e) {
    // Surface Alluxio failures through the Hadoop-declared exception type.
    throw new IOException(e);
  }
}
// Verifies that movePath copies the bytes from fs1:/src/file.txt to
// fs2:/dst/file.txt and deletes the source.
@Test public void testMovePath() throws IOException, URISyntaxException {
    String expected = "test";
    ByteArrayOutputStream actual = new ByteArrayOutputStream();
    Path src = new Path("/src/file.txt");
    Path dst = new Path("/dst/file.txt");
    // Source file system: a readable, deletable file containing "test".
    FileSystem fs1 = Mockito.mock(FileSystem.class);
    Mockito.when(fs1.exists(src)).thenReturn(true);
    Mockito.when(fs1.isFile(src)).thenReturn(true);
    Mockito.when(fs1.getUri()).thenReturn(new URI("fs1:////"));
    Mockito.when(fs1.getFileStatus(src)).thenReturn(new FileStatus(1, false, 1, 1, 1, src));
    Mockito.when(fs1.open(src))
        .thenReturn(new FSDataInputStream(new SeekableFSInputStream(new ByteArrayInputStream(expected.getBytes()))));
    Mockito.when(fs1.delete(src, true)).thenReturn(true);
    // Destination file system: create() is routed into the in-memory 'actual' buffer.
    FileSystem fs2 = Mockito.mock(FileSystem.class);
    Mockito.when(fs2.exists(dst)).thenReturn(false);
    Mockito.when(fs2.getUri()).thenReturn(new URI("fs2:////"));
    Mockito.when(fs2.getConf()).thenReturn(new Configuration());
    Mockito.when(fs2.create(dst, false)).thenReturn(new FSDataOutputStream(actual, null));
    try (ParallelRunner parallelRunner = new ParallelRunner(1, fs1)) {
        parallelRunner.movePath(src, fs2, dst, Optional.<String>absent());
    }
    // NOTE(review): the (actual, expected) argument order is correct for TestNG's
    // Assert, but swapped for org.junit.Assert — confirm which framework is imported.
    Assert.assertEquals(actual.toString(), expected);
}
// Wrap 'out'; null statistics, and the trailing 0 is presumably the initial
// stream position — TODO confirm against the FSDataOutputStream constructor.
FSDataOutputStream res = new FSDataOutputStream(out, null, 0);
/**
 * Builds a SequenceFile writer on top of the given output stream.
 *
 * @param out the (Flink) output stream to write the sequence file into
 * @return a {@code SequenceFileWriter} wrapping a configured Hadoop writer
 * @throws IOException if the Hadoop writer cannot be created
 */
@Override
public SequenceFileWriter<K, V> create(FSDataOutputStream out) throws IOException {
  // Bridge the stream into Hadoop's own FSDataOutputStream type (no statistics).
  org.apache.hadoop.fs.FSDataOutputStream hadoopStream =
      new org.apache.hadoop.fs.FSDataOutputStream(out, null);
  CompressionCodec codec =
      getCompressionCodec(serializableHadoopConfig.get(), compressionCodecName);
  SequenceFile.Writer seqWriter = SequenceFile.createWriter(
      serializableHadoopConfig.get(),
      SequenceFile.Writer.stream(hadoopStream),
      SequenceFile.Writer.keyClass(keyClass),
      SequenceFile.Writer.valueClass(valueClass),
      SequenceFile.Writer.compression(compressionType, codec));
  return new SequenceFileWriter<>(seqWriter);
}
// Streams 'in' into a SequenceFile written to 'out', rewriting the header so the
// value class reads as BytesWritable instead of InputStreamWritable.
@Override
public void process(InputStream in, OutputStream out) throws IOException {
    // Use a FilterableOutputStream to change 'InputStreamWritable' to 'BytesWritable' - see comment
    // above for an explanation of why we want to do this.
    final ByteFilteringOutputStream bwos = new ByteFilteringOutputStream(out);

    // TODO: Adding this filter could be dangerous... A Sequence File's header contains 3 bytes: "SEQ",
    // followed by 1 byte that is the Sequence File version, followed by 2 "entries." These "entries"
    // contain the size of the Key/Value type and the Key/Value type. So, we will be writing the
    // value type as InputStreamWritable -- which we need to change to BytesWritable. This means that
    // we must also change the "size" that is written, but replacing this single byte could be
    // dangerous. However, we know exactly what will be written to the header, and we limit this at one
    // replacement, so we should be just fine.
    bwos.addFilter(toReplace, replaceWith, 1);
    // Also patch the single length byte that precedes the class name in the header.
    bwos.addFilter((byte) InputStreamWritable.class.getCanonicalName().length(),
        (byte) BytesWritable.class.getCanonicalName().length(), 1);

    // try-with-resources closes the writer before the wrapping stream.
    try (final FSDataOutputStream fsDataOutputStream = new FSDataOutputStream(bwos, new Statistics(""));
         final SequenceFile.Writer writer = SequenceFile.createWriter(configuration,
             SequenceFile.Writer.stream(fsDataOutputStream),
             SequenceFile.Writer.keyClass(Text.class),
             SequenceFile.Writer.valueClass(InputStreamWritable.class),
             SequenceFile.Writer.compression(compressionType, compressionCodec))) {
        processInputStream(in, flowFile, writer);
    } finally {
        watch.stop();
    }
} });
// Verifies that hasCapability() on a plain, capability-less FSDataOutputStream
// only reports support when the StreamCapabilities class is absent from the
// classpath (the reflective fallback path).
@Test public void checkStreamCapabilitiesOnKnownNoopStream() throws IOException {
    // A ByteArrayOutputStream-backed FSDataOutputStream implements none of the
    // optional stream capabilities.
    FSDataOutputStream stream = new FSDataOutputStream(new ByteArrayOutputStream(), null);
    assertNotEquals("We expect our dummy FSDOS to claim capabilities iff the StreamCapabilities "
        + "class is not defined.", STREAM_CAPABILITIES_IS_PRESENT,
        CommonFSUtils.hasCapability(stream, "hsync"));
    assertNotEquals("We expect our dummy FSDOS to claim capabilities iff the StreamCapabilities "
        + "class is not defined.", STREAM_CAPABILITIES_IS_PRESENT,
        CommonFSUtils.hasCapability(stream, "hflush"));
    assertNotEquals("We expect our dummy FSDOS to claim capabilities iff the StreamCapabilities "
        + "class is not defined.", STREAM_CAPABILITIES_IS_PRESENT,
        CommonFSUtils.hasCapability(stream, "a capability that hopefully no filesystem will "
            + "implement."));
} }
// Wrap the raw stream so writes are accounted against mStatistics.
return new FSDataOutputStream(outStream, mStatistics);
private String createHFileForFamilies(byte[] family) throws IOException { HFile.WriterFactory hFileFactory = HFile.getWriterFactoryNoCache(conf); // TODO We need a way to do this without creating files File hFileLocation = testFolder.newFile(); FSDataOutputStream out = new FSDataOutputStream(new FileOutputStream(hFileLocation), null); try { hFileFactory.withOutputStream(out); hFileFactory.withFileContext(new HFileContext()); HFile.Writer writer = hFileFactory.create(); try { writer.append(new KeyValue(CellUtil.createCell(randomBytes, family, randomBytes, 0L, KeyValue.Type.Put.getCode(), randomBytes))); } finally { writer.close(); } } finally { out.close(); } return hFileLocation.getAbsoluteFile().getAbsolutePath(); }
private Path createOutputFile() throws IOException { Path path = new Path(this.fileNameFormat.getPath(), this.fileNameFormat.getName(this.rotation, System.currentTimeMillis())); if(fs.getScheme().equals("file")) { //in the situation where we're running this in a local filesystem, flushing doesn't work. fs.mkdirs(path.getParent()); this.out = new FSDataOutputStream(new FileOutputStream(path.toString()), null); } else { this.out = this.fs.create(path); } return path; }
/**
 * Serializes the given key/value pairs through an IFile writer and returns the
 * resulting bytes.
 *
 * @param conf configuration passed to the IFile writer
 * @param keysToValues pairs to serialize, in the map's iteration order
 * @return the serialized IFile contents
 * @throws IOException if serialization fails
 */
private byte[] writeMapOutput(Configuration conf, Map<String, String> keysToValues)
    throws IOException {
  ByteArrayOutputStream baos = new ByteArrayOutputStream();
  FSDataOutputStream fsdos = new FSDataOutputStream(baos, null);
  IFile.Writer<Text, Text> writer =
      new IFile.Writer<Text, Text>(conf, fsdos, Text.class, Text.class, null, null);
  // Iterate over entrySet() instead of keySet()+get() to avoid a redundant
  // lookup per key.
  for (Map.Entry<String, String> entry : keysToValues.entrySet()) {
    writer.append(new Text(entry.getKey()), new Text(entry.getValue()));
  }
  writer.close();
  return baos.toByteArray();
}