/**
 * Releases the lock: closes the lock file stream, then deletes the lock file.
 *
 * @throws IOException if lock file could not be deleted
 */
public void release() throws IOException {
    lockFileStream.close();
    boolean deleted = fs.delete(lockFile, false);
    if (!deleted) {
        LOG.warn("Unable to delete lock file, Spout = {}", componentID);
        throw new IOException("Unable to delete lock file");
    }
    LOG.debug("Released lock file {}. Spout {}", lockFile, componentID);
}
/** @throws Exception If failed. */
@Test
public void testDeleteIfDirectoryPathExists() throws Exception {
    Path home = new Path(primaryFsUri);
    Path nestedDir = new Path(home, "/someDir1/someDir2/someDir3");

    // Create a zero-length file at the nested path, then close it immediately.
    FSDataOutputStream stream = fs.create(nestedDir, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));
    stream.close();

    // Non-recursive delete of the leaf must succeed and remove the path.
    assertTrue(fs.delete(nestedDir, false));
    assertPathDoesNotExist(fs, nestedDir);
}
private FileStatus[] addMockStoreFiles(int count, MasterServices services, Path storedir) throws IOException { // get the existing store files FileSystem fs = services.getMasterFileSystem().getFileSystem(); fs.mkdirs(storedir); // create the store files in the parent for (int i = 0; i < count; i++) { Path storeFile = new Path(storedir, "_store" + i); FSDataOutputStream dos = fs.create(storeFile, true); dos.writeBytes("Some data: " + i); dos.close(); } LOG.debug("Adding " + count + " store files to the storedir:" + storedir); // make sure the mock store files are there FileStatus[] storeFiles = fs.listStatus(storedir); assertEquals("Didn't have expected store files", count, storeFiles.length); return storeFiles; }
/**
 * Creates the given Path as a brand-new zero-length file. Returns false if the
 * path already existed; a failed create propagates its {@link IOException}.
 * <i>Important: the default implementation is not atomic</i> — another client
 * may create {@code f} between the existence check and the create call.
 *
 * @param f path to use for create
 * @return true if the file was created; false if it already existed
 * @throws IOException IO failure
 */
public boolean createNewFile(Path f) throws IOException {
  if (exists(f)) {
    return false;
  }
  // Create a zero-length file with the configured buffer size and close it immediately.
  create(f, false, getConf().getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT)).close();
  return true;
}
/** * Write the .regioninfo file on-disk. */ public static void writeRegioninfoOnFilesystem(final Configuration conf, final FileSystem fs, final Path regionInfoDir, RegionInfo regionInfo) throws IOException { final byte[] content = RegionInfo.toDelimitedByteArray(regionInfo); Path regionInfoFile = new Path(regionInfoDir, "." + HConstants.REGIONINFO_QUALIFIER_STR); // First check to get the permissions FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); // Write the RegionInfo file content FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null); try { out.write(content); } finally { out.close(); } }
private void metablocks(final String compress) throws Exception { Path mFile = new Path(ROOT_DIR, "meta.hfile"); FSDataOutputStream fout = createFSOutput(mFile); HFileContext meta = new HFileContextBuilder() .withCompression(HFileWriterImpl.compressionByName(compress)) .withBlockSize(minBlockSize).build(); Writer writer = HFile.getWriterFactory(conf, cacheConf) .withOutputStream(fout) .withFileContext(meta) .create(); someTestingWithMetaBlock(writer); writer.close(); fout.close(); FSDataInputStream fin = fs.open(mFile); Reader reader = HFile.createReaderFromStream(mFile, fs.open(mFile), this.fs.getFileStatus(mFile).getLen(), cacheConf, conf); reader.loadFileInfo(); // No data -- this should return false. assertFalse(reader.getScanner(false, false).seekTo()); someReadingWithMetaBlock(reader); fs.delete(mFile, true); reader.close(); fin.close(); }
/**
 * Persists the latest transaction record (txn id, current data file, current offset)
 * to the index file by writing a temp file and renaming it over the index file.
 * Wraps any IOException in a FailedException so the batch is failed and retried.
 */
private void updateIndex(long txId) {
  LOG.debug("Starting index update.");
  final Path tmpPath = tmpFilePath(indexFilePath.toString());
  try (FSDataOutputStream out = this.options.fs.create(tmpPath, true);
       BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(out))) {
    TxnRecord txnRecord = new TxnRecord(txId, options.currentFile.toString(), this.options.getCurrentOffset());
    bw.write(txnRecord.toString());
    bw.newLine();
    bw.flush();
    // Deliberate explicit close inside try-with-resources (see comment below);
    // the implicit close at block exit is then a no-op/failure that is tolerated.
    out.close();
    /* In non error scenarios, for the Azure Data Lake Store File System (adl://),
       the output stream must be closed before the file associated with it is deleted.
       For ADLFS deleting the file also removes any handles to the file,
       hence out.close() will fail. */
    /*
     * Delete the current index file and rename the tmp file to atomically
     * replace the index file. Orphan .tmp files are handled in getTxnRecord.
     */
    options.fs.delete(this.indexFilePath, false);
    options.fs.rename(tmpPath, this.indexFilePath);
    lastSeenTxn = txnRecord;
    LOG.debug("updateIndex updated lastSeenTxn to [{}]", this.lastSeenTxn);
  } catch (IOException e) {
    LOG.warn("Begin commit failed due to IOException. Failing batch", e);
    throw new FailedException(e);
  }
}
/** * Write the .regioninfo file on-disk. */ private static void writeRegionInfoFileContent(final Configuration conf, final FileSystem fs, final Path regionInfoFile, final byte[] content) throws IOException { // First check to get the permissions FsPermission perms = FSUtils.getFilePermissions(fs, conf, HConstants.DATA_FILE_UMASK_KEY); // Write the RegionInfo file content FSDataOutputStream out = FSUtils.create(conf, fs, regionInfoFile, perms, null); try { out.write(content); } finally { out.close(); } }
/**
 * Writes the serialized watermark to the watermark file under the target directory,
 * replacing any existing file, and marks this step completed.
 *
 * @throws IOException if the URI is malformed or any filesystem operation fails
 */
@Override
public void execute() throws IOException {
  URI fsURI;
  try {
    fsURI = new URI(this.fsUriString);
  } catch (URISyntaxException e) {
    throw new IOException("can not build URI " + this.fsUriString, e);
  }
  FileSystem fs = FileSystem.get(fsURI, new Configuration());
  Path filenamePath = new Path(this.targetDirPath, ReplicaHadoopFsEndPoint.WATERMARK_FILE);
  if (fs.exists(filenamePath)) {
    fs.delete(filenamePath, false);
  }
  // try-with-resources: the original leaked the stream if write() threw
  try (FSDataOutputStream fout = fs.create(filenamePath)) {
    fout.write(WatermarkMetadataUtil.serialize(this.watermark).getBytes(Charsets.UTF_8));
  }
  this.completed = true;
}
/** @throws Exception If failed. */
@Test
public void testDeleteIfFilePathExists() throws Exception {
    Path home = new Path(primaryFsUri);
    Path filePath = new Path(home, "myFile");

    // Create a zero-length file, then close it immediately.
    FSDataOutputStream stream = fs.create(filePath, EnumSet.noneOf(CreateFlag.class),
        Options.CreateOpts.perms(FsPermission.getDefault()));
    stream.close();

    // Non-recursive delete of an existing file must succeed and remove it.
    assertTrue(fs.delete(filePath, false));
    assertPathDoesNotExist(fs, filePath);
}
private static void writeTD(final FileSystem fs, final Path p, final TableDescriptor htd) throws IOException { FSDataOutputStream out = fs.create(p, false); try { // We used to write this file out as a serialized HTD Writable followed by two '\n's and then // the toString version of HTD. Now we just write out the pb serialization. out.write(TableDescriptorBuilder.toByteArray(htd)); } finally { out.close(); } }
@Override public void regionClose(final SnapshotRegionManifest.Builder region) throws IOException { // we should ensure the snapshot dir exist, maybe it has been deleted by master // see HBASE-16464 FileSystem workingDirFs = snapshotDir.getFileSystem(this.conf); if (workingDirFs.exists(snapshotDir)) { SnapshotRegionManifest manifest = region.build(); FSDataOutputStream stream = workingDirFs.create( getRegionManifestPath(snapshotDir, manifest)); try { manifest.writeTo(stream); } finally { stream.close(); } } else { LOG.warn("can't write manifest without parent dir, maybe it has been deleted by master?"); } }
private static DirLock takeOwnership(FileSystem fs, Path dirLockFile) throws IOException { if (fs instanceof DistributedFileSystem) { if (!((DistributedFileSystem) fs).recoverLease(dirLockFile)) { LOG.warn("Unable to recover lease on dir lock file " + dirLockFile + " right now. Cannot transfer ownership. Will need to try later."); return null; } } // delete and recreate lock file if (fs.delete(dirLockFile, false)) { // returns false if somebody else already deleted it (to take ownership) FSDataOutputStream ostream = HdfsUtils.tryCreateFile(fs, dirLockFile); if (ostream != null) { ostream.close(); } return new DirLock(fs, dirLockFile); } return null; }
/**
 * Copies the given CSV stream into a new HDFS file at {@code path}.
 * Fails if the file exists and {@code force} is not set. The caller retains
 * ownership of (and must close) the input stream.
 *
 * @param csv source stream to copy; read until exhausted
 * @throws IOException if the copy fails
 * @throws IllegalArgumentException if the target exists and force is false
 */
protected void serveHdfs(InputStream csv) throws IOException {
  if (FSUtils.isBareS3NBucketWithoutTrailingSlash(path)) {
    path += "/";
  }
  Path p = new Path(path);
  org.apache.hadoop.fs.FileSystem fs =
      org.apache.hadoop.fs.FileSystem.get(p.toUri(), PersistHdfs.CONF);
  if (!force && fs.exists(p)) {
    throw new IllegalArgumentException("File " + path + " already exists.");
  }
  fs.mkdirs(p.getParent());
  try (FSDataOutputStream s = fs.create(p)) {
    // try-with-resources guarantees close even when a read/write throws; the
    // original closed in a finally whose subsequent log call was skipped if
    // close() itself threw.
    byte[] buffer = new byte[1024];
    int len;
    while ((len = csv.read(buffer)) > 0) {
      s.write(buffer, 0, len);
    }
  } finally {
    // preserve original behavior: the message is logged on both success and failure
    Log.info("Key '" + src_key.toString() + "' was written to " + path.toString() + ".");
  }
}