// Probe write — per the error message below, this only checks that a file can be
// created/written (to test truncate support); the "hello" payload itself is irrelevant.
outputStream.writeUTF("hello");
} catch (IOException e) {
    LOG.error("Could not create file for checking if truncate works.", e);
// Probe write — per the error message below, this only checks that a file can be
// created/written (to test truncate support); the "hello" payload itself is irrelevant.
outputStream.writeUTF("hello");
} catch (IOException e) {
    LOG.error("Could not create file for checking if truncate works.", e);
/**
 * Stages a file on HDFS under the application's home-directory path and registers it
 * as an application-scoped YARN {@link LocalResource}.
 *
 * @param fs             file system to stage into
 * @param fileSrcPath    local file to upload, or {@code null} to write {@code resources} instead
 * @param fileDstPath    destination name; also the key under which the resource is registered
 * @param appId          application id, used to build the staging path
 * @param localResources map receiving the created resource entry
 * @param resources      payload written (modified-UTF-8) when no source file is given
 * @throws IOException if the upload, write, or status lookup fails
 */
private void addToLocalResources(FileSystem fs, String fileSrcPath, String fileDstPath, String appId,
        Map<String, LocalResource> localResources, String resources) throws IOException {
    // Staging location: <home>/<appName>/<appId>/<fileDstPath>
    String suffix = jstormClientContext.appName + JOYConstants.BACKLASH + appId + JOYConstants.BACKLASH + fileDstPath;
    Path dst = new Path(fs.getHomeDirectory(), suffix);
    if (fileSrcPath != null) {
        // A local file was supplied: upload it as-is.
        fs.copyFromLocalFile(new Path(fileSrcPath), dst);
    } else {
        // No local source: materialize the in-memory resource string on HDFS.
        FSDataOutputStream ostream = null;
        try {
            ostream = FileSystem.create(fs, dst, new FsPermission(JOYConstants.FS_PERMISSION));
            ostream.writeUTF(resources);
        } finally {
            IOUtils.closeQuietly(ostream);
        }
    }
    // Register the staged file so YARN localizes it for the application's containers.
    FileStatus uploaded = fs.getFileStatus(dst);
    LocalResource resource = LocalResource.newInstance(
            ConverterUtils.getYarnUrlFromURI(dst.toUri()),
            LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
            uploaded.getLen(), uploaded.getModificationTime());
    localResources.put(fileDstPath, resource);
}
/**
 * Serializes the global-dictionary metadata into the V2 index file under {@code dir},
 * overwriting any existing index. Layout: minor-version byte, four int header fields,
 * converter class name, then the slice map (key via its own {@code write}, value as UTF).
 *
 * @throws IOException if the index file cannot be created or written
 */
@Override
public void writeIndexFile(Path dir, GlobalDictMetadata metadata) throws IOException {
    Path indexFile = new Path(dir, V2_INDEX_NAME);
    try (FSDataOutputStream out = fs.create(indexFile, true)) {
        out.writeByte(MINOR_VERSION_V1); // format minor-version tag
        out.writeInt(metadata.baseId);
        out.writeInt(metadata.maxId);
        out.writeInt(metadata.maxValueLength);
        out.writeInt(metadata.nValues);
        out.writeUTF(metadata.bytesConverter.getClass().getName());
        // Slice map: count, then each key followed by its slice file name.
        out.writeInt(metadata.sliceFileMap.size());
        for (Map.Entry<AppendDictSliceKey, String> slice : metadata.sliceFileMap.entrySet()) {
            slice.getKey().write(out);
            out.writeUTF(slice.getValue());
        }
    }
}
public static void writeMmCommitManifest(List<Path> commitPaths, Path specPath, FileSystem fs, String taskId, Long writeId, int stmtId, String unionSuffix, boolean isInsertOverwrite) throws HiveException { if (commitPaths.isEmpty()) { return; } // We assume one FSOP per task (per specPath), so we create it in specPath. Path manifestPath = getManifestDir(specPath, writeId, stmtId, unionSuffix, isInsertOverwrite); manifestPath = new Path(manifestPath, taskId + MANIFEST_EXTENSION); Utilities.FILE_OP_LOGGER.info("Writing manifest to {} with {}", manifestPath, commitPaths); try { // Don't overwrite the manifest... should fail if we have collisions. try (FSDataOutputStream out = fs.create(manifestPath, false)) { if (out == null) { throw new HiveException("Failed to create manifest at " + manifestPath); } out.writeInt(commitPaths.size()); for (Path path : commitPaths) { out.writeUTF(path.toString()); } } } catch (IOException e) { throw new HiveException(e); } }
/**
 * Serializes the global-dictionary metadata into the V1 index file under {@code dir},
 * overwriting any existing index. Layout: four int header fields, converter class name,
 * then the slice keys only — V1 does not record the slice file names.
 *
 * @throws IOException if the index file cannot be created or written
 */
@Override
public void writeIndexFile(Path dir, GlobalDictMetadata metadata) throws IOException {
    Path indexFile = new Path(dir, V1_INDEX_NAME);
    try (FSDataOutputStream out = fs.create(indexFile, true)) {
        out.writeInt(metadata.baseId);
        out.writeInt(metadata.maxId);
        out.writeInt(metadata.maxValueLength);
        out.writeInt(metadata.nValues);
        out.writeUTF(metadata.bytesConverter.getClass().getName());
        // V1 writes only the keys of the slice map (no file-name values).
        out.writeInt(metadata.sliceFileMap.size());
        for (Map.Entry<AppendDictSliceKey, String> slice : metadata.sliceFileMap.entrySet()) {
            slice.getKey().write(out);
        }
    }
}
// Record how many bytes of the part file are valid — presumably so recovery can
// ignore/truncate trailing bytes written after the last checkpoint (TODO confirm
// from the enclosing method). Length is stored as a modified-UTF-8 decimal string.
LOG.debug("Writing valid-length file for {} to specify valid length {}", partPath, validLength);
try (FSDataOutputStream lengthFileOut = fs.create(validLengthFilePath)) {
    lengthFileOut.writeUTF(Long.toString(validLength));
// Record how many bytes of the current part file are valid — presumably so recovery
// can ignore/truncate bytes past the restored bucket state (TODO confirm from the
// enclosing method). Length is stored as a modified-UTF-8 decimal string.
LOG.debug("Writing valid-length file for {} to specify valid length {}", partPath, bucketState.currentFileValidLength);
try (FSDataOutputStream lengthFileOut = fs.create(validLengthFilePath)) {
    lengthFileOut.writeUTF(Long.toString(bucketState.currentFileValidLength));
+ ".txt"));
// Write the processed file's full path into the ".txt" marker file, flushing before close.
// NOTE(review): if writeUTF/flush throws, the stream is not closed here — the enclosing
// method (not visible) may handle that; confirm before changing.
FSDataOutputStream stream = fs.create(newOutPutFile);
stream.writeUTF(completeFilePath);
stream.flush();
stream.close();
// Record the owning JVM's runtime name (pid@host) in the session info file.
// Fix: use try-with-resources — the original leaked the stream if writeUTF threw,
// since close() was only reached on the success path.
try (FSDataOutputStream hdfsSessionPathInfoFile = fs.create(new Path(hdfsSessionPath, INFO_FILE_NAME), true)) {
    hdfsSessionPathInfoFile.writeUTF("process: " + ManagementFactory.getRuntimeMXBean().getName() + "\n");
}
// Record the owning JVM's runtime name (pid@host) in the session info file.
// Fix: use try-with-resources — the original leaked the stream if writeUTF threw,
// since close() was only reached on the success path.
try (FSDataOutputStream hdfsSessionPathInfoFile = fs.create(new Path(hdfsSessionPath, INFO_FILE_NAME), true)) {
    hdfsSessionPathInfoFile.writeUTF("process: " + ManagementFactory.getRuntimeMXBean().getName() + "\n");
}
@Test public void testVersion() throws DeserializationException, IOException { final Path rootdir = htu.getDataTestDir(); final FileSystem fs = rootdir.getFileSystem(conf); assertNull(FSUtils.getVersion(fs, rootdir)); // Write out old format version file. See if we can read it in and convert. Path versionFile = new Path(rootdir, HConstants.VERSION_FILE_NAME); FSDataOutputStream s = fs.create(versionFile); final String version = HConstants.FILE_SYSTEM_VERSION; s.writeUTF(version); s.close(); assertTrue(fs.exists(versionFile)); FileStatus [] status = fs.listStatus(versionFile); assertNotNull(status); assertTrue(status.length > 0); String newVersion = FSUtils.getVersion(fs, rootdir); assertEquals(version.length(), newVersion.length()); assertEquals(version, newVersion); // File will have been converted. Exercise the pb format assertEquals(version, FSUtils.getVersion(fs, rootdir)); FSUtils.checkVersion(fs, rootdir, true); }
/**
 * Writes a legacy (bare writeUTF) cluster-id file, then starts the cluster and checks
 * the master comes up with the expected number of online servers — i.e. the old-format
 * id was read (and rewritten to protobuf) without breaking startup.
 */
@Test
public void testRewritingClusterIdToPB() throws Exception {
    TEST_UTIL.startMiniZKCluster();
    TEST_UTIL.startMiniDFSCluster(1);
    TEST_UTIL.createRootDir();
    Path rootDir = FSUtils.getRootDir(TEST_UTIL.getConfiguration());
    FileSystem fs = rootDir.getFileSystem(TEST_UTIL.getConfiguration());
    Path filePath = new Path(rootDir, HConstants.CLUSTER_ID_FILE_NAME);
    // Old format: a raw UTF string, no protobuf wrapping.
    try (FSDataOutputStream out = fs.create(filePath)) {
        out.writeUTF(TEST_UTIL.getRandomUUID().toString());
    }
    TEST_UTIL.startMiniHBaseCluster();
    HMaster master = TEST_UTIL.getHBaseCluster().getMaster();
    int expected = LoadBalancer.isTablesOnMaster(TEST_UTIL.getConfiguration()) ? 2 : 1;
    assertEquals(expected, master.getServerManager().getOnlineServersList().size());
}
/**
 * Stages a file on HDFS under the application's home-directory path, widens its
 * permissions, and registers it as an application-scoped YARN {@link LocalResource}.
 *
 * @param fs             file system to stage into
 * @param fileSrcPath    local file to upload, or {@code null} to write {@code resources} instead
 * @param fileDstPath    destination name; also the key under which the resource is registered
 * @param appId          application id, used to build the staging path
 * @param localResources map receiving the created resource entry
 * @param resources      payload written (modified-UTF-8) when no source file is given
 * @return the HDFS path of the staged file
 * @throws IOException if the upload, write, chmod, or status lookup fails
 */
private Path addToLocalResources(FileSystem fs, String fileSrcPath, String fileDstPath, String appId,
        Map<String, LocalResource> localResources, String resources) throws IOException {
    // Staging location: <home>/<appName>/<appId>/<fileDstPath>
    String suffix = appName + "/" + appId + "/" + fileDstPath;
    Path dst = new Path(fs.getHomeDirectory(), suffix);
    if (fileSrcPath != null) {
        // A local file was supplied: upload it as-is.
        fs.copyFromLocalFile(new Path(fileSrcPath), dst);
    } else {
        // No local source: materialize the in-memory resource string on HDFS (mode 0710).
        FSDataOutputStream ostream = null;
        try {
            ostream = FileSystem.create(fs, dst, new FsPermission((short) 0710));
            ostream.writeUTF(resources);
        } finally {
            IOUtils.closeQuietly(ostream);
        }
    }
    // Widen to 0755 — presumably so the file is readable during container localization.
    fs.setPermission(dst, new FsPermission((short) 0755));
    FileStatus uploaded = fs.getFileStatus(dst);
    LocalResource resource = LocalResource.newInstance(
            ConverterUtils.getYarnUrlFromURI(dst.toUri()),
            LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
            uploaded.getLen(), uploaded.getModificationTime());
    localResources.put(fileDstPath, resource);
    return dst;
}
/**
 * Best-effort logging of a failed URL line to the shared failure writer.
 * No-op when no writer is configured; write errors are logged, never propagated.
 * Synchronized because the writer is shared static state.
 */
private synchronized static void writeFailure(Text urlLine, Context context) {
    if (failureWriter == null) {
        return; // failure logging disabled
    }
    try {
        failureWriter.writeUTF(urlLine + "\n");
    } catch (final IOException e) {
        logger.error(e);
    }
}
/**
 * Method to update temporary file with latest XML processed information.
 * Writes {@code fileName} (modified-UTF-8) into "<tempFilePath>/<file-name>.txt".
 * @throws IOException - IO Exception occurred while writing data to temp file.
 */
private void updateFileTrackingInfo() throws IOException {
    Path trackingFile = new Path(tempFilePath, file.getName() + ".txt");
    try (FSDataOutputStream outputStream = fs.create(trackingFile)) {
        outputStream.writeUTF(fileName);
    }
}
}
/**
 * Writes {@code val} to {@code path} in DataOutput modified-UTF-8 format,
 * overwriting any existing file. Best-effort: IO errors are reported, not thrown,
 * preserving the original no-throw contract for callers.
 *
 * @param fs   target file system
 * @param path destination path (string form)
 * @param val  value to write
 */
public static void writeString(FileSystem fs, String path, String val) {
    // Fix: try-with-resources — the original skipped out.close() when writeUTF
    // threw, leaking the stream (and potentially an under-replicated HDFS block).
    try (FSDataOutputStream out = fs.create(new Path(path), true)) {
        out.writeUTF(val);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
/**
 * Persists every counter to the counts file: for each {@code Counts} constant,
 * writes its name (modified-UTF-8) followed by its long value, defaulting to 0
 * when the counter was never incremented.
 *
 * @throws IOException if the counts file cannot be created or written
 */
private void saveCounts() throws IOException {
    // Fix: try-with-resources — the original skipped out.close() if any write threw.
    try (FSDataOutputStream out = getHdfs().create(getCountsPath())) {
        for (Counts c : Counts.values()) {
            out.writeUTF(c.name());
            Long count = counts.get(c);
            out.writeLong(count != null ? count.longValue() : 0L);
        }
    }
}
/**
 * Persists every counter to the counts file: for each {@code Counts} constant,
 * writes its name (modified-UTF-8) followed by its long value, defaulting to 0
 * when the counter was never incremented.
 *
 * @throws IOException if the counts file cannot be created or written
 */
private void saveCounts() throws IOException {
    // Fix: try-with-resources — the original skipped out.close() if any write threw.
    try (FSDataOutputStream out = getHdfs().create(getCountsPath())) {
        for (Counts c : Counts.values()) {
            out.writeUTF(c.name());
            Long count = counts.get(c);
            out.writeLong(count != null ? count.longValue() : 0L);
        }
    }
}
/**
 * Persists every counter to the unforwarded-counts file: for each {@code Counts}
 * constant, writes its name (modified-UTF-8) followed by its long value,
 * defaulting to 0 when the counter was never incremented.
 *
 * @throws IOException if the counts file cannot be created or written
 */
private void saveCounts() throws IOException {
    // Fix: try-with-resources — the original skipped out.close() if any write threw.
    try (FSDataOutputStream out = getHdfs().create(getUnforwardedCountsPath())) {
        for (Counts c : Counts.values()) {
            out.writeUTF(c.name());
            Long count = counts.get(c);
            out.writeLong(count != null ? count.longValue() : 0L);
        }
    }
}