/**
 * Writes {@code str} to the underlying output stream.
 *
 * <p>NOTE: {@code DataOutput.writeChars} emits each character as two bytes
 * (UTF-16 code units), not UTF-8 text — readers must decode accordingly.
 *
 * @param str the string to write
 * @throws java.io.UncheckedIOException if the underlying stream fails
 */
public void writeString(String str) {
    try {
        out.writeChars(str);
    } catch (IOException e) {
        // Fix: the original swallowed the failure via printStackTrace(),
        // leaving callers unaware the write was lost. Rethrow unchecked with
        // the cause preserved (signature stays throws-free for callers).
        throw new java.io.UncheckedIOException("Failed to write string", e);
    }
}
/**
 * Ensures the target HDFS directory exists, then writes {@code text} to the
 * configured file path (overwriting any existing file).
 *
 * @param text content to write (emitted as UTF-16 via writeChars)
 * @throws IOException if directory creation or the write fails
 */
public void write(String text) throws IOException {
    // Create the parent directory first so file creation cannot fail on a
    // missing path.
    Path dir = new Path(getDirInHdfs());
    this.fileSystem.mkdirs(dir);
    Path target = new Path(this.filePathInHdfs);
    try (FSDataOutputStream stream = this.fileSystem.create(target)) {
        stream.writeChars(text);
    }
}
/**
 * (Re)creates a test input file at {@code path} containing the integers
 * {@code 0..rowCount-1}, one per line. Any pre-existing file is deleted first.
 *
 * <p>NOTE: writeChars emits UTF-16 code units (two bytes per char), not UTF-8.
 *
 * @param path     filesystem path of the file to create
 * @param rowCount number of rows to write
 * @throws IOException on filesystem failure
 */
private void createInputFile(Path path, int rowCount) throws IOException {
    if (fs.exists(path)) {
        fs.delete(path, true);
    }
    // Fix: try-with-resources closes the stream even if a write throws
    // (the original leaked the open stream on exception).
    try (FSDataOutputStream os = fs.create(path)) {
        for (int i = 0; i < rowCount; i++) {
            os.writeChars(i + "\n");
        }
    }
}
/**
 * (Re)creates a test input file at {@code path} containing the integers
 * {@code 0..rowCount-1}, one per line. Any pre-existing file is deleted first.
 *
 * <p>NOTE: writeChars emits UTF-16 code units (two bytes per char), not UTF-8.
 *
 * @param path     filesystem path of the file to create
 * @param rowCount number of rows to write
 * @throws IOException on filesystem failure
 */
private void createInputFile(Path path, int rowCount) throws IOException {
    if (fs.exists(path)) {
        fs.delete(path, true);
    }
    // Fix: try-with-resources closes the stream even if a write throws
    // (the original leaked the open stream on exception).
    try (FSDataOutputStream os = fs.create(path)) {
        for (int i = 0; i < rowCount; i++) {
            os.writeChars(i + "\n");
        }
    }
}
// NOTE(review): fragment — the enclosing method and surrounding loop headers
// are outside this view; this block cannot stand alone.
// Record one region in "- <start> <end>" form and count it (presumably a
// completed/processed split — the loop below uses the same format for
// `finished` regions; confirm against the enclosing method).
splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst())
    + " " + splitAlgo.rowToStr(region.getSecond()) + "\n");
splitCount++;
// Remove completed regions from the outstanding work set.
outstanding.removeAll(finished);
// Emit each finished region in the same "- start end" line format and
// count it toward splitCount.
for (Pair<byte[], byte[]> region : finished) {
    splitOut.writeChars("- " + splitAlgo.rowToStr(region.getFirst())
        + " " + splitAlgo.rowToStr(region.getSecond()) + "\n");
    splitCount++;
// NOTE(review): fragment — enclosing method is outside this view.
// Log the half-open range about to be split and the chosen split point,
// then record a pending split as "+ <start><separator><split>\n" in the
// temporary split file (the "+" prefix presumably marks pending entries —
// confirm against the reader of this file).
LOG.debug("Will Split [" + startStr + " , " + splitAlgo.rowToStr(r.getSecond()) + ") at " + splitStr);
tmpOut.writeChars("+ " + startStr + splitAlgo.separator() + splitStr + "\n");
// NOTE(review): fragment — stream creation and the rest of the
// commitStoreFile(...) call are outside this view.
// Deliberately write garbage into the store file (writeChars emits UTF-16
// bytes) so the committed file is corrupt, then close and commit it.
stream.writeChars("CORRUPT FILE!!!!");
stream.close();
Path origPath = store.getRegionFileSystem().commitStoreFile(
/**
 * Smoke-tests HDFS connectivity by writing {@code content} to
 * {@code fileName} (UTF-16 via writeChars).
 *
 * @param configuration Hadoop configuration used to resolve the filesystem
 * @param fileName      URI/path of the file to create
 * @param content       data to write
 * @throws IOException on filesystem failure
 */
private static final void testHdfs(Configuration configuration, String fileName, String content) throws IOException {
    Path hdfsFile = new Path(fileName);
    FileSystem fs = FileSystem.get(hdfsFile.toUri(), configuration);
    // Fix: try-with-resources replaces finally { Closeables.close(out, false) },
    // which could let a close-time exception mask the original write failure;
    // here a close failure is attached as a suppressed exception instead.
    try (FSDataOutputStream out = fs.create(hdfsFile)) {
        out.writeChars(content);
    }
} }
/**
 * Minimal helper for writing files to HDFS.
 *
 * <p>NOTE(review): the {@code ip} reference below is not declared in this
 * class — presumably a field of an enclosing/companion context; verify.
 */
public class HadoopFileSystemManager {

    /** Base HDFS URL ("hdfs://ip:8020"); initialized on first writeFile call. */
    private String url;

    /**
     * Writes {@code data} to {@code filePath} on HDFS (UTF-16 via writeChars).
     *
     * @param filePath target path on HDFS
     * @param data     content to write
     * @throws IOException        on filesystem failure
     * @throws URISyntaxException if the constructed HDFS URL is malformed
     */
    public void writeFile(String filePath, String data) throws IOException, URISyntaxException {
        // NOTE(review): setting java.library.path at runtime has no effect once
        // the JVM has resolved its native library path; kept for compatibility
        // but likely dead code.
        System.setProperty("java.library.path", "/opt/mapr/lib");
        Path fPath = new Path(filePath);
        // Fix: the original "String url = url = ..." declared a local that
        // shadowed the field, leaving this.url permanently null.
        this.url = "hdfs://" + ip + ":" + "8020";
        FileSystem fs = FileSystem.get(new URI(this.url), new Configuration());
        System.out.println(fs.getWorkingDirectory());
        // Fix: try-with-resources closes the stream even if the write throws
        // (the original leaked the open stream on exception).
        try (FSDataOutputStream writeStream = fs.create(fPath)) {
            writeStream.writeChars(data);
        }
    }
}
/**
 * Makes sure the HDFS target directory exists, then writes {@code text} into
 * the configured file path, replacing any previous content.
 *
 * @param text content to write (emitted as UTF-16 via writeChars)
 * @throws IOException if directory creation or the write fails
 */
public void write(String text) throws IOException {
    // Guarantee the destination directory before touching the file.
    this.fileSystem.mkdirs(new Path(getDirInHdfs()));
    try (FSDataOutputStream output = this.fileSystem.create(new Path(this.filePathInHdfs))) {
        output.writeChars(text);
    }
}
/**
 * Writes {@code text} to the configured HDFS file, creating its directory
 * first if needed. Existing file content is overwritten.
 *
 * @param text content to write (emitted as UTF-16 via writeChars)
 * @throws IOException if directory creation or the write fails
 */
public void write(String text) throws IOException {
    final String targetDir = getDirInHdfs();
    this.fileSystem.mkdirs(new Path(targetDir));
    final Path destination = new Path(this.filePathInHdfs);
    try (FSDataOutputStream writer = this.fileSystem.create(destination)) {
        writer.writeChars(text);
    }
}
/**
 * Writes {@code content} (rows of columns) to the HDFS path {@code url},
 * joining each row's columns with {@code separator} and terminating rows
 * with '\n'. Rows are emitted as UTF-16 via writeChars.
 *
 * @throws RuntimeException wrapping any IOException (Runnable.run cannot
 *         declare checked exceptions)
 */
@Override
public void run() {
    try (FileSystem fs = FileSystem.get(URI.create(QSQL_CLUSTER_URL), conf);
         // Fix: try-with-resources closes (and implicitly flushes) the stream
         // even when a write throws; the original finally { flush; close; }
         // could mask the write failure with a secondary flush exception.
         FSDataOutputStream out = fs.create(new Path(url))) {
        for (String[] arrayContent : content) {
            // Join one row's columns with the configured separator.
            StringBuilder builder = new StringBuilder();
            for (int j = 0; j < arrayContent.length; j++) {
                builder.append(arrayContent[j]);
                if (j != arrayContent.length - 1) {
                    builder.append(separator);
                }
            }
            builder.append("\n");
            out.writeChars(builder.toString());
        }
        // Drop the reference so the (potentially large) buffer can be GC'd.
        content = null;
        // Removed explicit System.gc(): it is advisory only and can cause
        // unnecessary full-GC pauses.
    } catch (IOException ex) {
        // Preserve the cause for callers/uncaught-exception handlers.
        throw new RuntimeException(ex);
    }
}
}
/**
 * (Re)creates a test input file at {@code path} containing the integers
 * {@code 0..rowCount-1}, one per line. Any pre-existing file is deleted first.
 *
 * <p>NOTE: writeChars emits UTF-16 code units (two bytes per char), not UTF-8.
 *
 * @param path     filesystem path of the file to create
 * @param rowCount number of rows to write
 * @throws IOException on filesystem failure
 */
private void createInputFile(Path path, int rowCount) throws IOException {
    if (fs.exists(path)) {
        fs.delete(path, true);
    }
    // Fix: try-with-resources closes the stream even if a write throws
    // (the original leaked the open stream on exception).
    try (FSDataOutputStream os = fs.create(path)) {
        for (int i = 0; i < rowCount; i++) {
            String s = i + "\n";
            os.writeChars(s);
        }
    }
}
/**
 * Creates {@code fileName} under {@code dir} and writes the literal
 * "teststring" into it (UTF-16 via writeChars).
 *
 * @param dfs      filesystem to write through
 * @param dir      parent directory of the new file
 * @param fileName name of the file to create
 * @throws IOException on filesystem failure
 */
private void writeFile(final DistributedFileSystem dfs, Path dir, String fileName) throws IOException {
    // Path(parent, child) replaces manual toString()+SEPARATOR concatenation.
    Path filePath = new Path(dir, fileName);
    // Fix: try-with-resources closes the stream even if the write throws
    // (the original leaked the open stream on exception).
    try (FSDataOutputStream out = dfs.create(filePath)) {
        out.writeChars("teststring");
    }
}
/**
 * Verifies that copying a file written through the raw filesystem (so no
 * checksum sidecar exists) fails CRC verification: -get must exit with 1.
 */
@Test
public void testCorruptedCopyCrc() throws Exception {
    // Fix: try-with-resources closes the stream even if the write throws
    // (the original leaked the open stream on exception).
    try (FSDataOutputStream out = lfs.getRawFileSystem().create(srcPath)) {
        out.writeChars("bang");
    }
    // Expect exit code 1 from the shell copy.
    shellRun(1, "-get", srcPath.toString(), dstPath.toString());
}
/**
 * Verifies that copying a file written through the raw filesystem (so no
 * checksum sidecar exists) fails CRC verification: -get must exit with 1.
 */
@Test
public void testCorruptedCopyCrc() throws Exception {
    // Fix: try-with-resources closes the stream even if the write throws
    // (the original leaked the open stream on exception).
    try (FSDataOutputStream out = lfs.getRawFileSystem().create(srcPath)) {
        out.writeChars("bang");
    }
    // Expect exit code 1 from the shell copy.
    shellRun(1, "-get", srcPath.toString(), dstPath.toString());
}
/**
 * Test fixture: enables checksum verification/writing, removes any stale
 * src/dst files, and creates a fresh source file whose .crc sidecar must
 * exist afterwards.
 */
@Before
public void prepFiles() throws Exception {
    lfs.setVerifyChecksum(true);
    lfs.setWriteChecksum(true);
    lfs.delete(srcPath, true);
    lfs.delete(dstPath, true);
    // Fix: try-with-resources closes the stream even if the write throws
    // (the original leaked the open stream on exception).
    try (FSDataOutputStream out = lfs.create(srcPath)) {
        out.writeChars("hi");
    }
    // Writing through the checksummed FS must have produced a checksum file.
    assertTrue(lfs.exists(lfs.getChecksumFile(srcPath)));
}
/**
 * Test fixture: enables checksum verification/writing, removes any stale
 * src/dst files, and creates a fresh source file whose .crc sidecar must
 * exist afterwards.
 */
@Before
public void prepFiles() throws Exception {
    lfs.setVerifyChecksum(true);
    lfs.setWriteChecksum(true);
    lfs.delete(srcPath, true);
    lfs.delete(dstPath, true);
    // Fix: try-with-resources closes the stream even if the write throws
    // (the original leaked the open stream on exception).
    try (FSDataOutputStream out = lfs.create(srcPath)) {
        out.writeChars("hi");
    }
    // Writing through the checksummed FS must have produced a checksum file.
    assertTrue(lfs.exists(lfs.getChecksumFile(srcPath)));
}
// NOTE(review): fragment — stream creation and the rest of the
// commitStoreFile(...) call are outside this view.
// Deliberately write garbage into the store file (writeChars emits UTF-16
// bytes) so the committed file is corrupt, then close and commit it.
stream.writeChars("CORRUPT FILE!!!!");
stream.close();
Path origPath = store.getRegionFileSystem().commitStoreFile(
/**
 * Verifies that the Mover's Processor schedules a replica move for a block
 * once, and rejects scheduling the very same replica a second time.
 *
 * @throws IOException on mini-cluster or filesystem failure
 */
@Test
public void testScheduleSameBlock() throws IOException {
    final Configuration conf = new HdfsConfiguration();
    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(4).build();
    try {
        cluster.waitActive();
        final DistributedFileSystem dfs = cluster.getFileSystem();
        final String file = "/testScheduleSameBlock/file";

        // Create a small file (single block) to move.
        // Fix: try-with-resources closes the stream even if the write throws
        // (the original leaked the open stream on exception).
        try (FSDataOutputStream out = dfs.create(new Path(file))) {
            out.writeChars("testScheduleSameBlock");
        }

        final Mover mover = newMover(conf);
        mover.init();
        final Mover.Processor processor = mover.new Processor();

        // Look up the file's first block and one of its replica locations.
        final LocatedBlock lb = dfs.getClient().getLocatedBlocks(file, 0).get(0);
        final List<MLocation> locations = MLocation.toLocations(lb);
        final MLocation ml = locations.get(0);
        final DBlock db = mover.newDBlock(lb.getBlock().getLocalBlock(), locations);

        final List<StorageType> storageTypes = new ArrayList<StorageType>(
            Arrays.asList(StorageType.DEFAULT, StorageType.DEFAULT));
        // First schedule succeeds; re-scheduling the same replica must fail.
        Assert.assertTrue(processor.scheduleMoveReplica(db, ml, storageTypes));
        Assert.assertFalse(processor.scheduleMoveReplica(db, ml, storageTypes));
    } finally {
        // Always tear down the mini-cluster, even if assertions fail.
        cluster.shutdown();
    }
}