Refine search
public static void cleanup() throws IOException { // clean up commitlog String[] directoryNames = { DatabaseDescriptor.getCommitLogLocation(), }; for (String dirName : directoryNames) { File dir = new File(dirName); if (!dir.exists()) throw new RuntimeException("No such directory: " + dir.getAbsolutePath()); FileUtils.deleteRecursive(dir); } // clean up data directory which are stored as data directory/table/data files for (String dirName : DatabaseDescriptor.getAllDataFileLocations()) { File dir = new File(dirName); if (!dir.exists()) throw new RuntimeException("No such directory: " + dir.getAbsolutePath()); FileUtils.deleteRecursive(dir); } }
/**
 * Creates a directory at the given path.
 *
 * @param dir path of the directory to create
 * @throws IOException if the directory cannot be created
 */
private static void mkdir(String dir) throws IOException
{
    FileUtils.createDirectory(dir);
}
/**
 * Loads AWS credentials from the clear-text properties file at {@code CRED_FILE}.
 * Populates {@code AWS_ACCESS_ID} and {@code AWS_KEY}, defaulting each to the
 * empty string when the corresponding property is absent.
 *
 * @throws RuntimeException if the credential file cannot be read or parsed
 */
public ClearCredential()
{
    // try-with-resources replaces the manual finally/closeQuietly pattern
    try (FileInputStream fis = new FileInputStream(CRED_FILE))
    {
        final Properties props = new Properties();
        props.load(fis);
        // read each property once; trim whitespace, fall back to "" so callers never see null
        String accessId = props.getProperty("AWSACCESSID");
        AWS_ACCESS_ID = accessId != null ? accessId.trim() : "";
        String key = props.getProperty("AWSKEY");
        AWS_KEY = key != null ? key.trim() : "";
    }
    catch (Exception e)
    {
        logger.error("Exception with credential file ", e);
        throw new RuntimeException("Problem reading credential file. Cannot start.", e);
    }
}
/**
 * Recursively removes the given directory if it exists; a missing directory is a no-op.
 *
 * @param dir path of the directory to remove
 * @throws IOException if deletion fails
 */
private static void rmdir(String dir) throws IOException
{
    File dirFile = new File(dir);
    if (dirFile.exists())
        FileUtils.deleteRecursive(dirFile); // reuse dirFile rather than re-wrapping the path
}
/**
 * Rebuilds this sstable's index summary at the given sampling level by
 * re-reading the on-disk primary index.
 *
 * @param newSamplingLevel sampling level for the rebuilt summary
 * @return the newly built index summary
 * @throws IOException on read failure
 */
private IndexSummary buildSummaryAtLevel(int newSamplingLevel) throws IOException
{
    // we read the positions in a BRAF so we don't have to worry about an entry spanning a mmap boundary.
    RandomAccessReader primaryIndex = RandomAccessReader.open(new File(descriptor.filenameFor(Component.PRIMARY_INDEX)));
    try
    {
        long indexSize = primaryIndex.length();
        try (IndexSummaryBuilder summaryBuilder = new IndexSummaryBuilder(estimatedKeys(), metadata.params.minIndexInterval, newSamplingLevel))
        {
            long indexPosition;
            // walk every index entry, offering each key + its file position to the builder,
            // which samples them according to the requested level
            while ((indexPosition = primaryIndex.getFilePointer()) != indexSize)
            {
                summaryBuilder.maybeAddEntry(decorateKey(ByteBufferUtil.readWithShortLength(primaryIndex)), indexPosition);
                // skip the row index entry payload; only the key and its position are needed here
                RowIndexEntry.Serializer.skip(primaryIndex, descriptor.version);
            }
            return summaryBuilder.build(getPartitioner());
        }
    }
    finally
    {
        FileUtils.closeQuietly(primaryIndex);
    }
}
/**
 * Drops one reference. When the count reaches zero the index is closed and the
 * sstable reference released; if the sstable is obsolete or fully released,
 * the index file is deleted as well.
 */
public void release()
{
    if (references.decrementAndGet() != 0)
        return;

    FileUtils.closeQuietly(index);
    sstableRef.release();
    boolean removeIndexFile = obsolete.get() || sstableRef.globalCount() == 0;
    if (removeIndexFile)
        FileUtils.delete(index.getIndexPath());
}
/**
 * Closes and accounts for a discarded commitlog segment. A segment still
 * flagged as containing CDC data is moved into the CDC log directory;
 * otherwise its file is deleted when {@code delete} is set.
 */
public void discard(CommitLogSegment segment, boolean delete)
{
    segment.close();
    addSize(-segment.onDiskSize());
    cdcSizeTracker.processDiscardedSegment(segment);

    boolean keepForCdc = segment.getCDCState() == CDCState.CONTAINS;
    if (keepForCdc)
    {
        String cdcDestination = DatabaseDescriptor.getCDCLogLocation() + File.separator + segment.logFile.getName();
        FileUtils.renameWithConfirm(segment.logFile.getAbsolutePath(), cdcDestination);
    }
    else if (delete)
    {
        FileUtils.deleteWithConfirm(segment.logFile);
    }
}
/**
 * Replaces the sstable's STATS component with the given metadata by writing a
 * temp file first and then renaming it over the live file.
 *
 * @param descriptor identifies the sstable being updated
 * @param currentComponents metadata components to serialize
 * @throws IOException on write failure
 */
private void rewriteSSTableMetadata(Descriptor descriptor, Map<MetadataType, MetadataComponent> currentComponents) throws IOException
{
    String filePath = descriptor.tmpFilenameFor(Component.STATS);
    try (DataOutputStreamPlus out = new BufferedDataOutputStreamPlus(new FileOutputStream(filePath)))
    {
        serialize(currentComponents, out, descriptor.version);
        out.flush();
    }

    // we can't move a file on top of another file on Windows, so delete the target first:
    if (FBUtilities.isWindows)
        FileUtils.delete(descriptor.filenameFor(Component.STATS));

    FileUtils.renameWithConfirm(filePath, descriptor.filenameFor(Component.STATS));
}
// NOTE(review): this brace closes a scope that starts before this chunk (likely the enclosing class)
}
private void saveSummary(SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder, IndexSummary summary) { File summariesFile = new File(descriptor.filenameFor(Component.SUMMARY)); if (summariesFile.exists()) FileUtils.deleteWithConfirm(summariesFile); DataOutputStreamAndChannel oStream = null; try { oStream = new DataOutputStreamAndChannel(new FileOutputStream(summariesFile)); IndexSummary.serializer.serialize(summary, oStream, descriptor.version.hasSamplingLevel); ByteBufferUtil.writeWithLength(first.getKey(), oStream); ByteBufferUtil.writeWithLength(last.getKey(), oStream); ibuilder.serializeBounds(oStream); dbuilder.serializeBounds(oStream); // write a magic number, to indicate this summary has been sampled correctly oStream.writeInt(ACCURATE_BOUNDARIES_MAGIC_NUMBER); } catch (IOException e) { logger.debug("Cannot save SSTable Summary: ", e); // corrupted hence delete it and let it load it now. if (summariesFile.exists()) FileUtils.deleteWithConfirm(summariesFile); } finally { FileUtils.closeQuietly(oStream); } }
/** * Save index summary to Summary.db file. */ public static void saveSummary(Descriptor descriptor, DecoratedKey first, DecoratedKey last, IndexSummary summary) { File summariesFile = new File(descriptor.filenameFor(Component.SUMMARY)); if (summariesFile.exists()) FileUtils.deleteWithConfirm(summariesFile); try (DataOutputStreamPlus oStream = new BufferedDataOutputStreamPlus(new FileOutputStream(summariesFile));) { IndexSummary.serializer.serialize(summary, oStream, descriptor.version.hasSamplingLevel()); ByteBufferUtil.writeWithLength(first.getKey(), oStream); ByteBufferUtil.writeWithLength(last.getKey(), oStream); } catch (IOException e) { logger.trace("Cannot save SSTable Summary: ", e); // corrupted hence delete it and let it load it now. if (summariesFile.exists()) FileUtils.deleteWithConfirm(summariesFile); } }
/**
 * Returns a buffer to the pool, or hands it to {@code FileUtils.clean} when
 * the pool is already at capacity.
 */
public void releaseBuffer(ByteBuffer buffer)
{
    usedBuffers.decrementAndGet();

    boolean poolHasRoom = bufferPool.size() < maxBufferPoolSize;
    if (poolHasRoom)
        bufferPool.add(buffer);
    else
        FileUtils.clean(buffer);
}
/**
 * Convenience overload: creates a hard link, taking paths as strings.
 *
 * @param from existing source file path
 * @param to path of the hard link to create
 */
public static void createHardLink(String from, String to)
{
    File source = new File(from);
    File target = new File(to);
    createHardLink(source, target);
}
/**
 * Dump the inter arrival times for examination if necessary.
 * Appends this detector's {@code toString()} output to a temp file.
 *
 * @throws FSWriteError if the dump cannot be written
 */
public void dumpInterArrivalTimes()
{
    File file = FileUtils.createTempFile("failuredetector-", ".dat");
    // try-with-resources guarantees the stream is closed even on failure
    try (OutputStream os = new BufferedOutputStream(new FileOutputStream(file, true)))
    {
        // use an explicit charset instead of the platform default
        os.write(toString().getBytes(java.nio.charset.StandardCharsets.UTF_8));
    }
    catch (IOException e)
    {
        throw new FSWriteError(e, file);
    }
}
/**
 * Closes every given resource, delegating to {@code close(Iterable)}.
 *
 * @param cs resources to close
 * @throws IOException if closing fails
 */
public static void close(Closeable... cs) throws IOException
{
    close(Arrays.asList(cs));
}
/**
 * Simply delete untracked segment files w/standard, as it'll be flushed to sstables during recovery
 *
 * @param file segment file that is no longer in use.
 */
void handleReplayedSegment(final File file)
{
    // (don't decrease managed size, since this was never a "live" segment)
    logger.trace("(Unopened) segment {} is no longer needed and will be deleted now", file);
    FileUtils.deleteWithConfirm(file);
}
/**
 * Creates a temp file in the JVM's default temporary-file directory.
 *
 * @param prefix temp file name prefix
 * @param suffix temp file name suffix
 * @return the created file
 */
public static File createTempFile(String prefix, String suffix)
{
    File defaultTmpDir = new File(System.getProperty("java.io.tmpdir"));
    return createTempFile(prefix, suffix, defaultTmpDir);
}
/**
 * Deletes every file inside the saved-caches directory; does nothing when the
 * directory is missing or is not a directory.
 */
private static void cleanupSavedCaches()
{
    File cachesDir = new File(DatabaseDescriptor.getSavedCachesLocation());
    boolean usable = cachesDir.exists() && cachesDir.isDirectory();
    if (usable)
        FileUtils.delete(cachesDir.listFiles());
}
/**
 * Moves {@code from} to {@code to} on a best-effort basis: a failed move is
 * logged at trace level instead of being propagated.
 *
 * @param from source path
 * @param to destination path
 */
public static void renameWithOutConfirm(String from, String to)
{
    try
    {
        atomicMoveWithFallback(new File(from).toPath(), new File(to).toPath());
    }
    catch (IOException e)
    {
        // parameterized logging avoids string concatenation and the explicit isTraceEnabled guard;
        // SLF4J treats the trailing Throwable as the stack trace to log
        logger.trace("Could not move file {} to {}", from, to, e);
    }
}
/**
 * Deletes the lockfile backing this object.
 */
public void delete()
{
    FileUtils.delete(lockfile);
}
public static void startCassandraInstance(String pathToDataDir) throws TTransportException, IOException, InterruptedException, SecurityException, IllegalArgumentException, NoSuchMethodException, IllegalAccessException, InvocationTargetException { if (cassandraStarted) { return; } try { FileUtils.deleteRecursive(new File(pathToDataDir)); } catch (AssertionError e) { // eat } embedded = new EmbeddedServerHelper(); try { embedded.setup(); } catch (ConfigurationException ce) { throw new RuntimeException(ce); } cassandraStarted = true; }