@Override public void configure(ServerContext context, String configuration) { requireNonNull(configuration); // instance_name,zookeepers int index = configuration.indexOf(','); if (index == -1) { try { Thread.sleep(1000); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } throw new IllegalArgumentException("Expected comma in configuration string"); } instanceName = configuration.substring(0, index); zookeepers = configuration.substring(index + 1); conf = context.getConfiguration(); try { fs = VolumeManagerImpl.get(conf, context.getHadoopConf()); } catch (IOException e) { log.error("Could not connect to filesystem", e); throw new RuntimeException(e); } }
/**
 * Applies any configured volume replacements to the root tablet's location,
 * recording the new directory in the metadata when a replacement occurs.
 *
 * @param context server context supplying configuration
 * @param location current root table directory
 * @return the (possibly switched) location
 * @throws IOException if updating the root tablet directory fails
 */
public static String switchRootTableVolume(ServerContext context, String location)
    throws IOException {
  String switched = switchVolume(location, FileType.TABLE,
      ServerConstants.getVolumeReplacements(context.getConfiguration(), context.getHadoopConf()));
  if (switched == null) {
    // No replacement configured for this volume; keep the original location.
    return location;
  }
  MetadataTableUtil.setRootTabletDir(context, switched);
  log.info("Volume replaced: {} -> {}", location, switched);
  return new Path(switched).toString();
}
/**
 * Convenience overload: derives the base URIs from the Accumulo and Hadoop
 * configurations held by the given {@code context}.
 */
public static String[] getBaseUris(ServerContext context) {
  return getBaseUris(context.getConfiguration(), context.getHadoopConf());
}
@Override
public void run() {
  // Attempts to close the write-ahead log at {@code source}. If the closer
  // reports a positive delay the log is not yet ready, so this task is
  // rescheduled; otherwise the sort step is initiated. The queued-task entry
  // is removed only when this task will not run again.
  boolean rescheduled = false;
  try {
    long delay = closer.close(master.getConfiguration(), master.getContext().getHadoopConf(),
        master.getFileSystem(), new Path(source));
    if (delay > 0) {
      executor.schedule(this, delay, TimeUnit.MILLISECONDS);
      rescheduled = true;
    } else {
      initiateSort(sortId, source, destination);
    }
  } catch (FileNotFoundException e) {
    // A missing log file is a normal race (handled elsewhere) — debug only,
    // no stack trace. Fixed typo ("initate") and use parameterized logging.
    log.debug("Unable to initiate log sort for {}: {}", source, String.valueOf(e));
  } catch (Exception e) {
    log.warn("Failed to initiate log sort {}", source, e);
  } finally {
    if (!rescheduled) {
      synchronized (RecoveryManager.this) {
        closeTasksQueued.remove(sortId);
      }
    }
  }
}
@Override
public void process(String workID, byte[] data) {
  // Work item payload format: "<sourcePath>,<destPath>" encoded as UTF-8.
  String[] paths = new String(data, UTF_8).split(",");
  Path orig = new Path(paths[0]);
  Path dest = new Path(paths[1]);
  // Copy into a ".tmp" sibling first so a partially copied file is never
  // visible at the final destination.
  Path tmp = new Path(dest.getParent(), dest.getName() + ".tmp");
  VolumeManager vm = context.getVolumeManager();
  try {
    FileSystem origFs = vm.getVolumeByPath(orig).getFileSystem();
    FileSystem destFs = vm.getVolumeByPath(dest).getFileSystem();
    FileUtil.copy(origFs, orig, destFs, tmp, false, true, context.getHadoopConf());
    // Hadoop FileSystem.rename reports failure by returning false rather than
    // throwing; convert that into an IOException so the failure-marker path
    // below runs instead of logging a successful copy that never landed.
    if (!destFs.rename(tmp, dest)) {
      throw new IOException("Rename of " + tmp + " to " + dest + " returned false");
    }
    log.debug("copied {} to {}", orig, dest);
  } catch (IOException ex) {
    try {
      // Create an empty flag file at dest to mark this copy as failed.
      FileSystem destFs = vm.getVolumeByPath(dest).getFileSystem();
      destFs.create(dest).close();
      log.warn(" marked " + dest + " failed", ex);
    } catch (IOException e) {
      log.error("Unable to create failure flag file " + dest, e);
    }
  }
}
throws IOException { List<Pair<Path,Path>> replacements = ServerConstants .getVolumeReplacements(context.getConfiguration(), context.getHadoopConf()); log.trace("Using volume replacements: {}", replacements);
if (!FileUtil.copy(fs1, dir, fs2, newDir, false, context.getHadoopConf())) { throw new IOException("Failed to copy " + dir + " to " + newDir);
.getVolumeReplacements(getConfiguration(), getContext().getHadoopConf());
Configuration conf = context.getHadoopConf(); FileSystem fs = FileSystem.getLocal(conf);
.getVolumeReplacements(master.getConfiguration(), master.getContext().getHadoopConf())); if (switchedWalog != null) {
mapFiles = reduceFiles(context, context.getHadoopConf(), prevEndRow, endRow, mapFiles, maxToOpen, tmpDir, 0); long t2 = System.currentTimeMillis();
mapFiles = reduceFiles(context, context.getHadoopConf(), prevEndRow, endRow, mapFiles, maxToOpen, tmpDir, 0); long t2 = System.currentTimeMillis();
/**
 * Lazily opens and caches an RFile reader over the in-memory dump file on the
 * local filesystem. Synchronized so concurrent callers share a single reader.
 *
 * @return the cached (possibly sample-wrapped) reader
 * @throws IOException if the reader cannot be opened
 */
private synchronized FileSKVIterator getReader() throws IOException {
  if (reader != null) {
    return reader;
  }
  Configuration hadoopConf = context.getHadoopConf();
  FileSystem localFs = FileSystem.getLocal(hadoopConf);
  reader = new RFileOperations().newReaderBuilder()
      .forFile(memDumpFile, localFs, hadoopConf, context.getCryptoService())
      .withTableConfiguration(context.getConfiguration()).seekToBeginning().build();
  if (iflag != null) {
    reader.setInterruptFlag(iflag);
  }
  if (getSamplerConfig() != null) {
    // Wrap with a sampling view when a sampler is configured.
    reader = reader.getSample(getSamplerConfig());
  }
  return reader;
}
for (FileRef file : files) { FileSystem fs = volumeManager.getVolumeByPath(file.path()).getFileSystem(); Configuration conf = context.getHadoopConf(); SummaryCollection fsc = SummaryReader .load(fs, conf, factory, file.path(), summarySelector, summaryCache, indexCache,
RootTable.NAME, TableState.ONLINE, NodeExistsPolicy.SKIP); Initialize.initSystemTablesConfig(context.getZooReaderWriter(), context.getZooKeeperRoot(), context.getHadoopConf());
final long minBlockSize = context.getHadoopConf() .getLong("dfs.namenode.fs-limits.min-block-size", 0); if (minBlockSize != 0 && minBlockSize > walogMaxSize)