/**
 * Opens a new domain version through a short-lived Coordinator.
 *
 * <p>Runs a {@code DomainVersionOpener} against a Coordinator created (and
 * subsequently closed) by {@link RunWithCoordinator#run}, then returns the
 * result the opener captured.
 *
 * @param configurator            used to create the temporary Coordinator
 * @param domainName              domain to open a version for
 * @param domainVersionProperties properties to attach to the new version
 * @return the opened version number and partition count
 * @throws IOException if the Coordinator interaction fails
 */
public static DomainVersionNumberAndNumPartitions openVersion(CoordinatorConfigurator configurator,
                                                              String domainName,
                                                              DomainVersionProperties domainVersionProperties) throws IOException {
  final DomainVersionOpener opener = new DomainVersionOpener(domainName, domainVersionProperties);
  RunWithCoordinator.run(configurator, opener);
  // The opener stashes its outcome in a public field; surface it to the caller.
  return opener.result;
}
/**
 * Creates a Coordinator from the given configurator, hands it to the supplied
 * runnable, and guarantees the Coordinator is closed afterwards — even when
 * the runnable throws.
 *
 * @param configurator            used to create the Coordinator
 * @param runnableWithCoordinator work to perform while the Coordinator is open
 * @throws IOException if the runnable or the Coordinator lifecycle fails
 */
public static void run(CoordinatorConfigurator configurator,
                       RunnableWithCoordinator runnableWithCoordinator) throws IOException {
  final Coordinator coordinator = createCoordinator(configurator);
  try {
    runnableWithCoordinator.run(coordinator);
  } finally {
    // Release the Coordinator no matter how run() exited.
    coordinator.close();
  }
}
}
/**
 * Hadoop mapper setup hook: prepares a unique local scratch directory, reads
 * the domain-compactor job configuration, and resolves the domain version that
 * this task will compact.
 *
 * @param conf job configuration carrying domain builder / compactor properties
 * @throws RuntimeException if the local directory cannot be determined or
 *                          created, or if the domain configuration cannot be
 *                          loaded from the Coordinator
 */
@Override
public void configure(JobConf conf) {
  // Create unique local directory
  String uniqueString = UUID.randomUUID().toString();
  String localTmpOutputPath;
  try {
    localTmpOutputPath = conf.getLocalPath(uniqueString).toString();
  } catch (IOException e) {
    throw new RuntimeException("Failed to determine local temporary output directory", e);
  }
  localTmpOutput = new File(localTmpOutputPath);
  // Fail fast if the directory already exists (UUID collision / stale state)
  // or cannot be created.
  if (localTmpOutput.exists() || !localTmpOutput.mkdirs()) {
    throw new RuntimeException("Failed to initialize local temporary output directory " + localTmpOutputPath);
  }
  // Load configuration items
  final String domainName = DomainBuilderProperties.getDomainName(conf);
  final int versionNumberToCompact = DomainCompactorProperties.getVersionNumberToCompact(domainName, conf);
  // Create Coordinator
  // NOTE(review): the Coordinator is kept in a field and is not closed here;
  // presumably the task's close() releases it — confirm against the class.
  coordinator = RunWithCoordinator.createCoordinator(DomainBuilderProperties.getConfigurator(conf));
  // Determine version to compact
  try {
    Domain domain = DomainBuilderProperties.getDomain(coordinator, domainName);
    HadoopDomainCompactorMapper.this.storageEngine = domain.getStorageEngine();
    HadoopDomainCompactorMapper.this.domainVersionToCompact = DomainBuilderProperties.getDomainVersion(coordinator, domainName, versionNumberToCompact);
  } catch (IOException e) {
    throw new RuntimeException("Failed to load configuration.", e);
  }
}
/**
 * Closes the given domain version through a short-lived Coordinator.
 *
 * @param configurator        used to create the temporary Coordinator
 * @param domainName          domain owning the version
 * @param domainVersionNumber version to close
 * @throws IOException if the Coordinator interaction fails
 */
public static void closeVersion(CoordinatorConfigurator configurator,
                                String domainName,
                                Integer domainVersionNumber) throws IOException {
  final DomainVersionCloser closer = new DomainVersionCloser(domainName, domainVersionNumber);
  RunWithCoordinator.run(configurator, closer);
}
/**
 * Looks up the remote domain root for this builder's domain via a
 * short-lived Coordinator.
 *
 * @return the remote domain root path captured by the getter
 * @throws IOException if the Coordinator interaction fails
 */
public String getRemoteDomainRoot() throws IOException {
  final DomainBuilderRemoteDomainRootGetter rootGetter = new DomainBuilderRemoteDomainRootGetter(domainName);
  RunWithCoordinator.run(configurator, rootGetter);
  // The getter stashes its outcome in a public field; surface it to the caller.
  return rootGetter.result;
}
/**
 * Cancels the given domain version through a short-lived Coordinator.
 *
 * @param configurator        used to create the temporary Coordinator
 * @param domainName          domain owning the version
 * @param domainVersionNumber version to cancel
 * @throws IOException if the Coordinator interaction fails
 */
public static void cancelVersion(CoordinatorConfigurator configurator,
                                 String domainName,
                                 Integer domainVersionNumber) throws IOException {
  final DomainVersionCanceller canceller = new DomainVersionCanceller(domainName, domainVersionNumber);
  RunWithCoordinator.run(configurator, canceller);
}
/**
 * Lazily loads domain configuration (partition count, storage engine,
 * partitioner) from the Coordinator. Does nothing when both the storage
 * engine and partitioner have already been resolved.
 *
 * @param flowProcess Cascading flow process used to build the configurator
 * @throws RuntimeException wrapping any IOException raised while loading
 */
private void loadConfiguration(FlowProcess flowProcess) {
  if (storageEngine != null && partitioner != null) {
    // Already configured; nothing to do.
    return;
  }
  try {
    RunWithCoordinator.run(DomainBuilderProperties.getConfigurator(domainName, flowProcess),
        new RunnableWithCoordinator() {
          @Override
          public void run(Coordinator coordinator) throws IOException {
            Domain domain = DomainBuilderProperties.getDomain(coordinator, domainName);
            domainNumParts = domain.getNumParts();
            storageEngine = domain.getStorageEngine();
            partitioner = domain.getPartitioner();
          }
        });
  } catch (IOException e) {
    throw new RuntimeException("Failed to load configuration.", e);
  }
}
}
/**
 * Hadoop setup hook: resolves the Domain named in the job configuration and
 * caches its storage engine, partitioner, and partition count on this task.
 *
 * @param conf job configuration carrying domain builder properties
 * @throws RuntimeException wrapping any IOException raised while loading
 */
public void configure(JobConf conf) {
  final String name = DomainBuilderProperties.getDomainName(conf);
  try {
    RunWithCoordinator.run(DomainBuilderProperties.getConfigurator(conf),
        new RunnableWithCoordinator() {
          @Override
          public void run(Coordinator coordinator) throws IOException {
            Domain domain = DomainBuilderProperties.getDomain(coordinator, name);
            // These assignments are independent; cache everything we need
            // so later calls avoid touching the Coordinator.
            numPartitions = domain.getNumParts();
            partitioner = domain.getPartitioner();
            storageEngine = domain.getStorageEngine();
          }
        });
  } catch (IOException e) {
    throw new RuntimeException("Failed to load configuration.", e);
  }
}
/**
 * Finalizes the current partition writer, if one is open: closes it, moves
 * its output to the destination (empty partitions go to a separate
 * directory), and — for non-empty partitions only — records partition
 * metadata on the domain version via a short-lived Coordinator.
 *
 * @throws IOException if closing, moving, or metadata recording fails
 */
private void closeCurrentWriterIfNeeded() throws IOException {
  if (writer != null) {
    LOG.info("Closing current partition writer: " + writer.toString());
    // Close before moving so all bytes are flushed to writerOutputPath.
    writer.close();
    FileSystem fs = writerOutputPath.getFileSystem(jobConf);
    if (numRecordsWritten > 0) {
      // Move non empty partition data
      moveContentsAndDelete(writerOutputPath, new Path(outputPath), fs, LOG);
      // Record metatada only if it's not an empty partition
      RunWithCoordinator.run(configurator, new RunnableWithCoordinator() {
        @Override
        public void run(Coordinator coordinator) throws IOException {
          DomainVersion domainVersion = DomainBuilderProperties.getDomainVersion(coordinator, domainName, domainVersionNumber);
          domainVersion.addPartitionProperties(writerPartition, writer.getNumBytesWritten(), writer.getNumRecordsWritten());
        }
      });
    } else {
      // Move empty partition data
      moveContentsAndDelete(writerOutputPath, new Path(outputPath, EMPTY_PARTITIONS_DIR), fs, LOG);
    }
  }
}
/**
 * Builds a record writer for a domain build job: reads the domain name,
 * version number, and configurator from the job configuration, then resolves
 * the Domain, DomainVersion, and storage engine through a short-lived
 * Coordinator.
 *
 * @param conf       job configuration carrying domain builder properties
 * @param outputPath destination path for written partition data
 * @throws IOException if configuration loading or the Coordinator
 *                     interaction fails
 */
DomainBuilderRecordWriter(JobConf conf, String outputPath) throws IOException {
  this.jobConf = conf;
  // Load configuration items
  this.configurator = DomainBuilderProperties.getConfigurator(conf);
  this.domainName = DomainBuilderProperties.getDomainName(conf);
  this.domainVersionNumber = DomainBuilderProperties.getVersionNumber(domainName, conf);
  this.outputPath = outputPath;
  RunWithCoordinator.run(configurator, new RunnableWithCoordinator() {
    @Override
    public void run(Coordinator coordinator) throws IOException {
      // Order matters: domain must be assigned before storageEngine reads it.
      DomainBuilderRecordWriter.this.domain = DomainBuilderProperties.getDomain(coordinator, domainName);
      DomainBuilderRecordWriter.this.domainVersion = DomainBuilderProperties.getDomainVersion(coordinator, domainName, domainVersionNumber);
      DomainBuilderRecordWriter.this.storageEngine = domain.getStorageEngine();
    }
  });
}
/**
 * Produces one input split per domain partition for the compaction job.
 *
 * <p>First resolves the domain and the version to compact via a short-lived
 * Coordinator (stored in fields by the runnable). For incremental storage
 * engines, each split also carries the hosts holding the partition's remote
 * files, sorted by locality, so Hadoop can schedule tasks near the data.
 *
 * @param conf    job configuration carrying domain builder / compactor properties
 * @param ignored requested split count hint; unused — splits are one per partition
 * @return one split per partition of the domain
 * @throws IOException if configuration loading or locality computation fails
 */
@Override
public InputSplit[] getSplits(final JobConf conf, int ignored) throws IOException {
  final String domainName = DomainBuilderProperties.getDomainName(conf);
  RunWithCoordinator.run(DomainBuilderProperties.getConfigurator(conf), new RunnableWithCoordinator() {
    @Override
    public void run(Coordinator coordinator) throws IOException {
      // Side effect: populate the enclosing instance's fields.
      domain = DomainBuilderProperties.getDomain(coordinator, domainName);
      domainVersionToCompact = domain.getVersion(DomainCompactorProperties.getVersionNumberToCompact(domainName, conf));
    }
  });
  final int domainNumParts = domain.getNumParts();
  final StorageEngine storageEngine = domain.getStorageEngine();
  final InputSplit[] splits = new InputSplit[domainNumParts];
  // Create splits
  for (int partition = 0; partition < domainNumParts; ++partition) {
    // Compute remote partition file paths for this split if possible
    String[] locations = new String[]{};
    if (storageEngine instanceof IncrementalStorageEngine) {
      IncrementalUpdatePlanner updatePlanner = ((IncrementalStorageEngine)storageEngine).getUpdatePlanner(domain);
      IncrementalUpdatePlan updatePlan = updatePlanner.computeUpdatePlan(domainVersionToCompact);
      List<String> paths = updatePlanner.getRemotePartitionFilePaths(updatePlan, storageEngine.getPartitionRemoteFileOps(StorageEngine.RemoteLocation.DOMAIN_BUILDER, partition));
      locations = LocalityHelper.getHostsSortedByLocality(paths, conf);
    }
    splits[partition] = new HadoopDomainCompactorInputSplit(domainName, partition, locations);
  }
  return splits;
}