/**
 * Creates a compaction config holder whose hoplog logger carries no name
 * prefix; the prefix-less logger is the default until a store name is known.
 */
public AbstractHDFSCompactionConfigHolder() {
  logger = ComponentLogWriter.getHoplogLogWriter("", LogService.logger());
}
/**
 * Builds a hoplog component log writer whose output is tagged with the given
 * region name.
 *
 * @param region region name used as the log message prefix
 * @return a hoplog-scoped log writer backed by the shared {@code LogService}
 */
private static ComponentLogWriter getLogger(String region) {
  return ComponentLogWriter.getHoplogLogWriter(region, LogService.logger());
}
/**
 * Creates an organizer for DDL hoplogs kept under the meta folder of the
 * given HDFS store.
 *
 * @param hdfsstore store whose file system and home directory back this
 *        organizer
 * @throws HDFSIOException if the store's file system cannot be obtained
 */
public DDLHoplogOrganizer(HDFSStore hdfsstore) {
  this.hdfsstore = hdfsstore;
  logger = ComponentLogWriter.getHoplogLogWriter("DDL" + this.hdfsstore.getName(), LogService.logger());
  try {
    this.fileSystem = ((HDFSStoreImpl) hdfsstore).getFileSystem();
  } catch (IOException e) {
    // surface file-system acquisition failures as the store's runtime type,
    // keeping the original exception as the cause
    throw new HDFSIOException(e.getMessage(), e);
  }
  Path storeHome = new Path(hdfsstore.getHomeDir());
  this.metaFolderPath = this.fileSystem.makeQualified(new Path(storeHome, META_FOLDER_NAME));
  this.conf = new Configuration(fileSystem.getConf());
}
/**
 * Constructs a hoplog backed by an explicitly supplied file system.
 *
 * @param inputFS file system that holds the hoplog file
 * @param filePath location of the hoplog file; its name also prefixes log
 *        output
 * @param stats statistics sink for this sorted oplog
 * @throws IOException if initialization of the underlying file fails
 */
AbstractHoplog(FileSystem inputFS, Path filePath, SortedOplogStatistics stats)
    throws IOException {
  this.fsProvider = new FSProvider(inputFS);
  logger = ComponentLogWriter.getHoplogLogWriter(filePath.getName(), LogService.logger());
  initialize(filePath, stats, inputFS);
}
@Override public synchronized void rename(String name) throws IOException { logger.fine("Renaming hoplog to " + name); Path parent = path.getParent(); Path newPath = new Path(parent, name); fsProvider.getFS().rename(path, new Path(parent, newPath)); // close the old reader and let the new one get created lazily close(); // update path to point to the new path path = newPath; this.hfd = new HoplogDescriptor(this.path.getName()); logger = ComponentLogWriter.getHoplogLogWriter(path.getName(), LogService.logger()); }
/**
 * Constructs a hoplog backed by the file system of the given HDFS store.
 *
 * @param store store providing the file system that holds the hoplog file
 * @param filePath location of the hoplog file; its name also prefixes log
 *        output
 * @param stats statistics sink for this sorted oplog
 * @throws IOException if initialization of the underlying file fails
 */
public AbstractHoplog(HDFSStoreImpl store, Path filePath, SortedOplogStatistics stats)
    throws IOException {
  this.fsProvider = new FSProvider(store);
  logger = ComponentLogWriter.getHoplogLogWriter(filePath.getName(), LogService.logger());
  initialize(filePath, stats, store.getFileSystem());
}
/**
 * Creates an organizer for one bucket of an HDFS-persisted region.
 *
 * @param region manager of the region whose bucket this organizer owns;
 *        must not be {@code null}
 * @param bucketId id of the bucket managed by this organizer
 */
public AbstractHoplogOrganizer(HdfsRegionManager region, int bucketId) {
  assert region != null;
  this.regionManager = region;
  this.bucketId = bucketId;
  // cache frequently used region collaborators
  this.regionFolder = region.getRegionFolder();
  this.store = region.getStore();
  this.listener = region.getListener();
  this.stats = region.getHdfsStats();
  // bucket data lives under <storeHome>/<regionFolder>/<bucketId>
  this.basePath = new Path(store.getHomeDir());
  this.bucketPath = new Path(basePath, regionFolder + "/" + bucketId);
  logger = ComponentLogWriter.getHoplogLogWriter(getRegionBucketStr(), LogService.logger());
}
// Record the store's name and rebuild the component logger so subsequent log
// output is tagged with the updated "HdfsStore:<name>" prefix. (Fragment of an
// enclosing method not visible in this chunk.)
configHolder.setName(name); logger = ComponentLogWriter.getHoplogLogWriter("HdfsStore:" + name, LogService.logger());
/**
 * Copy constructor. Populates this holder from {@code config}; when
 * {@code config} is {@code null}, only the default compaction strategy is
 * created and the remaining attributes are expected to be assigned later.
 *
 * @param config configuration source for creating this instance
 */
public HDFSStoreConfigHolder(HDFSStore config) {
  if (config == null) {
    // initialize default compaction strategy and leave the rest for getting
    // set later
    this.compactionConfig = AbstractHDFSCompactionConfigHolder.createInstance(null);
    // BUGFIX: this early return previously left the logger uninitialized;
    // create the same prefix-less default logger the no-arg holders use so
    // logging is safe before a name is assigned (setName refreshes it).
    logger = ComponentLogWriter.getHoplogLogWriter("", LogService.logger());
    return;
  }
  this.name = config.getName();
  this.namenodeURL = config.getNameNodeURL();
  this.homeDir = config.getHomeDir();
  this.clientConfigFile = config.getHDFSClientConfigFile();
  setHDFSCompactionConfig(config.getHDFSCompactionConfig());
  this.blockCacheSize = config.getBlockCacheSize();
  setHDFSEventQueueAttributes(config.getHDFSEventQueueAttributes());
  this.maxFileSize = config.getMaxFileSize();
  this.fileRolloverInterval = config.getFileRolloverInterval();
  logger = ComponentLogWriter.getHoplogLogWriter(getName(), LogService.logger());
}