/**
 * Collects unused HDFS files into the given collector. When a dedicated HBase
 * cluster file system is configured, that cluster is scanned first; the
 * default file system is always scanned.
 *
 * @param collector accumulator that receives the unused file paths found
 * @throws IOException if scanning either file system fails
 */
protected void collectUnusedHdfsFiles(UnusedHdfsFileCollector collector) throws IOException {
    final String hbaseClusterFs = config.getHBaseClusterFs();
    if (StringUtils.isNotEmpty(hbaseClusterFs)) {
        // HBase lives on its own FS — sweep it in addition to the default one.
        cleanUnusedHdfsFiles(hbaseFs, collector);
    }
    cleanUnusedHdfsFiles(defaultFs, collector);
}
// NOTE(review): fragment of a larger method — the closing brace of this `if`
// is outside the visible chunk, so only comments are added here.
// If a separate HBase cluster file system is configured, override fs.defaultFS
// so subsequent FileSystem lookups on this conf resolve against that cluster.
String hbaseClusterFs = kylinConf.getHBaseClusterFs();
if (StringUtils.isNotEmpty(hbaseClusterFs)) {
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, hbaseClusterFs);
/**
 * Deletes this job's garbage HDFS paths on the working cluster and, when a
 * dedicated HBase cluster file system is configured, on the HBase cluster too.
 *
 * @param context execution context supplying the Kylin configuration
 * @return SUCCEED with the accumulated output, or ERROR carrying the
 *         {@link IOException} that interrupted the cleanup
 */
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        config = new JobEngineConfig(context.getConfig());
        final List<String> toDeletePaths = getDeletePaths();

        // The working cluster is always cleaned.
        dropHdfsPathOnCluster(toDeletePaths, HadoopUtil.getWorkingFileSystem());

        // Clean the HBase cluster as well when it uses its own file system.
        final String hbaseClusterFs = context.getConfig().getHBaseClusterFs();
        if (StringUtils.isNotEmpty(hbaseClusterFs)) {
            dropHdfsPathOnCluster(toDeletePaths,
                    FileSystem.get(HBaseConnection.getCurrentHBaseConfiguration()));
        }
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        output.append("\n").append(e.getLocalizedMessage());
        return new ExecuteResult(ExecuteResult.State.ERROR, output.toString(), e);
    }
    return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString());
}
public static Configuration newHBaseConfiguration(String url) { Configuration conf = HBaseConfiguration.create(getCurrentConfiguration()); // using a hbase:xxx URL is deprecated, instead hbase config is always loaded from hbase-site.xml in classpath if (!(StringUtils.isEmpty(url) || "hbase".equals(url))) throw new IllegalArgumentException("to use hbase storage, pls set 'kylin.storage.url=hbase' in kylin.properties"); // support hbase using a different FS String hbaseClusterFs = KylinConfig.getInstanceFromEnv().getHBaseClusterFs(); if (StringUtils.isNotEmpty(hbaseClusterFs)) { conf.set(FileSystem.FS_DEFAULT_NAME_KEY, hbaseClusterFs); } // reduce rpc retry conf.set(HConstants.HBASE_CLIENT_PAUSE, "3000"); conf.set(HConstants.HBASE_CLIENT_RETRIES_NUMBER, "5"); conf.set(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, "60000"); // conf.set(ScannerCallable.LOG_SCANNER_ACTIVITY, "true"); return healSickConfig(conf); }
// NOTE(review): fragment of a larger method — the closing brace of this `if`
// is outside the visible chunk, so only comments are added here.
// If a separate HBase cluster file system is configured, override fs.defaultFS
// so subsequent FileSystem lookups on this conf resolve against that cluster.
String hbaseClusterFs = kylinConf.getHBaseClusterFs();
if (StringUtils.isNotEmpty(hbaseClusterFs)) {
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, hbaseClusterFs);
/**
 * Removes obsolete HDFS paths left by earlier builds: first on the default
 * Hadoop cluster, then — when a dedicated HBase cluster file system is
 * configured — on the HBase cluster as well.
 *
 * @param context execution context supplying the Kylin configuration
 * @throws IOException if a file system cannot be reached or a delete fails
 */
private void dropHdfsPath(ExecutableContext context) throws IOException {
    final List<String> oldHdfsPaths = this.getOldHdfsPaths();

    dropHdfsPathOnCluster(oldHdfsPaths, FileSystem.get(HadoopUtil.getCurrentConfiguration()));

    if (StringUtils.isNotEmpty(context.getConfig().getHBaseClusterFs())) {
        // NOTE(review): this resolves the HBase conf via HadoopUtil, while the
        // sibling doWork() uses HBaseConnection — confirm both are equivalent.
        dropHdfsPathOnCluster(oldHdfsPaths, FileSystem.get(HadoopUtil.getCurrentHBaseConfiguration()));
    }
}
/**
 * Drops the paths slated for deletion on the working-cluster HDFS, and also on
 * the HBase cluster's file system when one is configured separately.
 *
 * @param context execution context supplying the Kylin configuration
 * @return SUCCEED with the accumulated output, or ERROR carrying the
 *         {@link IOException} that interrupted the cleanup
 */
@Override
protected ExecuteResult doWork(ExecutableContext context) throws ExecuteException {
    try {
        config = new JobEngineConfig(context.getConfig());
        List<String> toDeletePaths = getDeletePaths();

        // Working cluster is cleaned unconditionally.
        dropHdfsPathOnCluster(toDeletePaths, HadoopUtil.getWorkingFileSystem());

        // HBase cluster is cleaned only if it has its own file system.
        if (StringUtils.isNotEmpty(context.getConfig().getHBaseClusterFs())) {
            FileSystem hbaseFileSystem = FileSystem.get(HBaseConnection.getCurrentHBaseConfiguration());
            dropHdfsPathOnCluster(toDeletePaths, hbaseFileSystem);
        }
    } catch (IOException e) {
        logger.error("job:" + getId() + " execute finished with exception", e);
        output.append("\n").append(e.getLocalizedMessage());
        return new ExecuteResult(ExecuteResult.State.ERROR, output.toString(), e);
    }
    return new ExecuteResult(ExecuteResult.State.SUCCEED, output.toString());
}