protected void dumpKylinPropsAndMetadata(String prj, Set<String> dumpList, KylinConfig kylinConfig, Configuration conf) throws IOException {
    File tmp = File.createTempFile("kylin_job_meta", "");
    FileUtils.forceDelete(tmp); // we need a directory, so delete the file first
    File metaDir = new File(tmp, "meta");
    metaDir.mkdirs();

    // write kylin.properties
    File kylinPropsFile = new File(metaDir, "kylin.properties");
    kylinConfig.exportToFile(kylinPropsFile);

    if (prj != null) {
        dumpList.add(ProjectManager.getInstance(kylinConfig).getProject(prj).getResourcePath());
    }

    // write resources
    JobRelatedMetaUtil.dumpResources(kylinConfig, metaDir, dumpList);

    // hadoop distributed cache
    String hdfsMetaDir = OptionsHelper.convertToFileURL(metaDir.getAbsolutePath());
    if (hdfsMetaDir.startsWith("/")) // note Path on windows is like "d:/../..."
        hdfsMetaDir = "file://" + hdfsMetaDir;
    else
        hdfsMetaDir = "file:///" + hdfsMetaDir;
    logger.info("HDFS meta dir is: " + hdfsMetaDir);

    appendTmpFiles(hdfsMetaDir, conf);
}
public static Set<String> collectCubeMetadata(CubeInstance cube) {
    // cube, model_desc, cube_desc, table
    Set<String> dumpList = new LinkedHashSet<>();
    dumpList.add(cube.getResourcePath());
    dumpList.add(cube.getDescriptor().getModel().getResourcePath());
    dumpList.add(cube.getDescriptor().getResourcePath());
    dumpList.add(cube.getProjectInstance().getResourcePath());

    for (TableRef tableRef : cube.getDescriptor().getModel().getAllTables()) {
        TableDesc table = tableRef.getTableDesc();
        dumpList.add(table.getResourcePath());
        dumpList.addAll(SourceManager.getMRDependentResources(table));
    }

    return dumpList;
}
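Taken together, collectCubeMetadata supplies the resource paths that dumpKylinPropsAndMetadata ships to the job. A minimal sketch of that wiring, assuming collectCubeMetadata lives alongside dumpResources in JobRelatedMetaUtil and that CubeInstance.getProject() returns the owning project name (attachCubeMetadataToJob is a hypothetical helper invented for illustration):

// Sketch only: gather a cube's metadata paths and attach them, plus
// kylin.properties, to the Hadoop job configuration via the method above.
protected void attachCubeMetadataToJob(CubeInstance cube, KylinConfig kylinConfig, Configuration conf) throws IOException {
    Set<String> dumpList = JobRelatedMetaUtil.collectCubeMetadata(cube); // assumption: util class hosting collectCubeMetadata
    String project = cube.getProject();                                  // assumption: owning project name
    dumpKylinPropsAndMetadata(project, dumpList, kylinConfig, conf);
}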
private void requireProject(ProjectInstance projectInstance) throws IOException {
    addRequired(projectInstance.getResourcePath());

    List<RealizationEntry> realizationEntries = projectInstance.getRealizationEntries();
    for (RealizationEntry realizationEntry : realizationEntries) {
        retrieveResourcePath(getRealization(realizationEntry));
    }

    List<DataModelDesc> modelDescs = metadataManager.getModels(projectInstance.getName());
    for (DataModelDesc modelDesc : modelDescs) {
        addRequired(DataModelDesc.concatResourcePath(modelDesc.getName()));
    }

    addOptional(badQueryHistoryManager.getBadQueriesForProject(projectInstance.getName()).getResourcePath());
}
private void saveResource(ProjectInstance prj) throws IOException {
    ResourceStore store = getStore();
    store.putResource(prj.getResourcePath(), prj, PROJECT_SERIALIZER);
    prj = reloadProjectAt(prj.getResourcePath());
    projectMap.put(norm(prj.getName()), prj); // triggers update broadcast
    clearL2Cache();
}
private void deleteResource(ProjectInstance proj) throws IOException {
    ResourceStore store = getStore();
    store.deleteResource(proj.getResourcePath());
    projectMap.remove(norm(proj.getName()));
    clearL2Cache();
}
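Both saveResource and deleteResource use getResourcePath() as the ResourceStore key. A minimal existence check built on the same key, assuming ProjectInstance exposes a concatResourcePath(name) helper analogous to the DataModelDesc.concatResourcePath(...) call in requireProject and that ResourceStore offers exists(path); projectExists is a hypothetical helper:

// Sketch only: derive the project's store key from its name and probe the store.
public boolean projectExists(String projectName) throws IOException {
    String path = ProjectInstance.concatResourcePath(norm(projectName)); // assumption: mirrors DataModelDesc.concatResourcePath
    return getStore().exists(path);                                      // assumption: ResourceStore exposes exists(path)
}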
    store.putResource(newPrj.getResourcePath(), newPrj, ProjectManager.PROJECT_SERIALIZER); // persist the updated project under its resource path
    updatedResources.add(path);
} catch (IOException e) {