@Override
public void setCcAppCtx(ICcApplicationContext appCtx) {
    // Cache the CC application context and derive all metadata-node related
    // state from its metadata properties.
    this.appCtx = appCtx;
    MetadataProperties props = appCtx.getMetadataProperties();
    node2PartitionsMap = props.getNodePartitions();
    clusterPartitions = props.getClusterPartitions();
    currentMetadataNode = props.getMetadataNodeName();
    // First partition of the metadata node — assumes the node has at least one
    // mapped partition; TODO confirm against configuration validation.
    metadataPartition = node2PartitionsMap.get(currentMetadataNode)[0];
    // Register this object with the NC lifecycle coordinator for callbacks.
    lifecycleCoordinator = appCtx.getNcLifecycleCoordinator();
    lifecycleCoordinator.bindTo(this);
}
@Override
public MetadataProperties newMetadataProperties() {
    // Build a fresh MetadataProperties view over the shared properties accessor.
    MetadataProperties props = new MetadataProperties(propertiesAccessor);
    return props;
}
/**
 * Resolves the partition constraints to use for a job.
 * <p>
 * If {@code clusterLocations} is already provided it is returned as-is;
 * otherwise a constraint is built from the configured stores, repeating each
 * node once per I/O device so work is spread across all devices.
 *
 * @param appCtx the application context (must be an {@link ICcApplicationContext})
 * @param clusterLocations a pre-computed constraint, or {@code null} to derive one
 * @return the resolved partition constraint, never {@code null}
 */
public static AlgebricksAbsolutePartitionConstraint getPartitionConstraints(IApplicationContext appCtx,
        AlgebricksAbsolutePartitionConstraint clusterLocations) {
    // Guard clause: caller already supplied explicit locations.
    if (clusterLocations != null) {
        return clusterLocations;
    }
    IClusterStateManager clusterStateManager = ((ICcApplicationContext) appCtx).getClusterStateManager();
    Map<String, String[]> stores = appCtx.getMetadataProperties().getStores();
    ArrayList<String> locs = new ArrayList<>();
    for (String node : stores.keySet()) {
        // One location entry per I/O device on the node.
        int numIODevices = clusterStateManager.getIODevices(node).length;
        for (int k = 0; k < numIODevices; k++) {
            locs.add(node);
        }
    }
    // Idiomatic toArray: pass an empty array and let the list size the result.
    return new AlgebricksAbsolutePartitionConstraint(locs.toArray(new String[0]));
}
// Lazily (re)binds the remote MetadataNode stub. Synchronized so concurrent
// callers observe a consistent metadataNode / rebindMetadataNode pair.
@Override
public synchronized void init() throws HyracksDataException {
    // Fast path: already bound and no rebind was requested.
    if (metadataNode != null && !rebindMetadataNode) {
        return;
    }
    try {
        // Wait (up to the configured registration timeout) for the
        // MetadataNode to register with the proxy.
        metadataNode = proxies.iterator().next()
                .waitForMetadataNode(metadataProperties.getRegistrationTimeoutSecs(), TimeUnit.SECONDS);
        if (metadataNode != null) {
            rebindMetadataNode = false;
        } else {
            throw new HyracksDataException("The MetadataNode failed to bind before the configured timeout ("
                    + metadataProperties.getRegistrationTimeoutSecs() + " seconds); the MetadataNode was "
                    + "configured to run on NC: " + metadataProperties.getMetadataNodeName());
        }
    } catch (InterruptedException e) {
        // Restore the interrupt status before propagating.
        Thread.currentThread().interrupt();
        throw HyracksDataException.create(e);
    } catch (RemoteException e) {
        throw new RuntimeDataException(ErrorCode.REMOTE_EXCEPTION_WHEN_CALLING_METADATA_NODE, e);
    }
    // Delegate any remaining initialization to the superclass.
    super.init();
}
// NOTE(review): this brace appears to close the enclosing class/scope, not the
// method above — the surrounding declaration is outside this excerpt.
}
// Fragment of a larger initialization routine (enclosing method not visible
// in this excerpt). Caches commonly used handles from the metadata
// properties and the surrounding contexts into fields.
metadataNodeName = metadataProperties.getMetadataNodeName();
nodeNames = metadataProperties.getNodeNames();
localResourceRepository = appContext.getLocalResourceRepository();
ioManager = ncServiceContext.getIoManager();
// Test/bootstrap fragment: registers three single-partition cluster
// partitions (one per NC) and then derives the per-node partition map.
CcApplicationContext ccAppCtx = ccAppContext(csm);
ccAppCtx.getMetadataProperties().getClusterPartitions().put(0, new ClusterPartition(0, NC1, 0));
ccAppCtx.getMetadataProperties().getClusterPartitions().put(1, new ClusterPartition(1, NC2, 0));
ccAppCtx.getMetadataProperties().getClusterPartitions().put(2, new ClusterPartition(2, NC3, 0));
for (ClusterPartition cp : ccAppCtx.getMetadataProperties().getClusterPartitions().values()) {
    // Each node owns exactly the single partition registered above.
    ccAppCtx.getMetadataProperties().getNodePartitions().put(cp.getNodeId(), new ClusterPartition[] { cp });
    // NOTE(review): the loop body continues past this excerpt — its closing
    // brace is not visible here.
// Operator-initialization fragment (enclosing method not visible in this
// excerpt). Resolves the application context from the joblet's service
// context and prepares parsing/tuple-building state.
IApplicationContext appCtx =
        (IApplicationContext) ctx.getJobletContext().getServiceContext().getApplicationContext();
// First partition mapped to this node — assumes nodeId has at least one
// partition in the map; TODO confirm against caller.
ClusterPartition nodePartition = appCtx.getMetadataProperties().getNodePartitions().get(nodeId)[0];
parser = new ADMDataParser(outputType, true);
// Single-field tuples only.
tb = new ArrayTupleBuilder(1);
/**
 * Builds the default output file split: a uniquely named file under the JVM
 * temporary directory, placed on the metadata node.
 *
 * @param appCtx the CC application context supplying the metadata properties
 * @return a managed file split on the metadata node
 * @throws AlgebricksException declared for callers; not thrown directly here
 */
private FileSplit getDefaultOutputFileLocation(ICcApplicationContext appCtx) throws AlgebricksException {
    // Fix: the standard JVM property is "java.io.tmpdir" (lowercase 'd').
    // The previous "java.io.tmpDir" is unset on standard JVMs, so outputDir
    // was null and the resulting path started with the literal "null".
    String outputDir = System.getProperty("java.io.tmpdir");
    String filePath = outputDir + System.getProperty("file.separator") + OUTPUT_FILE_PREFIX
            + outputFileID.incrementAndGet();
    MetadataProperties metadataProperties = appCtx.getMetadataProperties();
    return new ManagedFileSplit(metadataProperties.getMetadataNodeName(), filePath);
}
// NC startup fragment (enclosing method not visible in this excerpt).
// Builds the runtime context, then — for a node not listed in the static
// metadata configuration (a "substitute" node) — logs diagnostic state.
runtimeContext = new NCAppRuntimeContext(ncServiceCtx, getExtensions(), getPropertiesFactory());
MetadataProperties metadataProperties = runtimeContext.getMetadataProperties();
if (!metadataProperties.getNodeNames().contains(this.ncServiceCtx.getNodeId())) {
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info("Substitute node joining : " + this.ncServiceCtx.getNodeId());
        LOGGER.info("System state: " + recoveryMgr.getSystemState());
        LOGGER.info("Node ID: " + nodeId);
        LOGGER.info("Stores: " + PrintUtil.toString(metadataProperties.getStores()));
        // NOTE(review): both if-blocks continue past this excerpt — their
        // closing braces are not visible here.
/**
 * Ensures that a node absent from the static metadata configuration (a
 * substitute node) is registered with the NC's config manager on join.
 */
private void updateOnNodeJoin() {
    MetadataProperties props = runtimeContext.getMetadataProperties();
    // Nodes already present in the configuration need no registration.
    if (props.getNodeNames().contains(nodeId)) {
        return;
    }
    NodeControllerService ncs = (NodeControllerService) ncServiceCtx.getControllerService();
    ncs.getConfiguration().getConfigManager().ensureNode(nodeId);
}
/**
 * Configures and starts the NC's lifecycle components, wiring the coredump
 * path from the metadata properties and installing a state-dump handler.
 */
@Override
public void perform(CcId ccId, IControllerService cs) throws HyracksDataException {
    INcApplicationContext appCtx = (INcApplicationContext) cs.getApplicationContext();
    NCServiceContext serviceCtx = (NCServiceContext) cs.getContext();
    MetadataProperties metadataProperties = appCtx.getMetadataProperties();
    LOGGER.info("Starting lifecycle components");
    // Route core dumps for this NC to its configured coredump directory.
    String dumpPath = metadataProperties.getCoredumpPath(serviceCtx.getNodeId());
    Map<String, String> lifecycleMgmtConfiguration = new HashMap<>();
    lifecycleMgmtConfiguration.put(LifeCycleComponentManager.Config.DUMP_PATH_KEY, dumpPath);
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info("Coredump directory for NC is: " + dumpPath);
    }
    ILifeCycleComponentManager lccm = serviceCtx.getLifeCycleComponentManager();
    lccm.configure(lifecycleMgmtConfiguration);
    if (LOGGER.isInfoEnabled()) {
        LOGGER.info("Configured:" + lccm);
    }
    serviceCtx.setStateDumpHandler(new AsterixStateDumpHandler(serviceCtx.getNodeId(), lccm.getDumpPath(), lccm));
    lccm.startAll();
}
// Fragment: the first line below is the tail of a constructor/method call
// whose opening is above this excerpt.
datasetMemoryManager, indexCheckpointManagerProvider, ioManager.getIODevices().size());
final String nodeId = getServiceContext().getNodeId();
// Partitions assigned to this NC per the metadata configuration.
final ClusterPartition[] nodePartitions = metadataProperties.getNodePartitions().get(nodeId);
// Collect just the numeric partition ids for set-membership checks.
final Set<Integer> nodePartitionsIds =
        Arrays.stream(nodePartitions).map(ClusterPartition::getPartitionId).collect(Collectors.toSet());
/**
 * Creates a Mockito-backed MetadataProperties describing a single metadata
 * node that owns one cluster partition (partition 0), with an initially
 * empty (synchronized) cluster-partition map.
 */
private MetadataProperties mockMetadataProperties() {
    ClusterPartition metadataPartition = new ClusterPartition(0, METADATA_NODE, 0);
    Map<String, ClusterPartition[]> nodePartitionsMap = new ConcurrentHashMap<>();
    nodePartitionsMap.put(METADATA_NODE, new ClusterPartition[] { metadataPartition });
    SortedMap<Integer, ClusterPartition> clusterPartitions =
            Collections.synchronizedSortedMap(new TreeMap<>());
    MetadataProperties props = Mockito.mock(MetadataProperties.class);
    Mockito.when(props.getMetadataNodeName()).thenReturn(METADATA_NODE);
    Mockito.when(props.getClusterPartitions()).thenReturn(clusterPartitions);
    Mockito.when(props.getNodePartitions()).thenReturn(nodePartitionsMap);
    return props;
}
// Fragment of a properties-initialization routine (enclosing method not
// visible in this excerpt): builds each typed properties view over the
// shared properties accessor.
compilerProperties = new CompilerProperties(propertiesAccessor);
externalProperties = new ExternalProperties(propertiesAccessor);
metadataProperties = new MetadataProperties(propertiesAccessor);
storageProperties = new StorageProperties(propertiesAccessor);
txnProperties = new TransactionProperties(propertiesAccessor);
// Fragment (enclosing method not visible in this excerpt): gathers the set
// of configured store nodes and rejects an empty configuration.
IClusterStateManager clusterStateManager = appCtx.getClusterStateManager();
ArrayList<String> locs = new ArrayList<>();
Set<String> stores = appCtx.getMetadataProperties().getStores().keySet();
if (stores.isEmpty()) {
    // Fail fast when no storage locations are configured.
    throw new AlgebricksException("Configurations don't have any stores");
    // NOTE(review): the if-block continues past this excerpt — its closing
    // brace is not visible here.