/**
 * Bootstraps the Metadata.NodeGroup dataset with the initial metadata node group,
 * whose sole member is the metadata node itself.
 *
 * @param mdTxnCtx the metadata transaction context to operate under
 * @throws AlgebricksException if the node group cannot be added
 */
private static void insertInitialGroups(MetadataTransactionContext mdTxnCtx) throws AlgebricksException {
    // The metadata node group contains exactly one node: the metadata node.
    List<String> members = new ArrayList<>();
    members.add(metadataNodeName);
    NodeGroup metadataGroup = new NodeGroup(MetadataConstants.METADATA_NODEGROUP_NAME, members);
    MetadataManager.INSTANCE.addNodegroup(mdTxnCtx, metadataGroup);
}
/**
 * Evicts the cache entry keyed by the given node group's name.
 *
 * @param nodeGroup the node group identifying the entry to remove
 * @return the previously cached node group, or {@code null} if none was cached
 */
public NodeGroup dropNodeGroup(NodeGroup nodeGroup) {
    String key = nodeGroup.getNodeGroupName();
    synchronized (nodeGroups) {
        return nodeGroups.remove(key);
    }
}
/**
 * Returns the node names belonging to the given node group.
 *
 * @param mdTxnCtx      the metadata transaction context to operate under
 * @param nodeGroupName the name of the node group to look up
 * @return the list of node names in the group
 * @throws AlgebricksException if the node group does not exist or the lookup fails
 */
public static List<String> findNodes(MetadataTransactionContext mdTxnCtx, String nodeGroupName)
        throws AlgebricksException {
    NodeGroup nodeGroup = MetadataManager.INSTANCE.getNodegroup(mdTxnCtx, nodeGroupName);
    // getNodegroup returns null for an unknown group; fail with a clear message
    // instead of a NullPointerException on the dereference below.
    if (nodeGroup == null) {
        throw new AlgebricksException("Couldn't find node group " + nodeGroupName);
    }
    return nodeGroup.getNodeNames();
}
aString.setValue(instance.getNodeGroupName()); stringSerde.serialize(aString, tupleBuilder.getDataOutput()); tupleBuilder.addFieldEndOffset(); aString.setValue(instance.getNodeGroupName()); stringSerde.serialize(aString, fieldValue.getDataOutput()); recordBuilder.addField(MetadataRecordTypes.NODEGROUP_ARECORD_GROUPNAME_FIELD_INDEX, fieldValue); List<String> nodeNames = instance.getNodeNames(); for (String nodeName : nodeNames) { itemValue.reset();
/**
 * Records the drop of a node group in this transaction context.
 *
 * @param nodeGroupName the name of the node group being dropped
 */
public void dropNodeGroup(String nodeGroupName) {
    // A NodeGroup with null node names serves as a tombstone for the drop.
    NodeGroup tombstone = new NodeGroup(nodeGroupName, null);
    droppedCache.addOrUpdateNodeGroup(tombstone);
    // false => this logical operation is a delete, not an add.
    logAndApply(new MetadataLogicalOperation(tombstone, false));
}
/**
 * Builds the partition-level node domain for the given node group: each node
 * appears once per partition it hosts, per the cluster state manager.
 *
 * @param clusterStateManager supplies the per-node partition counts
 * @param mdTxnCtx            the metadata transaction context to operate under
 * @param nodeGroupName       the name of the node group to resolve
 * @return a {@link DefaultNodeGroupDomain} over the expanded node list
 * @throws AlgebricksException if the node group lookup fails
 */
public static INodeDomain findNodeDomain(IClusterStateManager clusterStateManager,
        MetadataTransactionContext mdTxnCtx, String nodeGroupName) throws AlgebricksException {
    NodeGroup group = MetadataManager.INSTANCE.getNodegroup(mdTxnCtx, nodeGroupName);
    List<String> expanded = new ArrayList<>();
    for (String node : group.getNodeNames()) {
        // Repeat the node name once for every partition it hosts.
        int partitionCount = clusterStateManager.getNodePartitionsCount(node);
        while (partitionCount-- > 0) {
            expanded.add(node);
        }
    }
    return new DefaultNodeGroupDomain(expanded);
}
/**
 * Inserts or replaces the cache entry for the given node group, keyed by its name.
 *
 * @param nodeGroup the node group to cache
 * @return the previously cached entry for the same name, or {@code null} if none
 */
public NodeGroup addOrUpdateNodeGroup(NodeGroup nodeGroup) {
    String key = nodeGroup.getNodeGroupName();
    synchronized (nodeGroups) {
        return nodeGroups.put(key, nodeGroup);
    }
}
/**
 * Deserializes a {@link NodeGroup} from the payload field of a metadata index tuple.
 *
 * @param frameTuple the tuple whose payload field holds the serialized node group record
 * @return the reconstructed node group (name plus node-name list)
 * @throws HyracksDataException if deserialization fails
 */
@Override
public NodeGroup getMetadataEntityFromTuple(ITupleReference frameTuple) throws HyracksDataException {
    byte[] payload = frameTuple.getFieldData(NODEGROUP_PAYLOAD_TUPLE_FIELD_INDEX);
    int offset = frameTuple.getFieldStart(NODEGROUP_PAYLOAD_TUPLE_FIELD_INDEX);
    int length = frameTuple.getFieldLength(NODEGROUP_PAYLOAD_TUPLE_FIELD_INDEX);
    DataInput in = new DataInputStream(new ByteArrayInputStream(payload, offset, length));
    ARecord record = recordSerDes.deserialize(in);
    // Field 0: group name (string).
    String groupName = ((AString) record
            .getValueByPos(MetadataRecordTypes.NODEGROUP_ARECORD_GROUPNAME_FIELD_INDEX)).getStringValue();
    // Field 1: unordered list of member node names.
    List<String> nodeNames = new ArrayList<>();
    IACursor cursor = ((AUnorderedList) record
            .getValueByPos(MetadataRecordTypes.NODEGROUP_ARECORD_NODENAMES_FIELD_INDEX)).getCursor();
    while (cursor.next()) {
        nodeNames.add(((AString) cursor.get()).getStringValue());
    }
    return new NodeGroup(groupName, nodeNames);
}
/**
 * Calculate an estimate size of the bloom filter. Note that this is an
 * estimation which assumes that the data is going to be uniformly distributed
 * across all partitions.
 *
 * @param dataset the dataset whose cardinality hint is being partitioned
 * @return Number of elements that will be used to create a bloom filter per
 *         dataset per partition
 * @throws AlgebricksException if the node group cannot be resolved or yields
 *         no partitions
 */
public long getCardinalityPerPartitionHint(Dataset dataset) throws AlgebricksException {
    String numElementsHintString = dataset.getHints().get(DatasetCardinalityHint.NAME);
    long numElementsHint =
            numElementsHintString == null ? DatasetCardinalityHint.DEFAULT : Long.parseLong(numElementsHintString);
    List<String> nodeGroup =
            MetadataManager.INSTANCE.getNodegroup(mdTxnCtx, dataset.getNodeGroupName()).getNodeNames();
    IClusterStateManager csm = appCtx.getClusterStateManager();
    int numPartitions = 0;
    for (String nd : nodeGroup) {
        numPartitions += csm.getNodePartitionsCount(nd);
    }
    // Guard against an empty node group / nodes with zero partitions: the
    // original code would throw an unchecked ArithmeticException on divide-by-zero.
    if (numPartitions == 0) {
        throw new AlgebricksException(
                "Node group " + dataset.getNodeGroupName() + " has no partitions to distribute data across.");
    }
    return numElementsHint / numPartitions;
}
/**
 * Applies the given modification (insert/upsert/delete) for a node group to the
 * NodeGroup metadata index.
 *
 * @param txnId          the transaction performing the modification
 * @param nodeGroup      the node group entity to write
 * @param modificationOp the kind of index modification to perform
 * @throws AlgebricksException if the modification fails; a duplicate-key error is
 *         reported as "nodegroup already exists"
 * @throws RemoteException declared by the remote metadata-node interface
 */
@Override
public void modifyNodeGroup(TxnId txnId, NodeGroup nodeGroup, Operation modificationOp)
        throws AlgebricksException, RemoteException {
    try {
        NodeGroupTupleTranslator translator = tupleTranslatorProvider.getNodeGroupTupleTranslator(true);
        ITupleReference tuple = translator.getTupleFromMetadataEntity(nodeGroup);
        modifyMetadataIndex(modificationOp, txnId, MetadataPrimaryIndexes.NODEGROUP_DATASET, tuple);
    } catch (HyracksDataException e) {
        // Translate a Hyracks duplicate-key failure into a user-facing message.
        boolean duplicateKey =
                e.getComponent().equals(ErrorCode.HYRACKS) && e.getErrorCode() == ErrorCode.DUPLICATE_KEY;
        if (!duplicateKey) {
            throw new AlgebricksException(e);
        }
        throw new AlgebricksException(
                "A nodegroup with name '" + nodeGroup.getNodeGroupName() + "' already exists.", e);
    }
}
/***
 * Creates a node group that is associated with a new dataset.
 *
 * @param dataverseName,
 *            the dataverse name of the dataset.
 * @param datasetName,
 *            the name of the dataset.
 * @param rebalanceCount
 *            , the rebalance count of the dataset.
 * @param ncNames,
 *            the set of node names.
 * @param metadataProvider,
 *            the metadata provider.
 * @return the name of the created node group.
 * @throws Exception
 */
public static String createNodeGroupForNewDataset(String dataverseName, String datasetName, long rebalanceCount,
        Set<String> ncNames, MetadataProvider metadataProvider) throws Exception {
    ICcApplicationContext appCtx = metadataProvider.getApplicationContext();
    MetadataTransactionContext mdTxnCtx = metadataProvider.getMetadataTxnContext();
    // Base name: <dataverse>.<dataset>, suffixed by the rebalance count when non-zero.
    String suffix = rebalanceCount == 0L ? "" : "_" + rebalanceCount;
    String nodeGroup = dataverseName + "." + datasetName + suffix;
    appCtx.getMetadataLockManager().acquireNodeGroupWriteLock(metadataProvider.getLocks(), nodeGroup);
    // If the name is already taken, disambiguate with a random UUID and re-lock.
    if (MetadataManager.INSTANCE.getNodegroup(mdTxnCtx, nodeGroup) != null) {
        nodeGroup = nodeGroup + "_" + UUID.randomUUID().toString();
        appCtx.getMetadataLockManager().acquireNodeGroupWriteLock(metadataProvider.getLocks(), nodeGroup);
    }
    MetadataManager.INSTANCE.addNodegroup(mdTxnCtx, new NodeGroup(nodeGroup, new ArrayList<>(ncNames)));
    return nodeGroup;
}
/**
 * Resolves the file splits for the given index by expanding the dataset's node group.
 *
 * @param dataset   the dataset owning the index
 * @param indexName the name of the index
 * @param mdTxnCtx  the metadata transaction context to operate under
 * @param csm       the cluster state manager used to locate partitions
 * @return the file splits for the index across the node group's nodes
 * @throws AlgebricksException if the node group is missing or metadata access fails
 */
public static FileSplit[] getIndexSplits(Dataset dataset, String indexName, MetadataTransactionContext mdTxnCtx,
        IClusterStateManager csm) throws AlgebricksException {
    try {
        String groupName = dataset.getNodeGroupName();
        NodeGroup nodeGroup = MetadataManager.INSTANCE.getNodegroup(mdTxnCtx, groupName);
        if (nodeGroup == null) {
            throw new AlgebricksException("Couldn't find node group " + groupName);
        }
        return getIndexSplits(csm, dataset, indexName, nodeGroup.getNodeNames());
    } catch (MetadataException me) {
        // Surface metadata failures through the compiler-facing exception type.
        throw new AlgebricksException(me);
    }
}
final List<String> ngNodes = Arrays.asList("asterix_nc1"); try { MetadataManager.INSTANCE.addNodegroup(mdTxn, new NodeGroup(nodeGroupName, ngNodes)); Thread.currentThread().interrupt(); MetadataManager.INSTANCE.commitTransaction(mdTxn);
/**
 * Executes a CREATE NODEGROUP statement: under a write lock and a metadata
 * transaction, adds the node group unless it already exists (in which case the
 * statement fails unless IF NOT EXISTS was specified).
 *
 * @param metadataProvider supplies the metadata transaction context and locks
 * @param stmt             the parsed {@code NodegroupDecl} statement
 * @throws Exception if the node group exists (without IF NOT EXISTS) or the
 *         metadata operation fails; the transaction is aborted before rethrow
 */
protected void handleCreateNodeGroupStatement(MetadataProvider metadataProvider, Statement stmt) throws Exception {
    NodegroupDecl decl = (NodegroupDecl) stmt;
    SourceLocation sourceLoc = decl.getSourceLocation();
    String ngName = decl.getNodegroupName().getValue();
    MetadataTransactionContext mdTxnCtx = MetadataManager.INSTANCE.beginTransaction();
    metadataProvider.setMetadataTxnContext(mdTxnCtx);
    lockManager.acquireNodeGroupWriteLock(metadataProvider.getLocks(), ngName);
    try {
        NodeGroup existing = MetadataManager.INSTANCE.getNodegroup(mdTxnCtx, ngName);
        if (existing == null) {
            // Collect the declared node controller names and create the group.
            List<Identifier> ncIdentifiers = decl.getNodeControllerNames();
            List<String> ncNames = new ArrayList<>(ncIdentifiers.size());
            for (Identifier id : ncIdentifiers) {
                ncNames.add(id.getValue());
            }
            MetadataManager.INSTANCE.addNodegroup(mdTxnCtx, new NodeGroup(ngName, ncNames));
        } else if (!decl.getIfNotExists()) {
            throw new CompilationException(ErrorCode.COMPILATION_ERROR, sourceLoc,
                    "A nodegroup with this name " + ngName + " already exists.");
        }
        MetadataManager.INSTANCE.commitTransaction(mdTxnCtx);
    } catch (Exception e) {
        abort(e, e, mdTxnCtx);
        throw e;
    } finally {
        metadataProvider.getLocks().unlock();
    }
}
@Test public void abortMetadataTxn() throws Exception { ICcApplicationContext appCtx = (ICcApplicationContext) integrationUtil.getClusterControllerService().getApplicationContext(); final MetadataProvider metadataProvider = new MetadataProvider(appCtx, null); final MetadataTransactionContext mdTxn = MetadataManager.INSTANCE.beginTransaction(); metadataProvider.setMetadataTxnContext(mdTxn); final String nodeGroupName = "ng"; try { final List<String> ngNodes = Arrays.asList("asterix_nc1"); MetadataManager.INSTANCE.addNodegroup(mdTxn, new NodeGroup(nodeGroupName, ngNodes)); MetadataManager.INSTANCE.abortTransaction(mdTxn); } finally { metadataProvider.getLocks().unlock(); } // ensure that the node group was not added final MetadataTransactionContext readMdTxn = MetadataManager.INSTANCE.beginTransaction(); try { final NodeGroup nodegroup = MetadataManager.INSTANCE.getNodegroup(readMdTxn, nodeGroupName); if (nodegroup != null) { throw new AssertionError("nodegroup was found after metadata txn was aborted"); } } finally { MetadataManager.INSTANCE.commitTransaction(readMdTxn); } }
final List<String> ngNodes = Collections.singletonList("asterix_nc1"); try { MetadataManager.INSTANCE.addNodegroup(mdTxn, new NodeGroup(nodeGroupName, ngNodes)); MetadataManager.INSTANCE.commitTransaction(mdTxn); } finally { MetadataManager.INSTANCE.addNodegroup(committedMdTxn, new NodeGroup(committedNodeGroup, ngNodes)); MetadataManager.INSTANCE.commitTransaction(committedMdTxn); opTracker.setFlushOnExit(true);