/**
 * Returns the parent of this node by consulting the cache, or {@code null}
 * if this node is the root (the root has no parent).
 */
public NodeSPI<K, V> getParent() {
   return fqn.isRoot() ? null : cache.peek(fqn.getParent(), true);
}
/**
 * Ensures the parent of {@code name} exists in the store, creating it
 * (empty) if necessary, and recurses so that every missing ancestor up to
 * the root is created as well.
 * <p>
 * A small memo set ({@code parents}) short-circuits repeated checks; it is
 * cleared wholesale once it exceeds {@code PARENT_CACHE_SIZE}.
 *
 * @param name Fqn whose ancestor chain must exist
 * @throws IOException if the underlying store access fails
 */
private void ensureParent(Fqn name) throws IOException {
   if (name.size() <= 1) return; // root or a direct child of root: nothing above to create
   Fqn parent = name.getParent();
   if (parents.contains(parent)) return; // recently verified; skip the store round-trip
   if (!exists(parent)) put0(parent, emptyMap());
   parents.add(parent);
   if (parents.size() > PARENT_CACHE_SIZE) {
      parents.clear(); // crude bound on the memo set
   }
   // Recurse so grandparents are created too, as the contract above requires
   // (the sibling ensureParent overload does the same).
   ensureParent(parent);
}
/**
 * Undoes the move: shifts the node from its new location (under {@code to})
 * back to its original parent.
 */
public void rollback() {
   Object lastElement = fqn.getLastElement();
   Fqn currentLocation = Fqn.fromRelativeElements(to, lastElement);
   move(currentLocation, fqn.getParent(), true, null);
}
}
/**
 * Returns the parent of this node; the root node is treated as its own
 * parent (returns {@code this}).
 */
public NodeSPI<K, V> getParent() {
   Fqn myFqn = getFqn();
   if (!myFqn.isRoot()) {
      return spi.getNode(myFqn.getParent());
   }
   return this;
}
/**
 * Convenience overload: resolves the parent of the node to undelete from the
 * workspace and delegates to the two-node variant.
 */
protected void undeleteWorkspaceNode(WorkspaceNode nodeToUndelete, TransactionWorkspace workspace) {
   Fqn parentFqn = nodeToUndelete.getFqn().getParent();
   WorkspaceNode parent = workspace.getNode(parentFqn);
   undeleteWorkspaceNode(nodeToUndelete, parent);
}
private void cleanEmptyDeadRegion(GlobalTransaction gtx, Fqn backupFqn) throws Throwable { // if this is a DIRECT child of a DEAD buddy backup region, then remove the empty dead region structural node. Fqn deadBackupRootFqn = null; if (buddyFqnTransformer.isDeadBackupFqn(backupFqn) && buddyFqnTransformer.isDeadBackupRoot(backup.getAncestor(backupFqn.size() - 2)) && !dataContainer.hasChildren((deadBackupRootFqn = backupFqn.getParent()))) { if (trace) log.trace("Removing dead backup region " + deadBackupRootFqn); executeRemove(gtx, deadBackupRootFqn); // now check the grand parent and see if we are free of versions deadBackupRootFqn = deadBackupRootFqn.getParent(); if (!dataContainer.hasChildren(deadBackupRootFqn)) { if (trace) log.trace("Removing dead backup region " + deadBackupRootFqn); executeRemove(gtx, deadBackupRootFqn); } } }
/** * Ensures a parent node exists. * Calls recursively to initialize parents as necessary. */ private void ensureParent(Fqn name) throws Exception { if (name.size() <= 1) return; Fqn parent = name.getParent(); boolean cache = config.getParentCache(); if (cache && parents.contains(parent)) return; // potential race condition between exists and put if (!exists(parent)) put0(parent, getDummy()); if (cache) { parents.add(parent); if (parents.size() > PARENT_CACHE_SIZE) { parents.clear(); } } ensureParent(parent); }
/**
 * Handles a move by letting the chain process it first, then purging the
 * moved (FROM) node from the cache loader; no activation is needed for a
 * move. The old parent and the destination are also cleaned from the loader.
 */
@Override
public Object visitMoveCommand(InvocationContext ctx, MoveCommand command) throws Throwable {
   Object result = super.visitMoveCommand(ctx, command);
   if (trace) {
      log.trace("This is a move operation; removing the FROM node from the loader, no activation processing needed.");
   }
   Fqn from = command.getFqn();
   loader.remove(from);
   removeNodeFromCacheLoader(ctx, from.getParent(), true);
   removeNodeFromCacheLoader(ctx, command.getTo(), true);
   return result;
}
/**
 * Inserts {@code name} with its attributes, then walks upward inserting any
 * missing ancestors (attribute-less) until an existing ancestor or the root
 * is reached.
 *
 * @param name       Fqn of the new leaf to insert
 * @param attributes data for the leaf itself; ancestors get {@code null}
 * @throws Exception if an insert or existence check fails
 */
private void addNewSubtree(Fqn name, Map attributes) throws Exception {
   if (getLogger().isTraceEnabled())
      getLogger().trace("addNewSubtree name=" + name + " attr=" + attributes);
   Fqn current = name;
   while (true) {
      // Only the leaf itself carries the attributes; ancestors are structural.
      boolean isLeaf = current.equals(name);
      insertNode(current, isLeaf ? attributes : null, !isLeaf);
      if (current.isRoot()) break;
      current = current.getParent();
      if (exists(current)) break; // rest of the chain already present
   }
}
/**
 * Processes a move down the chain, then evicts the source node from the
 * cache loader (a move needs no activation), along with the source's parent
 * and the destination.
 */
@Override
public Object visitMoveCommand(InvocationContext ctx, MoveCommand command) throws Throwable {
   Object retval = super.visitMoveCommand(ctx, command);
   if (trace) {
      log.trace("This is a move operation; removing the FROM node from the loader, no activation processing needed.");
   }
   Fqn source = command.getFqn();
   loader.remove(source);
   removeNodeFromCacheLoader(ctx, source.getParent(), true);
   removeNodeFromCacheLoader(ctx, command.getTo(), true);
   return retval;
}
/**
 * Invalidates the node at {@code fqn} (if present) and detaches it from its
 * parent, marking the parent's child set as no longer fully loaded.
 */
private void removeNode(Fqn fqn) {
   InternalNode target = peekInternalNode(fqn, true);
   if (target == null) return; // nothing to remove
   InternalNode parent = peekInternalNode(fqn.getParent(), true);
   target.setValid(false, false);
   if (parent == null) return; // orphan: nothing to detach from
   parent.removeChild(fqn.getLastElement());
   parent.setChildrenLoaded(false);
}
// Resolve the parent Fqn and look it up in the invocation context's
// looked-up-node map (may be null if not registered in this context).
Fqn parentFqn = fqn.getParent(); NodeSPI parent = ctx.lookUpNode(parentFqn);
// Also purge the parent from the cache loader. NOTE(review): the final
// argument is false here while sibling move-handling call sites pass true —
// confirm the flag's intended semantics at this call site.
removeNodeFromCacheLoader(ctx, fqn.getParent(), false);
// Peek at the parent (second arg true — presumably includes invalid nodes;
// confirm against peek's contract) and detach this node from it directly.
NodeSPI parent = peek(f.getParent(), true); return parent.removeChildDirect(n.getFqn().getLastElement());
// Peek at the parent's internal node (second arg true — presumably includes
// invalid nodes; confirm) and remove this node from its child map.
InternalNode parent = peekInternalNode(f.getParent(), true); return parent.removeChild(n.getFqn().getLastElement());
// Resolve the parent via the eviction-specific lookup path.
NodeSPI parentNode = lookupForEviction(ctx, fqn.getParent());
// Resolve the parent's internal node, searching every available scope.
InternalNode parentNode = lookupInAllScopes(ctx, fqn.getParent());
@Override public Object handleMoveCommand(InvocationContext ctx, MoveCommand command) throws Throwable { // Nodes we need to get WLs for: // 1) node we are moving FROM, and its parent and ALL children. Same as removeNode. List<Fqn> nodeAndChildren = helper.wrapNodesRecursivelyForRemoval(ctx, command.getFqn()); Fqn newParent = command.getTo(); Fqn oldParent = command.getFqn().getParent(); // 2) The new parent. helper.wrapNodeForWriting(ctx, newParent, true, true, false, false, false); if (!oldParent.equals(newParent)) { // the nodeAndChildren list contains all child nodes, including the node itself. // 3) now obtain locks on the new places these new nodes will occupy. for (Fqn f : nodeAndChildren) { Fqn newChildFqn = f.replaceAncestor(oldParent, newParent); helper.wrapNodeForWriting(ctx, newChildFqn, true, true, true, false, false); } } // now pass up the chain. return invokeNextInterceptor(ctx, command); }
/**
 * Removes {@code workspaceNode} from its parent within the transaction
 * workspace and marks the node and all of its workspace descendants as
 * removed, with before/after removal notifications.
 *
 * @param workspaceNode node to remove; {@code null} means already removed
 * @param notify        whether to fire pre/post removal notifications
 * @return validity of the underlying (non-workspace) node after the removal
 * @throws NodeNotExistsException if the parent cannot be fetched into the workspace
 */
private boolean removeNode(TransactionWorkspace workspace, WorkspaceNode workspaceNode, boolean notify, InvocationContext ctx) throws CacheException {
   // it is already removed - we can ignore it
   if (workspaceNode == null) return false;
   Fqn parentFqn = workspaceNode.getFqn().getParent();
   WorkspaceNode parentNode = fetchWorkspaceNode(ctx, parentFqn, workspace, false, true);
   if (parentNode == null) throw new NodeNotExistsException("Unable to find parent node with fqn " + parentFqn);
   // pre-notify
   if (notify) notifier.notifyNodeRemoved(workspaceNode.getFqn(), true, workspaceNode.getData(), ctx);
   Fqn nodeFqn = workspaceNode.getFqn();
   parentNode.removeChild(nodeFqn.getLastElement());
   // Scan workspace nodes sorted after this one; descendants sort
   // contiguously, so the first non-descendant ends the walk.
   SortedMap<Fqn, WorkspaceNode> tailMap = workspace.getNodesAfter(workspaceNode.getFqn());
   for (WorkspaceNode toDelete : tailMap.values()) {
      if (toDelete.getFqn().isChildOrEquals(nodeFqn)) {
         if (trace) log.trace("marking node " + toDelete.getFqn() + " as deleted");
         toDelete.setRemoved(true);
      } else {
         break;// no more children, we came to the end
      }
   }
   // post-notify
   if (notify) notifier.notifyNodeRemoved(workspaceNode.getFqn(), false, null, ctx);
   return workspaceNode.getNode().isValid();
}
/**
 * Creates and registers a wrapped node for an Fqn that does not yet exist,
 * locking the parent first (when required) and then the new node itself so
 * that concurrent creation is prevented.
 * <p>
 * NOTE(review): the {@code parentFqn} parameter is immediately overwritten
 * with {@code fqn.getParent()}, so the caller-supplied value is ignored —
 * confirm whether the parameter is still meaningful or can be dropped.
 *
 * @return the newly created, update-marked {@link ReadCommittedNode}
 * @throws InterruptedException if lock acquisition is interrupted
 */
private ReadCommittedNode createAbsentNode(Fqn parentFqn, Fqn fqn, InvocationContext context) throws InterruptedException {
   parentFqn = fqn.getParent();
   NodeSPI parent = wrapNodeForWriting(context, parentFqn, false, true, false, false, false);
   // do we need to lock the parent to create children?
   boolean parentLockNeeded = isParentLockNeeded(parent.getDelegationTarget());
   // get a lock on the parent.
   if (parentLockNeeded && acquireLock(context, parentFqn)) {
      ReadCommittedNode parentRCN = (ReadCommittedNode) context.lookUpNode(parentFqn);
      parentRCN.markForUpdate(dataContainer, writeSkewCheck);
   }
   // now to lock and create the node. Lock first to prevent concurrent creation!
   acquireLock(context, fqn);
   InternalNode in = nodeFactory.createChildNode(fqn, null, context, false);
   ReadCommittedNode n = nodeFactory.createWrappedNode(in, parent.getDelegationTarget());
   n.setCreated(true);
   n.setDataLoaded(true); // created here so we are loading it here
   context.putLookedUpNode(fqn, n);
   n.markForUpdate(dataContainer, writeSkewCheck);
   return n;
}