/**
 * Key for a cache object is built from all the known Authorities (which can change dynamically so they must all be
 * used) the NodeRef ID and the permission reference itself. This gives a unique key for each permission test.
 *
 * @param auths   all authorities in force for the current user; a dynamic
 *                {@code AuthorityServiceImpl.UserAuthoritySet} is keyed by its username instead of its contents
 * @param nodeRef the node the permission is being tested against
 * @param perm    the permission reference under test (its string form goes into the key)
 * @param type    the cache type, included so different cache types never collide on the same key
 * @return a {@link Serializable} composite key (an insertion-ordered set of the parts above)
 */
Serializable generateKey(Set<String> auths, NodeRef nodeRef, PermissionReference perm, CacheType type)
{
    // LinkedHashSet: insertion order is preserved, so equal inputs always produce an equal key
    LinkedHashSet<Serializable> key = new LinkedHashSet<Serializable>();
    key.add(perm.toString());
    // We will just have to key our dynamic sets by username. We wrap it so as not to be confused with a static set
    if (auths instanceof AuthorityServiceImpl.UserAuthoritySet)
    {
        key.add((Serializable)Collections.singleton(((AuthorityServiceImpl.UserAuthoritySet)auths).getUsername()));
    }
    else
    {
        key.addAll(auths);
    }
    key.add(nodeRef);
    // Ensure some concept of node version or transaction is included in the key so we can track without cache replication
    NodeRef.Status nodeStatus = nodeService.getNodeStatus(nodeRef);
    // "null" placeholder keeps the key shape stable when the node has no status
    key.add(nodeStatus == null ? "null" : nodeStatus.getChangeTxnId());
    key.add(type);
    return key;
}
@SuppressWarnings("deprecation") protected NodeRef convertVersionNodeRefToVersionedNodeRef(NodeRef versionNodeRef) { Status status = nodeDAO.getNodeRefStatus(versionNodeRef); if (status == null) { return versionNodeRef; } Map<QName, Serializable> properties = nodeDAO.getNodeProperties(status.getDbId()); NodeRef nodeRef = null; // Switch VersionStore depending on configured impl if (versionNodeRef.getStoreRef().getIdentifier().equals(Version2Model.STORE_ID)) { // V2 version store (eg. workspace://version2Store) nodeRef = (NodeRef)properties.get(Version2Model.PROP_QNAME_FROZEN_NODE_REF); } else if (versionNodeRef.getStoreRef().getIdentifier().equals(VersionModel.STORE_ID)) { // Deprecated V1 version store (eg. workspace://lightWeightVersionStore) nodeRef = new NodeRef((String) properties.get(VersionModel.PROP_QNAME_FROZEN_NODE_STORE_PROTOCOL), (String) properties.get(VersionModel.PROP_QNAME_FROZEN_NODE_STORE_ID), (String) properties.get(VersionModel.PROP_QNAME_FROZEN_NODE_ID)); } return nodeRef; }
Long childNodeId = nodeIds.get(childNodeRef); NodeRef.Status childNodeStatus = nodeDAO.getNodeRefStatus(childNodeRef); if (childNodeStatus == null || childNodeStatus.isDeleted())
NodeRef nodeRef = status.getNodeRef(); nodeMetaData.setTxnId(status.getDbTxnId()); if(status.isDeleted()) nodeMetaData.setAclId(nodeDAO.getNodeAclId(unversionedStatus.getDbId())); categoryPaths = getCategoryPaths(status.getNodeRef(), aspects, props); List<Path> directPaths = nodeDAO.getPaths(new Pair<Long, NodeRef>(nodeId, status.getNodeRef()), false); Collection<Pair<Path, QName>> paths = new ArrayList<Pair<Path, QName>>(directPaths.size() + categoryPaths.getPaths().size()); List<Path> unversionedPaths = nodeDAO.getPaths(new Pair<Long, NodeRef>(unversionedStatus.getDbId(), unversionedStatus.getNodeRef()), false); for (Path path : unversionedPaths) nodeMetaData.setOwner(ownableService.getOwner(status.getNodeRef()));
NodeRef nodeRef = status.getNodeRef(); nodeMetaData.setTxnId(status.getDbTxnId()); if(status.isDeleted()) nodeMetaData.setAclId(nodeDAO.getNodeAclId(unversionedStatus.getDbId())); categoryPaths = getCategoryPaths(status.getNodeRef(), aspects, props); List<Path> directPaths = nodeDAO.getPaths(new Pair<Long, NodeRef>(nodeId, status.getNodeRef()), false); Collection<Pair<Path, QName>> paths = new ArrayList<Pair<Path, QName>>(directPaths.size() + categoryPaths.getPaths().size()); List<Path> unversionedPaths = nodeDAO.getPaths(new Pair<Long, NodeRef>(unversionedStatus.getDbId(), unversionedStatus.getNodeRef()), false); for (Path path : unversionedPaths) nodeMetaData.setOwner(ownableService.getOwner(status.getNodeRef()));
if (status.isDeleted())
if (status.isDeleted())
assertFalse("'Live' node " + i + " should be node be deleted", liveStatus.isDeleted()); assertNull("'Archived' node " + i + " should not (yet) exist.", archivedStatus); txnIdCreate = liveStatus.getDbTxnId(); txnIdCreate, liveStatus.getDbTxnId()); assertTrue("'Live' node " + i + " should be deleted (ghost entries)", liveStatus.isDeleted()); assertNotNull("'Archived' node " + i + " does not exist.", archivedStatus); assertFalse("'Archived' node " + i + " should be undeleted", archivedStatus.isDeleted()); txnIdDelete = liveStatus.getDbTxnId(); txnIdDelete, liveStatus.getDbTxnId()); txnIdDelete, archivedStatus.getDbTxnId()); assertFalse("'Live' node " + i + " should not be deleted", liveStatus.isDeleted()); assertNotNull("'Archived' node " + i + " does not exist.", archivedStatus); assertTrue("'Archived' node " + i + " should be deleted (ghost entry)", archivedStatus.isDeleted()); txnIdRestore = liveStatus.getDbTxnId(); txnIdRestore, liveStatus.getDbTxnId()); txnIdRestore, archivedStatus.getDbTxnId());
Status status = nodeService.getNodeStatus(nodeRef4); Long lastCascadeTx = (Long)nodeService.getProperty(nodeRef4, ContentModel.PROP_CASCADE_TX); assertTrue(status.getDbTxnId().equals(lastCascadeTx)); assertTrue(nodeService.getProperty(nodeRef4, ContentModel.PROP_CASCADE_CRC) != null); Long crcIn3 = (Long)nodeService.getProperty(nodeRef4, ContentModel.PROP_CASCADE_CRC);
while ((nodeId = toVisit.pollFirst()) != null) if (visited.add(nodeId) && (nodeDAO.getNodeIdStatus(nodeId) != null) && (false == nodeDAO.getNodeIdStatus(nodeId).isDeleted()))
Long childNodeId = nodeIds.get(childNodeRef); NodeRef.Status childNodeStatus = nodeDAO.getNodeRefStatus(childNodeRef); if (childNodeStatus == null || childNodeStatus.isDeleted())
while ((nodeId = toVisit.pollFirst()) != null) if (visited.add(nodeId) && (nodeDAO.getNodeIdStatus(nodeId) != null) && (false == nodeDAO.getNodeIdStatus(nodeId).isDeleted()))
String currentTxnId = AlfrescoTransactionSupport.getTransactionId(); assertNotNull(currentTxnId); assertNotSame(currentTxnId, currentStatus.getChangeTxnId()); try assertNotNull(newStatus); assertEquals("Change didn't update status", currentTxnId, newStatus.getChangeTxnId()); Long nodeId = newStatus.getDbId(); nodeDAO.getParentAssocs(nodeId, null, null, null, new DummyChildAssocRefQueryCallback()); nodeDAO.cacheNodesById(Collections.singletonList(nodeId));
/**
 * Resolves a version-store node back to the frozen (versioned) node it represents.
 *
 * @param versionNodeRef a node in a version store
 * @return the frozen NodeRef for the version node; the input unchanged when no status
 *         exists; or {@code null} when the store identifier matches no known version store
 */
@SuppressWarnings("deprecation")
protected NodeRef convertVersionNodeRefToVersionedNodeRef(NodeRef versionNodeRef)
{
    Status status = nodeDAO.getNodeRefStatus(versionNodeRef);
    if (status == null)
    {
        // Unknown node - return it as-is rather than failing
        return versionNodeRef;
    }
    Map<QName, Serializable> properties = nodeDAO.getNodeProperties(status.getDbId());
    NodeRef nodeRef = null;
    // Switch VersionStore depending on configured impl
    if (versionNodeRef.getStoreRef().getIdentifier().equals(Version2Model.STORE_ID))
    {
        // V2 version store (eg. workspace://version2Store) - frozen ref stored directly as a property
        nodeRef = (NodeRef)properties.get(Version2Model.PROP_QNAME_FROZEN_NODE_REF);
    }
    else if (versionNodeRef.getStoreRef().getIdentifier().equals(VersionModel.STORE_ID))
    {
        // Deprecated V1 version store (eg. workspace://lightWeightVersionStore)
        // - frozen ref reconstructed from three separate properties
        nodeRef = new NodeRef((String) properties.get(VersionModel.PROP_QNAME_FROZEN_NODE_STORE_PROTOCOL),
                              (String) properties.get(VersionModel.PROP_QNAME_FROZEN_NODE_STORE_ID),
                              (String) properties.get(VersionModel.PROP_QNAME_FROZEN_NODE_ID));
    }
    return nodeRef;
}
/** * Key for a cache object is built from all the known Authorities (which can change dynamically so they must all be * used) the NodeRef ID and the permission reference itself. This gives a unique key for each permission test. */ Serializable generateKey(Set<String> auths, NodeRef nodeRef, PermissionReference perm, CacheType type) { LinkedHashSet<Serializable> key = new LinkedHashSet<Serializable>(); key.add(perm.toString()); // We will just have to key our dynamic sets by username. We wrap it so as not to be confused with a static set if (auths instanceof AuthorityServiceImpl.UserAuthoritySet) { key.add((Serializable)Collections.singleton(((AuthorityServiceImpl.UserAuthoritySet)auths).getUsername())); } else { key.addAll(auths); } key.add(nodeRef); // Ensure some concept of node version or transaction is included in the key so we can track without cache replication NodeRef.Status nodeStatus = nodeService.getNodeStatus(nodeRef); key.add(nodeStatus == null ? "null" : nodeStatus.getChangeTxnId()); key.add(type); return key; }
/**
 * Returns the next batch of live (non-deleted) nodes carrying the target aspect.
 * Scans forward in windows of {@code count} node ids until a non-empty batch is
 * found or the id range is exhausted.
 *
 * @return the shared, reused {@code result} collection containing the batch
 *         (empty when no work remains or no aspect id was resolved)
 */
public Collection<NodeRef> getNextWork()
{
    if (val != null)
    {
        Long aspectQNameId = val.getFirst();
        // result is reused across calls - clear leftovers from the previous batch
        result.clear();
        while (result.isEmpty() && minSearchNodeId < maxNodeId)
        {
            List<Long> nodeids = getPatchDAO().getNodesByAspectQNameId(aspectQNameId, minSearchNodeId, maxSearchNodeId);
            for (Long nodeid : nodeids)
            {
                NodeRef.Status status = getNodeDAO().getNodeIdStatus(nodeid);
                // FIX: guard against a null status before dereferencing - other callers of
                // getNodeIdStatus in this file check for null first; a node removed between
                // the DAO query and the status lookup would otherwise throw an NPE here.
                if (status != null && !status.isDeleted())
                {
                    result.add(status.getNodeRef());
                }
            }
            // Advance the scan window by one batch size
            minSearchNodeId = minSearchNodeId + count;
            maxSearchNodeId = maxSearchNodeId + count;
        }
    }
    return result;
}
};
/**
 * Creates a folder plus 99 content children (100 nodes in total, matching the
 * container name), registering each as expected-UPDATED.
 *
 * @return the DB transaction id recorded on the container node
 * @throws Throwable on any failure creating the nodes
 */
public Long execute() throws Throwable
{
    PropertyMap folderProps = new PropertyMap();
    folderProps.put(ContentModel.PROP_NAME, "Container100Nodes");
    NodeRef containerRef = nodeService.createNode(
            rootNodeRef,
            ContentModel.ASSOC_CHILDREN,
            ContentModel.ASSOC_CHILDREN,
            ContentModel.TYPE_FOLDER,
            folderProps).getChildRef();
    setExpectedNodeStatus(containerRef, NodeStatus.UPDATED);
    // 99 children + the container itself = 100 nodes
    for (int childIndex = 0; childIndex < 99; childIndex++)
    {
        FileInfo childInfo = fileFolderService.create(containerRef, "Content" + childIndex, ContentModel.TYPE_CONTENT);
        setExpectedNodeStatus(childInfo.getNodeRef(), NodeStatus.UPDATED);
    }
    return nodeDAO.getNodeRefStatus(containerRef).getDbTxnId();
}
}));
/**
 * Returns the next batch of live (non-deleted) nodes carrying the target aspect.
 * Scans forward in windows of {@code count} node ids until a non-empty batch is
 * found or the id range is exhausted.
 *
 * @return the shared, reused {@code result} collection containing the batch
 *         (empty when no work remains or no aspect id was resolved)
 */
public Collection<NodeRef> getNextWork()
{
    if (val != null)
    {
        Long aspectQNameId = val.getFirst();
        // result is reused across calls - clear leftovers from the previous batch
        result.clear();
        while (result.isEmpty() && minSearchNodeId < maxNodeId)
        {
            List<Long> nodeids = getPatchDAO().getNodesByAspectQNameId(aspectQNameId, minSearchNodeId, maxSearchNodeId);
            for (Long nodeid : nodeids)
            {
                NodeRef.Status status = getNodeDAO().getNodeIdStatus(nodeid);
                // FIX: guard against a null status before dereferencing - other callers of
                // getNodeIdStatus in this file check for null first; a node removed between
                // the DAO query and the status lookup would otherwise throw an NPE here.
                if (status != null && !status.isDeleted())
                {
                    result.add(status.getNodeRef());
                }
            }
            // Advance the scan window by one batch size
            minSearchNodeId = minSearchNodeId + count;
            maxSearchNodeId = maxSearchNodeId + count;
        }
    }
    return result;
}
};
public Object execute() { // check n6 NodeRef.Status n6Status = nodeDAO.getNodeRefStatus(n6Ref); if (!n6Status.isDeleted()) { throw new RuntimeException("Deleted node does not have deleted status"); } // n8 is a primary child - it should be deleted too NodeRef.Status n8Status = nodeDAO.getNodeRefStatus(n8Ref); if (!n8Status.isDeleted()) { throw new RuntimeException("Cascade-deleted node does not have deleted status"); } return null; } };
/**
 * Creates a folder with one content child, then sets a residual property
 * (a QName in the made-up {rubbish} namespace) on the content node.
 *
 * @return the DB transaction id recorded on the container node
 * @throws Throwable on any failure creating the nodes
 */
public Long execute() throws Throwable
{
    PropertyMap props = new PropertyMap();
    props.put(ContentModel.PROP_NAME, "ContainerResidual");
    // container/content appear to be fields on the enclosing test - TODO confirm
    container = nodeService.createNode(
            rootNodeRef,
            ContentModel.ASSOC_CHILDREN,
            ContentModel.ASSOC_CHILDREN,
            ContentModel.TYPE_FOLDER,
            props).getChildRef();
    FileInfo contentInfo = fileFolderService.create(container, "ContentResidual", ContentModel.TYPE_CONTENT);
    content = contentInfo.getNodeRef();
    // Property in an unregistered namespace - residual by construction
    nodeService.setProperty(content, QName.createQName("{rubbish}rubbish"), "Rubbish");
    return nodeDAO.getNodeRefStatus(container).getDbTxnId();
}
}));