/**
 * Returns a snapshot of every field type currently in the name cache.
 * Each entry is a clone, so callers may freely mutate the returned
 * objects without affecting the cached instances.
 *
 * @return a new list of cloned field types (never null)
 * @throws InterruptedException if loading the name cache is interrupted
 */
@Override
public List<FieldType> getFieldTypes() throws InterruptedException {
    List<FieldType> snapshot = new ArrayList<FieldType>();
    for (FieldType cachedType : getNameCache().values()) {
        // Clone to isolate callers from the cache's internal state.
        snapshot.add(cachedType.clone());
    }
    return snapshot;
}
/**
 * Looks up a field type by its qualified name, returning {@code null}
 * (instead of throwing) when no such field type is cached.
 *
 * @param name the qualified name to look up; must not be null
 * @return a clone of the cached field type, or null if absent
 * @throws InterruptedException if loading the name cache is interrupted
 */
public FieldType getFieldTypeByNameReturnNull(QName name) throws InterruptedException {
    ArgumentValidator.notNull(name, "name");
    FieldType cached = getNameCache().get(name);
    if (cached == null) {
        return null;
    }
    // Return a clone so the cached instance cannot be mutated by the caller.
    return cached.clone();
}
/**
 * Looks up a field type by its qualified name.
 *
 * @param name the qualified name to look up; must not be null
 * @return a clone of the cached field type
 * @throws FieldTypeNotFoundException if no field type with this name is cached
 * @throws InterruptedException if loading the name cache is interrupted
 */
@Override
public FieldType getFieldType(QName name) throws FieldTypeNotFoundException, InterruptedException {
    ArgumentValidator.notNull(name, "name");
    FieldType cached = getNameCache().get(name);
    if (cached != null) {
        // Clone so callers cannot mutate the cached instance.
        return cached.clone();
    }
    throw new FieldTypeNotFoundException(name);
}
/**
 * Looks up a field type by its schema id. The id's bytes are hex-encoded
 * to select the cache bucket that may contain the entry.
 *
 * @param id the schema id to look up; must not be null
 * @return a clone of the cached field type
 * @throws FieldTypeNotFoundException if the bucket or the entry is missing
 */
@Override
public FieldType getFieldType(SchemaId id) throws FieldTypeNotFoundException {
    ArgumentValidator.notNull(id, "id");
    String bucketKey = AbstractSchemaCache.encodeHex(id.getBytes());
    Map<SchemaId, FieldType> bucket = buckets.get(bucketKey);
    // Missing bucket and missing entry are reported identically.
    FieldType cached = (bucket == null) ? null : bucket.get(id);
    if (cached == null) {
        throw new FieldTypeNotFoundException(id);
    }
    // Clone so callers cannot mutate the cached instance.
    return cached.clone();
}
/** * Update the cache to contain the new fieldType * * @param fieldType */ public void update(FieldType fieldType) { // Clone the FieldType to avoid changes to it while it is in the cache FieldType ftToCache = fieldType.clone(); SchemaId id = ftToCache.getId(); String bucketId = AbstractSchemaCache.encodeHex(id.getBytes()); // First increment the number of buckets that are being updated incCount(); // Get a lock on the bucket to be updated synchronized (getBucketMonitor(bucketId)) { Map<SchemaId, FieldType> bucket = buckets.get(bucketId); // If the bucket does not exist yet, create it if (bucket == null) { bucket = new ConcurrentHashMap<SchemaId, FieldType>(8, .75f, 1); buckets.put(bucketId, bucket); } bucket.put(id, ftToCache); // Mark that this fieldType is updated locally // and that the next refresh can be ignored // since this refresh can contain an old fieldType addToLocalUpdateBucket(id, bucketId); } // Decrement the number of buckets that are being updated again. decCount(); }
// Work on a clone so the caller's fieldType instance is left untouched,
// then stamp the (presumably newly generated) id onto the copy.
// NOTE(review): fragment of a larger method not visible here — confirm
// where `newFieldType` and `id` are declared in the enclosing scope.
newFieldType = fieldType.clone(); newFieldType.setId(id);
// Clone the incoming fieldType, fill in any fields the caller left
// unspecified from the latest stored version, and verify that no
// immutable field was changed relative to that latest version.
// Fix: the original contained this three-statement sequence twice in a
// row, including a second `FieldType newFieldType` declaration in the
// same scope — an illegal re-declaration that cannot compile (and would
// otherwise run the copy/check steps twice). The duplicate is removed.
FieldType newFieldType = fieldType.clone();
copyUnspecifiedFields(newFieldType, latestFieldType);
checkImmutableFieldsCorrespond(newFieldType, latestFieldType);
// Clone the template fieldType and give the copy a unique name by
// appending the loop counter `i` to the original local name, keeping the
// same namespace; then import it with CREATE_OR_UPDATE semantics.
// NOTE(review): fragment — the statement is cut off after the ImportMode
// argument; remaining arguments and the enclosing loop are not visible here.
FieldType ftToCreate = fieldType.clone(); ftToCreate.setName(new QName(fieldType.getName().getNamespace(), fieldType.getName().getName() + i)); ImportResult<FieldType> result = FieldTypeImport.importFieldType(ftToCreate, ImportMode.CREATE_OR_UPDATE,