@Override
public void write(List<? extends DocumentWriteOperation> items) {
    // Hand each item off to the WriteBatcher, which batches the documents
    // and writes them to MarkLogic asynchronously.
    for (DocumentWriteOperation op : items) {
        writeBatcher.add(op.getUri(), op.getMetadata(), op.getContent());
    }
}
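// For illustration only: one way to build a DocumentWriteOperation to feed the
// writer above, using DocumentWriteOperationImpl from com.marklogic.client.impl.
// The URI, collection name, and JSON body are placeholder values.
DocumentWriteOperation op = new DocumentWriteOperationImpl(
        DocumentWriteOperation.OperationType.DOCUMENT_WRITE,
        "/example/doc1.json",
        new DocumentMetadataHandle().withCollections("examples"),
        new StringHandle("{\"hello\":\"world\"}"));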
private void closeAllHandles() throws Throwable {
    // Close any Closeable content or metadata handles. Remember the last
    // failure but keep going, so every handle gets a chance to close.
    Throwable lastThrowable = null;
    for (DocumentWriteOperation doc : writeSet.getWriteSet()) {
        try {
            if (doc.getContent() instanceof Closeable) {
                ((Closeable) doc.getContent()).close();
            }
            if (doc.getMetadata() instanceof Closeable) {
                ((Closeable) doc.getMetadata()).close();
            }
        } catch (Throwable t) {
            logger.error("error calling close()", t);
            lastThrowable = t;
        }
    }
    if (lastThrowable != null) throw lastThrowable;
}
List<RequestParameters> headerList = new ArrayList<>();
for (DocumentWriteOperation write : writeSet) {
    HandleImplementation metadata = HandleAccessor.checkHandle(write.getMetadata(), "write");
    HandleImplementation content = HandleAccessor.checkHandle(write.getContent(), "write");
    // When a temporal document URI is present, append it to every
    // Content-Disposition header built for this write operation.
    String contentDispositionTemporal = "";
    String temporalDocumentURI = write.getTemporalDocumentURI();
    if (temporalDocumentURI != null) {
        contentDispositionTemporal = "; temporal-document=" + temporalDocumentURI;
    }
    if (write.getOperationType() == DocumentWriteOperation.OperationType.DISABLE_METADATA_DEFAULT) {
        RequestParameters headers = new RequestParameters();
        headers.add(HEADER_CONTENT_TYPE, metadata.getMimetype());
        headers.add(HEADER_CONTENT_DISPOSITION,
            DISPOSITION_TYPE_INLINE + "; category=metadata" + contentDispositionTemporal);
        headerList.add(headers);
        writeHandles.add(write.getMetadata());
    } else if (metadata != null) {
        RequestParameters headers = new RequestParameters();
        headers.add(HEADER_CONTENT_TYPE, metadata.getMimetype());
        if (write.getOperationType() == DocumentWriteOperation.OperationType.METADATA_DEFAULT) {
            headers.add(HEADER_CONTENT_DISPOSITION,
                DISPOSITION_TYPE_INLINE + "; category=metadata" + contentDispositionTemporal);
        } else {
            String disposition = DISPOSITION_TYPE_ATTACHMENT + "; "
                + DISPOSITION_PARAM_FILENAME + "=" + escapeContentDispositionFilename(write.getUri())
                + "; category=metadata" + contentDispositionTemporal;
            headers.add(HEADER_CONTENT_DISPOSITION, disposition);
        }
        headerList.add(headers);
        writeHandles.add(write.getMetadata());
    }
    if (content != null) {
        // The content part's disposition names the target URI but, unlike the
        // metadata part, carries no "category=metadata" parameter.
        RequestParameters headers = new RequestParameters();
        headers.add(HEADER_CONTENT_TYPE, content.getMimetype());
        String disposition = DISPOSITION_TYPE_ATTACHMENT + "; "
            + DISPOSITION_PARAM_FILENAME + "=" + escapeContentDispositionFilename(write.getUri())
            + contentDispositionTemporal;
        headers.add(HEADER_CONTENT_DISPOSITION, disposition);
        headerList.add(headers);
        writeHandles.add(write.getContent());
    }
}
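// For illustration (values assumed, not taken from the source): with the
// standard disposition types "inline" and "attachment", a metadata part for a
// temporal write might carry a header roughly like
//   Content-Disposition: attachment; filename=/example/doc1.json; category=metadata; temporal-document=/example/doc1.json
// while the matching content part omits "; category=metadata".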
@Override
public WriteBatcher add(DocumentWriteOperation writeOperation) {
    if (writeOperation.getUri() == null) throw new IllegalArgumentException("uri must not be null");
    if (writeOperation.getContent() == null) throw new IllegalArgumentException("contentHandle must not be null");
    initialize();
    requireNotStopped();
    queue.add(writeOperation);
    logger.trace("add uri={}", writeOperation.getUri());
    // if we have queued batchSize, it's time to flush a batch
    long recordNum = batchCounter.incrementAndGet();
    boolean timeToWriteBatch = (recordNum % getBatchSize()) == 0;
    if (timeToWriteBatch) {
        BatchWriteSet writeSet = newBatchWriteSet(false);
        for (int i = 0; i < getBatchSize(); i++) {
            DocumentWriteOperation doc = queue.poll();
            if (doc != null) {
                writeSet.getWriteSet().add(doc);
            } else {
                // strange, there should have been a full batch of docs in the queue...
                break;
            }
        }
        if (writeSet.getWriteSet().size() > 0) {
            threadPool.submit(new BatchWriter(writeSet));
        }
    }
    return this;
}
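// A minimal usage sketch, not from the snippets above: it assumes a MarkLogic
// instance at localhost:8000 with placeholder credentials, and shows how client
// code typically drives WriteBatcher.add(...) through the public
// DataMovementManager API.
import com.marklogic.client.DatabaseClient;
import com.marklogic.client.DatabaseClientFactory;
import com.marklogic.client.datamovement.DataMovementManager;
import com.marklogic.client.datamovement.WriteBatcher;
import com.marklogic.client.io.StringHandle;

public class WriteBatcherExample {
    public static void main(String[] args) {
        DatabaseClient client = DatabaseClientFactory.newClient("localhost", 8000,
                new DatabaseClientFactory.DigestAuthContext("admin", "admin"));
        DataMovementManager dmm = client.newDataMovementManager();
        WriteBatcher batcher = dmm.newWriteBatcher()
                .withBatchSize(100)
                .withThreadCount(4)
                .onBatchSuccess(batch -> System.out.println("wrote " + batch.getJobWritesSoFar() + " docs so far"))
                .onBatchFailure((batch, throwable) -> throwable.printStackTrace());
        dmm.startJob(batcher);
        batcher.add("/example/doc1.json", new StringHandle("{\"hello\":\"world\"}"));
        batcher.flushAndWait(); // drains the queue, including a final partial batch
        dmm.stopJob(batcher);
        client.release();
    }
}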
public void cleanupDocs(DocumentWriteSet docs) {
    // Bulk-delete every URI in the write set with a single server call.
    client.newDocumentManager().delete(
        docs.stream().map(doc -> doc.getUri()).toArray(String[]::new));
}
public void testExceptions(WriteBatcher writeBatcher, DocumentWriteSet docs,
                           int expectedSuccesses, int expectedFailures) {
    final AtomicInteger successfulBatchCount = new AtomicInteger(0);
    final AtomicInteger failureBatchCount = new AtomicInteger(0);
    writeBatcher
        .withBatchSize(1)
        .onBatchSuccess(batch -> successfulBatchCount.incrementAndGet())
        .onBatchFailure((batch, throwable) -> failureBatchCount.incrementAndGet());
    moveMgr.startJob(writeBatcher);
    for (DocumentWriteOperation doc : docs) {
        writeBatcher.add(doc.getUri(), doc.getContent());
    }
    // while batchSize=1 means all batches are queued, we still need to wait for them to finish
    writeBatcher.flushAndWait();
    moveMgr.stopJob(writeBatcher);
    assertEquals(expectedSuccesses, successfulBatchCount.get());
    assertEquals(expectedFailures, failureBatchCount.get());
}
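// A hypothetical call to the helper above: build a DocumentWriteSet where one
// document is valid JSON and one is malformed, then expect one success batch and
// one failure batch (with batch size 1, each document is its own batch). The
// docMgr variable, URIs, and document contents are illustrative.
DocumentWriteSet docs = docMgr.newWriteSet();
docs.add("/test/good.json", new StringHandle("{\"ok\":true}").withFormat(Format.JSON));
docs.add("/test/bad.json", new StringHandle("{not json").withFormat(Format.JSON));
testExceptions(moveMgr.newWriteBatcher(), docs, 1, 1);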
@Override
public Content adapt(DocumentWriteOperation operation) {
    String uri = operation.getUri();
    ContentCreateOptions options = adaptMetadata(operation.getMetadata());
    AbstractWriteHandle handle = operation.getContent();
    // Map each supported handle type onto the matching ContentFactory overload.
    if (handle instanceof StringHandle) {
        return ContentFactory.newContent(uri, ((StringHandle) handle).get(), options);
    } else if (handle instanceof FileHandle) {
        return ContentFactory.newContent(uri, ((FileHandle) handle).get(), options);
    } else if (handle instanceof BytesHandle) {
        return ContentFactory.newContent(uri, ((BytesHandle) handle).get(), options);
    } else if (handle instanceof InputStreamHandle) {
        try {
            return ContentFactory.newContent(uri, ((InputStreamHandle) handle).get(), options);
        } catch (IOException e) {
            throw new RuntimeException("Unable to read content input stream: " + e.getMessage(), e);
        }
    } else if (handle instanceof DOMHandle) {
        return ContentFactory.newContent(uri, ((DOMHandle) handle).get(), options);
    } else {
        throw new IllegalArgumentException("No support yet for content class: " + handle.getClass().getName());
    }
}
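// A sketch of how such an adapter might be used to replay Java-client write
// operations through XCC. ContentSourceFactory, Session, and insertContent are
// real XCC APIs; the adapter instance, connection details, and operation
// argument here are assumptions.
void insertViaXcc(DocumentWriteOperation operation) throws RequestException {
    ContentSource contentSource = ContentSourceFactory.newContentSource(
            "localhost", 8000, "admin", "admin".toCharArray(), "Documents");
    Session session = contentSource.newSession();
    try {
        Content content = adapter.adapt(operation);
        session.insertContent(content);
    } finally {
        session.close();
    }
}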
public WriteBatch getBatchOfWriteEvents() {
    WriteBatchImpl batch = new WriteBatchImpl()
        .withBatcher(batcher)
        .withClient(client)
        .withJobBatchNumber(batchNumber)
        .withJobWritesSoFar(itemsSoFar)
        .withJobTicket(batcher.getJobTicket());
    // Convert each queued DocumentWriteOperation into a WriteEvent for listeners.
    WriteEvent[] writeEvents = getWriteSet().stream()
        .map(writeOperation -> new WriteEventImpl()
            .withTargetUri(writeOperation.getUri())
            .withContent(writeOperation.getContent())
            .withMetadata(writeOperation.getMetadata()))
        .toArray(WriteEventImpl[]::new);
    batch.withItems(writeEvents);
    return batch;
}
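// On the listener side, the WriteBatch built above is what onBatchSuccess
// receives. A short consumption sketch (getItems(), getTargetUri(), and
// getJobBatchNumber() are part of the public WriteBatch/WriteEvent API):
batcher.onBatchSuccess(batch -> {
    for (WriteEvent event : batch.getItems()) {
        System.out.println("wrote " + event.getTargetUri()
            + " in batch " + batch.getJobBatchNumber());
    }
});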