return new AppenderatorImpl( schema, config,
throwPersistErrorIfExists(); try { commitLock.lock(); objectMapper.writeValue(computeCommitFile(), Committed.nil()); futures.add(abandonSegment(entry.getKey(), entry.getValue(), true));
@Override
public Object startJob()
{
  // File.mkdirs() returns false both on failure AND when the directory already
  // exists, so the original call silently ignored real creation failures. Only
  // fail when the directory is still missing afterwards.
  final File baseDir = tuningConfig.getBasePersistDirectory();
  if (!baseDir.mkdirs() && !baseDir.isDirectory()) {
    throw new ISE("Could not create basePersistDirectory: %s", baseDir);
  }
  lockBasePersistDirectory();
  // Recover any sinks persisted by a previous run before accepting new work.
  final Object retVal = bootstrapSinksFromDisk();
  initializeExecutors();
  resetNextFlush();
  return retVal;
}
/**
 * Ensures the persist directory for {@code identifier} exists and writes the
 * segment's identifier file into it (presumably read back on bootstrap — see
 * bootstrapSinksFromDisk).
 *
 * @return the segment's persist directory
 * @throws IOException if the directory cannot be created or the identifier cannot be written
 */
private File createPersistDirIfNeeded(SegmentIdWithShardSpec identifier) throws IOException
{
  final File segmentDir = computePersistDir(identifier);
  FileUtils.forceMkdir(segmentDir);

  final File identifierFile = computeIdentifierFile(identifier);
  objectMapper.writeValue(identifierFile, identifier);

  return segmentDir;
}
futures.add(abandonSegment(entry.getKey(), entry.getValue(), false)); shutdownExecutors(); Preconditions.checkState( persistExecutor == null || persistExecutor.awaitTermination(365, TimeUnit.DAYS), unlockBasePersistDirectory();
final File persistDir = computePersistDir(identifier); final File mergedTarget = new File(persistDir, "merged"); final File descriptorFile = computeDescriptorFile(identifier); removeDirectory(mergedTarget);
}; Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory()); appenderator.startJob(); Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory()); appenderator.add(IDENTIFIERS.get(0), IR("2000", "foo", 1), committerSupplier); Assert.assertEquals( 138 + nullHandlingOverhead, ((AppenderatorImpl) appenderator).getBytesInMemory(IDENTIFIERS.get(0)) ); Assert.assertEquals(1, ((AppenderatorImpl) appenderator).getRowsInMemory()); appenderator.add(IDENTIFIERS.get(1), IR("2000", "bar", 1), committerSupplier); Assert.assertEquals( 276 + 2 * nullHandlingOverhead, ((AppenderatorImpl) appenderator).getBytesCurrentlyInMemory() ); Assert.assertEquals(2, ((AppenderatorImpl) appenderator).getRowsInMemory()); appenderator.close(); Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory());
Assert.assertEquals(138 + nullHandlingOverhead, ((AppenderatorImpl) appenderator).getBytesCurrentlyInMemory()); appenderator.add(IDENTIFIERS.get(1), IR("2000", "bar", 1), committerSupplier); Assert.assertEquals( 276 + 2 * nullHandlingOverhead, ((AppenderatorImpl) appenderator).getBytesCurrentlyInMemory() ); appenderator.close(); Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory());
/**
 * Serializes the given commit metadata to the on-disk commit file.
 *
 * @throws IOException if the commit file cannot be written
 */
private void writeCommit(Committed newCommit) throws IOException
{
  objectMapper.writeValue(computeCommitFile(), newCommit);
}
// Location of the segment's descriptor, kept alongside its other persist artifacts.
private File computeDescriptorFile(SegmentIdWithShardSpec identifier)
{
  final File persistDir = computePersistDir(identifier);
  return new File(persistDir, "descriptor.json");
}
@Override
public ListenableFuture<?> drop(final SegmentIdWithShardSpec identifier)
{
  final Sink sink = sinks.get(identifier);
  if (sink == null) {
    // Not tracking this segment; nothing to abandon.
    return Futures.immediateFuture(null);
  }
  return abandonSegment(identifier, sink, true);
}
/**
 * Acquires an exclusive file lock on the base persist directory, guarding
 * against two processes using the same directory. Idempotent: does nothing
 * if the lock is already held by this instance.
 */
private void lockBasePersistDirectory()
{
  if (basePersistDirLock == null) {
    try {
      basePersistDirLockChannel = FileChannel.open(
          computeLockFile().toPath(),
          StandardOpenOption.CREATE,
          StandardOpenOption.WRITE
      );
      basePersistDirLock = basePersistDirLockChannel.tryLock();
      if (basePersistDirLock == null) {
        // Another process holds the lock. Close the channel before throwing so
        // we don't leak the file handle (the original leaked it here).
        basePersistDirLockChannel.close();
        throw new ISE("Cannot acquire lock on basePersistDir: %s", computeLockFile());
      }
    }
    catch (IOException e) {
      // Throwables.propagate is deprecated; IOException is checked, so
      // propagate() would have wrapped it in a RuntimeException anyway.
      throw new RuntimeException(e);
    }
  }
}
final File persistDir = createPersistDirIfNeeded(identifier); final IndexSpec indexSpec = tuningConfig.getIndexSpec(); persistedFile = indexMerger.persist(
futures.add(abandonSegment(entry.getKey(), entry.getValue(), false)); shutdownExecutors(); Preconditions.checkState( persistExecutor == null || persistExecutor.awaitTermination(365, TimeUnit.DAYS), unlockBasePersistDirectory();
final File persistDir = computePersistDir(identifier); final File mergedTarget = new File(persistDir, "merged"); final File descriptorFile = computeDescriptorFile(identifier); removeDirectory(mergedTarget);
/**
 * Creates the persist directory for {@code identifier} if missing and records
 * the identifier file inside it.
 *
 * @return the segment's persist directory
 * @throws IOException on directory-creation or write failure
 */
private File createPersistDirIfNeeded(SegmentIdentifier identifier) throws IOException
{
  final File dir = computePersistDir(identifier);
  FileUtils.forceMkdir(dir);
  objectMapper.writeValue(computeIdentifierFile(identifier), identifier);
  return dir;
}
private Committed readCommit() throws IOException { final File commitFile = computeCommitFile(); if (commitFile.exists()) { // merge current hydrants with existing hydrants return objectMapper.readValue(commitFile, Committed.class); } else { return null; } }
// The identifier file sits directly under the segment's persist directory.
private File computeIdentifierFile(SegmentIdWithShardSpec identifier)
{
  final File persistDir = computePersistDir(identifier);
  return new File(persistDir, IDENTIFIER_FILE_NAME);
}
@Override
public ListenableFuture<?> drop(final SegmentIdentifier identifier)
{
  // Abandon the sink (with removeOnDiskData = true) when we track this
  // segment; otherwise report immediate success.
  final Sink sink = sinks.get(identifier);
  return sink == null
         ? Futures.immediateFuture(null)
         : abandonSegment(identifier, sink, true);
}
/**
 * Acquires an exclusive file lock on the base persist directory so two
 * processes cannot share it. No-op when this instance already holds the lock.
 */
private void lockBasePersistDirectory()
{
  if (basePersistDirLock == null) {
    try {
      basePersistDirLockChannel = FileChannel.open(
          computeLockFile().toPath(),
          StandardOpenOption.CREATE,
          StandardOpenOption.WRITE
      );
      basePersistDirLock = basePersistDirLockChannel.tryLock();
      if (basePersistDirLock == null) {
        // tryLock() returned null: some other process owns the lock. Close the
        // channel before throwing so the handle is not leaked (original bug).
        basePersistDirLockChannel.close();
        throw new ISE("Cannot acquire lock on basePersistDir: %s", computeLockFile());
      }
    }
    catch (IOException e) {
      // Replaces deprecated Throwables.propagate; behavior is identical for a
      // checked IOException (wrap in RuntimeException).
      throw new RuntimeException(e);
    }
  }
}