public Path getCurrentPath() { // if we've read some WAL entries, get the Path we read from WALEntryBatch batchQueueHead = entryBatchQueue.peek(); if (batchQueueHead != null) { return batchQueueHead.getLastWalPath(); } // otherwise, we must be currently reading from the head of the log queue return logQueue.peek(); }
/**
 * Attempts to auto-recover from an EOF hit while reading the current WAL by removing a
 * zero-length log from the head of the queue, so replication can advance to the next file
 * instead of retrying the truncated one forever.
 * <p>
 * Recovery only happens when {@code eofAutoRecovery} is enabled and there is more than one
 * log queued — the last queued log may still be actively written to and must not be dropped.
 *
 * @param e the exception observed while reading the current WAL; only a direct or wrapped
 *          {@link EOFException} triggers recovery
 */
private void handleEofException(IOException e) {
  if ((e instanceof EOFException || e.getCause() instanceof EOFException)
      && logQueue.size() > 1 && this.eofAutoRecovery) {
    // Read the head once so the check, the log message, and the removal all refer to the
    // same file (the original re-peeked the queue on every use).
    Path head = logQueue.peek();
    try {
      if (fs.getFileStatus(head).getLen() == 0) {
        LOG.warn("Forcing removal of 0 length log in queue: {}", head);
        logQueue.remove();
        currentPosition = 0;
      }
    } catch (IOException ioe) {
      // Include the cause instead of silently dropping it; recovery is best-effort, so we
      // still swallow the failure rather than rethrow.
      LOG.warn("Couldn't get file length information about log {}", head, ioe);
    }
  }
}
/**
 * Looks up the last persisted replication position for the WAL at the head of this
 * recovered queue.
 *
 * @return the stored start offset for the head WAL, or 0 if the lookup fails (in which
 *         case the source is terminated)
 */
private long getRecoveredQueueStartPos() {
  String peerClusterZNode = source.getQueueId();
  long startPosition = 0;
  try {
    // NOTE(review): assumes the recovered queue is non-empty here — peek() returning null
    // would NPE; confirm against callers.
    Path firstLog = this.queue.peek();
    startPosition = this.replicationQueues.getWALPosition(source.getServer().getServerName(),
      peerClusterZNode, firstLog.getName());
    LOG.trace("Recovered queue started with log {} at position {}", firstLog, startPosition);
  } catch (ReplicationException e) {
    terminate("Couldn't get the position of this recovered queue " + peerClusterZNode, e);
  }
  return startPosition;
}
private boolean openNextLog() throws IOException { Path nextPath = logQueue.peek(); if (nextPath != null) { openReader(nextPath); if (reader != null) { return true; } } else { // no more files in queue, this could happen for recovered queue, or for a wal group of a sync // replication peer which has already been transited to DA or S. setCurrentPath(null); } return false; }
private long scheduleDueTasks( long now ) { if ( delayedTasks.isEmpty() ) { // We have no tasks to run. Park until we're woken up by an enqueueTask() call. return NO_TASKS_PARK; } while ( !stopped && !delayedTasks.isEmpty() && delayedTasks.peek().nextDeadlineNanos <= now ) { ScheduledJobHandle task = delayedTasks.poll(); task.submitIfRunnable( pools ); } return delayedTasks.isEmpty() ? NO_TASKS_PARK : delayedTasks.peek().nextDeadlineNanos - now; }
expirationQueue.add(nextToExpire); nextToExpire = expirationQueue.peek(); if (nextToExpire != null) {
expirationQueue.add(nextToExpire); nextToExpire = expirationQueue.peek(); if (nextToExpire != null) {
QueueEntry queueEntry = null; try { queueEntry = this.queue.peek(); if ((queueEntry != null) && (Time.currentTimeMillis() >= queueEntry.endTimeMs)) {
Path walPath = walQueue.peek(); ReplicationSource source = mockReplicationSource(false, CONF); AtomicInteger invokeCount = new AtomicInteger(0);
/**
 * Verifies that the entry stream never reads past the reported length of a WAL that is
 * still being written, and that it picks up the remaining entry once the reported length
 * catches up to the real size.
 */
@Test
public void testReadBeyondCommittedLength() throws IOException, InterruptedException {
  appendToLog("1");
  appendToLog("2");
  long size = log.getLogFileSizeIfBeingWritten(walQueue.peek()).getAsLong();
  // Report one byte less than the real size so entry "2" lies beyond the visible end.
  AtomicLong fileLength = new AtomicLong(size - 1);
  try (WALEntryStream entryStream = new WALEntryStream(walQueue, CONF, 0,
      p -> OptionalLong.of(fileLength.get()), null, new MetricsSource("1"))) {
    assertTrue(entryStream.hasNext());
    assertNotNull(entryStream.next());
    // can not get log 2
    assertFalse(entryStream.hasNext());
    Thread.sleep(1000);
    entryStream.reset();
    // still can not get log 2
    assertFalse(entryStream.hasNext());
    // can get log 2 now
    fileLength.set(size);
    entryStream.reset();
    assertTrue(entryStream.hasNext());
    assertNotNull(entryStream.next());
    assertFalse(entryStream.hasNext());
  }
}
}
/**
 * Verifies batching behavior of the WAL reader over a recovered queue: entries are capped
 * at the configured batch capacity, each WAL is terminated by an end-of-file batch, and
 * once all WALs are consumed the reader yields {@code NO_MORE_DATA}.
 */
@Test
public void testReplicationSourceWALReaderRecovered() throws Exception {
  appendEntriesToLogAndSync(10);
  Path walPath = walQueue.peek();
  log.rollWriter();
  appendEntriesToLogAndSync(5);
  log.shutdown();
  Configuration conf = new Configuration(CONF);
  // Cap the batch size at 10 so the first WAL fills exactly one batch.
  conf.setInt("replication.source.nb.capacity", 10);
  ReplicationSourceWALReader reader = createReader(true, conf);
  WALEntryBatch batch = reader.take();
  assertEquals(walPath, batch.getLastWalPath());
  assertEquals(10, batch.getNbEntries());
  assertFalse(batch.isEndOfFile());
  // The first WAL is exhausted: an empty end-of-file batch follows.
  batch = reader.take();
  assertEquals(walPath, batch.getLastWalPath());
  assertEquals(0, batch.getNbEntries());
  assertTrue(batch.isEndOfFile());
  // The second (rolled) WAL holds the remaining 5 entries and ends the file in one batch.
  walPath = walQueue.peek();
  batch = reader.take();
  assertEquals(walPath, batch.getLastWalPath());
  assertEquals(5, batch.getNbEntries());
  assertTrue(batch.isEndOfFile());
  assertSame(WALEntryBatch.NO_MORE_DATA, reader.take());
}
@Test public void testReplicationSourceWALReaderWrongPosition() throws Exception { appendEntriesToLogAndSync(1); Path walPath = walQueue.peek(); log.rollWriter(); appendEntriesToLogAndSync(20); assertTrue(entryBatch.isEndOfFile()); Path walPath2 = walQueue.peek(); entryBatch = reader.take(); assertEquals(walPath2, entryBatch.getLastWalPath()); assertTrue(entryBatch.isEndOfFile()); Path walPath3 = walQueue.peek(); entryBatch = reader.take(); assertEquals(walPath3, entryBatch.getLastWalPath());
/**
 * Verifies that the WAL reader batches up all available entries, reports the correct last
 * path/position and row-key count, and then delivers subsequently appended entries in a
 * following batch.
 */
@Test
public void testReplicationSourceWALReader() throws Exception {
  appendEntriesToLogAndSync(3);
  // get ending position
  long position;
  try (WALEntryStream entryStream =
      new WALEntryStream(walQueue, CONF, 0, log, null, new MetricsSource("1"))) {
    entryStream.next();
    entryStream.next();
    entryStream.next();
    position = entryStream.getPosition();
  }
  // start up a reader
  Path walPath = walQueue.peek();
  ReplicationSourceWALReader reader = createReader(false, CONF);
  WALEntryBatch entryBatch = reader.take();
  // should've batched up our entries
  assertNotNull(entryBatch);
  assertEquals(3, entryBatch.getWalEntries().size());
  assertEquals(position, entryBatch.getLastWalPosition());
  assertEquals(walPath, entryBatch.getLastWalPath());
  assertEquals(3, entryBatch.getNbRowKeys());
  // a new append shows up as a fresh single-entry batch
  appendToLog("foo");
  entryBatch = reader.take();
  assertEquals(1, entryBatch.getNbEntries());
  assertEquals("foo", getRow(entryBatch.getWalEntries().get(0)));
}
/**
 * Reads (but does NOT remove) the first entry in the loss list.
 *
 * @return the head entry of the backing list, or {@code null} when the list is empty
 *         (presumably backingList follows {@link java.util.Queue#peek()} semantics —
 *         confirm its declared type)
 */
public ReceiverLossListEntry getFirstEntry(){
  return backingList.peek();
}
/**
 * Calculate the time until the next available job.
 *
 * @return milliseconds until the next job, 0 if one is already due to run, and
 *         {@link Long#MAX_VALUE} if there are no jobs to speak of
 */
private long nextJobTime() {
  // Caller must hold the scheduler lock so _todo cannot change underneath us.
  assert _lock.isLocked();
  Task job = _todo.peek();
  if (job == null)
    return Long.MAX_VALUE;
  // Overdue jobs (negative delta) clamp to 0, i.e. "run now".
  return Math.max(0, job.schedDate - System.currentTimeMillis());
}
/**
 * Calculate the time until the next available job.
 *
 * @return milliseconds until the next job, 0 if one is already due to run, and
 *         {@link Long#MAX_VALUE} if there are no jobs to speak of
 */
private long nextJobTime() {
  // Caller must hold the scheduler lock so _todo cannot change underneath us.
  assert _lock.isLocked();
  Task job = _todo.peek();
  if (job == null)
    return Long.MAX_VALUE;
  // Overdue jobs (negative delta) clamp to 0, i.e. "run now".
  return Math.max(0, job.getScheduledDate() - System.currentTimeMillis());
}
/**
 * Retrieves, without removing, the head element of the delegate, unwrapping it before
 * returning it to the caller.
 */
@Override
public T peek() {
  // Delegate the peek, then translate the wrapped element back to the exposed type.
  return unwrap(delegate.peek());
}
/**
 * Marks {@code index} as released, then advances {@code tail} past the now-contiguous run
 * of released indices at the head of the queue, granting one semaphore permit per index
 * consumed.
 *
 * @param index the slot index being released
 * @return how many contiguous indices were consumed from the tail
 */
public synchronized int release(int index) {
  this.queue.add(index);
  int released = 0;
  // Pop indices while they extend the contiguous run starting at 'tail'.
  while (!this.queue.isEmpty() && this.queue.peek() == tail) {
    this.queue.remove();
    tail++;
    released++;
  }
  if (released > 0) {
    semaphore.release(released);
  }
  return released;
}
}
/**
 * Records {@code index} as released and consumes every released index that is now
 * contiguous with {@code tail}, releasing a matching number of semaphore permits.
 *
 * @param index the slot index being released
 * @return the number of indices advanced past the tail
 */
public synchronized int release(int index) {
  this.queue.add(index);
  int advanced = 0;
  // The head-of-line check depends on the queue ordering its elements (e.g. a priority
  // queue); drain while the head equals the expected next index.
  while (!this.queue.isEmpty() && this.queue.peek() == tail) {
    this.queue.remove();
    tail++;
    advanced++;
  }
  if (advanced > 0) {
    semaphore.release(advanced);
  }
  return advanced;
}
}
/**
 * Performs one cleanup pass: clears the scheduled-action handle, unregisters every task
 * whose cleanup time has arrived, then schedules the next pass.
 */
@Override
public void call() {
  scheduledActionRef.set(null);
  long now = worker.now();
  // Drain all tasks that are due as of 'now'.
  for (;;) {
    if (taskQueue.isEmpty() || taskQueue.peek().getCleanupTime() > now) {
      break;
    }
    CleanupTask due = taskQueue.remove();
    registryProxy.unregister(due.getHandler(), due.getCleanupTime());
  }
  scheduleCleanupTask();
}