foundLock.wait(1000); checkForPrimary();
/**
 * Returns the hoplog organizer for this bucket, lazily creating it on first use.
 *
 * <p>Creation is serialized on the bucket advisor's monitor; primaryship is
 * re-verified under that lock because the primary may move while a thread
 * waits to enter the synchronized block.
 *
 * @return the organizer; never {@code null}
 * @throws HDFSIOException if the organizer cannot be created, or if creation
 *         yields {@code null}
 */
public HoplogOrganizer<?> getHoplogOrganizer() throws HDFSIOException {
  HoplogOrganizer<?> organizer = hoplog.get();
  if (organizer == null) {
    synchronized (getBucketAdvisor()) {
      // Primary could have changed while waiting for the advisor lock.
      checkForPrimary();
      try {
        organizer = createHoplogOrganizer();
      } catch (IOException e) {
        // Fix: message previously ended with a dangling "due to "; the cause
        // is chained on the exception, so keep the message self-contained.
        throw new HDFSIOException("Failed to create Hoplog organizer for " + this, e);
      }
      if (organizer == null) {
        throw new HDFSIOException("Hoplog organizer is not available for " + this);
      }
    }
  }
  return organizer;
}
/**
 * Returns this bucket's hoplog organizer, creating it under the bucket
 * advisor's lock if it does not exist yet.
 *
 * @return the organizer; never {@code null}
 * @throws HDFSIOException on creation failure or if no organizer is available
 */
public HoplogOrganizer<?> getHoplogOrganizer() throws HDFSIOException {
  final HoplogOrganizer<?> cached = hoplog.get();
  if (cached != null) {
    return cached;
  }
  synchronized (getBucketAdvisor()) {
    // Primary may have moved while we waited for the advisor lock.
    checkForPrimary();
    final HoplogOrganizer<?> created;
    try {
      created = createHoplogOrganizer();
    } catch (IOException e) {
      throw new HDFSIOException("Failed to create Hoplog organizer due to ", e);
    }
    if (created == null) {
      throw new HDFSIOException("Hoplog organizer is not available for " + this);
    }
    return created;
  }
}
/**
 * Advances the HDFS-side cursor: records whether another key exists and, if
 * so, loads it into {@code currentHdfsKey}; otherwise clears the key and
 * closes the HDFS iterator.
 */
private void advanceHdfs() {
  // Explicit assignment replaces the original assignment-in-condition.
  hdfsNext = hdfs.hasNext();
  if (!hdfsNext) {
    this.currentHdfsKey = null;
    hdfs.close();
    return;
  }
  try {
    this.currentHdfsKey = hdfs.next();
  } catch (IOException e) {
    // A read failure may mean this member lost primaryship; surface that
    // first, otherwise wrap the I/O failure.
    region.checkForPrimary();
    throw new HDFSIOException(
        LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(e.getMessage()), e);
  }
}
private void waitForLock(final LockObject foundLock, final LogWriterI18n logger, final String title) { { { synchronized(foundLock) { try { while (!foundLock.isRemoved()) { this.partitionedRegion.checkReadiness(); foundLock.wait(1000); // primary could be changed by prRebalancing while waiting here checkForPrimary(); } } catch (InterruptedException e) { // TODO this isn't a localizable string and it's being logged at info level logger.info(LocalizedStrings.DEBUG, title+" interrupted while waiting for "+foundLock+":"+e.getMessage()); } if (logger.fineEnabled()) { long waitTime = System.currentTimeMillis()-foundLock.lockedTimeStamp; logger.fine(title+" waited " + waitTime + " ms to lock "+foundLock); } } } } }
/**
 * Reads the persisted event for {@code k} from HDFS and converts it to a
 * region entry.
 *
 * @param key         the region key
 * @param k           serialized key bytes used for the hoplog read
 * @param forceOnHeap whether the resulting entry must live on-heap
 * @return the entry, or {@code null} if nothing is persisted for the key
 * @throws IOException             if the hoplog read fails
 * @throws ForceReattemptException if the bucket must be retried elsewhere
 */
private RegionEntry getFromHDFS(Object key, byte[] k, boolean forceOnHeap)
    throws IOException, ForceReattemptException {
  final SortedHoplogPersistedEvent event;
  try {
    event = (SortedHoplogPersistedEvent) owner.getHoplogOrganizer().read(k);
  } catch (IOException e) {
    // The failure may be due to lost primaryship; check before rethrowing.
    owner.checkForPrimary();
    throw e;
  }
  if (event == null) {
    return null;
  }
  if (logger.isTraceEnabled() || DEBUG) {
    logger.info(LocalizedMessage.create(LocalizedStrings.DEBUG, "HDFS: got from hdfs ev:" + event));
  }
  return getEntryFromEvent(key, event, forceOnHeap, false);
}
/**
 * Advances the HDFS-side cursor to the next key.
 *
 * <p>Note: the {@code if} condition intentionally assigns to the
 * {@code hdfsNext} field ({@code =}, not {@code ==}) while testing whether
 * another element exists. When the iterator is exhausted the current key is
 * cleared and the iterator is closed. If the read fails,
 * {@code region.checkForPrimary()} runs first so a lost-primaryship error
 * surfaces in preference to the wrapped {@code HDFSIOException}.
 */
private void advanceHdfs() { if (hdfsNext = hdfs.hasNext()) { try { this.currentHdfsKey = hdfs.next(); } catch (IOException e) { region.checkForPrimary(); throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(e.getMessage()), e); } } else { this.currentHdfsKey = null; hdfs.close(); } }
/**
 * lock this bucket and, if present, its colocated "parent"
 * @param tryLock - whether to use tryLock (true) or a blocking lock (false)
 * @return true if locks were obtained and are still held
 */
public boolean doLockForPrimary(boolean tryLock) {
  boolean locked = lockPrimaryStateReadLock(tryLock);
  if(!locked) {
    return false;
  }
  // Tracks whether the primary check completed; read by the finally block
  // so the lock acquired above is released on every failure path.
  boolean isPrimary = false;
  try {
    // Throw a PrimaryBucketException if this VM is assumed to be the
    // primary but isn't, preventing update and distribution
    checkForPrimary();
    if (cache.isCacheAtShutdownAll()) {
      throw new CacheClosedException("Cache is shutting down");
    }
    isPrimary = true;
  } finally {
    if(!isPrimary) {
      // Either checkForPrimary() threw or the cache is shutting down:
      // undo the lock before the exception propagates to the caller.
      doUnlockForPrimary();
    }
  }
  return true;
}
/**
 * Fetches the persisted event for {@code k} from the hoplog organizer and
 * turns it into a region entry.
 *
 * @param key         the region key
 * @param k           serialized key bytes for the hoplog read
 * @param forceOnHeap whether the produced entry must be on-heap
 * @return the entry, or {@code null} when no persisted event exists
 * @throws IOException             if the hoplog read fails
 * @throws ForceReattemptException if the bucket must be retried elsewhere
 */
private RegionEntry getFromHDFS(Object key, byte[] k, boolean forceOnHeap)
    throws IOException, ForceReattemptException {
  final SortedHoplogPersistedEvent persisted;
  try {
    persisted = (SortedHoplogPersistedEvent) owner.getHoplogOrganizer().read(k);
  } catch (IOException e) {
    // Surface a primaryship loss before propagating the I/O failure.
    owner.checkForPrimary();
    throw e;
  }
  if (persisted == null) {
    return null;
  }
  if (logger.finerEnabled() || DEBUG) {
    logger.info(LocalizedStrings.DEBUG, "HDFS: got from hdfs ev:" + persisted);
  }
  return getEntryFromEvent(key, persisted, forceOnHeap, false);
}
/**
 * Estimates this bucket's entry count.
 *
 * <p>For HDFS read/write regions the estimate is the number of entries still
 * queued for HDFS plus the hoplog organizer's size estimate; otherwise the
 * exact {@code size()} is returned. Only the primary may answer —
 * {@code checkForPrimary()} enforces that.
 *
 * @return the estimated (or exact) entry count
 */
@Override
public int sizeEstimate() {
  if (!isHDFSReadWriteRegion()) {
    return size();
  }
  try {
    checkForPrimary();
    ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
    if (q == null) {
      return 0;
    }
    int queued = q.getBucketRegionQueue(partitionedRegion, getId()).size();
    int persisted = (int) getHoplogOrganizer().sizeEstimate();
    if (logger.isDebugEnabled()) {
      logger.debug("for bucket " + getName() + " estimateSize returning "
          + (queued + persisted));
    }
    return queued + persisted;
  } catch (ForceReattemptException e) {
    throw new PrimaryBucketException(e.getLocalizedMessage(), e);
  }
}
tmp = hoplogs.scan(); } catch (IOException e) { HDFSEntriesSet.this.region.checkForPrimary(); throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(e.getMessage()), e);
tmp = hoplogs.scan(); } catch (IOException e) { HDFSEntriesSet.this.region.checkForPrimary(); throw new HDFSIOException(LocalizedStrings.HOPLOG_FAILED_TO_READ_HDFS_FILE.toLocalizedString(e.getMessage()), e);
/**
 * Estimates this bucket's entry count.
 *
 * <p>For HDFS read/write regions the estimate combines the entries still
 * queued for HDFS with the hoplog organizer's size estimate; for other
 * regions the exact {@code size()} is returned.
 */
@Override public int sizeEstimate() {
  if (isHDFSReadWriteRegion()) {
    try {
      // Only the primary may answer; checkForPrimary() throws
      // PrimaryBucketException when this member is not the primary.
      checkForPrimary();
      ConcurrentParallelGatewaySenderQueue q = getHDFSQueue();
      if (q == null) return 0;
      // Entries queued for HDFS plus an estimate of what is already
      // persisted in hoplog files.
      int hdfsBucketRegionSize = q.getBucketRegionQueue(
          partitionedRegion, getId()).size();
      int hoplogEstimate = (int) getHoplogOrganizer().sizeEstimate();
      if (getLogWriterI18n().fineEnabled()) {
        getLogWriterI18n().fine("for bucket " + getName() + " estimateSize returning "
            + (hdfsBucketRegionSize + hoplogEstimate));
      }
      return hdfsBucketRegionSize + hoplogEstimate;
    } catch (ForceReattemptException e) {
      throw new PrimaryBucketException(e.getLocalizedMessage(), e);
    }
  }
  return size();
}
if (keyInfo.isCheckPrimary()) { try { br.checkForPrimary(); } catch (PrimaryBucketException pbe) { RuntimeException re = new TransactionDataRebalancedException(