/**
 * Returns a copy of this USK whose suggested edition is set to 0
 * (i.e. with any edition hint cleared).
 */
public USK clearCopy() {
	final long CLEARED_EDITION = 0;
	return copy(CLEARED_EDITION);
}
/**
 * Start fetching this USK subscription: obtain a USKFetcher from the manager,
 * register ourselves as a callback on it, and schedule it.
 *
 * @param manager the USKManager that hands out (possibly shared) fetchers
 * @param context client context used to schedule the fetch
 */
public void start(USKManager manager, ClientContext context) {
	USK usk = origUSK;
	// If we already know of a later edition than the key suggests, fetch from there.
	if(usk.suggestedEdition < edition)
		usk = usk.copy(edition);
	else if(persistent)
		// Copy it to avoid deactivation issues
		usk = usk.copy();
	// realTimeFlag selects the real-time vs. bulk request client for the wrapper.
	fetcher = manager.getFetcher(usk, ctx,
			new USKFetcherWrapper(usk, priority, realTimeFlag ? USKManager.rcRT : USKManager.rcBulk),
			keepLastData, checkStoreOnly);
	// Register before scheduling so we cannot miss an early result.
	fetcher.addCallback(this);
	fetcher.schedule(context); // non-persistent
	if(logMINOR) Logger.minor(this, "Starting "+fetcher+" for "+this);
}
this.uskManager.hintUpdate(this.origUSK.copy(hint).getURI(), context, prio); if(toCancel != null) { for(DBRAttempt a : toCancel)
final USK usk = origUSK.copy(number); for(final USKCallback callback : callbacks) context.mainExecutor.execute(new Runnable() {
final USK usk = origUSK.copy(number); final boolean newSlotToo = newSlot; for(final USKCallback callback : callbacks)
/**
 * Called when a new ARK (address resolution key) edition has been fetched for
 * this peer. Resets the handshake counter, advances the stored ARK edition,
 * and processes the new node reference contained in the fetched data.
 *
 * @param fs the fetched node reference as a SimpleFieldSet
 * @param fetchedEdition the ARK edition that was actually fetched
 */
public void gotARK(SimpleFieldSet fs, long fetchedEdition) {
	try {
		synchronized(this) {
			// A successful ARK fetch means the peer is reachable again; reset backoff.
			handshakeCount = 0;
			// edition +1 because we store the ARK edition that we want to fetch.
			if(myARK.suggestedEdition < fetchedEdition + 1)
				myARK = myARK.copy(fetchedEdition + 1);
		}
		// Parse and apply the new noderef outside the lock.
		processNewNoderef(fs, true, false, false);
	} catch(FSParseException e) {
		Logger.error(this, "Invalid ARK update: " + e, e);
		// This is ok as ARKs are limited to 4K anyway.
		Logger.error(this, "Data was: \n" + fs.toString());
		synchronized(this) {
			// A corrupt ARK means further handshaking is pointless; max out the counter.
			handshakeCount = PeerNode.MAX_HANDSHAKE_COUNT;
		}
	}
}
@Override public void onFoundEdition(long edition, USK key, ClientContext context, boolean wasMetadata, short codec, byte[] data, boolean newKnownGood, boolean newSlotToo) { if(!newKnownGood) { FreenetURI uri = key.copy(edition).getURI(); node.makeClient(PRIORITY_PROGRESS, false, false).prefetch(uri, MINUTES.toMillis(60), FProxyToadlet.MAX_LENGTH_WITH_PROGRESS, null, PRIORITY_PROGRESS); return;
cb.onEncode(pubUSK.copy(edition), this, context); insertSucceeded(context, l); if(freeData) {
long l = lookupLatestSlot(clear); if(lookupKnownGood(clear) < l) toFetch.add(clear.copy(l));
/**
 * Schedule a Fetcher to find us the latest inserted key of the USK.
 * The Fetcher must be insert-mode, in other words, it must know that we want the latest edition,
 * including author errors and so on.
 *
 * @param context client context used to create and schedule the fetcher
 */
private void scheduleFetcher(ClientContext context) {
	synchronized(this) {
		if(logMINOR) Logger.minor(this, "scheduling fetcher for "+pubUSK.getURI());
		// Already done; do not create a fetcher we would never use.
		if(finished) return;
		// Pass a copy of the USK when persistent — presumably to avoid
		// persistence/deactivation issues; TODO confirm.
		fetcher = context.uskManager.getFetcherForInsertDontSchedule(
				persistent ? pubUSK.copy() : pubUSK, parent.priorityClass, this,
				parent.getClient(), context, persistent, ctx.ignoreUSKDatehints);
		if(logMINOR) Logger.minor(this, "scheduled: "+fetcher);
	}
	// NOTE(review): schedule() is deliberately called outside the monitor —
	// presumably to avoid holding our lock while entering the scheduler.
	fetcher.schedule(context);
}
c.onFailure(context); else c.onFoundEdition(ed, origUSK.copy(ed), context, lastWasMetadata, lastCompressionCodec, data, false, false); } catch (Exception e) { Logger.error(this, "An exception occured while dealing with a callback:"+c.toString()+"\n"+e.getMessage(),e);
if(edition > usk.suggestedEdition) { if(logMINOR) Logger.minor(SingleFileFetcher.class, "Redirecting to edition "+edition); cb.onFailure(new FetchException(FetchExceptionMode.PERMANENT_REDIRECT, usk.copy(edition).getURI().addMetaStrings(metaStrings)), null, context); return null; } else if(edition == -1 && context.uskManager.getFetcher(usk.copy(usk.suggestedEdition), ctx, false, requester.persistent(), realTimeFlag, new MyUSKFetcherCallback(requester, cb, usk, metaStrings, ctx, actx, realTimeFlag, maxRetries, recursionLevel, dontTellClientGet, l, requester.persistent(), true), false, context, true); if(isEssential) cb.onFailure(new FetchException(FetchExceptionMode.PERMANENT_REDIRECT, usk.copy(edition).getURI().addMetaStrings(metaStrings)), null, context); return null; context.uskManager.getFetcher(usk.copy(-usk.suggestedEdition), ctx, false, requester.persistent(), realTimeFlag, new MyUSKFetcherCallback(requester, cb, usk, metaStrings, ctx, actx, realTimeFlag, maxRetries, recursionLevel, dontTellClientGet, l, requester.persistent(), false), false, context, false); if(isEssential)
} catch(IOException e) { Logger.error(this, "Corrupt ARK reference? Fetched " + myARK.copy(edition) + " got while parsing: " + e + " from:\n" + ref, e);
/**
 * Called when the underlying single-block insert for this USK edition has
 * succeeded. Verifies the inserted URI matches the expected SSK for this
 * edition, updates the known-good edition, frees the data if requested, and
 * reports the encoded key and final success to the callback.
 *
 * @param state the completed insert state (a SingleBlockInserter)
 * @param context client context, used for the USKManager update and callbacks
 */
@Override
public synchronized void onSuccess(ClientPutState state, ClientContext context) {
	USK newEdition = pubUSK.copy(edition);
	finished = true;
	sbi = null;
	// Cross-check: the URI actually inserted must be the SSK we derived for this edition.
	FreenetURI targetURI = pubUSK.getSSK(edition).getURI();
	FreenetURI realURI = ((SingleBlockInserter)state).getURI(context);
	if(!targetURI.equals(realURI))
		Logger.error(this, "URI should be "+targetURI+" actually is "+realURI);
	else {
		if(logMINOR) Logger.minor(this, "URI should be "+targetURI+" actually is "+realURI);
		// Only mark the edition as known-good when the URI check passed.
		context.uskManager.updateKnownGood(pubUSK, edition, context);
	}
	if(freeData) {
		data.free();
		data = null;
	}
	cb.onEncode(newEdition, this, context);
	insertSucceeded(context, edition);
	// FINISHED!!!! Yay!!!
}
/** * A non-authoritative hint that a specific edition *might* exist. At the moment, * we just fetch the block. We do not fetch the contents, and it is possible that * USKFetcher's are also fetching the block. FIXME would it be more efficient to * pass it along to a USKFetcher? * @param usk * @param edition * @param context */ public void hintUpdate(USK usk, long edition, ClientContext context) { if(edition < lookupLatestSlot(usk)) return; FreenetURI uri = usk.copy(edition).getURI().sskForUSK(); final ClientGetter get = new ClientGetter(new NullClientCallback(rcBulk), uri, new FetchContext(backgroundFetchContext, FetchContext.IDENTICAL_MASK), RequestStarter.UPDATE_PRIORITY_CLASS, new NullBucket(), null, null); try { get.start(context); } catch (FetchException e) { // Ignore } }
} else if(forDiffNodeRef && arkPubKey == null && myARK != null && arkNo > -1) { ark = myARK.copy(arkNo); } else if(forDiffNodeRef && arkPubKey != null && myARK != null && arkNo <= -1) {