public synchronized void addKnownMapOutput(String hostName, String hostUrl, TaskAttemptID mapId) { MapHost host = mapLocations.get(hostName); if (host == null) { host = new MapHost(hostName, hostUrl); mapLocations.put(hostName, host); } host.addKnownMap(mapId); // Mark the host as pending if (host.getState() == State.PENDING) { pendingHosts.add(host); notifyAll(); } }
@Test
public void testCopyFromHostConnectionRejected() throws Exception {
  when(connection.getResponseCode())
      .thenReturn(Fetcher.TOO_MANY_REQ_STATUS_CODE);

  Fetcher<Text, Text> fetcher = new FakeFetcher<>(job, id, ss, mm, r, metrics,
      except, key, connection);
  fetcher.copyFromHost(host);

  // FIX: JUnit's assertEquals signature is (message, expected, actual);
  // the original passed the computed count as "expected" and 0 as
  // "actual", so a failure would report the values the wrong way round.
  Assert.assertEquals("No host failure is expected.",
      0, ss.hostFailureCount(host.getHostName()));
  Assert.assertEquals("No fetch failure is expected.",
      0, ss.fetchFailureCount(map1ID));
  Assert.assertEquals("No fetch failure is expected.",
      0, ss.fetchFailureCount(map2ID));

  // A rejected (429) connection should penalize the host and put both
  // map outputs back so they can be re-fetched later.
  verify(ss).penalize(eq(host), anyLong());
  verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map1ID));
  verify(ss).putBackKnownMapOutput(any(MapHost.class), eq(map2ID));
}
/** * Create the map-output-url. This will contain all the map ids * separated by commas * @param host * @param maps * @return * @throws MalformedURLException */ private URL getMapOutputURL(MapHost host, Collection<TaskAttemptID> maps ) throws MalformedURLException { // Get the base url StringBuffer url = new StringBuffer(host.getBaseUrl()); boolean first = true; for (TaskAttemptID mapId : maps) { if (!first) { url.append(","); } url.append(mapId); first = false; } LOG.debug("MapOutput URL for " + host + " -> " + url.toString()); return new URL(url.toString()); }
context.getReduceShuffleBytes(), context.getFailedShuffleCounter()); MapHost host1 = new MapHost("host1", null); TaskAttemptID failedAttemptID = new TaskAttemptID( new org.apache.hadoop.mapred.TaskID( scheduler.hostFailed(host1.getHostName());
/**
 * Release a host after a fetcher is done with it. Unless the host is
 * PENALIZED, it is marked available again and, if it still has work
 * (PENDING), re-queued and waiting fetchers are notified.
 *
 * @param host the host being released
 */
public synchronized void freeHost(MapHost host) {
  // PENALIZED hosts are deliberately not re-queued here.
  boolean penalized = host.getState() == State.PENALIZED;
  if (!penalized && host.markAvailable() == State.PENDING) {
    pendingHosts.add(host);
    notifyAll();
  }
  long elapsed = System.currentTimeMillis() - shuffleStart.get();
  LOG.info(host + " freed by " + Thread.currentThread().getName()
      + " in " + elapsed + "s");
}
public synchronized void copyFailed(TaskAttemptID mapId, MapHost host, boolean readError) { host.penalize(); int failures = 1; if (failureCounts.containsKey(mapId)) { failureCounts.put(mapId, new IntWritable(1)); String hostname = host.getHostName(); if (hostFailures.containsKey(hostname)) { IntWritable x = hostFailures.get(hostname);
/**
 * Block until at least one host has pending map outputs, then pick one
 * uniformly at random, mark it busy and hand it to the calling fetcher.
 *
 * @return a pending host, now marked busy for the caller
 * @throws InterruptedException if interrupted while waiting for work
 */
public synchronized MapHost getHost() throws InterruptedException {
  while (pendingHosts.isEmpty()) {
    wait();
  }

  // Pick a random pending host by advancing the iterator numToPick+1 times.
  MapHost host = null;
  Iterator<MapHost> iter = pendingHosts.iterator();
  int numToPick = random.nextInt(pendingHosts.size());
  for (int i = 0; i <= numToPick; ++i) {
    host = iter.next();
  }

  pendingHosts.remove(host);
  host.markBusy();

  // FIX: corrected log-message typo "Assiging" -> "Assigning"
  // (matches the spelling used by the newer variant of this method).
  LOG.info("Assigning " + host + " with " + host.getNumKnownMapOutputs() +
           " to " + Thread.currentThread().getName());
  shuffleStart.set(System.currentTimeMillis());

  return host;
}
public synchronized List<TaskAttemptID> getMapsForHost(MapHost host) { List<TaskAttemptID> list = host.getAndClearKnownMaps(); Iterator<TaskAttemptID> itr = list.iterator(); List<TaskAttemptID> result = new ArrayList<TaskAttemptID>(); int includedMaps = 0; int totalSize = list.size(); // find the maps that we still need, up to the limit while (itr.hasNext()) { TaskAttemptID id = itr.next(); if (!obsoleteMaps.contains(id) && !finishedMaps[id.getTaskID().getId()]) { result.add(id); if (++includedMaps >= MAX_MAPS_AT_ONCE) { break; } } } // put back the maps left after the limit while (itr.hasNext()) { TaskAttemptID id = itr.next(); if (!obsoleteMaps.contains(id) && !finishedMaps[id.getTaskID().getId()]) { host.addKnownMap(id); } } LOG.info("assigned " + includedMaps + " of " + totalSize + " to " + host + " to " + Thread.currentThread().getName()); return result; }
/**
 * Return a map output to the host's known-map list so it can be fetched
 * again later (used after a fetch attempt did not complete).
 * Note: this only re-adds the map id — it does not move the host onto
 * the pending queue or notify waiting fetchers.
 *
 * @param host  the host the output belongs to
 * @param mapId the map attempt id to put back
 */
public synchronized void putBackKnownMapOutput(MapHost host,
                                               TaskAttemptID mapId) {
  host.addKnownMap(mapId);
}
MapHost host = new MapHost("TestHost", "http://test/url"); ByteArrayInputStream bin = new ByteArrayInputStream(bout.toByteArray()); try {
public synchronized void copyFailed(TaskAttemptID mapId, MapHost host, boolean readError, boolean connectExcpt) { host.penalize(); int failures = 1; if (failureCounts.containsKey(mapId)) { failureCounts.put(mapId, new IntWritable(1)); String hostname = host.getHostName(); IntWritable hostFailedNum = hostFailures.get(hostname);
/**
 * Release a host once a fetcher has finished with it. Non-penalized
 * hosts are marked available; if the host still has pending work it is
 * re-queued and blocked fetchers are woken up.
 *
 * @param host the host being released
 */
public synchronized void freeHost(MapHost host) {
  // Short-circuit keeps markAvailable() from running on PENALIZED hosts,
  // exactly as the nested-if form did.
  if (host.getState() != State.PENALIZED
      && host.markAvailable() == State.PENDING) {
    pendingHosts.add(host);
    notifyAll();
  }
  long elapsedMs = Time.monotonicNow() - SHUFFLE_START.get();
  LOG.info(host + " freed by " + Thread.currentThread().getName()
      + " in " + elapsedMs + "ms");
}
/**
 * Wait until some host has pending map outputs, then select one
 * uniformly at random, mark it busy and return it to the caller.
 *
 * @return a randomly chosen pending host, now marked busy
 * @throws InterruptedException if interrupted while waiting
 */
public synchronized MapHost getHost() throws InterruptedException {
  while (pendingHosts.isEmpty()) {
    wait();
  }

  // Advance the iterator a random number of steps to pick uniformly.
  int skip = random.nextInt(pendingHosts.size());
  Iterator<MapHost> it = pendingHosts.iterator();
  MapHost chosen = it.next();
  while (skip-- > 0) {
    chosen = it.next();
  }

  pendingHosts.remove(chosen);
  chosen.markBusy();
  LOG.info("Assigning " + chosen + " with " + chosen.getNumKnownMapOutputs()
      + " to " + Thread.currentThread().getName());
  shuffleStart.set(Time.monotonicNow());
  return chosen;
}
public synchronized List<TaskAttemptID> getMapsForHost(MapHost host) { List<TaskAttemptID> list = host.getAndClearKnownMaps(); Iterator<TaskAttemptID> itr = list.iterator(); List<TaskAttemptID> result = new ArrayList<TaskAttemptID>(); int includedMaps = 0; int totalSize = list.size(); // find the maps that we still need, up to the limit while (itr.hasNext()) { TaskAttemptID id = itr.next(); if (!obsoleteMaps.contains(id) && !finishedMaps[id.getTaskID().getId()]) { result.add(id); if (++includedMaps >= MAX_MAPS_AT_ONCE) { break; } } } // put back the maps left after the limit while (itr.hasNext()) { TaskAttemptID id = itr.next(); if (!obsoleteMaps.contains(id) && !finishedMaps[id.getTaskID().getId()]) { host.addKnownMap(id); } } LOG.debug("assigned " + includedMaps + " of " + totalSize + " to " + host + " to " + Thread.currentThread().getName()); return result; }
/**
 * Return a map output to the host's known-map list so a later fetch can
 * pick it up again (e.g. after an incomplete or failed fetch attempt).
 * Only the map id is re-added; the host is not re-queued and no waiting
 * fetchers are notified here.
 *
 * @param host  the host the output belongs to
 * @param mapId the map attempt id to put back
 */
public synchronized void putBackKnownMapOutput(MapHost host,
                                               TaskAttemptID mapId) {
  host.addKnownMap(mapId);
}
context.getReduceShuffleBytes(), context.getFailedShuffleCounter()); MapHost host1 = new MapHost("host1", null); TaskAttemptID succeedAttempt1ID = new TaskAttemptID( new org.apache.hadoop.mapred.TaskID(
/**
 * Record that {@code hostName} (fetchable via {@code hostUrl}) serves the
 * output of map attempt {@code mapId}. Creates the MapHost entry on first
 * sight of the host; if the host becomes PENDING it is queued and any
 * fetcher blocked waiting for work is woken up.
 *
 * @param hostName host serving the map output
 * @param hostUrl  base URL used to fetch from the host
 * @param mapId    the map attempt whose output is available there
 */
public synchronized void addKnownMapOutput(String hostName,
                                           String hostUrl,
                                           TaskAttemptID mapId) {
  MapHost host = mapLocations.get(hostName);
  if (host == null) {
    host = new MapHost(hostName, hostUrl);
    mapLocations.put(hostName, host);
  }
  host.addKnownMap(mapId);

  // Mark the host as pending
  if (host.getState() == State.PENDING) {
    pendingHosts.add(host);
    // wake fetchers blocked in getHost() waiting for a pending host
    notifyAll();
  }
}
/** check if hit timeout of retry, if not, throw an exception and start a * new round of retry.*/ private void checkTimeoutOrRetry(MapHost host, IOException ioe) throws IOException { // First time to retry. long currentTime = Time.monotonicNow(); if (retryStartTime == 0) { retryStartTime = currentTime; } // Retry is not timeout, let's do retry with throwing an exception. if (currentTime - retryStartTime < this.fetchRetryTimeout) { LOG.warn("Shuffle output from " + host.getHostName() + " failed, retry it.", ioe); throw ioe; } else { // timeout, prepare to be failed. LOG.warn("Timeout for copying MapOutput with retry on host " + host + "after " + fetchRetryTimeout + " milliseconds."); } }
public synchronized void copyFailed(TaskAttemptID mapId, MapHost host, boolean readError, boolean connectExcpt) { host.penalize(); int failures = 1; if (failureCounts.containsKey(mapId)) { failureCounts.put(mapId, new IntWritable(1)); String hostname = host.getHostName(); IntWritable hostFailedNum = hostFailures.get(hostname);
/**
 * Release a host once a fetcher has finished with it. Non-penalized
 * hosts are marked available again and, if they still have pending map
 * outputs, re-queued with waiting fetchers notified.
 *
 * @param host the host being released
 */
public synchronized void freeHost(MapHost host) {
  // PENALIZED hosts are skipped here — presumably re-queued elsewhere
  // when the penalty expires; confirm in the penalty-handling code.
  if (host.getState() != State.PENALIZED) {
    if (host.markAvailable() == State.PENDING) {
      pendingHosts.add(host);
      notifyAll();
    }
  }
  LOG.info(host + " freed by " + Thread.currentThread().getName() + " in " +
           (Time.monotonicNow() - shuffleStart.get()) + "ms");
}