/** {@inheritDoc} */
@Override public UUID getLocalNodeId() {
    // Configuration is expected to be set before the node id is queried.
    assert cfg != null;

    return cfg.getNodeId();
}
/** {@inheritDoc} */
@Override public Integer execute() {
    // The job must run on the node passed as the first task argument.
    assert ignite.configuration().getNodeId().equals(argument(0));

    if (sleep) {
        try {
            // Block "forever" so the job can be cancelled by the test.
            Thread.sleep(Long.MAX_VALUE);
        }
        catch (InterruptedException e) {
            log.info("Job has been cancelled. Caught exception: " + e);

            // Restore the interrupt flag after handling cancellation.
            Thread.currentThread().interrupt();
        }
    }

    // Identity hash of the session class loader lets the test compare loaders.
    return System.identityHashCode(ses.getClassLoader());
} }
/** {@inheritDoc} */
@Override public ClusterGroup forLocal() {
    guard();

    try {
        // Projection over exactly one node: the local one.
        return new ClusterGroupAdapter(ctx, null, Collections.singleton(cfg.getNodeId()));
    }
    finally {
        unguard();
    }
}
// Callable that records the executing node's id (read from the local Ignite
// configuration) into the shared 'nodeId' holder; returns null since no
// result value is needed. The trailing '});' closes an anonymous class and
// call declared outside this view.
@Override public Object call() { nodeId.set(ignite.configuration().getNodeId()); return null; } });
/** {@inheritDoc} */
@Override public UUID localNodeId() {
    ClusterNode node = localNode();

    if (node != null)
        return node.id();

    // Local node is not available yet - fall back to the configured id.
    return config().getNodeId();
}
// Runnable that stores the executing node's id (from the local Ignite
// configuration) into the shared 'nodeId' holder. The trailing '});' closes
// an anonymous class and call declared outside this view.
@Override public void run() { nodeId.set(ignite.configuration().getNodeId()); } });
/**
 * Stores {@link IgniteConfiguration} to a file as XML. The file name is
 * derived from {@code IGNITE_CONFIGURATION_FILE} plus the configuration's
 * node id, so each node gets its own file.
 *
 * @param cfg Ignite configuration to store.
 * @param resetDiscovery Whether to reset discovery-related settings before
 *      storing (forwarded to the underlying {@code storeToFile} overload).
 * @return Name of the file where the configuration was stored.
 * @throws IOException If failed.
 * @throws IgniteCheckedException If storing the configuration failed.
 * @see #readCfgFromFileAndDeleteFile(String)
 */ public static String storeToFile(IgniteConfiguration cfg, boolean resetDiscovery) throws IOException, IgniteCheckedException { String fileName = IGNITE_CONFIGURATION_FILE + cfg.getNodeId(); storeToFile(cfg, fileName, true, resetDiscovery); return fileName; }
// Runnable that stores the executing node's id (from the local Ignite
// configuration) into the shared 'nodeId' holder. The trailing '});' closes
// an anonymous class and call declared outside this view.
@Override public void run() { nodeId.set(ignite.configuration().getNodeId()); } });
/** {@inheritDoc} */
@Override public Integer execute() {
    // The job must run on the node passed as the first task argument.
    assert g.configuration().getNodeId().equals(argument(0));

    log.info("Running job on node: " + g.cluster().localNode().id());

    if (sleep) {
        try {
            // Block "forever" so the job can be cancelled by the test.
            Thread.sleep(Long.MAX_VALUE);
        }
        catch (InterruptedException e) {
            log.info("Job has been cancelled. Caught exception: " + e);

            // Restore the interrupt flag after handling cancellation.
            Thread.currentThread().interrupt();
        }
    }

    // Identity hash of the session class loader lets the test compare loaders.
    return System.identityHashCode(ses.getClassLoader());
} }
// Verifies the job executes on the expected node (task argument 0) and
// records the class loader that loaded this job class into 'jobLdr' for
// later inspection (presumably peer-class-loading checks - confirm against
// the enclosing test). The trailing '}' closes the enclosing class.
/** {@inheritDoc} */ @Override public Serializable execute() { assert ignite.configuration().getNodeId().equals(argument(0)); jobLdr = getClass().getClassLoader(); return null; } }
/** {@inheritDoc} */
@Override public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, Serializable arg) {
    Map<ComputeJob, ClusterNode> mappings = new HashMap<>(subgrid.size());

    for (ClusterNode node : subgrid) {
        // Skip the local (task-originating) node: the job must run remotely.
        if (!node.id().equals(ignite.configuration().getNodeId()))
            mappings.put(new GridP2PRemoteTestJob(null), node);
    }

    return mappings;
}
/** {@inheritDoc} */
@Override public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, @Nullable String arg) {
    for (ClusterNode node : subgrid) {
        if (!node.id().equals(ignite.configuration().getNodeId()))
            continue;

        // Map a single no-op job onto the local node.
        return Collections.singletonMap(new ComputeJobAdapter() {
            @Override public Serializable execute() {
                return null;
            }
        }, node);
    }

    throw new IgniteException("Local node not found.");
}
/** {@inheritDoc} */
@Override public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, String arg) {
    assert ignite != null;

    UUID locNodeId = ignite.configuration().getNodeId();

    assert locNodeId != null;

    // Pick any node other than the local one (the last such node wins).
    ClusterNode remoteNode = null;

    for (ClusterNode node : subgrid) {
        if (!node.id().equals(locNodeId))
            remoteNode = node;
    }

    // Job deliberately throws, so the test can verify exception propagation.
    return Collections.singletonMap(new ComputeJobAdapter(arg) {
        @Override public Serializable execute() {
            throw new IgniteException("Job exception.");
        }
    }, remoteNode);
}
/** {@inheritDoc} */
@Override public boolean apply(UUID nodeId, Object msg) {
    assertNotNull(ignite);
    assertNotNull(ignite.configuration().getNodeId());

    X.println("Received message [nodeId=" + nodeId + ", locNodeId=" + ignite.cluster().localNode().id() + ']');

    // The message must originate from the expected sender and carry MSG.
    assertEquals(sourceNodeId, nodeId);
    assertEquals(MSG, msg);

    // Record delivery on this node and signal the waiting test thread.
    nodes.add(ignite.configuration().getNodeId());

    cnt.incrementAndGet();

    latch.countDown();

    return ret;
} }
/** {@inheritDoc} */
@SuppressWarnings({"ProhibitedExceptionThrown"})
@Override public Map<? extends ComputeJob, ClusterNode> map(List<ClusterNode> subgrid, Serializable arg) {
    if (log.isInfoEnabled())
        log.info("Mapping job [job=" + this + ", grid=" + subgrid + ", arg=" + arg + ']');

    failType = (FailType)arg;

    // Simulate a failure inside map() itself when requested.
    if (failType == FailType.MAP)
        throw new RuntimeException("Failed out of map method.");

    Map<ComputeJob, ClusterNode> map = new HashMap<>(2);

    // The test runs against a single-node topology, which must be the local node.
    assert subgrid.size() == 1;
    assert subgrid.get(0).id().equals(ignite.configuration().getNodeId());

    // One healthy job and one job carrying the requested failure mode.
    map.put(new GridTaskFailedTestJob(null), subgrid.get(0));
    map.put(new GridTaskFailedTestJob(failType), subgrid.get(0));

    return map;
}
/** {@inheritDoc} */ @Override public Serializable execute() { UUID nodeId = ignite.configuration().getNodeId(); if (log.isDebugEnabled()) log.debug("Executing job on node [nodeId=" + nodeId + ", jobId=" + ctx.getJobId() + ']'); try { Thread.sleep(500); } catch (InterruptedException e) { e.printStackTrace(); } // Here we gonna return node id which executed this job. // Hopefully it would be stealing node. return nodeId; } }
// ReduceTestTask job: logs the executing node's id, saves a 'true'
// checkpoint under CP_KEY into the task session, and returns no result.
// The trailing '},' belongs to an enclosing anonymous-class construct
// outside this view.
@Override public Object execute() { X.println("Executing ReduceTestTask job on node " + ignite.configuration().getNodeId()); ses.saveCheckpoint(CP_KEY, true); return null; } },
/**
 * Gives job description in standard log format.
 *
 * @return String with current job representation.
 */
private String getJobInfo() {
    StringBuilder sb = new StringBuilder("[");

    sb.append("taskId=").append(taskSes.getId());
    sb.append(", jobId=").append(cntx.getJobId());
    sb.append(", nodeId=").append(ignite.configuration().getNodeId());
    sb.append("]");

    return sb.toString();
} }
/** {@inheritDoc} */ @Override public UUID execute() throws IgniteException { assert ignite != null; assert jobSes != null; assert argument(0) != null; // Should always fail on task originating node and work on another one. if (ignite.configuration().getNodeId().equals(argument(0))) throw new IgniteException("Expected exception to failover job."); // Use checkpoint on job side. This will happen on remote node. jobSes.loadCheckpoint("test"); return argument(0); } }
// FailoverTestTask job: logs the executing node, reads the CP_KEY checkpoint
// (asserting it exists), and if it is 'true' flips it to 'false' and throws
// ComputeExecutionRejectedException to force a failover; the failed-over run
// then sees 'false' and completes normally. The trailing '},' belongs to an
// enclosing anonymous-class construct outside this view.
@Override public Object execute() { X.println("Executing FailoverTestTask job on node " + ignite.configuration().getNodeId()); Boolean cpVal = ses.loadCheckpoint(CP_KEY); assert cpVal != null; if (cpVal) { ses.saveCheckpoint(CP_KEY, false); throw new ComputeExecutionRejectedException("Failing over the job."); } return null; } },