/**
 * Closes the wrapped {@link java.io.Closeable} key/value source, flushing the
 * pending processed-record statistics first.
 *
 * @throws IOException if closing the underlying source fails
 */
@Override
public void close() throws IOException {
    try {
        // Publish any not-yet-reported record counts before the source goes away.
        notifyProcessStats();
    } finally {
        // Always release the underlying source, even if the stats update fails;
        // otherwise a failing notifyProcessStats() would leak the source.
        keyValueSource.close();
    }
}
/**
 * Returns the current entry of the wrapped source.
 * Pure pass-through; no statistics bookkeeping happens here.
 */
@Override
public Map.Entry<K, V> element() {
    Map.Entry<K, V> current = keyValueSource.element();
    return current;
}
/**
 * Pre-evaluates the configured {@code predicate} against the clusterwide key
 * set (when the source can supply it) and caches every matching key in
 * {@code keys}. Does nothing when no predicate is set or the source cannot
 * enumerate all keys.
 */
protected void prepareKeyPredicate() {
    if (predicate == null || !keyValueSource.isAllKeysSupported()) {
        return;
    }
    for (KeyIn candidate : keyValueSource.getAllKeys()) {
        if (!predicate.evaluate(candidate)) {
            continue;
        }
        if (keys == null) {
            // Lazily allocated: stays null when nothing matches.
            keys = new HashSet<KeyIn>();
        }
        keys.add(candidate);
    }
}
/**
 * <p>
 * If {@link #isAllKeysSupported()} returns true, a call to this method returns
 * all clusterwide available keys. If the keys cannot be precollected (for
 * example because of how the data is partitioned), {@link #isAllKeysSupported()}
 * returns false instead.<br/>
 * </p>
 * <p>
 * If this functionality is not available and {@link Job#onKeys(Object[])},
 * {@link Job#onKeys(Iterable)}, or {@link Job#keyPredicate(KeyPredicate)} is used, a
 * preselection of the interesting partitions / nodes is not available and the
 * overall processing speed may be degraded.
 * </p>
 * <p>
 * If {@link #isAllKeysSupported()} returns false this method throws an
 * {@link java.lang.UnsupportedOperationException}.
 * </p>
 *
 * @return a collection of all clusterwide available keys
 * @throws UnsupportedOperationException if {@link #isAllKeysSupported()} returns false
 */
public final Collection<K> getAllKeys() {
    if (!isAllKeysSupported()) {
        throw new UnsupportedOperationException("getAllKeys is unsupported for this KeyValueSource");
    }
    return getAllKeys0();
}
public static void main(String[] args) throws ExecutionException, InterruptedException { try { HazelcastInstance hz1 = Hazelcast.newHazelcastInstance(); Hazelcast.newHazelcastInstance(); Hazelcast.newHazelcastInstance(); // create a default map IMap<Integer, Integer> m1 = hz1.getMap("default"); for (int i = 0; i < 10000; i++) { m1.put(i, i); } // create a job tracker with default config JobTracker tracker = hz1.getJobTracker("myJobTracker"); // using a built-in source from our IMap. This supplies key value pairs KeyValueSource<Integer, Integer> kvs = KeyValueSource.fromMap(m1); // create a new Job with our source Job<Integer, Integer> job = tracker.newJob(kvs); // configure the job ICompletableFuture<Map<String, Integer>> myMapReduceFuture = job.mapper(new MyMapper()).reducer(new MyReducerFactory()).submit(); Map<String, Integer> result = myMapReduceFuture.get(); System.out.println("The sum of the numbers 1 to 10000 is: " + result.get("all_values")); } finally { Hazelcast.shutdownAll(); } }
/**
 * Runs the mapping phase: feeds every entry whose key passes
 * {@code matches(...)} into the mapper, checking for cancellation after
 * each record and returning immediately once the job is cancelled.
 */
@Override
public void executeMappingPhase(KeyValueSource<KeyIn, ValueIn> keyValueSource,
                                Mapper<KeyIn, ValueIn, KeyOut, ValueOut> mapper,
                                Context<KeyOut, ValueOut> context) {
    while (keyValueSource.hasNext()) {
        // key() must be consulted before element() so the filter sees the
        // record the source is currently positioned on.
        if (matches(keyValueSource.key())) {
            Map.Entry<KeyIn, ValueIn> pair = keyValueSource.element();
            mapper.map(pair.getKey(), pair.getValue(), context);
        }
        if (isCancelled()) {
            return;
        }
    }
}
}
private void processPartitionMapping(KeyValueSource<KeyIn, ValueIn> delegate, int partitionId, boolean partitionProcessor) throws Exception { delegate.reset(); if (delegate.open(nodeEngine)) { DefaultContext<KeyOut, ValueOut> context = supervisor.getOrCreateContext(this); processMapping(partitionId, context, delegate, partitionProcessor); delegate.close(); finalizeMapping(partitionId, context); } else { // Partition assignment might not be ready yet, postpone the processing and retry later postponePartitionProcessing(partitionId); } }
/**
 * Runs the given aggregation over this MultiMap as a map/reduce job on the
 * supplied {@link JobTracker} and blocks until the result is available.
 *
 * @param supplier    extracts/transforms the values to aggregate
 * @param aggregation the aggregation defining mapper, optional combiner,
 *                    reducer, and collator
 * @param jobTracker  the tracker to submit the job to; must not be null
 * @return the aggregated result
 * @throws HazelcastException wrapping any failure (including interruption)
 */
@Override
public <SuppliedValue, Result> Result aggregate(Supplier<K, V, SuppliedValue> supplier,
                                                Aggregation<K, SuppliedValue, Result> aggregation,
                                                JobTracker jobTracker) {
    try {
        isNotNull(jobTracker, "jobTracker");
        KeyValueSource<K, V> keyValueSource = KeyValueSource.fromMultiMap(this);
        Job<K, V> job = jobTracker.newJob(keyValueSource);
        Mapper mapper = aggregation.getMapper(supplier);
        CombinerFactory combinerFactory = aggregation.getCombinerFactory();
        ReducerFactory reducerFactory = aggregation.getReducerFactory();
        Collator collator = aggregation.getCollator();
        MappingJob mappingJob = job.mapper(mapper);
        ReducingSubmittableJob reducingJob;
        // The combiner is optional; wire it in only when the aggregation provides one.
        if (combinerFactory != null) {
            reducingJob = mappingJob.combiner(combinerFactory).reducer(reducerFactory);
        } else {
            reducingJob = mappingJob.reducer(reducerFactory);
        }
        ICompletableFuture<Result> future = reducingJob.submit(collator);
        return future.get();
    } catch (InterruptedException e) {
        // Restore the interrupt status before wrapping so callers can still observe it.
        Thread.currentThread().interrupt();
        throw new HazelcastException(e);
    } catch (Exception e) {
        throw new HazelcastException(e);
    }
}
/**
 * Returns the current key from the wrapped source and counts it towards the
 * periodic processing statistics, publishing them once per interval.
 */
@Override
public K key() {
    K currentKey = keyValueSource.key();
    if (++processedRecords == UPDATE_PROCESSED_RECORDS_INTERVAL) {
        // Interval reached: publish stats and restart the counting window.
        notifyProcessStats();
        processedRecords = 0;
    }
    return currentKey;
}
/**
 * Opens the wrapped source; opening is handled entirely by the delegate.
 */
@Override
public boolean open(NodeEngine nodeEngine) {
    boolean opened = keyValueSource.open(nodeEngine);
    return opened;
}
/**
 * Delegates clusterwide key collection to the wrapped source.
 */
@Override
protected Collection<K> getAllKeys0() {
    Collection<K> allKeys = keyValueSource.getAllKeys();
    return allKeys;
}
/**
 * Reports whether the wrapped source has another record available.
 */
@Override
public boolean hasNext() {
    boolean moreRecords = keyValueSource.hasNext();
    return moreRecords;
}
/**
 * Reports whether the wrapped source can enumerate all clusterwide keys.
 */
@Override
public boolean isAllKeysSupported() {
    boolean supported = keyValueSource.isAllKeysSupported();
    return supported;
}
/**
 * Example job: sums the yearly salaries stored in the "salaries" map via a
 * mapper/combiner/reducer pipeline and prints the collated total.
 */
@Override
public void execute(HazelcastInstance hazelcastInstance) throws Exception {
    JobTracker tracker = hazelcastInstance.getJobTracker("default");
    IMap<String, SalaryYear> salaries = hazelcastInstance.getMap("salaries");
    KeyValueSource<String, SalaryYear> source = KeyValueSource.fromMap(salaries);

    // Wire mapper -> combiner -> reducer and collate the per-node results.
    JobCompletableFuture<Integer> sumFuture = tracker.newJob(source)
            .mapper(new SalarySumMapper())
            .combiner(new SalarySumCombinerFactory())
            .reducer(new SalarySumReducerFactory())
            .submit(new SalarySumCollator());

    System.out.println("Salary sum: " + sumFuture.get());
}
/**
 * Runs the mapping phase: feeds every entry whose key passes
 * {@code matches(...)} into the mapper, checking for cancellation after
 * each record and returning immediately once the job is cancelled.
 */
@Override
public void executeMappingPhase(KeyValueSource<KeyIn, ValueIn> keyValueSource,
                                Mapper<KeyIn, ValueIn, KeyOut, ValueOut> mapper,
                                Context<KeyOut, ValueOut> context) {
    while (keyValueSource.hasNext()) {
        // key() must be consulted before element() so the filter sees the
        // record the source is currently positioned on.
        if (matches(keyValueSource.key())) {
            Map.Entry<KeyIn, ValueIn> pair = keyValueSource.element();
            mapper.map(pair.getKey(), pair.getValue(), context);
        }
        if (isCancelled()) {
            return;
        }
    }
}
}
private void processPartitionMapping(KeyValueSource<KeyIn, ValueIn> delegate, int partitionId, boolean partitionProcessor) throws Exception { delegate.reset(); if (delegate.open(nodeEngine)) { DefaultContext<KeyOut, ValueOut> context = supervisor.getOrCreateContext(this); processMapping(partitionId, context, delegate, partitionProcessor); delegate.close(); finalizeMapping(partitionId, context); } else { // Partition assignment might not be ready yet, postpone the processing and retry later postponePartitionProcessing(partitionId); } }
/**
 * <p>
 * If {@link #isAllKeysSupported()} returns true, a call to this method returns
 * all clusterwide available keys. If the keys cannot be precollected (for
 * example because of how the data is partitioned), {@link #isAllKeysSupported()}
 * returns false instead.<br/>
 * </p>
 * <p>
 * If this functionality is not available and {@link Job#onKeys(Object[])},
 * {@link Job#onKeys(Iterable)}, or {@link Job#keyPredicate(KeyPredicate)} is used, a
 * preselection of the interesting partitions / nodes is not available and the
 * overall processing speed may be degraded.
 * </p>
 * <p>
 * If {@link #isAllKeysSupported()} returns false this method throws an
 * {@link java.lang.UnsupportedOperationException}.
 * </p>
 *
 * @return a collection of all clusterwide available keys
 * @throws UnsupportedOperationException if {@link #isAllKeysSupported()} returns false
 */
public final Collection<K> getAllKeys() {
    if (!isAllKeysSupported()) {
        throw new UnsupportedOperationException("getAllKeys is unsupported for this KeyValueSource");
    }
    return getAllKeys0();
}
/**
 * Runs the given aggregation over this MultiMap as a map/reduce job on the
 * supplied {@link JobTracker} and blocks until the result is available.
 *
 * @param supplier    extracts/transforms the values to aggregate
 * @param aggregation the aggregation defining mapper, optional combiner,
 *                    reducer, and collator
 * @param jobTracker  the tracker to submit the job to; must not be null
 * @return the aggregated result
 * @throws HazelcastException wrapping any failure (including interruption)
 */
@Override
public <SuppliedValue, Result> Result aggregate(Supplier<K, V, SuppliedValue> supplier,
                                                Aggregation<K, SuppliedValue, Result> aggregation,
                                                JobTracker jobTracker) {
    try {
        Preconditions.isNotNull(jobTracker, "jobTracker");
        KeyValueSource<K, V> keyValueSource = KeyValueSource.fromMultiMap(this);
        Job<K, V> job = jobTracker.newJob(keyValueSource);
        Mapper mapper = aggregation.getMapper(supplier);
        CombinerFactory combinerFactory = aggregation.getCombinerFactory();
        ReducerFactory reducerFactory = aggregation.getReducerFactory();
        Collator collator = aggregation.getCollator();
        MappingJob mappingJob = job.mapper(mapper);
        ReducingSubmittableJob reducingJob;
        // The combiner is optional; wire it in only when the aggregation provides one.
        if (combinerFactory != null) {
            reducingJob = mappingJob.combiner(combinerFactory).reducer(reducerFactory);
        } else {
            reducingJob = mappingJob.reducer(reducerFactory);
        }
        ICompletableFuture<Result> future = reducingJob.submit(collator);
        return future.get();
    } catch (InterruptedException e) {
        // Restore the interrupt status before wrapping so callers can still observe it.
        Thread.currentThread().interrupt();
        throw new HazelcastException(e);
    } catch (Exception e) {
        throw new HazelcastException(e);
    }
}
/**
 * Returns the current key from the wrapped source and counts it towards the
 * periodic processing statistics, publishing them once per interval.
 */
@Override
public K key() {
    K currentKey = keyValueSource.key();
    if (++processedRecords == UPDATE_PROCESSED_RECORDS_INTERVAL) {
        // Interval reached: publish stats and restart the counting window.
        notifyProcessStats();
        processedRecords = 0;
    }
    return currentKey;
}
/**
 * Opens the wrapped source; opening is handled entirely by the delegate.
 */
@Override
public boolean open(NodeEngine nodeEngine) {
    boolean opened = keyValueSource.open(nodeEngine);
    return opened;
}