/**
 * Stores {@code value} under {@code key} in the default {@link Scope#USER} scope.
 *
 * @param key   token key to write
 * @param value value to associate with the key
 */
@Override
public synchronized void put(String key, Value value) {
  put(key, value, Scope.USER);
}
// NOTE(review): byte-identical duplicate of the preceding put(String, Value) overload.
// Two identical method declarations in one class cannot compile — this looks like
// chunk/concatenation duplication; verify against the original file and remove one copy.
@Override public synchronized void put(String key, Value value) { put(key, value, Scope.USER); }
/**
 * Convenience overload: wraps the string in a {@link Value} and stores it via the
 * {@code put(String, Value)} overload (default scope).
 *
 * @param key   token key to write
 * @param value string value to wrap and store
 */
@Override
public synchronized void put(String key, String value) {
  put(key, Value.of(value));
}
// NOTE(review): byte-identical duplicate of the preceding put(String, String) overload.
// Duplicate method declarations in one class cannot compile — likely chunk duplication;
// verify against the original file and remove one copy.
@Override public synchronized void put(String key, String value) { put(key, Value.of(value)); }
public synchronized void setMapReduceCounters(Counters counters) { ImmutableMap.Builder<String, Map<String, Long>> countersBuilder = ImmutableMap.builder(); for (CounterGroup group : counters) { ImmutableMap.Builder<String, Long> groupBuilder = ImmutableMap.builder(); for (Counter counter : group) { groupBuilder.put(counter.getName(), counter.getValue()); // Also put the counter to system scope. put(group.getName() + "." + counter.getName(), Value.of(counter.getValue()), WorkflowToken.Scope.SYSTEM); } countersBuilder.put(group.getName(), groupBuilder.build()); } this.mapReduceCounters = countersBuilder.build(); }
// NOTE(review): byte-identical duplicate of the preceding setMapReduceCounters(Counters).
// Duplicate method declarations in one class cannot compile — likely chunk duplication;
// verify against the original file and remove one copy.
public synchronized void setMapReduceCounters(Counters counters) { ImmutableMap.Builder<String, Map<String, Long>> countersBuilder = ImmutableMap.builder(); for (CounterGroup group : counters) { ImmutableMap.Builder<String, Long> groupBuilder = ImmutableMap.builder(); for (Counter counter : group) { groupBuilder.put(counter.getName(), counter.getValue()); // Also put the counter to system scope.
put(group.getName() + "." + counter.getName(), Value.of(counter.getValue()), WorkflowToken.Scope.SYSTEM); } countersBuilder.put(group.getName(), groupBuilder.build()); } this.mapReduceCounters = countersBuilder.build(); }
@Test public void testRepeatedPutAtSameNode() { BasicWorkflowToken token = new BasicWorkflowToken(1); token.setCurrentNode("node1"); // after this put, size would be 512KB token.put(generateDataInKb(1), generateDataInKb(511)); // after another successful put at a different node, size would be 1024KB. Workflow token would be at capacity. token.setCurrentNode("node2"); token.put(generateDataInKb(1), generateDataInKb(511)); // should succeed because the entry at key k1 should be replaced token.put(generateDataInKb(1), generateDataInKb(511)); // now should fail, because even though we're updating node2's value, we're adding an extra KB try { token.put(generateDataInKb(1), generateDataInKb(512)); Assert.fail("Workflow token update at existing key should fail because token size should have exceeded limit."); } catch (IllegalStateException e) { assertSizeExceededErrorMessage(e); } }
/**
 * A token constructed with a zero size limit rejects every update with
 * {@link IllegalStateException}.
 */
@Test
public void testNonUpdatableWorkflowToken() {
  BasicWorkflowToken zeroCapToken = new BasicWorkflowToken(0);
  zeroCapToken.setCurrentNode("node");
  try {
    zeroCapToken.put("a", "b");
    Assert.fail("Workflow token update should fail because the token is non-updatable.");
  } catch (IllegalStateException e) {
    assertSizeExceededErrorMessage(e);
  }
}
@Test public void testMergeLargeWorkflowToken() { BasicWorkflowToken token1 = new BasicWorkflowToken(1); token1.setCurrentNode("node1"); // total size of token1 after this operation is 512KB token1.put(generateDataInKb(1), generateDataInKb(511)); // add an additional 2 bytes, just so we have size > max size (and not equal to max size) after merge token1.put("k", "v"); BasicWorkflowToken token2 = new BasicWorkflowToken(1); token2.setCurrentNode("node1"); // total size of token2 after this operation is 512KB token2.put(generateDataInKb(1), generateDataInKb(511)); // should succeed, because token1 already contains the NodeValue being merged token1.mergeToken(token2); // set a different node in token2 and add the same data token2.setCurrentNode("node2"); // token2 is at capacity after the following operation token2.put(generateDataInKb(1), generateDataInKb(511)); // merging should now fail, because token1 now does not contain the NodeValue being merged try { token1.mergeToken(token2); Assert.fail("Workflow token merging should fail because token size should have exceeded limit."); } catch (IllegalStateException e) { assertSizeExceededErrorMessage(e); } }
/**
 * A single record larger than the token's size limit is rejected with
 * {@link IllegalStateException}.
 */
@Test
public void testUpdateWithLargeRecord() {
  BasicWorkflowToken wfToken = new BasicWorkflowToken(1);
  wfToken.setCurrentNode("node");
  try {
    wfToken.put("k", generateDataInKb(1024));
    Assert.fail("Workflow token update should fail because token size should have exceeded limit.");
  } catch (IllegalStateException e) {
    assertSizeExceededErrorMessage(e);
  }
}
// NOTE(review): stranded statement — the enclosing method is not visible in this chunk,
// so its placement cannot be confirmed here; verify against the original file.
token.put("key", "value");
/**
 * Round-trips a {@link TriggeringScheduleInfo} (program-status, partition and time trigger
 * infos) through GSON and verifies the JSON is stable and key fields survive deserialization.
 */
@Test
public void testSerDeserScheduleInfo() {
  BasicWorkflowToken token = new BasicWorkflowToken(1);
  token.setCurrentNode("node");
  token.put("tokenKey", "tokenVal");

  List<TriggerInfo> triggerInfos = ImmutableList.of(
      new DefaultProgramStatusTriggerInfo("ns", Specifications.from(new WorkflowAppWithFork()),
          ProgramType.WORKFLOW, WorkflowAppWithFork.WorkflowWithFork.class.getSimpleName(),
          RunIds.generate(), ProgramStatus.COMPLETED, token, Collections.emptyMap()),
      new DefaultPartitionTriggerInfo("ns", "ds", 10, 11),
      new DefaultTimeTriggerInfo("1 * * * *", 0L));
  TriggeringScheduleInfo scheduleInfo =
      new DefaultTriggeringScheduleInfo("schedule", "description", triggerInfos,
          ImmutableMap.of("key", "value"));

  String scheduleInfoJson = GSON.toJson(scheduleInfo);
  TriggeringScheduleInfo roundTripped = GSON.fromJson(scheduleInfoJson, TriggeringScheduleInfo.class);
  // Re-serializing the deserialized copy must reproduce the original JSON exactly.
  Assert.assertEquals(scheduleInfoJson, GSON.toJson(roundTripped));

  DefaultProgramStatusTriggerInfo expectedInfo =
      (DefaultProgramStatusTriggerInfo) triggerInfos.get(0);
  DefaultProgramStatusTriggerInfo actualInfo =
      (DefaultProgramStatusTriggerInfo) roundTripped.getTriggerInfos().get(0);
  Assert.assertEquals(expectedInfo.getApplicationSpecification().getName(),
      actualInfo.getApplicationSpecification().getName());
  Assert.assertEquals(expectedInfo.getWorkflowToken().getAll(),
      actualInfo.getWorkflowToken().getAll());
} }