/**
 * Creates a new {@link KafkaStreamSpec} identical to this one except for the stream properties.
 * @param properties properties of the Kafka stream
 * @return new instance of {@link KafkaStreamSpec}
 */
public KafkaStreamSpec copyWithProperties(Properties properties) {
  KafkaStreamSpec copy = new KafkaStreamSpec(
      getId(), getPhysicalName(), getSystemName(), getPartitionCount(),
      getReplicationFactor(), properties);
  return copy;
}
/**
 * Returns this spec's topic-level configuration as a {@link Properties} object.
 * @return the stream configuration converted from the config map
 */
public Properties getProperties() {
  Properties topicProperties = mapToProperties(getConfig());
  return topicProperties;
}
}
/**
 * Creates a copy of this spec that differs only in its partition count.
 * @param partitionCount partition count for the new spec
 * @return a new {@link KafkaStreamSpec} with the given partition count
 */
@Override
public StreamSpec copyWithPartitionCount(int partitionCount) {
  StreamSpec repartitioned = new KafkaStreamSpec(
      getId(), getPhysicalName(), getSystemName(), partitionCount,
      getReplicationFactor(), getProperties());
  return repartitioned;
}
/**
 * Creates a copy of this spec that differs only in its replication factor.
 * @param replicationFactor replication factor for the new spec
 * @return a new {@link KafkaStreamSpec} with the given replication factor
 */
public KafkaStreamSpec copyWithReplicationFactor(int replicationFactor) {
  KafkaStreamSpec replicated = new KafkaStreamSpec(
      getId(), getPhysicalName(), getSystemName(), getPartitionCount(),
      replicationFactor, getProperties());
  return replicated;
}
// Plain stream spec: identity fields must carry over unchanged into the Kafka spec.
KafkaStreamSpec kafkaSpec = kafkaAdmin.toKafkaSpec(spec);
Assert.assertEquals("id", kafkaSpec.getId());
Assert.assertEquals(topicName, kafkaSpec.getPhysicalName());
Assert.assertEquals(SYSTEM(), kafkaSpec.getSystemName());
Assert.assertEquals(defaultPartitionCount, kafkaSpec.getPartitionCount());
// Coordinator stream: replication factor and topic properties come from the coordinator config.
spec = StreamSpec.createCoordinatorStreamSpec(topicName, SYSTEM());
kafkaSpec = admin.toKafkaSpec(spec);
Assert.assertEquals(coordReplicatonFactor, kafkaSpec.getReplicationFactor());
Assert.assertEquals("123", kafkaSpec.getProperties().getProperty("segment.bytes"));
Assert.assertEquals("compact", kafkaSpec.getProperties().getProperty("cleanup.policy"));
// Changelog stream: replication factor comes from the changelog store config.
spec = StreamSpec.createChangeLogStreamSpec(topicName, SYSTEM(), changeLogPartitionFactor);
kafkaSpec = admin.toKafkaSpec(spec);
Assert.assertEquals(changeLogReplicationFactor, kafkaSpec.getReplicationFactor());
// Intermediate stream: per-stream properties from config are applied to the Kafka spec.
spec = new StreamSpec(interStreamId, topicName, SYSTEM(), defaultPartitionCount, config);
kafkaSpec = admin.toKafkaSpec(spec);
Assert.assertEquals("v1", kafkaSpec.getProperties().getProperty("p1"));
Assert.assertEquals("v2", kafkaSpec.getProperties().getProperty("p2"));
Assert.assertEquals("123", kafkaSpec.getProperties().getProperty("retention.ms"));
Assert.assertEquals(defaultPartitionCount, kafkaSpec.getPartitionCount());
/**
 * Converts a StreamSpec into a KafkaStreamSpec. Special handling for coordinator and changelog stream.
 * @param spec a StreamSpec object
 * @return KafkaStreamSpec object
 */
public KafkaStreamSpec toKafkaSpec(StreamSpec spec) {
  KafkaStreamSpec kafkaSpec;
  if (spec.isChangeLogStream()) {
    String topicName = spec.getPhysicalName();
    // Changelog streams take their replication factor and Kafka properties from the
    // changelog metadata registered for the topic; a missing entry is a validation error.
    ChangelogInfo topicMeta = changelogTopicMetaInformation.get(topicName);
    if (topicMeta == null) {
      throw new StreamValidationException("Unable to find topic information for topic " + topicName);
    }
    kafkaSpec = new KafkaStreamSpec(spec.getId(), topicName, systemName, spec.getPartitionCount(),
        topicMeta.replicationFactor(), topicMeta.kafkaProps());
  } else if (spec.isCoordinatorStream()) {
    // Coordinator stream is forced to a single partition with coordinator-specific
    // replication factor and properties.
    kafkaSpec = new KafkaStreamSpec(spec.getId(), spec.getPhysicalName(), systemName, 1,
        coordinatorStreamReplicationFactor, coordinatorStreamProperties);
  } else if (intermediateStreamProperties.containsKey(spec.getId())) {
    // Intermediate streams get their configured per-stream properties applied on top
    // of the generic conversion.
    kafkaSpec = KafkaStreamSpec.fromSpec(spec).copyWithProperties(intermediateStreamProperties.get(spec.getId()));
  } else {
    // Plain streams: generic conversion only.
    kafkaSpec = KafkaStreamSpec.fromSpec(spec);
  }
  return kafkaSpec;
}
/**
 * Converts any StreamSpec to a KafkaStreamSpec.
 * If the original spec already is a KafkaStreamSpec, it is simply returned.
 *
 * @param originalSpec The StreamSpec instance to convert to KafkaStreamSpec.
 * @return A KafkaStreamSpec instance.
 */
public static KafkaStreamSpec fromSpec(StreamSpec originalSpec) {
  if (originalSpec instanceof KafkaStreamSpec) {
    return (KafkaStreamSpec) originalSpec;
  }
  // Replication factor is read from config, falling back to the Kafka default.
  String configuredReplFactor = originalSpec.getOrDefault(
      KafkaConfig.TOPIC_REPLICATION_FACTOR(), KafkaConfig.TOPIC_DEFAULT_REPLICATION_FACTOR());
  int replicationFactor = Integer.parseInt(configuredReplFactor);
  // Strip config entries Kafka does not accept as topic-level properties before converting.
  Properties topicProperties = mapToProperties(filterUnsupportedProperties(originalSpec.getConfig()));
  return new KafkaStreamSpec(
      originalSpec.getId(),
      originalSpec.getPhysicalName(),
      originalSpec.getSystemName(),
      originalSpec.getPartitionCount(),
      replicationFactor,
      topicProperties);
}
String topicName = kSpec.getPhysicalName();
// NewTopic requires a short replication factor; kSpec stores it as an int.
NewTopic newTopic = new NewTopic(topicName, kSpec.getPartitionCount(), (short) kSpec.getReplicationFactor());
String repl = streamConfig.get(REPL_FACTOR);
// Replication factor is not a valid Kafka topic-level property here; warn and fall back
// to the value carried on the spec, then drop the entry before passing config to Kafka.
LOG.warn("Configuration {}={} for topic={} is invalid. Using kSpec repl factor {}",
    REPL_FACTOR, repl, kSpec.getPhysicalName(), kSpec.getReplicationFactor());
streamConfig.remove(REPL_FACTOR);
@Test public void testUnsupportedConfigStrippedFromProperties() { StreamSpec original = new StreamSpec("dummyId","dummyPhysicalName", "dummySystemName", ImmutableMap.of("segment.bytes", "4", "replication.factor", "7")); // First verify the original assertEquals("7", original.get("replication.factor")); assertEquals("4", original.get("segment.bytes")); Map<String, String> config = original.getConfig(); assertEquals("7", config.get("replication.factor")); assertEquals("4", config.get("segment.bytes")); // Now verify the Kafka spec KafkaStreamSpec spec = KafkaStreamSpec.fromSpec(original); assertNull(spec.get("replication.factor")); assertEquals("4", spec.get("segment.bytes")); Properties kafkaProperties = spec.getProperties(); Map<String, String> kafkaConfig = spec.getConfig(); assertNull(kafkaProperties.get("replication.factor")); assertEquals("4", kafkaProperties.get("segment.bytes")); assertNull(kafkaConfig.get("replication.factor")); assertEquals("4", kafkaConfig.get("segment.bytes")); }
// Helper: creates and validates a changelog stream for the given topic, intercepting
// toKafkaSpec() to assert the internal KafkaStreamSpec carries the configured
// replication factor, partitions, and kafka.* store properties.
public void testCreateChangelogStreamHelp(final String topic) {
  final int PARTITIONS = 12;
  final int REP_FACTOR = 2;
  Map<String, String> map = new HashMap<>();
  map.put(JobConfig.JOB_DEFAULT_SYSTEM(), SYSTEM());
  map.put(String.format("stores.%s.changelog", "fakeStore"), topic);
  map.put(String.format("stores.%s.changelog.replication.factor", "fakeStore"), String.valueOf(REP_FACTOR));
  map.put(String.format("stores.%s.changelog.kafka.segment.bytes", "fakeStore"), "139");
  // Spy so the real toKafkaSpec() runs but its result can be inspected in-flight.
  KafkaSystemAdmin admin = Mockito.spy(createSystemAdmin(SYSTEM(), map));
  StreamSpec spec = StreamSpec.createChangeLogStreamSpec(topic, SYSTEM(), PARTITIONS);
  Mockito.doAnswer(invocationOnMock -> {
    StreamSpec internalSpec = (StreamSpec) invocationOnMock.callRealMethod();
    assertTrue(internalSpec instanceof KafkaStreamSpec); // KafkaStreamSpec is used to carry replication factor
    assertTrue(internalSpec.isChangeLogStream());
    assertEquals(SYSTEM(), internalSpec.getSystemName());
    assertEquals(topic, internalSpec.getPhysicalName());
    assertEquals(REP_FACTOR, ((KafkaStreamSpec) internalSpec).getReplicationFactor());
    assertEquals(PARTITIONS, internalSpec.getPartitionCount());
    // kafka.* store properties should be forwarded with the "kafka." prefix stripped.
    assertEquals("139", ((KafkaStreamSpec) internalSpec).getProperties().getProperty("segment.bytes"));
    // NOTE(review): "compact" is not set in the map above — presumably the changelog
    // cleanup policy default is applied during conversion; confirm in KafkaSystemAdmin.
    assertEquals("compact", ((KafkaStreamSpec) internalSpec).getProperties().getProperty("cleanup.policy"));
    return internalSpec;
  }).when(admin).toKafkaSpec(Mockito.any());
  admin.createStream(spec);
  admin.validateStream(spec);
}
// A partition count of 0 is invalid; the constructor must reject it.
@Test(expected = IllegalArgumentException.class)
public void testInvalidPartitionCount() {
  new KafkaStreamSpec("dummyId","dummyPhysicalName", "dummySystemName", 0);
}
}
/**
 * Make a copy of the spec with a new replication factor.
 * @param replicationFactor replication factor of the Kafka stream
 * @return new instance of {@link KafkaStreamSpec}
 */
public KafkaStreamSpec copyWithReplicationFactor(int replicationFactor) {
  return new KafkaStreamSpec(getId(), getPhysicalName(), getSystemName(), getPartitionCount(), replicationFactor, getProperties());
}
/**
 * Converts a StreamSpec into a KafkaStreamSpec, with special handling for
 * coordinator and changelog streams.
 * @param spec a StreamSpec object
 * @return KafkaStreamSpec object
 */
public KafkaStreamSpec toKafkaSpec(StreamSpec spec) {
  if (spec.isChangeLogStream()) {
    // Changelog streams take replication factor and properties from changelog metadata.
    String topicName = spec.getPhysicalName();
    ChangelogInfo topicMeta = changelogTopicMetaInformation.get(topicName);
    if (topicMeta == null) {
      throw new StreamValidationException("Unable to find topic information for topic " + topicName);
    }
    return new KafkaStreamSpec(spec.getId(), topicName, systemName, spec.getPartitionCount(),
        topicMeta.replicationFactor(), topicMeta.kafkaProps());
  }
  if (spec.isCoordinatorStream()) {
    // Coordinator stream is always a single partition with coordinator-specific settings.
    return new KafkaStreamSpec(spec.getId(), spec.getPhysicalName(), systemName, 1,
        coordinatorStreamReplicationFactor, coordinatorStreamProperties);
  }
  if (intermediateStreamProperties.containsKey(spec.getId())) {
    // Intermediate streams apply their configured per-stream properties.
    return KafkaStreamSpec.fromSpec(spec).copyWithProperties(intermediateStreamProperties.get(spec.getId()));
  }
  return KafkaStreamSpec.fromSpec(spec);
}
/**
 * Converts any StreamSpec to a KafkaStreamSpec.
 * If the original spec already is a KafkaStreamSpec, it is simply returned.
 *
 * @param originalSpec The StreamSpec instance to convert to KafkaStreamSpec.
 * @return A KafkaStreamSpec instance.
 */
public static KafkaStreamSpec fromSpec(StreamSpec originalSpec) {
  if (originalSpec instanceof KafkaStreamSpec) {
    return ((KafkaStreamSpec) originalSpec);
  }
  // Replication factor comes from config, falling back to the Kafka default.
  int replicationFactor = Integer.parseInt(originalSpec.getOrDefault(
      KafkaConfig.TOPIC_REPLICATION_FACTOR(),
      KafkaConfig.TOPIC_DEFAULT_REPLICATION_FACTOR()));
  // Config entries that are not valid Kafka topic-level properties are filtered out
  // before building the Properties object.
  return new KafkaStreamSpec(
      originalSpec.getId(),
      originalSpec.getPhysicalName(),
      originalSpec.getSystemName(),
      originalSpec.getPartitionCount(),
      replicationFactor,
      mapToProperties(filterUnsupportedProperties(originalSpec.getConfig())));
}
// Verifies coordinator-stream creation/validation for a topic name containing dots
// and underscores, and that the internal KafkaStreamSpec carries the configured
// coordinator replication factor and job.coordinator.* topic properties.
@Test
public void testCreateCoordinatorStreamWithSpecialCharsInTopicName() {
  final String STREAM = "test.coordinator_test.Stream";
  Map<String, String> map = new HashMap<>();
  map.put("job.coordinator.segment.bytes", "123");
  map.put("job.coordinator.cleanup.policy", "compact");
  int coordReplicatonFactor = 2;
  map.put(org.apache.samza.config.KafkaConfig.JOB_COORDINATOR_REPLICATION_FACTOR(),
      String.valueOf(coordReplicatonFactor));
  // Spy so the real toKafkaSpec() runs but its result can be inspected in-flight.
  KafkaSystemAdmin admin = Mockito.spy(createSystemAdmin(SYSTEM(), map));
  StreamSpec spec = StreamSpec.createCoordinatorStreamSpec(STREAM, SYSTEM());
  Mockito.doAnswer(invocationOnMock -> {
    StreamSpec internalSpec = (StreamSpec) invocationOnMock.callRealMethod();
    assertTrue(internalSpec instanceof KafkaStreamSpec); // KafkaStreamSpec is used to carry replication factor
    assertTrue(internalSpec.isCoordinatorStream());
    assertEquals(SYSTEM(), internalSpec.getSystemName());
    assertEquals(STREAM, internalSpec.getPhysicalName());
    // Coordinator streams are always single-partition.
    assertEquals(1, internalSpec.getPartitionCount());
    Assert.assertEquals(coordReplicatonFactor, ((KafkaStreamSpec) internalSpec).getReplicationFactor());
    Assert.assertEquals("123", ((KafkaStreamSpec) internalSpec).getProperties().getProperty("segment.bytes"));
    // cleanup policy is overridden in the KafkaAdmin
    Assert.assertEquals("compact", ((KafkaStreamSpec) internalSpec).getProperties().getProperty("cleanup.policy"));
    return internalSpec;
  }).when(admin).toKafkaSpec(Mockito.any());
  admin.createStream(spec);
  admin.validateStream(spec);
}
// Verifies that an exception thrown while creating the checkpoint topic propagates
// out of the checkpoint manager's startup path instead of being swallowed.
@Test(expected = TopicAlreadyMarkedForDeletionException.class)
public void testStartFailsOnTopicCreationErrors() {
  KafkaStreamSpec checkpointSpec = new KafkaStreamSpec(CHECKPOINT_TOPIC, CHECKPOINT_TOPIC, CHECKPOINT_SYSTEM, 1);
  // create an admin that throws an exception during createStream
  SystemAdmin mockAdmin = newAdmin("0", "10");
  doThrow(new TopicAlreadyMarkedForDeletionException("invalid stream")).when(mockAdmin).createStream(checkpointSpec);
  SystemFactory factory = newFactory(mock(SystemProducer.class), mock(SystemConsumer.class), mockAdmin);
  // 'true' enables topic auto-creation, which is what triggers the failing createStream call.
  KafkaCheckpointManager checkpointManager = new KafkaCheckpointManager(checkpointSpec, factory, true,
      mock(Config.class), mock(MetricsRegistry.class), null, new KafkaCheckpointLogKeySerde());
  // expect an exception during startup
  checkpointManager.createResources();
  checkpointManager.start();
}
/**
 * Make a copy of the spec with new properties
 * @param properties properties of the Kafka stream
 * @return new instance of {@link KafkaStreamSpec}
 */
public KafkaStreamSpec copyWithProperties(Properties properties) {
  return new KafkaStreamSpec(getId(), getPhysicalName(), getSystemName(), getPartitionCount(), getReplicationFactor(), properties);
}
/**
 * Make a copy of the spec with a new partition count.
 * @param partitionCount partition count of the Kafka stream
 * @return new instance of {@link KafkaStreamSpec}
 */
@Override
public StreamSpec copyWithPartitionCount(int partitionCount) {
  return new KafkaStreamSpec(getId(), getPhysicalName(), getSystemName(), partitionCount, getReplicationFactor(), getProperties());
}
/**
 * Returns a copy of this spec in which only the replication factor is replaced.
 * @param replicationFactor replication factor for the copied spec
 * @return a new {@link KafkaStreamSpec}
 */
public KafkaStreamSpec copyWithReplicationFactor(int replicationFactor) {
  return new KafkaStreamSpec(
      getId(),
      getPhysicalName(),
      getSystemName(),
      getPartitionCount(),
      replicationFactor,
      getProperties());
}
/**
 * Converts a StreamSpec into a KafkaStreamSpec. Special handling for coordinator and changelog stream.
 * @param spec a StreamSpec object
 * @return KafkaStreamSpec object
 */
public KafkaStreamSpec toKafkaSpec(StreamSpec spec) {
  KafkaStreamSpec kafkaSpec;
  if (spec.isChangeLogStream()) {
    // Changelog: replication factor and topic properties come from registered changelog metadata.
    String topicName = spec.getPhysicalName();
    ChangelogInfo topicMeta = changelogTopicMetaInformation.get(topicName);
    if (topicMeta == null) {
      throw new StreamValidationException("Unable to find topic information for topic " + topicName);
    }
    kafkaSpec = new KafkaStreamSpec(spec.getId(), topicName, systemName, spec.getPartitionCount(),
        topicMeta.replicationFactor(), topicMeta.kafkaProps());
  } else if (spec.isCoordinatorStream()) {
    // Coordinator: single partition, coordinator-specific replication factor and properties.
    kafkaSpec = new KafkaStreamSpec(spec.getId(), spec.getPhysicalName(), systemName, 1,
        coordinatorStreamReplicationFactor, coordinatorStreamProperties);
  } else if (intermediateStreamProperties.containsKey(spec.getId())) {
    // Intermediate: apply configured per-stream properties on top of the generic conversion.
    kafkaSpec = KafkaStreamSpec.fromSpec(spec).copyWithProperties(intermediateStreamProperties.get(spec.getId()));
  } else {
    kafkaSpec = KafkaStreamSpec.fromSpec(spec);
  }
  return kafkaSpec;
}