it('updates the metadata object', async () => {
  expect(brokerPool.metadata).toEqual(null)
  await brokerPool.refreshMetadata([topicName])
  expect(brokerPool.metadata).not.toEqual(null)
})
it('retries on LEADER_NOT_AVAILABLE errors', async () => {
  const leaderNotAvailableError = new KafkaJSProtocolError({
    message: 'LEADER_NOT_AVAILABLE',
    type: 'LEADER_NOT_AVAILABLE',
    code: 5,
  })

  brokerPool.findConnectedBroker = jest.fn(() => brokerPool.seedBroker)
  jest.spyOn(brokerPool.seedBroker, 'metadata').mockImplementationOnce(() => {
    throw leaderNotAvailableError
  })

  expect(brokerPool.metadata).toEqual(null)
  await brokerPool.refreshMetadata([topicName])
  expect(brokerPool.metadata).not.toEqual(null)
})
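// For context, a hedged sketch of the retry behavior this test relies on:
// refreshMetadata is assumed to re-invoke the broker call when a retriable
// protocol error such as LEADER_NOT_AVAILABLE is thrown. The helper name and
// retry budget below are illustrative, not the actual implementation.
const retryOnRetriableError = async (fn, maxRetries = 5) => {
  for (let attempt = 0; ; attempt++) {
    try {
      return await fn()
    } catch (e) {
      // Rethrow non-retriable errors, or give up once the budget is spent
      if (!e.retriable || attempt >= maxRetries) throw e
    }
  }
}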
describe('when the log level is NOTHING', () => {
  beforeEach(() => {
    logger = createLogger({ level: LEVELS.NOTHING, logCreator: LoggerConsole })
  })

  it('does not log', () => {
    logger.info('<do not log info>', { extra1: true })
    logger.error('<do not log error>', { extra1: true })
    logger.warn('<do not log warn>', { extra1: true })
    logger.debug('<do not log debug>', { extra1: true })

    expect(console.info).not.toHaveBeenCalled()
    expect(console.error).not.toHaveBeenCalled()
    expect(console.warn).not.toHaveBeenCalled()
    expect(console.log).not.toHaveBeenCalled()
  })
})
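// A hedged sketch of the gate being asserted: every log method is assumed to
// compare its severity against the configured level before delegating to the
// logCreator, so a floor of LEVELS.NOTHING silences all of them. The level
// values and names below are illustrative.
const LEVELS_SKETCH = { NOTHING: 0, ERROR: 1, WARN: 2, INFO: 4, DEBUG: 5 }
const createGatedLogger = (level, write) => ({
  // Each method forwards to the console sink only when its severity is enabled
  error: entry => level >= LEVELS_SKETCH.ERROR && write(console.error, entry),
  warn: entry => level >= LEVELS_SKETCH.WARN && write(console.warn, entry),
  info: entry => level >= LEVELS_SKETCH.INFO && write(console.info, entry),
  debug: entry => level >= LEVELS_SKETCH.DEBUG && write(console.log, entry),
})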
test("should not be able to listen if the server is already listening", async () => { // arrange process.env.CUBEJS_ENABLE_TLS = "false"; let cubeServer = new CubeServer(); // act try { await cubeServer.listen(); await cubeServer.listen(); } catch (err) { // assert expect(err.message).toBe("CubeServer is already listening"); expect(http.createServer).toHaveBeenCalledTimes(1); expect(http.__mockServer.listen).toHaveBeenCalledTimes(1); expect(cubeServer.server).not.toBe(null); } });
it('should "commit" offsets during fetch', async () => { const batch = new Batch(topicName, 0, { partition, highWatermark: 5, messages: [{ offset: 4, key: '1', value: '2' }], }) consumerGroup.fetch.mockImplementationOnce(() => BufferedAsyncIterator([Promise.resolve([batch])]) ) runner.scheduleFetch = jest.fn() await runner.start() await runner.fetch() // Manually fetch for test expect(eachBatch).toHaveBeenCalled() expect(consumerGroup.commitOffsets).toHaveBeenCalled() expect(onCrash).not.toHaveBeenCalled() })
describe('Utils > shuffle', () => {
  it('shuffles', () => {
    const array = Array(500)
      .fill()
      .map((_, i) => i)

    const shuffled = shuffle(array)
    expect(shuffled).not.toEqual(array)
    expect(shuffled).toIncludeSameMembers(array)
  })

  it('returns the same order for single element arrays', () => {
    expect(shuffle([1])).toEqual([1])
  })

  it('throws if it receives a non-array', () => {
    expect(() => shuffle()).toThrowError(TypeError)
    expect(() => shuffle('foo')).toThrowError(TypeError)
    expect(() => shuffle({})).toThrowError(TypeError)
  })
})
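// For reference, a minimal implementation that satisfies these tests (a
// sketch, not necessarily the library's source): a Fisher-Yates shuffle over
// a copy of the input, plus the TypeError guard the last test expects.
const shuffleSketch = array => {
  if (!Array.isArray(array)) {
    throw new TypeError('Expected an array')
  }
  const copy = [...array]
  for (let i = copy.length - 1; i > 0; i--) {
    // Pick a random index in [0, i] and swap it into position i
    const j = Math.floor(Math.random() * (i + 1))
    ;[copy[i], copy[j]] = [copy[j], copy[i]]
  }
  return copy
}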
test('create new partition', async () => {
  admin = createAdmin({ cluster: createCluster(), logger: newLogger() })

  await admin.connect()
  await expect(
    admin.createTopics({
      waitForLeaders: false,
      topics: [{ topic: topicName }],
    })
  ).resolves.toEqual(true)

  await expect(
    admin.createPartitions({
      topicPartitions: [{ topic: topicName, count: 2 }],
    })
  ).resolves.not.toThrow()
})
describe('when the group is rebalancing before the new consumer has joined', () => {
  it('recovers from rebalance in progress and re-joins the group', async () => {
    consumerGroup.sync
      .mockImplementationOnce(() => {
        throw rebalancingError()
      })
      .mockImplementationOnce(() => {
        throw rebalancingError()
      })
      .mockImplementationOnce(() => true)

    runner.scheduleFetch = jest.fn()
    await runner.start()
    expect(runner.scheduleFetch).toHaveBeenCalled()
    expect(onCrash).not.toHaveBeenCalled()
  })
})
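// rebalancingError is assumed to build the REBALANCE_IN_PROGRESS protocol
// error (error code 27 in the Kafka protocol); a sketch of such a helper:
const rebalancingError = () =>
  new KafkaJSProtocolError({
    message: 'The group is rebalancing, so a rejoin is needed',
    type: 'REBALANCE_IN_PROGRESS',
    code: 27,
  })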
describe('#hasConnectedBrokers', () => {
  it('returns true if the seed broker is connected', async () => {
    expect(brokerPool.hasConnectedBrokers()).toEqual(false)
    await brokerPool.connect()
    expect(brokerPool.hasConnectedBrokers()).toEqual(true)
  })

  it('returns true if any of the brokers are connected', async () => {
    expect(brokerPool.hasConnectedBrokers()).toEqual(false)
    await brokerPool.connect()
    await brokerPool.refreshMetadata([topicName])

    const broker = Object.values(brokerPool.brokers).find(broker => !broker.isConnected())
    expect(broker).not.toEqual(brokerPool.seedBroker)

    await broker.connect()
    await brokerPool.seedBroker.disconnect()
    expect(brokerPool.hasConnectedBrokers()).toEqual(true)
  })

  it('returns false when nothing is connected', async () => {
    expect(brokerPool.hasConnectedBrokers()).toEqual(false)
    await brokerPool.connect()
    await brokerPool.disconnect()
    expect(brokerPool.hasConnectedBrokers()).toEqual(false)
  })
})
test('retries if the controller has moved', async () => {
  const cluster = createCluster()
  const broker = { createPartitions: jest.fn(() => true) }

  cluster.refreshMetadata = jest.fn()
  cluster.findControllerBroker = jest
    .fn()
    .mockImplementationOnce(() => {
      throw new KafkaJSProtocolError(createErrorFromCode(NOT_CONTROLLER))
    })
    .mockImplementationOnce(() => broker)

  admin = createAdmin({ cluster, logger: newLogger() })
  await expect(
    admin.createPartitions({
      topicPartitions: [{ topic: topicName, count: 2 }],
    })
  ).resolves.not.toThrow()

  expect(cluster.refreshMetadata).toHaveBeenCalledTimes(2)
  expect(cluster.findControllerBroker).toHaveBeenCalledTimes(2)
  expect(broker.createPartitions).toHaveBeenCalledTimes(1)
})
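// The call pattern the mocks encode, sketched with assumed names: each
// attempt refreshes metadata and re-resolves the controller, so one
// NOT_CONTROLLER failure yields exactly two refreshMetadata and
// findControllerBroker calls and a single successful broker request.
const createPartitionsSketch = async (cluster, request, retries = 1) => {
  for (let attempt = 0; ; attempt++) {
    await cluster.refreshMetadata()
    try {
      const controller = await cluster.findControllerBroker()
      return await controller.createPartitions(request)
    } catch (e) {
      // Retry only when the controller moved and the budget allows it
      if (e.type !== 'NOT_CONTROLLER' || attempt >= retries) throw e
    }
  }
}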
})

await broker.connect()
expect(broker.authenticatedAt).not.toBe(null)

await broker.disconnect()
expect(broker.authenticatedAt).toBe(null)
    await producer.send({ acks: 1, topic: topic2, messages: [message1] })

    expect(eachMessage).not.toHaveBeenCalled()
  })
})
describe('when eachBatchAutoResolve is set to false', () => {
  beforeEach(() => {
    runner = new Runner({
      consumerGroup,
      instrumentationEmitter: new InstrumentationEventEmitter(),
      eachBatchAutoResolve: false,
      eachBatch,
      onCrash,
      logger: newLogger(),
      partitionsConsumedConcurrently: 1,
    })
    runner.scheduleFetch = jest.fn(() => runner.fetch())
  })

  it('does not call resolveOffset with the last offset', async () => {
    const batch = new Batch(topicName, 0, {
      partition,
      highWatermark: 5,
      messages: [{ offset: 4, key: '1', value: '2' }],
    })

    consumerGroup.fetch.mockImplementationOnce(() =>
      BufferedAsyncIterator([Promise.resolve([batch])])
    )
    await runner.start()
    expect(onCrash).not.toHaveBeenCalled()
    expect(consumerGroup.resolveOffset).not.toHaveBeenCalled()
  })
})
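// What the flag controls, sketched (assumed shape): with eachBatchAutoResolve
// enabled, the runner resolves the batch's last offset after the handler
// returns; with it disabled, resolving is left entirely to the handler via
// the resolveOffset callback, which is why resolveOffset is never called here.
const processBatchSketch = async (batch, { eachBatch, eachBatchAutoResolve, resolveOffset }) => {
  await eachBatch({ batch, resolveOffset })
  if (eachBatchAutoResolve) {
    // Only auto-resolve when the consumer opted in
    resolveOffset(batch.lastOffset())
  }
}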
it('calls onCrash for any other errors', async () => {
  const unknownError = new KafkaJSProtocolError(createErrorFromCode(UNKNOWN))
  consumerGroup.join
    .mockImplementationOnce(() => {
      throw unknownError
    })
    .mockImplementationOnce(() => true)

  runner.scheduleFetch = jest.fn()
  await runner.start()

  // scheduleFetch in runner#start is async, and we never wait for it,
  // so we have to wait a bit to give the callback a chance of being executed
  await sleep(100)

  expect(runner.scheduleFetch).not.toHaveBeenCalled()
  expect(onCrash).toHaveBeenCalledWith(unknownError)
})
describe('Consumer > OffsetManager > seek', () => {
  let offsetManager, coordinator

  beforeEach(() => {
    const memberAssignment = {
      topic1: [0, 1, 2, 3],
      topic2: [0, 1, 2, 3, 4, 5],
    }

    coordinator = { offsetCommit: jest.fn() }
    offsetManager = new OffsetManager({ memberAssignment })
    offsetManager.getCoordinator = jest.fn(() => coordinator)
  })

  it('ignores the seek when the consumer is not assigned to the topic', async () => {
    await offsetManager.seek({ topic: 'topic3', partition: 0, offset: '100' })
    expect(offsetManager.getCoordinator).not.toHaveBeenCalled()
    expect(coordinator.offsetCommit).not.toHaveBeenCalled()
  })

  it('ignores the seek when the consumer is not assigned to the partition', async () => {
    await offsetManager.seek({ topic: 'topic1', partition: 4, offset: '101' })
    expect(offsetManager.getCoordinator).not.toHaveBeenCalled()
    expect(coordinator.offsetCommit).not.toHaveBeenCalled()
  })
})
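// The guard these tests pin down, sketched (assumed shape, not the actual
// OffsetManager source): seek bails out before touching the coordinator
// unless the topic-partition is part of the member assignment.
const seekSketch = async function({ topic, partition, offset }) {
  const assignedPartitions = this.memberAssignment[topic]
  // Ignore seeks for topics or partitions this consumer does not own
  if (!assignedPartitions || !assignedPartitions.includes(partition)) {
    return
  }
  const coordinator = await this.getCoordinator()
  await coordinator.offsetCommit({
    // payload shape assumed for illustration
    topics: [{ topic, partitions: [{ partition, offset }] }],
  })
}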