/*
 * Paths to /
 *
 * Returns the chain of directories from startFrom up toward the
 * filesystem root: [startFrom, parent, grandparent, ...]. The
 * root-most single-segment entry is discarded, matching the original
 * chain's trailing dropRight().
 */
const pathsToRoot = (startFrom = process.cwd()) => {
  // Split the parent directory once; the original re-split it on
  // every iteration (accidental O(n^2)).
  const segments = path.dirname(startFrom).split(path.sep);
  const paths = [startFrom];
  // Dropping 0..length-2 trailing segments reproduces the original
  // output; dropping length-1 would yield the bare first segment,
  // which the original chain discarded.
  for (let drop = 0; drop < segments.length - 1; drop += 1) {
    paths.push(segments.slice(0, segments.length - drop).join(path.sep));
  }
  return paths;
};
/*
 * Helper to traverse up directories from a start point
 *
 * Produces every candidate location of `file` walking up from its own
 * directory, e.g. /a/b/f -> [/a/b/f, /a/f, f]. The final entry is
 * relative ("f") because joining the empty root-most prefix with the
 * basename drops the leading separator — preserved from the original.
 */
const traverseUp = (file) => {
  const base = path.basename(file);
  // Split the parent directory once; the original re-split it on
  // every iteration (accidental O(n^2)).
  const segments = path.dirname(file).split(path.sep);
  return segments.map((unused, index) =>
    path.join(segments.slice(0, segments.length - index).join(path.sep), base)
  );
};
/*
 * Handle an incoming trigger message: validate it against `schema`,
 * then fan out one collect-repository publish per search-result page.
 */
async function onTrigger(message) {
  logger.debug('trigger: received', message)

  // validate data
  let data
  try {
    data = joi.attempt(message, schema)
  } catch (err) {
    logger.error('trigger: invalid message', { data: message, error: err.message })
    return
  }

  const { date, query } = data

  // only the first 1000 search results are available
  const pages = Array.from({ length: 10 }, (unused, pageIndex) => pageIndex)
  await Promise.all(pages.map((page) =>
    redis.publishObject(CHANNELS.collect.repository.v1, { date, page, query })
  ))

  logger.debug('trigger: finished', message)
}
// NOTE(review): this fragment is truncated mid-object-literal ("{ date,")
// in the visible chunk. It appears to assert publishObject was called once
// per page (0..9) on the collect.repository channel; left byte-identical —
// complete the literal against the full file before editing.
_.range(10).forEach((page) => { expect(redis.publishObject).to.have.been.calledWith(CHANNELS.collect.repository.v1, { date,
/*
 * Build `dataPoints` samples, each with a value from getValue(80, 100)
 * and a timestamp spaced `timeWindow` apart (oldest first).
 */
function getRandomPercentageValues(timeWindow, dataPoints) {
  const samples = [];
  for (let index = 0; index < dataPoints; index += 1) {
    samples.push({
      value: getValue(80, 100),
      timestamp: getTimeStamp(index * timeWindow),
    });
  }
  return samples;
}
// Seed task plus `count` follow-up tasks; each follow-up defers its
// callback via setImmediate and reports its own index.
[
  function (cb) { return cb(null, 1); },
].concat(
  Array.from({ length: count }, function (unused, i) {
    return function (arg, cb) {
      setImmediate(function () { cb(null, i); });
    };
  })
)
it("should be able to process 1000 items", async () => { const oneThousand = _.range(1000); s3.getObject.mockImplementation(s3Body(oneThousand)); const reader = new S3ItemReader(new AWS.S3(), s3Params, "."); await reduceNexts(reader, 1000, undefined, (next, index) => { expect(next).toEqual({ item: index, cursor: { index: index }, finished: index === _.last(oneThousand) }); }); });
/* * Start a number of containers equals to the size of the pool. * * After creating the containers, the call to the user callback will be * intentionally delayed to give the containers the time to initialize and be * ready */ initialize(size, cb) { if (size <= 0) throw new Error("invalid pool size") let startups = _.range(size).map(() => this._createContainer.bind(this)); log.debug("Creating the container pool", {size: size}) async.parallel(startups, (err, data) => cb(err)) }
// One independent clone of `template` per requested message, each
// stamped with the current bulkId.
Array.from({ length: limit }, function () {
  return _.merge({}, template, { bulkId: options.bulkId });
})
/*
 * Drive `reader.next` sequentially `times` times, invoking `callback`
 * with each result and its index, and threading each returned cursor
 * into the following call.
 *
 * @param {Object} reader - exposes async next(cursor) -> { cursor, ... }
 * @param {number} times - how many reads to perform
 * @param {*} startingCursor - cursor passed to the first next() call
 * @param {Function} callback - (next, index) invoked per result
 */
const reduceNexts = async (reader, times, startingCursor, callback) => {
  // A plain loop replaces the original await-inside-reduce chain, which
  // allocated `times` intermediate promises just to sequence the calls.
  let cursor = startingCursor;
  for (let index = 0; index < times; index += 1) {
    const next = await reader.next(cursor);
    callback(next, index);
    cursor = next.cursor;
  }
};
// One row per second of the media item (1-based), each tagged with its
// 1-based decile and quartile bucket.
for (let sec = 1; sec <= length; sec += 1) {
  batch.push({
    media_id: media_id,
    second: sec,
    title: title,
    name: name,
    length_in_seconds: length,
    decile: Math.floor((sec - 1) / length * 10) + 1,
    quartile: Math.floor((sec - 1) / length * 4) + 1,
    transition: "no",
  });
}
/*
 * Produce `dataPoints` {x, y} samples starting at `from`, spaced
 * `granularity` apart on the x axis, with y from getValue(0, 3000).
 */
function getRandomValues(granularity, dataPoints, from) {
  return Array.from({ length: dataPoints }, (unused, i) => ({
    x: from + i * granularity,
    y: getValue(0, 3000),
  }));
}
/*
 * Helper to locate the "closest" platform yaml
 *
 * Returns the chain of directories from startFrom up toward the
 * filesystem root: [startFrom, parent, grandparent, ...]. The
 * root-most single-segment entry is discarded, matching the original
 * chain's trailing dropRight().
 */
const traverseUp = (startFrom = process.cwd()) => {
  // Split the parent directory once; the original re-split it on
  // every iteration (accidental O(n^2)).
  const segments = path.dirname(startFrom).split(path.sep);
  const paths = [startFrom];
  // Dropping 0..length-2 trailing segments reproduces the original
  // output; dropping length-1 would yield the bare first segment,
  // which the original chain discarded.
  for (let drop = 0; drop < segments.length - 1; drop += 1) {
    paths.push(segments.slice(0, segments.length - drop).join(path.sep));
  }
  return paths;
};
/*
 * Build `dataPoints` samples, each with a value from
 * getValue(1000, 10000000) and a timestamp spaced `timeWindow` apart
 * (oldest first).
 */
function getRandomValues(timeWindow, dataPoints) {
  const points = [];
  let index = 0;
  while (index < dataPoints) {
    points.push({
      value: getValue(1000, 10000000),
      timestamp: getTimeStamp(index * timeWindow),
    });
    index += 1;
  }
  return points;
}
/*
 * Drive `reader.next` sequentially `times` times, invoking `callback`
 * with each result and its index, and threading each returned cursor
 * into the following call.
 *
 * @param {Object} reader - exposes async next(cursor) -> { cursor, ... }
 * @param {number} times - how many reads to perform
 * @param {*} startingCursor - cursor passed to the first next() call
 * @param {Function} callback - (next, index) invoked per result
 */
const reduceNexts = async (reader, times, startingCursor, callback) => {
  // A plain loop replaces the original await-inside-reduce chain, which
  // allocated `times` intermediate promises just to sequence the calls.
  let cursor = startingCursor;
  for (let index = 0; index < times; index += 1) {
    const next = await reader.next(cursor);
    callback(next, index);
    cursor = next.cursor;
  }
};