/** * Register a task generator. * * @param pinotTaskGenerator Task generator to be registered */ public void registerTaskGenerator(@Nonnull PinotTaskGenerator pinotTaskGenerator) { // Task type cannot contain the task name separator String taskType = pinotTaskGenerator.getTaskType(); Preconditions.checkArgument(!taskType.contains(PinotHelixTaskResourceManager.TASK_NAME_SEPARATOR), "Task type: %s cannot contain underscore character", taskType); _taskGeneratorRegistry.put(taskType, pinotTaskGenerator); }
/**
 * Reconstructs a Count-Min sketch from its component state (used when deserializing
 * or merging sketches).
 *
 * @param depth number of hash rows in the sketch
 * @param width number of counters per row
 * @param size  total count of items added so far; must be non-negative
 * @param hashA per-row hash coefficients
 * @param table the depth x width counter table
 */
CountMinSketch(int depth, int width, long size, long[] hashA, long[][] table) {
  this.depth = depth;
  this.width = width;
  // Standard CM-sketch accuracy bounds derived from the table dimensions
  this.eps = 2.0 / width;
  this.confidence = 1 - 1 / Math.pow(2, depth);
  this.hashA = hashA;
  this.table = table;
  // Fix: message had a typo ("ZER0" with a digit zero); also use Guava's %s templating
  // so the message string is only built when the check actually fails
  Preconditions.checkState(size >= 0, "The size cannot be smaller than ZERO: %s", size);
  this.size = size;
}
public void remove(TDigest.Group base) { Preconditions.checkState(size > 0, "Cannot remove from empty set"); if (size == 1) { Preconditions.checkArgument(base.compareTo(leaf) == 0, "Element %s not found", base); count = size = 0; leaf = null;
public void remove(TDigest.Group base) { Preconditions.checkState(size > 0, "Cannot remove from empty set"); if (size == 1) { Preconditions.checkArgument(base.compareTo(leaf) == 0, "Element %s not found", base); count = size = 0; leaf = null;
/**
 * Merges several digests into a single digest at the given compression.
 * Digests are combined in batches of roughly a quarter of the input to
 * bound the size of intermediate merges.
 *
 * @param compression compression parameter for the resulting digest
 * @param subData     digests to merge; must contain at least one element
 * @return a new digest containing all centroids of the inputs
 */
public static TDigest merge(double compression, Iterable<TDigest> subData) {
  Preconditions.checkArgument(subData.iterator().hasNext(), "Can't merge 0 digests");
  List<TDigest> digests = Lists.newArrayList(subData);
  // Batch size: a quarter of the inputs, but never less than one
  int batch = Math.max(1, digests.size() / 4);
  TDigest merged = new TDigest(compression, digests.get(0).gen);
  if (digests.get(0).recordAllData) {
    merged.recordAllData();
  }
  for (int start = 0; start < digests.size(); start += batch) {
    if (batch > 1) {
      // Recursively merge each batch before folding it into the result
      int end = Math.min(start + batch, digests.size());
      merged.add(merge(compression, digests.subList(start, end)));
    } else {
      merged.add(digests.get(start));
    }
  }
  return merged;
}
public void remove(TDigest.Group base) { Preconditions.checkState(size > 0, "Cannot remove from empty set"); if (size == 1) { Preconditions.checkArgument(base.compareTo(leaf) == 0, "Element %s not found", base); count = size = 0; leaf = null;
/**
 * Decodes a variable-byte encoded int from the buffer: 7 data bits per byte,
 * least-significant group first, high bit set on every byte except the last.
 *
 * @param buf buffer positioned at the start of an encoded value
 * @return the decoded int
 */
public static int decode(ByteBuffer buf) {
  int b = buf.get();
  int result = b & 0x7f;
  int shift = 7;
  while ((b & 0x80) != 0) {
    // A 32-bit value needs at most 5 bytes, i.e. the final shift is 28
    Preconditions.checkState(shift <= 28);
    b = buf.get();
    result += (b & 0x7f) << shift;
    shift += 7;
  }
  return result;
}
void init(long rows, int threshold, int separate, String p1, String p2) { Preconditions.checkArgument((threshold > 0 || separate == 0) && separate < 100 && separate >= 0 && rows > 0); primaryRows = rows * (100 - separate) / 100;
/**
 * Writes n to the buffer using variable-byte encoding: 7 data bits per byte,
 * least-significant group first, high bit set on every byte except the last.
 *
 * @param buf destination buffer
 * @param n   value to encode (negative values take the maximum 5 bytes)
 */
public static void encode(ByteBuffer buf, int n) {
  int bytesWritten = 0;
  // Keep emitting continuation bytes while more than 7 significant bits remain;
  // a negative n always has its top bit set, hence the n < 0 clause
  while (n < 0 || n > 0x7f) {
    buf.put((byte) (0x80 | (n & 0x7f)));
    n >>>= 7;
    bytesWritten++;
    // A 32-bit int never needs more than 5 bytes in total
    Preconditions.checkState(bytesWritten < 6);
  }
  buf.put((byte) n);
}
// Use Guava's %s templating so the message string is only constructed when the check fails
Preconditions.checkArgument(info != null, "AssertionError: no source info for the column: %s", columnName);
/**
 * @return the first Group in this set
 * @throws IllegalStateException if the set is empty
 */
public TDigest.Group first() {
  Preconditions.checkState(size > 0, "No first element of empty set");
  // Descend the left spine; a node with no left child holds its value in the leaf field
  return left == null ? leaf : left.first();
}
/** * In bucket mapjoin, there are ReduceSinks that mark a small table parent (Reduce Sink are removed from big-table). * In SMB join these are not expected for any parents, either from small or big tables. * @param mapJoinOp */ @SuppressWarnings("unchecked") private void removeSmallTableReduceSink(MapJoinOperator mapJoinOp) { SMBJoinDesc smbJoinDesc = new SMBJoinDesc(mapJoinOp.getConf()); List<Operator<? extends OperatorDesc>> parentOperators = mapJoinOp.getParentOperators(); for (int i = 0; i < parentOperators.size(); i++) { Operator<? extends OperatorDesc> par = parentOperators.get(i); if (i != smbJoinDesc.getPosBigTable()) { if (par instanceof ReduceSinkOperator) { List<Operator<? extends OperatorDesc>> grandParents = par.getParentOperators(); Preconditions.checkArgument(grandParents.size() == 1, "AssertionError: expect # of parents to be 1, but was " + grandParents.size()); Operator<? extends OperatorDesc> grandParent = grandParents.get(0); grandParent.removeChild(par); grandParent.setChildOperators(Utilities.makeList(mapJoinOp)); mapJoinOp.getParentOperators().set(i, grandParent); } } } } }
/**
 * @return the last Group in this set
 * @throws IllegalStateException if the set is empty
 */
public TDigest.Group last() {
  Preconditions.checkState(size > 0, "Cannot find last element of empty set");
  // A singleton node holds its value in the leaf field; otherwise descend the right spine
  return size == 1 ? leaf : right.last();
}
/** * In bucket mapjoin, there are ReduceSinks that mark a small table parent (Reduce Sink are removed from big-table). * In SMB join these are not expected for any parents, either from small or big tables. * @param mapJoinOp */ @SuppressWarnings("unchecked") private void removeSmallTableReduceSink(MapJoinOperator mapJoinOp) { SMBJoinDesc smbJoinDesc = new SMBJoinDesc(mapJoinOp.getConf()); List<Operator<? extends OperatorDesc>> parentOperators = mapJoinOp.getParentOperators(); for (int i = 0; i < parentOperators.size(); i++) { Operator<? extends OperatorDesc> par = parentOperators.get(i); if (i != smbJoinDesc.getPosBigTable()) { if (par instanceof ReduceSinkOperator) { List<Operator<? extends OperatorDesc>> grandParents = par.getParentOperators(); Preconditions.checkArgument(grandParents.size() == 1, "AssertionError: expect # of parents to be 1, but was " + grandParents.size()); Operator<? extends OperatorDesc> grandParent = grandParents.get(0); grandParent.removeChild(par); grandParent.setChildOperators(Utilities.makeList(mapJoinOp)); mapJoinOp.getParentOperators().set(i, grandParent); } } } } }
/**
 * Recursively verifies the structural invariants of this subtree: balance (child depths
 * differ by at most one), cached depth/size/count consistency with the children, and that
 * the split key (leaf) equals the smallest element of the right subtree.
 *
 * @throws IllegalStateException if any invariant is violated
 */
public void checkBalance() {
  if (left != null) {
    Preconditions.checkState(Math.abs(left.depth() - right.depth()) < 2, "Imbalanced");
    int l = left.depth();
    int r = right.depth();
    Preconditions.checkState(depth == Math.max(l, r) + 1, "Depth doesn't match children");
    Preconditions.checkState(size == left.size + right.size, "Sizes don't match children");
    Preconditions.checkState(count == left.count + right.count, "Counts don't match children");
    // Fix: Guava's check* methods substitute %s placeholders only — "%.5d" is not
    // supported (and is an invalid Formatter conversion anyway), so the old message
    // printed the raw specifiers instead of the offending values
    Preconditions.checkState(leaf.compareTo(right.first()) == 0,
        "Split is wrong %s != %s or %s != %s",
        leaf.mean(), right.first().mean(), leaf.id(), right.first().id());
    left.checkBalance();
    right.checkBalance();
  }
}
/**
 * Iterator over a selection column whose values are STRING or BYTES.
 *
 * @param block data block to iterate over; its data type must be STRING or BYTES
 * @throws IllegalArgumentException if the block holds any other data type
 */
public StringSelectionColumnIterator(Block block) {
  _dataType = block.getMetadata().getDataType();
  // Fix: use %s templating instead of eager string concatenation, so the message
  // is only constructed when the argument check actually fails
  Preconditions.checkArgument(
      _dataType.equals(FieldSpec.DataType.STRING) || _dataType.equals(FieldSpec.DataType.BYTES),
      "Illegal data type for StringSelectionColumnIterator: %s", _dataType);
  bvIter = (BlockSingleValIterator) block.getBlockValueSet().iterator();
}
public void addColumnMinMaxValue() throws Exception { Preconditions.checkState(_columnMinMaxValueGeneratorMode != ColumnMinMaxValueGeneratorMode.NONE); Schema schema = _segmentMetadata.getSchema(); // Process time column String timeColumnName = schema.getTimeColumnName(); if (timeColumnName != null) { addColumnMinMaxValueForColumn(timeColumnName); } if (_columnMinMaxValueGeneratorMode == ColumnMinMaxValueGeneratorMode.TIME) { saveMetadata(); return; } // Process dimension columns for (String dimensionColumnName : schema.getDimensionNames()) { addColumnMinMaxValueForColumn(dimensionColumnName); } if (_columnMinMaxValueGeneratorMode == ColumnMinMaxValueGeneratorMode.NON_METRIC) { saveMetadata(); return; } // Process metric columns for (String metricColumnName : schema.getMetricNames()) { addColumnMinMaxValueForColumn(metricColumnName); } saveMetadata(); }
// Add an informative message (lazy %s templating) so a failure explains what went wrong
// instead of throwing a bare IllegalArgumentException
Preconditions.checkArgument(values.size() > 1, "Expected more than one value, got: %s", values.size());
/**
 * Variable-byte encodes n into the buffer: each output byte carries 7 data bits,
 * least-significant group first; the high bit flags that more bytes follow.
 *
 * @param buf destination buffer
 * @param n   value to encode (negative values always emit the maximum 5 bytes)
 */
public static void encode(ByteBuffer buf, int n) {
  int count = 0;
  // More than 7 significant bits remain while n is negative (sign bit set) or above 0x7f
  for (; n < 0 || n > 0x7f; n >>>= 7) {
    buf.put((byte) (0x80 | (0x7f & n)));
    count++;
    // Guard against runaway output: a 32-bit int fits in at most 5 bytes
    Preconditions.checkState(count < 6);
  }
  buf.put((byte) n);
}
/**
 * Combines multiple digests into one at the given compression, merging the inputs
 * in batches (about a quarter of the list at a time) to limit intermediate sizes.
 *
 * @param compression compression parameter for the resulting digest
 * @param subData     digests to merge; must be non-empty
 * @return a new digest covering all input centroids
 */
public static TDigest merge(double compression, Iterable<TDigest> subData) {
  Preconditions.checkArgument(subData.iterator().hasNext(), "Can't merge 0 digests");
  List<TDigest> inputs = Lists.newArrayList(subData);
  // Process at least one digest per step, otherwise a quarter of the inputs at a time
  int step = Math.max(1, inputs.size() / 4);
  TDigest combined = new TDigest(compression, inputs.get(0).gen);
  if (inputs.get(0).recordAllData) {
    combined.recordAllData();
  }
  for (int from = 0; from < inputs.size(); from += step) {
    if (step > 1) {
      // Merge each sub-batch recursively before adding it to the result
      combined.add(merge(compression, inputs.subList(from, Math.min(from + step, inputs.size()))));
    } else {
      combined.add(inputs.get(from));
    }
  }
  return combined;
}