/**
 * Returns a copy of this partition with its containing part dropped (the fourth
 * constructor argument is passed as {@code null}); part column, part spec, period,
 * part format and storage tables are carried over unchanged.
 * Partition should not be used as indicative of the class itself.
 *
 * @return a new FactPartition equal to this one but without a containing partition
 */
public FactPartition withoutContaining() {
  return new FactPartition(this.getPartCol(), this.getPartSpec(), this.getPeriod(), null, this
    .getPartFormat(), this.getStorageTables());
}

// NOTE(review): constructor body continues past this chunk — not visible here.
public FactPartition(String partCol, TimePartition timePartition) {
/**
 * Builds the formatted filter clause for the given partition column, optionally
 * qualified with a table name and AND-ed after the containing partition's filter.
 *
 * @param partCol   partition column name to filter on
 * @param tableName optional table qualifier; omitted when null
 * @return filter of the form {@code [containingFilter AND ][tableName.]partCol = 'spec'}
 */
public String getFormattedFilter(String partCol, String tableName) {
  StringBuilder filter = new StringBuilder();
  // Prepend the containing partition's filter, if one exists.
  if (containingPart != null) {
    filter.append(containingPart.getFormattedFilter(tableName)).append(" AND ");
  }
  // Qualify the column with the table name when supplied.
  if (tableName != null) {
    filter.append(tableName).append('.');
  }
  return filter.append(partCol).append(" = '").append(getFormattedPartSpec()).append("'").toString();
}
/**
 * Returns the fact partition immediately following this one: same part column,
 * containing part and storage tables, with the time partition advanced by one step.
 *
 * @return the next FactPartition in time order
 * @throws LensException if the time partition cannot be derived
 */
public FactPartition next() throws LensException {
  TimePartition following = getTimePartition().next();
  return new FactPartition(getPartCol(), following, getContainingPart(), getStorageTables());
}
/**
 * Materializes this partition's spec as a {@link TimePartition} for its update period.
 *
 * @return time partition built from this partition's period and part spec
 * @throws LensException if the part spec cannot be interpreted for the period
 */
public TimePartition getTimePartition() throws LensException {
  return TimePartition.of(this.getPeriod(), this.getPartSpec());
}
// NOTE(review): stitched mid-method fragment — braces do not balance and `part` is
// declared twice, so the lines below come from separate scopes of a larger method.

// Continuous case: add boundary partitions for fromDate and toDate (no containing
// part; format taken from partWhereClauseFormat) against this storage table.
FactPartition part = new FactPartition(partCol, fromDate, maxInterval, null, partWhereClauseFormat);
partitions.add(part);
part.getStorageTables().add(storageTable);
part = new FactPartition(partCol, toDate, maxInterval, null, partWhereClauseFormat);
partitions.add(part);
part.getStorageTables().add(storageTable);
this.participatingUpdatePeriods.add(maxInterval);
log.info("Added continuous fact partition for storage table {}", storageName);

// Existence-driven case (different scope): keep a candidate date's partition only
// when updatePartitionStorage marks it as found.
Date dt = iter.next();
Date nextDt = iter.peekNext();
FactPartition part = new FactPartition(partCol, dt, maxInterval, null, partWhereClauseFormat);
updatePartitionStorage(part);
log.debug("Storage tables containing Partition {} are: {}", part, part.getStorageTables());
if (part.isFound()) {
  log.debug("Adding existing partition {}", part);
  partitions.add(part);
  // Look-ahead on the process-time column: when the process-time partition exists,
  // finer-grained partitions are not required for this range.
  Date pdt = processTimeIter.next();
  Date nextPdt = processTimeIter.peekNext();
  FactPartition processTimePartition = new FactPartition(processTimePartCol, pdt, maxInterval, null,
    partWhereClauseFormat);
  updatePartitionStorage(processTimePartition);
  if (processTimePartition.isFound()) {
    log.debug("Finer parts not required for look-ahead partition :{}", part);
  } else {
    // Otherwise search finer partitions inside each process-time part.
    for (FactPartition pPart : processTimeParts) {
      log.debug("Looking for finer partitions in pPart: {}", pPart);
// NOTE(review): mid-method fragment — braces do not balance, and `first` is assigned
// outside the visible span (presumably to the first element of rangeParts; TODO confirm).
FactPartition first = null;
for (FactPartition part : rangeParts) {
  // Partitions carrying a containing part cannot be collapsed into a BETWEEN range.
  if (part.hasContainingPart()) {
    throw new LensException(LensCubeErrorCode.CANNOT_USE_TIMERANGE_WRITER.getLensErrorInfo(),
      "Partition has containing part");
  } else {
    // All partitions must share one part column ...
    if (!first.getPartCol().equalsIgnoreCase(part.getPartCol())) {
      throw new LensException(LensCubeErrorCode.CANNOT_USE_TIMERANGE_WRITER.getLensErrorInfo(),
        "Part columns are different in partitions");
    // ... and one update period for a single BETWEEN clause to cover them.
    if (!first.getPeriod().equals(part.getPeriod())) {
      throw new LensException(LensCubeErrorCode.CANNOT_USE_TIMERANGE_WRITER.getLensErrorInfo(),
        "Partitions are in different update periods");
// Widen the range by one unit on each side before emitting the bounds.
start = start.previous();
end = end.next();
String partCol = start.getPartCol();
// Optionally substitute the time dimension for the partition column.
if (!cubeQueryContext.shouldReplaceTimeDimWithPart()) {
  partCol = cubeQueryContext.getTimeDimOfPartitionColumn(partCol);
// Continuation of a StringBuilder chain started outside this span.
.append(start.getFormattedPartSpec()).append("' AND '").append(end.getFormattedPartSpec()).append("') ");
/**
 * Verifies that three consecutive daily partitions (yesterday, the day before,
 * and today) collapse into a single consecutive-range where clause.
 */
@Test(dataProvider = "formatDataProvider")
public void testConsecutiveDayParts(DateFormat format) throws LensException, InterruptedException {
  Set<FactPartition> answeringParts = new LinkedHashSet<>();
  // Insertion order is preserved by LinkedHashSet and is intentional: -1, -2, 0.
  for (int offset : new int[]{-1, -2, 0}) {
    answeringParts.add(new FactPartition("dt", getDateWithOffset(DAILY, offset), DAILY, null, format));
  }
  String whereClause =
    getTimerangeWriter().getTimeRangeWhereClause(getMockedCubeContext(false), "test", answeringParts);
  validateConsecutive(whereClause, format);
}
/**
 * Checks the partition timeline cache for the existence of the given fact
 * partition in the named storage table.
 *
 * @param fact             fact table the partition belongs to
 * @param part             partition whose existence is being checked
 * @param storageTableName storage table name to extract the storage from
 * @return true if the timeline cache reports the partition time as existing
 * @throws HiveException on metastore-level errors
 * @throws LensException on lens-side errors
 */
public boolean factPartitionExists(FactTable fact, FactPartition part, String storageTableName)
  throws HiveException, LensException {
  final String storageName = extractStorageName(fact, storageTableName);
  return partitionTimelineCache.partitionTimeExists(fact.getSourceFactName(), storageName,
    part.getPeriod(), part.getPartCol(), part.getPartSpec());
}
/**
 * Builds a Mockito mock of {@link FactPartition} that reports the given update
 * period and the given combined table weight for the supplied weight map.
 */
private FactPartition mockFactPartition(UpdatePeriod mockPeriod, ImmutableMap<String, Double> tableWeights,
  double mockAllTableWeight) {
  final FactPartition mocked = mock(FactPartition.class);
  when(mocked.getPeriod()).thenReturn(mockPeriod);
  when(mocked.getAllTableWeights(tableWeights)).thenReturn(mockAllTableWeight);
  return mocked;
}
/**
 * Produces the partition filter for a time-range query, substituting the time
 * dimension for the partition column when the query context asks for it.
 *
 * @param partition        partition to build the filter for
 * @param cubeQueryContext query context; may be null
 * @param tableName        table qualifier used inside the filter
 * @return the formatted filter string
 */
public static String getTimeRangePartitionFilter(FactPartition partition, CubeQueryContext cubeQueryContext,
  String tableName) {
  String partCol = partition.getPartCol();
  if (cubeQueryContext != null && !cubeQueryContext.shouldReplaceTimeDimWithPart()) {
    String replacedPartCol = cubeQueryContext.getTimeDimOfPartitionColumn(partCol);
    // Use the replaced column only when it actually differs from the partition column.
    if (!partCol.equalsIgnoreCase(replacedPartCol)) {
      return partition.getFormattedFilter(replacedPartCol, tableName);
    }
  }
  return partition.getFormattedFilter(tableName);
}
}
/**
 * Truncates partitions in {@link #rangeToPartitions} such that only partitions
 * belonging to the passed updatePeriod are retained. Surviving partitions have
 * their storage table renamed from storageTable to resolvedName, and ranges left
 * with no partitions are removed entirely.
 *
 * @param updatePeriod update period whose partitions should be kept
 */
private void truncatePartitions(UpdatePeriod updatePeriod) {
  for (Set<FactPartition> parts : rangeToPartitions.values()) {
    // Keep only partitions of the requested update period.
    parts.removeIf(factPartition -> !factPartition.getPeriod().equals(updatePeriod));
    // Point the survivors at the resolved storage table name.
    for (FactPartition factPartition : parts) {
      factPartition.getStorageTables().remove(storageTable);
      factPartition.getStorageTables().add(resolvedName);
    }
  }
  // Removing from the values() view drops the corresponding map entries.
  rangeToPartitions.values().removeIf(Set::isEmpty);
}
/**
 * Convenience overload that builds the formatted filter using this partition's
 * own part column.
 *
 * @param tableName optional table qualifier for the filter
 * @return formatted filter string
 */
public String getFormattedFilter(String tableName) {
  return getFormattedFilter(this.partCol, tableName);
}
/**
 * Builds the raw (unformatted) filter string for this partition: the containing
 * partition's filter (when present) AND-ed with {@code partCol = 'partString'}.
 *
 * @return filter clause for this partition
 */
public String getFilter() {
  StringBuilder filter = new StringBuilder();
  // Containing partition's filter comes first, joined with AND.
  if (containingPart != null) {
    filter.append(containingPart.getFilter()).append(" AND ");
  }
  filter.append(partCol).append(" = '").append(getPartString()).append("'");
  return filter.toString();
}
// NOTE(review): mid-method fragment — the for loop's closing brace lies outside this span.
// Group partitions by their containing part; each value set holds the members with
// the containing part stripped (withoutContaining), kept in sorted (tree) order.
Map<FactPartition, Set<FactPartition>> partitionSetMap = new HashMap<FactPartition, Set<FactPartition>>();
for (FactPartition part : parts) {
  partitionSetMap.computeIfAbsent(part.getContainingPart(), k -> Sets.newTreeSet()).add(part.withoutContaining());
/**
 * Tells whether any partition in the given set has exactly the given part spec.
 *
 * @param parts    partitions to search
 * @param partSpec part spec to look for
 * @return true when a partition with an equal part spec is present
 */
private boolean contains(Set<FactPartition> parts, Date partSpec) {
  return parts.stream().anyMatch(part -> part.getPartSpec().equals(partSpec));
}
}
/**
 * getAllTableWeights should sum the weights of this partition's storage tables,
 * matching both plain and db-qualified table names, and return 0 for an empty map.
 */
@Test
public void testGetAllTableWeights() throws Exception {
  // Empty weight map -> zero total weight.
  assertEquals(fp1.getAllTableWeights(ImmutableMap.<String, Double>of()), 0.0);
  // Plain table names.
  assertEquals(fp1.getAllTableWeights(ImmutableMap.of("st1", 0.2, "st2", 0.3)), 0.5);
  // Database-qualified table names.
  assertEquals(fp1.getAllTableWeights(ImmutableMap.of("db1.st1", 0.4, "db2.st2", 0.5)), 0.9);
}
}
/** The string form of a fact partition is its filter clause (see {@code getFilter()}). */
@Override
public String toString() {
  return getFilter();
}
// NOTE(review): fragment from the middle of compareTo — the enclosing conditionals are
// outside this span; the two returns below belong to different branches. Ordering
// appears to delegate to containingPart when both sides have one — TODO confirm.
return 1;
return this.containingPart.compareTo(o.containingPart);
} else {
if (o.containingPart != null) {
// NOTE(review): stitched mid-method fragment — braces do not balance and `part` is
// declared twice, so the lines below come from separate scopes of a larger method.

// Continuous case: add boundary partitions for fromDate/toDate against this storage table.
FactPartition part = new FactPartition(partCol, fromDate, maxInterval, null, partWhereClauseFormat);
partitions.add(part);
part.getStorageTables().add(storageTable);
part = new FactPartition(partCol, toDate, maxInterval, null, partWhereClauseFormat);
partitions.add(part);
part.getStorageTables().add(storageTable);
this.participatingUpdatePeriods.add(maxInterval);
log.info("Added continuous fact partition for storage table {}", storageName);

// Existence-driven case (different scope): keep a candidate date's partition only
// when updatePartitionStorage marks it as found.
Date dt = iter.next();
Date nextDt = iter.peekNext();
FactPartition part = new FactPartition(partCol, dt, maxInterval, null, partWhereClauseFormat);
updatePartitionStorage(part);
log.debug("Storage tables containing Partition {} are: {}", part, part.getStorageTables());
if (part.isFound()) {
  log.debug("Adding existing partition {}", part);
  partitions.add(part);
  // Look ahead over process-time partitions; if any is missing, finer partitions
  // are searched below.
  while (timeIter.hasNext()){
    Date date = timeIter.next();
    currFactPartition = new FactPartition(processTimePartCol, date, maxInterval, null, partWhereClauseFormat);
    updatePartitionStorage(currFactPartition);
    if (!currFactPartition.isFound()) {
      // NOTE(review): "{}" placeholder with string concatenation — the argument is
      // concatenated instead of parameterized; likely an unintended mix, verify.
      log.debug("Looked ahead process time partition {} is not found : " + currFactPartition);
      allProcessTimePartitionsFound = false;
      for (FactPartition pPart : processTimeParts) {
        log.debug("Looking for finer partitions in pPart: {}", pPart);
// NOTE(review): mid-method fragment (duplicate of an earlier chunk) — braces do not
// balance, and `first` is assigned outside the visible span (presumably the first
// element of rangeParts; TODO confirm).
FactPartition first = null;
for (FactPartition part : rangeParts) {
  // Partitions carrying a containing part cannot be collapsed into a BETWEEN range.
  if (part.hasContainingPart()) {
    throw new LensException(LensCubeErrorCode.CANNOT_USE_TIMERANGE_WRITER.getLensErrorInfo(),
      "Partition has containing part");
  } else {
    // All partitions must share one part column ...
    if (!first.getPartCol().equalsIgnoreCase(part.getPartCol())) {
      throw new LensException(LensCubeErrorCode.CANNOT_USE_TIMERANGE_WRITER.getLensErrorInfo(),
        "Part columns are different in partitions");
    // ... and one update period for a single BETWEEN clause to cover them.
    if (!first.getPeriod().equals(part.getPeriod())) {
      throw new LensException(LensCubeErrorCode.CANNOT_USE_TIMERANGE_WRITER.getLensErrorInfo(),
        "Partitions are in different update periods");
// Widen the range by one unit on each side before emitting the bounds.
start = start.previous();
end = end.next();
String partCol = start.getPartCol();
// Optionally substitute the time dimension for the partition column.
if (!cubeQueryContext.shouldReplaceTimeDimWithPart()) {
  partCol = cubeQueryContext.getTimeDimOfPartitionColumn(partCol);
// Continuation of a StringBuilder chain started outside this span.
.append(start.getFormattedPartSpec()).append("' AND '").append(end.getFormattedPartSpec()).append("') ");