/**
 * Get the name prefix of the storage.
 *
 * @return this storage's name followed by the storage separator
 */
public String getPrefix() {
  // Delegate to the static variant with this storage's own name.
  final String storageName = getName();
  return getPrefix(storageName);
}
/**
 * Get the name prefix of the storage.
 *
 * @return Name followed by storage separator
 */
// Delegates to the static prefix builder using this storage's own name.
public String getPrefix() {
  return getPrefix(getName());
}
/**
 * Get the storage table name for a fact or dimension table on the given storage.
 *
 * <p>Combines the storage's name prefix with the fact/dim table name via
 * {@code getStorageTableName}.
 *
 * @param factName    name of the fact (or dimension) table
 * @param storageName name of the storage
 * @return the storage table name for the pair
 */
public static String getFactOrDimtableStorageTableName(String factName, String storageName) {
  final String storagePrefix = Storage.getPrefix(storageName);
  return getStorageTableName(factName, storagePrefix);
}
/**
 * Get the storage table name for a fact or dimension table on the given storage.
 *
 * <p>Combines the storage's name prefix with the fact/dim table name via
 * {@code getStorageTableName}.
 *
 * @param factName    name of the fact (or dimension) table
 * @param storageName name of the storage
 * @return the storage table name for the pair
 */
public static String getFactOrDimtableStorageTableName(String factName, String storageName) {
  return getStorageTableName(factName, Storage.getPrefix(storageName));
}
/**
 * Get all physical storage table names that can hold data of the given fact
 * (or dimension table) on the given storage.
 *
 * <p>For a fact table, each update period may map to a distinct storage-table
 * prefix, so all prefixes registered for {@code storageName} are collected.
 * For a dimension table there is exactly one storage table.
 *
 * @param sessionHandle session handle used to obtain the metastore client
 * @param fact          fact or dimension table name
 * @param storageName   storage name
 * @return set of storage table names for the fact/dim table on the storage
 * @throws LensException on metastore access failure
 */
private Set<String> getAllTablesForStorage(LensSessionHandle sessionHandle, String fact, String storageName)
  throws LensException {
  Set<String> storageTableNames = new HashSet<>();
  if (getClient(sessionHandle).isFactTable(fact)) {
    CubeFactTable cft = getClient(sessionHandle).getCubeFactTable(fact);
    // One storage-table prefix per update period; the period itself is not needed here.
    // (Fix: original iterated raw Map.Entry and cast entry.getValue() — use the typed
    // value view instead, no unchecked cast.)
    Map<UpdatePeriod, String> storageMap = cft.getStoragePrefixUpdatePeriodMap().get(storageName);
    for (String tablePrefix : storageMap.values()) {
      storageTableNames.add(MetastoreUtil.getStorageTableName(fact, Storage.getPrefix(tablePrefix)));
    }
  } else {
    storageTableNames.add(MetastoreUtil.getFactOrDimtableStorageTableName(fact, storageName));
  }
  return storageTableNames;
}
/** * get all timelines for all update periods and partition columns for the given fact-storage pair. If already loaded * in memory, it'll return that. If not, it'll first try to load it from table properties. If not found in table * properties, it'll get all partitions, compute timelines in memory, write back all loads timelines to table * properties for further usage and return them. * * @param fact fact * @param storage storage * @return all timelines for fact-storage pair. Load from properties/all partitions if needed. * @throws HiveException * @throws LensException */ public TreeMap<UpdatePeriod, CaseInsensitiveStringHashMap<PartitionTimeline>> get(String fact, String storage) throws HiveException, LensException { // SUSPEND CHECKSTYLE CHECK DoubleCheckedLockingCheck // Unique key for the timeline cache, based on storage and fact. String timeLineKey = (Storage.getPrefix(storage)+ fact).toLowerCase(); synchronized (this) { if (get(timeLineKey) == null) { loadTimeLines(fact, storage, timeLineKey); } log.debug("timeline for {} is: {}", storage, get(timeLineKey)); // return the final value from memory return get(timeLineKey); // RESUME CHECKSTYLE CHECK DoubleCheckedLockingCheck } }
/**
 * Check whether a latest partition exists for the given fact/dim table on the given storage.
 *
 * @param factOrDimTblName fact or dimension table name
 * @param storageName      storage name
 * @param latestPartCol    partition column to check for (used for facts)
 * @return true if a latest partition exists
 * @throws HiveException
 * @throws LensException
 */
boolean latestPartitionExists(String factOrDimTblName, String storageName, String latestPartCol)
  throws HiveException, LensException {
  final String storageTableName =
    MetastoreUtil.getStorageTableName(factOrDimTblName, Storage.getPrefix(storageName));
  // Dimension tables track a dedicated "latest" partition on the storage table itself.
  if (isDimensionTable(factOrDimTblName)) {
    return dimTableLatestPartitionExists(storageTableName);
  }
  // Facts: any partition for the column implies a latest one.
  return !partitionTimelineCache.noPartitionsExist(factOrDimTblName, storageName, latestPartCol);
}
/**
 * Get all timelines for all update periods and partition columns for the given fact-storage pair.
 * If already loaded in memory, it'll return that. If not, it'll first try to load it from table
 * properties. If not found in table properties, it'll get all partitions, compute timelines in
 * memory, write back all loaded timelines to table properties for further usage and return them.
 *
 * @param fact    fact
 * @param storage storage
 * @return all timelines for fact-storage pair. Load from properties/all partitions if needed.
 * @throws HiveException
 * @throws LensException
 */
public TreeMap<UpdatePeriod, CaseInsensitiveStringHashMap<PartitionTimeline>> get(String fact, String storage)
  throws HiveException, LensException {
  // SUSPEND CHECKSTYLE CHECK DoubleCheckedLockingCheck
  // Unique key for the timeline cache, based on storage and fact (lower-cased).
  String timeLineKey = (Storage.getPrefix(storage) + fact).toLowerCase();
  synchronized (this) {
    // Load on first access; subsequent lookups hit the in-memory cache.
    if (get(timeLineKey) == null) {
      loadTimeLines(fact, storage, timeLineKey);
    }
    log.debug("timeline for {} is: {}", storage, get(timeLineKey));
    // return the final value from memory
    return get(timeLineKey);
    // RESUME CHECKSTYLE CHECK DoubleCheckedLockingCheck
  }
}
/**
 * Check whether a latest partition exists for the given fact/dim table on the given storage.
 *
 * @param factOrDimTblName fact or dimension table name
 * @param storageName      storage name
 * @param latestPartCol    partition column to check for (used for facts)
 * @return true if a latest partition exists
 * @throws HiveException
 * @throws LensException
 */
boolean latestPartitionExists(String factOrDimTblName, String storageName, String latestPartCol)
  throws HiveException, LensException {
  String storageTableName = MetastoreUtil.getStorageTableName(factOrDimTblName, Storage.getPrefix(storageName));
  if (isDimensionTable(factOrDimTblName)) {
    // Dimension tables track a dedicated "latest" partition on the storage table itself.
    return dimTableLatestPartitionExists(storageTableName);
  } else {
    // Facts: any partition existing for the column implies a latest one.
    return !partitionTimelineCache.noPartitionsExist(factOrDimTblName, storageName, latestPartCol);
  }
}
// Refresh the in-memory partition timeline cache for this table+storage.
// Cache key is storage prefix + table name, lower-cased — must match the key
// construction used by the timeline cache's get() (presumably; verify against cache impl).
alterTablePartitionCache((Storage.getPrefix(storageName) + factOrDimTable).toLowerCase(), updatePeriod,
  storageTableName);
return partsAdded;
// Refresh the in-memory partition timeline cache for this table+storage.
// Cache key is storage prefix + table name, lower-cased — must match the key
// construction used by the timeline cache's get() (presumably; verify against cache impl).
this.alterTablePartitionCache((Storage.getPrefix(storageName) + cubeTableName).toLowerCase(), updatePeriod,
  storageTableName);
// Refresh the in-memory partition timeline cache for this table+storage.
// Cache key is storage prefix + table name, lower-cased — must match the key
// construction used by the timeline cache's get() (presumably; verify against cache impl).
alterTablePartitionCache((Storage.getPrefix(storageName) + factOrDimTable).toLowerCase(), updatePeriod,
  storageTableName);
return partsAdded;
// Refresh the in-memory partition timeline cache for this table+storage.
// Cache key is storage prefix + table name, lower-cased — must match the key
// construction used by the timeline cache's get() (presumably; verify against cache impl).
this.alterTablePartitionCache((Storage.getPrefix(storageName) + cubeTableName).toLowerCase(), updatePeriod,
  storageTableName);
// Resolve the physical storage table for this fact+storage and read its table
// parameters (which hold timeline/cache metadata elsewhere in this file).
String storageTableName = MetastoreUtil.getStorageTableName(fact.getName(), Storage.getPrefix(storageName));
Map<String, String> params = client.getTable(storageTableName).getParameters();
/**
 * Verifies that after fetching timelines for "testFact" on storages c1 and c4 and clearing the
 * hive table cache, each storage table carries the timeline-cache-presence flag and every
 * (update period, partition column) pair uses {@link EndsAndHolesPartitionTimeline}.
 *
 * @param client metastore client under test
 * @throws Exception on any metastore/assertion failure
 */
private void assertTestFactTimelineClass(CubeMetastoreClient client) throws Exception {
  String factName = "testFact";

  client.getTimelines(factName, c1, null, null);
  client.getTimelines(factName, c4, null, null);
  client.clearHiveTableCache();

  CubeFactTable fact = client.getCubeFactTable(factName);
  // Fix: the two near-identical verification stanzas for c1 and c4 are factored
  // into one helper, parameterized by storage and partition columns.
  assertTimelinePresence(client, fact, c1, "dt");
  assertTimelinePresence(client, fact, c4, "ttd", "ttd2");
}

/**
 * Asserts the timeline cache presence flag is "true" on the fact's storage table and that every
 * update period x partition column timeline is an EndsAndHolesPartitionTimeline.
 */
private void assertTimelinePresence(CubeMetastoreClient client, CubeFactTable fact, String storage,
  String... partCols) throws Exception {
  Table table = client.getTable(MetastoreUtil.getStorageTableName(fact.getName(), Storage.getPrefix(storage)));
  assertEquals(table.getParameters().get(MetastoreUtil.getPartitionTimelineCachePresenceKey()), "true");
  for (UpdatePeriod period : Lists.newArrayList(MINUTELY, HOURLY, DAILY, MONTHLY, YEARLY, QUARTERLY)) {
    for (String partCol : partCols) {
      assertTimeline(client, fact.getName(), storage, period, partCol, EndsAndHolesPartitionTimeline.class);
    }
  }
}
Storage.getPrefix(c4)));
// Override the timeline implementation for (HOURLY, "ttd") via the storage table's
// parameters, then re-read the table and verify the cache-presence flag and the
// new timeline class take effect.
table.getParameters().put(MetastoreUtil.getPartitionTimelineStorageClassKey(HOURLY, "ttd"),
  StoreAllPartitionTimeline.class.getCanonicalName());
table = client.getTable(MetastoreUtil.getStorageTableName(fact.getName(), Storage.getPrefix(c4)));
assertEquals(table.getParameters().get(MetastoreUtil.getPartitionTimelineCachePresenceKey()), "true");
assertTimeline(client, fact.getName(), c4, HOURLY, "ttd", ttdStoreAll);
// The dim table's physical storage table (storage prefix + dim name) must exist.
String storageTableName = MetastoreUtil.getStorageTableName(dimName, Storage.getPrefix(storage));
assertTrue(client.tableExists(storageTableName));
.getStorageTableName(parent.getTableName(), Storage.getPrefix(storageTableNamePrefix));
// NOTE(review): the boolean arg presumably makes getTable return null instead of
// throwing when the table is absent (the null check below relies on it) — confirm.
Table tbl = client.getTable(storageTableName, false);
if (tbl == null) {
.getStorageTableName(parent.getTableName(), Storage.getPrefix(storageTableNamePrefix));
// NOTE(review): the boolean arg presumably makes getTable return null instead of
// throwing when the table is absent (the null check below relies on it) — confirm.
Table tbl = client.getTable(storageTableName, false);
if (tbl == null) {