/**
 * Returns the {@link RegionInfo} of the wrapped region (pure delegation).
 */
@Override
public RegionInfo getRegionInfo() {
  return this.region.getRegionInfo();
}
/**
 * Performs a full scan over the labels region and collects every existing label row.
 * Each inner list holds the cells of one label row (label plus its auths).
 *
 * @return the cells of every row currently in the labels region, one list per row
 * @throws IOException if the scan fails
 */
protected List<List<Cell>> getExistingLabelsWithAuths() throws IOException {
  Scan scan = new Scan();
  List<List<Cell>> existingLabels = new ArrayList<>();
  // try-with-resources guarantees the scanner is closed even if next() throws,
  // replacing the previous explicit try/finally close.
  try (RegionScanner scanner = labelsRegion.getScanner(scan)) {
    while (true) {
      List<Cell> cells = new ArrayList<>();
      scanner.next(cells);
      if (cells.isEmpty()) {
        // An empty batch signals the end of the scan.
        break;
      }
      existingLabels.add(cells);
    }
  }
  return existingLabels;
}
/**
 * Creates a mocked {@link Region} with one mocked {@link Store} per element of
 * {@code storeSizes}; each store reports the corresponding HFile size.
 *
 * @param storeSizes the size to report for each mocked Store, in order.
 * @return a mocked Region backed by the mocked Stores.
 */
private Region mockRegionWithSize(Collection<Long> storeSizes) {
  final Region region = mock(Region.class);
  when(region.getRegionInfo()).thenReturn(mock(RegionInfo.class));
  final List<Store> mockedStores = new ArrayList<>();
  // Raw cast needed: getStores() is declared with a wildcard-incompatible generic type.
  when(region.getStores()).thenReturn((List) mockedStores);
  for (Long size : storeSizes) {
    final Store store = mock(Store.class);
    when(store.getHFilesSize()).thenReturn(size);
    mockedStores.add(store);
  }
  return region;
}
private boolean isTooManyStoreFiles(Region region) { // When compaction is disabled, the region is flushable if (!region.getTableDescriptor().isCompactionEnabled()) { return false; } for (Store store : region.getStores()) { if (store.hasTooManyStoreFiles()) { return true; } } return false; }
// NOTE(review): this fragment is truncated — the body between currentTime() and the
// LOG.warn, and the catch block declaring `ex`, are not visible here. Do not assume
// the elided logic; the comments below describe only what is shown.
@Override protected void chore() {
  // Walk every region currently online on this region server.
  for (Region r : regionServer.getOnlineRegionsLocalContext()) {
    if (!r.isReadOnly()) {
      // In meta-only mode, skip everything except the meta region.
      if (onlyMetaRefresh && !r.getRegionInfo().isMetaRegion()) continue;
      String encodedName = r.getRegionInfo().getEncodedName();
      long time = EnvironmentEdgeManager.currentTime();
      // First time we see this region: iterate its stores (refresh logic elided).
      if (!lastRefreshTimes.containsKey(encodedName)) {
        for (Store store : r.getStores()) {
          // `ex` is declared in a catch block outside this visible fragment.
          LOG.warn("Exception while trying to refresh store files for region:"
              + r.getRegionInfo() + ", exception:" + StringUtils.stringifyException(ex));
/**
 * Finds the closest row at or before {@code row} in the given family using a small
 * reversed scan (emulating the old getClosestRowBefore semantics).
 *
 * @param r the region to scan
 * @param row the row to search backwards from (inclusive)
 * @param family the column family to restrict the scan to
 * @return the closest preceding row's Result, or null when scanning a meta region and
 *         no (matching) row was found
 * @throws IOException if the scan fails
 */
public Result getClosestRowBefore(Region r, byte[] row, byte[] family) throws IOException {
  Scan scan = new Scan(row);
  scan.setSmall(true);
  scan.setCaching(1);
  scan.setReversed(true);
  scan.addFamily(family);
  try (RegionScanner scanner = r.getScanner(scan)) {
    List<Cell> cells = new ArrayList<>(1);
    scanner.next(cells);
    // FIX: check for an empty result before cells.get(0); previously an empty scan
    // on a meta region threw IndexOutOfBoundsException instead of returning null.
    if (r.getRegionInfo().isMetaRegion()
        && (cells.isEmpty() || !isTargetTable(row, cells.get(0)))) {
      return null;
    }
    return Result.create(cells);
  }
}
// NOTE(review): truncated test fragment — `p`, `d`, `s`, `c0`, `c1`, `T1`, `T2` are
// declared outside this visible span; comments below describe only what is shown.
// Write two versions of the same columns at (presumably) different timestamps.
p.addColumn(c1, c0, T1);
p.addColumn(c1, c1, T1);
region.put(p);
p.addColumn(c1, c0, T2);
p.addColumn(c1, c1, T2);
region.put(p);
// Deliberate double delete — presumably exercising delete idempotence; confirm intent.
region.delete(d);
region.delete(d);
// First scan after the deletes.
InternalScanner scanner = region.getScanner(s);
List<Cell> kvs = new ArrayList<>();
scanner.next(kvs);
// Re-open the scanner and read again with a fresh cell list.
scanner = region.getScanner(s);
kvs = new ArrayList<>();
scanner.next(kvs);
/**
 * Fills the region with {@code numRows} rows of ~1MB random values for load testing.
 * Row keys are the region's start key with the row index appended.
 *
 * @param region  the region to write into
 * @param numRows number of rows to generate
 * @throws IOException if a put fails
 */
private void generateTestData(Region region, int numRows) throws IOException {
  // generating 1Mb values — min and max size are both 1MB, so every value is exactly 1MB.
  LoadTestKVGenerator dataGenerator = new LoadTestKVGenerator(1024 * 1024, 1024 * 1024);
  for (int i = 0; i < numRows; ++i) {
    byte[] key = Bytes.add(region.getRegionInfo().getStartKey(), Bytes.toBytes(i));
    // NOTE(review): inner loop runs exactly once (j < 1) — single column "0" per row;
    // presumably kept as a loop for easy tuning of columns-per-row.
    for (int j = 0; j < 1; ++j) {
      Put put = new Put(key);
      byte[] col = Bytes.toBytes(String.valueOf(j));
      byte[] value = dataGenerator.generateRandomSizeValue(key, col);
      put.addColumn(FAMILYNAME, col, value);
      region.put(put);
    }
  }
}
// Closing brace of the enclosing class (visible in this chunk).
}
private void commitBatch(Region region, List<Mutation> mutations, long blockingMemstoreSize) throws IOException { if (mutations.isEmpty()) { return; } Mutation[] mutationArray = new Mutation[mutations.size()]; // When memstore size reaches blockingMemstoreSize we are waiting 3 seconds for the // flush happen which decrease the memstore size and then writes allowed on the region. for (int i = 0; blockingMemstoreSize > 0 && (region.getMemStoreHeapSize() + region.getMemStoreOffHeapSize()) > blockingMemstoreSize && i < 30; i++) { try { checkForRegionClosing(); Thread.sleep(100); } catch (InterruptedException e) { Thread.currentThread().interrupt(); throw new IOException(e); } } // TODO: should we use the one that is all or none? logger.debug("Committing batch of " + mutations.size() + " mutations for " + region.getRegionInfo().getTable().getNameAsString()); region.batchMutate(mutations.toArray(mutationArray)); }
// NOTE(review): truncated fragment — several statements between the visible lines are
// elided (the if/else structure and try body do not balance here); comments describe
// only what is shown.
byte[] row = append.getRow();
List<RowLock> locks = Lists.newArrayList();
// Guard the region against close/split while we operate on it.
region.startRegionOperation();
try {
  ServerUtil.acquireLock(region, row, locks);
  get.setTimeRange(minGetTimestamp, maxGetTimestamp);
  get.addColumn(family, qualifier);
  Result result = region.get(get);
  if (result.isEmpty()) {
    if (op == Sequence.MetaOp.DROP_SEQUENCE || op == Sequence.MetaOp.RETURN_SEQUENCE) {
      region.batchMutate(mutations);
      long serverTimestamp = MetaDataUtil.getClientTimeStamp(m);
      return null; // Impossible
} finally {
  // Always release the region operation guard acquired above.
  region.closeRegionOperation();
/**
 * Writes the given {@link Put} to the underlying region (pure delegation).
 *
 * @param put the mutation to apply
 * @throws IOException if the underlying region write fails
 */
@Override
public void put(Put put) throws IOException {
  region.put(put);
}
// NOTE(review): truncated fragment — the surrounding try block, the catch declaring
// `t`, and the variables `mutations` and `inc` are outside this visible span.
Region region = e.getEnvironment().getRegion();
region.batchMutate(mutations.toArray(new Mutation[0]));
// Wrap the original cause with table and row context for the client.
throw ServerUtil.createIOException(
    "Unable to process ON DUPLICATE IGNORE for "
        + e.getEnvironment().getRegion().getRegionInfo().getTable().getNameAsString()
        + "(" + Bytes.toStringBinary(inc.getRow()) + ")", t);
} finally {
// NOTE(review): truncated fragment — loop braces do not balance here and `store` (used
// in the last statement) is declared outside this visible span; comments describe only
// what is shown.
// Add every non-local-index family to the scan.
for (Store s : env.getRegion().getStores()) {
  if (!IndexUtil.isLocalIndexStore(s)) {
    scan.addFamily(s.getColumnFamilyDescriptor().getName());
// Server-side connection used to resolve the data table's PTable metadata.
PhoenixConnection conn = QueryUtil.getConnectionOnServer(env.getConfiguration())
    .unwrap(PhoenixConnection.class);
PTable dataPTable = IndexUtil.getPDataTable(conn, env.getRegion().getTableDescriptor());
final List<IndexMaintainer> maintainers = Lists
    .newArrayListWithExpectedSize(dataPTable.getIndexes().size());
// Wrap the region scanner so local index rows are rewritten on the fly.
return new DataTableLocalIndexRegionScanner(env.getRegion().getScanner(scan),
    env.getRegion(), maintainers, store.getColumnFamilyDescriptor().getName(),
    env.getConfiguration());
/**
 * Sums the store-file counts across all stores of the given region.
 *
 * @param region the region to inspect
 * @return total number of store files in the region
 */
private int getStoreFileCount(Region region) {
  return region.getStores().stream().mapToInt(Store::getStorefilesCount).sum();
}
/**
 * Executes the given {@link Get} against the underlying region (pure delegation).
 *
 * @param get the read to perform
 * @return the region's result for the Get
 * @throws IOException if the underlying region read fails
 */
@Override
public Result get(Get get) throws IOException {
  return region.get(get);
}
// NOTE(review): truncated fragment — `hasMore`, `deleteArr`, `deleteRow`, `deleteType`,
// `timestamp`, `rowBatchSize`, and the loop/brace structure around these lines are
// outside this visible span.
scanner = region.getScanner(scan);
while (hasMore) {
  List<List<Cell>> deleteRows = new ArrayList<>(rowBatchSize);
// Build the delete for this row and batch-apply the accumulated deletes.
deleteArr[i++] = createDeleteMutation(deleteRow, deleteType, timestamp);
OperationStatus[] opStatus = region.batchMutate(deleteArr);
// Check every per-mutation status for failures.
for (i = 0; i < opStatus.length; i++) {
  if (opStatus[i].getOperationStatusCode() != OperationStatusCode.SUCCESS) {
/** * Adds the mutations to labels region and set the results to the finalOpStatus. finalOpStatus * might have some entries in it where the OpStatus is FAILURE. We will leave those and set in * others in the order. * @param mutations * @param finalOpStatus * @return whether we need a ZK update or not. */ private boolean mutateLabelsRegion(List<Mutation> mutations, OperationStatus[] finalOpStatus) throws IOException { OperationStatus[] opStatus = this.labelsRegion.batchMutate(mutations .toArray(new Mutation[mutations.size()])); int i = 0; boolean updateZk = false; for (OperationStatus status : opStatus) { // Update the zk when atleast one of the mutation was added successfully. updateZk = updateZk || (status.getOperationStatusCode() == OperationStatusCode.SUCCESS); for (; i < finalOpStatus.length; i++) { if (finalOpStatus[i] == null) { finalOpStatus[i] = status; break; } } } return updateZk; }
// NOTE(review): truncated fragment — statements are elided between the visible lines
// (the addition after getDataSize() is cut off, braces do not balance). As shown,
// `mt` is still null when put into the map and then dereferenced — presumably the
// elided code assigns `mt = new MetricsTableValues()` first; verify against the
// full source.
TableName tbl = r.getTableDescriptor().getTableName();
MetricsTableValues mt = localMetricsTableMap.get(tbl);
if (mt == null) {
  localMetricsTableMap.put(tbl, mt);
// Aggregate per-store file counts and memstore sizes into the table metrics.
if (r.getStores() != null) {
  for (Store store : r.getStores()) {
    mt.storeFileCount += store.getStorefilesCount();
    mt.memstoreSize += (store.getMemStoreSize().getDataSize() +
// Aggregate per-region request counters into the table metrics.
mt.readRequestCount += r.getReadRequestsCount();
mt.filteredReadRequestCount += getFilteredReadRequestCount(tbl.getNameAsString());
mt.writeRequestCount += r.getWriteRequestsCount();
/**
 * Returns {@code true} if the given region is part of the {@code _acl_}
 * metadata table.
 *
 * @param region the region to test
 * @return true when the region belongs to the ACL table
 */
static boolean isAclRegion(Region region) {
  // Use getTableDescriptor() for consistency with the rest of this code base;
  // getTableDesc() is the deprecated pre-HBase-2 accessor.
  return ACL_TABLE_NAME.equals(region.getTableDescriptor().getTableName());
}
// NOTE(review): truncated fragment — the surrounding if condition, the try body, and
// the declarations of `joinResult`, `dataRegion`, and `get` are outside this visible
// span.
joinResult = dataRegion.get(get);
} else {
  // Resolve the user table backing this local index and read from it via a Table handle.
  TableName dataTable = TableName.valueOf(MetaDataUtil.getLocalIndexUserTableName(
      environment.getRegion().getTableDescriptor().getTableName().getNameAsString()));
  Table table = null;
  try {