public ExportSnapshotInputSplit(final List<Pair<SnapshotFileInfo, Long>> snapshotFiles) {
  this.files = new ArrayList<>(snapshotFiles.size());
  for (Pair<SnapshotFileInfo, Long> fileInfo : snapshotFiles) {
    this.files.add(new Pair<>(
      new BytesWritable(fileInfo.getFirst().toByteArray()), fileInfo.getSecond()));
    this.length += fileInfo.getSecond();
  }
}
private static void getSortedTagOrdinals(List<List<Integer>> fullTagsList, Tag tag)
    throws IOException {
  List<Integer> tagsOrdinalInSortedOrder = new ArrayList<>();
  int offset = tag.getValueOffset();
  int endOffset = offset + tag.getValueLength();
  while (offset < endOffset) {
    Pair<Integer, Integer> result = TagUtil.readVIntValuePart(tag, offset);
    tagsOrdinalInSortedOrder.add(result.getFirst());
    offset += result.getSecond();
  }
  Collections.sort(tagsOrdinalInSortedOrder);
  fullTagsList.add(tagsOrdinalInSortedOrder);
}
static List<Put> createPutForPreparedBulkload(TableName table, byte[] region,
    final byte[] family, final List<Pair<Path, Path>> pairs) {
  List<Put> puts = new ArrayList<>(pairs.size());
  for (Pair<Path, Path> pair : pairs) {
    Path path = pair.getSecond();
    String file = path.toString();
    int lastSlash = file.lastIndexOf("/");
    String filename = file.substring(lastSlash + 1);
    Put put = new Put(rowkey(BULK_LOAD_PREFIX, table.toString(), BLK_LD_DELIM,
      Bytes.toString(region), BLK_LD_DELIM, filename));
    put.addColumn(BackupSystemTable.META_FAMILY, TBL_COL, table.getName());
    put.addColumn(BackupSystemTable.META_FAMILY, FAM_COL, family);
    put.addColumn(BackupSystemTable.META_FAMILY, PATH_COL, file.getBytes());
    put.addColumn(BackupSystemTable.META_FAMILY, STATE_COL, BL_PREPARE);
    puts.add(put);
    LOG.debug("writing raw bulk path " + file + " for " + table + " "
      + Bytes.toString(region));
  }
  return puts;
}
@Test
public void testSortOrderForLeadingDescVarLengthColWithNonNullFollowing() throws SQLException {
  Connection conn = DriverManager.getConnection(getUrl());
  conn.createStatement().execute(
    "CREATE TABLE t (k1 VARCHAR, k2 VARCHAR NOT NULL, CONSTRAINT pk PRIMARY KEY (k1 DESC, k2))");
  conn.createStatement().execute("UPSERT INTO t VALUES ('a','x')");
  conn.createStatement().execute("UPSERT INTO t VALUES ('ab','x')");
  Iterator<Pair<byte[], List<Cell>>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn);
  List<Cell> kvs = dataIterator.next().getSecond();
  Collections.sort(kvs, CellComparatorImpl.COMPARATOR);
  // With k1 DESC, 'ab' sorts before 'a'; invert the stored bytes to recover the values.
  Cell first = kvs.get(0);
  assertEquals("ab", Bytes.toString(SortOrder.invert(first.getRowArray(), first.getRowOffset(), 2)));
  Cell second = kvs.get(1);
  assertEquals("a", Bytes.toString(SortOrder.invert(second.getRowArray(), second.getRowOffset(), 1)));
}
/**
 * This is the client-side interface/handle for calling the std method for a
 * given cf-cq combination. One more level of call stack was necessary because the
 * return type must be a decimal value, irrespective of what the
 * ColumnInterpreter says. This method collects the necessary parameters
 * to compute the standard deviation and returns the double value.
 * @param table table to scan.
 * @param ci the user's ColumnInterpreter implementation
 * @param scan the HBase scan object to use to read data from HBase
 * @return the standard deviation as a double
 * @throws Throwable The caller is supposed to handle the exceptions as they are thrown
 *   and propagated to it.
 */
public <R, S, P extends Message, Q extends Message, T extends Message> double std(
    final Table table, ColumnInterpreter<R, S, P, Q, T> ci, Scan scan) throws Throwable {
  Pair<List<S>, Long> p = getStdArgs(table, ci, scan);
  double avg = ci.divideForAvg(p.getFirst().get(0), p.getSecond());
  double avgOfSumSq = ci.divideForAvg(p.getFirst().get(1), p.getSecond());
  double res = avgOfSumSq - avg * avg; // variance
  res = Math.pow(res, 0.5); // standard deviation
  return res;
}
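The arithmetic above is the standard identity std = sqrt(E[X^2] - (E[X])^2), computed from a running sum and a running sum of squares. A minimal, self-contained sketch of the same computation (plain Java, no HBase dependencies; the class and method names are illustrative, not from the source):

import java.util.Arrays;

public final class StdExample {
  static double std(double[] values) {
    double sum = 0, sumSq = 0;
    for (double v : values) {
      sum += v;
      sumSq += v * v; // accumulate sum of squares alongside the plain sum
    }
    long n = values.length;
    double avg = sum / n;          // E[X]
    double avgOfSumSq = sumSq / n; // E[X^2]
    return Math.sqrt(avgOfSumSq - avg * avg); // population standard deviation
  }

  public static void main(String[] args) {
    // For {1,2,3,4}: mean = 2.5, E[X^2] = 7.5, variance = 1.25, std ~= 1.118
    System.out.println(std(new double[] { 1, 2, 3, 4 }));
  }
}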
@Override
public void addHFileRefs(String peerId, List<Pair<Path, Path>> pairs)
    throws ReplicationException {
  String peerNode = getHFileRefsPeerNode(peerId);
  LOG.debug("Adding hfile references {} in queue {}", pairs, peerNode);
  List<ZKUtilOp> listOfOps = pairs.stream().map(p -> p.getSecond().getName())
    .map(n -> getHFileNode(peerNode, n))
    .map(f -> ZKUtilOp.createAndFailSilent(f, HConstants.EMPTY_BYTE_ARRAY)).collect(toList());
  LOG.debug("The multi list size for adding hfile references in zk for node {} is {}",
    peerNode, listOfOps.size());
  try {
    ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true);
  } catch (KeeperException e) {
    throw new ReplicationException("Failed to add hfile reference to peer " + peerId, e);
  }
}
@Override
public List<Pair<Cell, Cell>> postAppendBeforeWAL(
    ObserverContext<RegionCoprocessorEnvironment> ctx, Mutation mutation,
    List<Pair<Cell, Cell>> cellPairs) throws IOException {
  List<Pair<Cell, Cell>> resultPairs = new ArrayList<>(cellPairs.size());
  for (Pair<Cell, Cell> pair : cellPairs) {
    resultPairs
      .add(new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getSecond())));
  }
  return resultPairs;
}
private void addLastSeqIdsToOps(String queueId, Map<String, Long> lastSeqIds,
    List<ZKUtilOp> listOfOps) throws KeeperException, ReplicationException {
  String peerId = new ReplicationQueueInfo(queueId).getPeerId();
  for (Entry<String, Long> lastSeqEntry : lastSeqIds.entrySet()) {
    String path = getSerialReplicationRegionPeerNode(lastSeqEntry.getKey(), peerId);
    Pair<Long, Integer> p = getLastSequenceIdWithVersion(lastSeqEntry.getKey(), peerId);
    byte[] data = ZKUtil.positionToByteArray(lastSeqEntry.getValue());
    if (p.getSecond() < 0) { // ZNode does not exist.
      ZKUtil.createWithParents(zookeeper,
        path.substring(0, path.lastIndexOf(ZNodePaths.ZNODE_PATH_SEPARATOR)));
      listOfOps.add(ZKUtilOp.createAndFailSilent(path, data));
      continue;
    }
    // Perform CAS against a specific version v0 (HBASE-20138)
    int v0 = p.getSecond();
    long lastPushedSeqId = p.getFirst();
    if (lastSeqEntry.getValue() <= lastPushedSeqId) {
      continue;
    }
    listOfOps.add(ZKUtilOp.setData(path, data, v0));
  }
}
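The versioned setData above is ZooKeeper's optimistic compare-and-swap: the write succeeds only if the znode is still at the version that was read, so a concurrent writer fails with BADVERSION instead of silently clobbering the update. A hypothetical standalone sketch of that pattern using the plain ZooKeeper client API (ZkCasExample and compareAndSet are illustrative names, not HBase code):

import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public final class ZkCasExample {
  /** Updates path only if nobody modified it since our read; false if we lost the race. */
  static boolean compareAndSet(ZooKeeper zk, String path, byte[] newData)
      throws KeeperException, InterruptedException {
    Stat stat = new Stat();
    zk.getData(path, false, stat); // read current data; stat captures the version
    try {
      // setData with an explicit version fails with BADVERSION if the znode changed.
      zk.setData(path, newData, stat.getVersion());
      return true;
    } catch (KeeperException.BadVersionException e) {
      return false; // someone else wrote first; caller can retry
    }
  }
}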
public HashMap<String, List<Pair<ServerName, ReplicationLoadSource>>> getReplicationLoad(
    ServerName[] serverNames) {
  List<ReplicationPeerDescription> peerList = this.getReplicationPeerManager().listPeers(null);
  if (peerList == null) {
    return null;
  }
  HashMap<String, List<Pair<ServerName, ReplicationLoadSource>>> replicationLoadSourceMap =
    new HashMap<>(peerList.size());
  peerList.stream()
    .forEach(peer -> replicationLoadSourceMap.put(peer.getPeerId(), new ArrayList<>()));
  for (ServerName serverName : serverNames) {
    List<ReplicationLoadSource> replicationLoadSources =
      getServerManager().getLoad(serverName).getReplicationLoadSourceList();
    for (ReplicationLoadSource replicationLoadSource : replicationLoadSources) {
      replicationLoadSourceMap.get(replicationLoadSource.getPeerID())
        .add(new Pair<>(serverName, replicationLoadSource));
    }
  }
  // Sort each peer's list by replication lag, largest lag first (hence the negation).
  for (List<Pair<ServerName, ReplicationLoadSource>> loads : replicationLoadSourceMap.values()) {
    if (loads.size() > 0) {
      loads.sort(Comparator.comparingLong(load -> (-1) * load.getSecond().getReplicationLag()));
    }
  }
  return replicationLoadSourceMap;
}
private StripeCompactionRequest selectNewStripesCompaction(StripeInformationProvider si) {
  List<HStoreFile> l0Files = si.getLevel0Files();
  Pair<Long, Integer> kvsAndCount = estimateTargetKvs(l0Files, config.getInitialCount());
  LOG.debug("Creating " + kvsAndCount.getSecond() + " initial stripes with "
    + kvsAndCount.getFirst() + " kvs each via L0 compaction of " + l0Files.size() + " files");
  SplitStripeCompactionRequest request = new SplitStripeCompactionRequest(
    si.getLevel0Files(), OPEN_KEY, OPEN_KEY, kvsAndCount.getSecond(), kvsAndCount.getFirst());
  request.setMajorRangeFull(); // L0 only, can drop deletes.
  return request;
}
HashMap<String, List<Pair<ServerName, ReplicationLoadSource>>> replicationLoad =
  master.getReplicationLoad(new ServerName[] { serverName });
assertEquals("peer size ", 2, replicationLoad.size());
assertEquals("load size ", 1, replicationLoad.get(peer1).size());
assertEquals("log queue size of peer1", sizeOfLogQueue,
  replicationLoad.get(peer1).get(0).getSecond().getSizeOfLogQueue());
assertEquals("replication lag of peer2", replicationLag + 1,
  replicationLoad.get(peer2).get(0).getSecond().getReplicationLag());
master.stopMaster();
@Test
public void testSortOrderForSingleDescTimestampCol() throws SQLException {
  Connection conn = DriverManager.getConnection(getUrl());
  conn.createStatement().execute("CREATE TABLE t (k TIMESTAMP PRIMARY KEY DESC)");
  conn.createStatement().execute("UPSERT INTO t VALUES ('2016-01-04 13:11:51.631')");
  Iterator<Pair<byte[], List<Cell>>> dataIterator = PhoenixRuntime.getUncommittedDataIterator(conn);
  List<Cell> kvs = dataIterator.next().getSecond();
  Collections.sort(kvs, CellComparatorImpl.COMPARATOR);
  Cell first = kvs.get(0);
  long millisDeserialized = PDate.INSTANCE.getCodec().decodeLong(first.getRowArray(),
    first.getRowOffset(), SortOrder.DESC);
  assertEquals(1451913111631L, millisDeserialized);
}
byte[] colFamily = scan.getFamilies()[0];
NavigableSet<byte[]> quals = scan.getFamilyMap().get(colFamily);
NavigableMap<byte[], List<S>> map = p.getFirst();
S sumVal = p.getSecond().get(0);
S sumWeights = p.getSecond().get(1);
double halfSumVal = ci.divideForAvg(sumVal, 2L);
double movingSumVal = 0;
// ...
S s = weighted ? entry.getValue().get(1) : entry.getValue().get(0);
double newSumVal = movingSumVal + ci.divideForAvg(s, 1L);
if (newSumVal > halfSumVal) {
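The fragment above is the core of a running-sum median search: walk the buckets in key order, accumulating (optionally weighted) partial sums, and stop at the first bucket whose cumulative sum exceeds half of the overall sum; that bucket contains the median. A hypothetical standalone sketch of the crossing test (MedianSketch and medianBucket are illustrative names, not from the source):

import java.util.LinkedHashMap;
import java.util.Map;

public final class MedianSketch {
  /** Returns the first bucket whose cumulative sum crosses half the total. */
  static String medianBucket(LinkedHashMap<String, Double> sumsPerBucket) {
    double total = sumsPerBucket.values().stream().mapToDouble(Double::doubleValue).sum();
    double halfSum = total / 2;
    double movingSum = 0;
    for (Map.Entry<String, Double> entry : sumsPerBucket.entrySet()) {
      movingSum += entry.getValue();
      if (movingSum > halfSum) {
        return entry.getKey(); // the median lies inside this bucket
      }
    }
    return null; // not reached for non-empty input with positive sums
  }
}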
/**
 * @param p A pair in which the first boolean says whether the daughter
 *   region directory exists in the filesystem, and the second boolean says
 *   whether the daughter has references to the parent.
 * @return True if the passed <code>p</code> signifies no references.
 */
private boolean hasNoReferences(final Pair<Boolean, Boolean> p) {
  return !p.getFirst() || !p.getSecond();
}
@Override
public List<Pair<Cell, Cell>> postIncrementBeforeWAL(
    ObserverContext<RegionCoprocessorEnvironment> ctx, Mutation mutation,
    List<Pair<Cell, Cell>> cellPairs) throws IOException {
  List<Pair<Cell, Cell>> resultPairs = new ArrayList<>(cellPairs.size());
  for (Pair<Cell, Cell> pair : cellPairs) {
    resultPairs
      .add(new Pair<>(pair.getFirst(), createNewCellWithTags(mutation, pair.getSecond())));
  }
  return resultPairs;
}
logsToArchive = new ArrayList<>();
logsToArchive.add(Pair.newPair(log, e.getValue().logSize));
if (LOG.isTraceEnabled()) {
  LOG.trace("WAL file ready for archiving " + log);
}
// ...
this.totalLogSize.addAndGet(-logAndSize.getSecond());
archiveLogFile(logAndSize.getFirst());
this.walFile2Props.remove(logAndSize.getFirst());
@Test
public void testHfileRefsReplicationQueues() throws ReplicationException, KeeperException {
  rp.init();
  List<Pair<Path, Path>> files1 = new ArrayList<>(3);
  files1.add(new Pair<>(null, new Path("file_1")));
  files1.add(new Pair<>(null, new Path("file_2")));
  files1.add(new Pair<>(null, new Path("file_3")));
  assertTrue(rqs.getReplicableHFiles(ID_ONE).isEmpty());
  assertEquals(0, rqs.getAllPeersFromHFileRefsQueue().size());
  rp.getPeerStorage().addPeer(ID_ONE,
    ReplicationPeerConfig.newBuilder().setClusterKey(KEY_ONE).build(), true,
    SyncReplicationState.NONE);
  rqs.addPeerToHFileRefs(ID_ONE);
  rqs.addHFileRefs(ID_ONE, files1);
  assertEquals(1, rqs.getAllPeersFromHFileRefsQueue().size());
  assertEquals(3, rqs.getReplicableHFiles(ID_ONE).size());
  List<String> hfiles2 = new ArrayList<>(files1.size());
  for (Pair<Path, Path> p : files1) {
    hfiles2.add(p.getSecond().getName());
  }
  String removedString = hfiles2.remove(0);
  rqs.removeHFileRefs(ID_ONE, hfiles2);
  assertEquals(1, rqs.getReplicableHFiles(ID_ONE).size());
  hfiles2 = new ArrayList<>(1);
  hfiles2.add(removedString);
  rqs.removeHFileRefs(ID_ONE, hfiles2);
  assertEquals(0, rqs.getReplicableHFiles(ID_ONE).size());
  rp.getPeerStorage().removePeer(ID_ONE);
}