@Override
public Collection<HStoreFile> clearCompactedFiles() {
  // Swap the compacted-file list for an empty one and hand the previous contents back
  // to the caller.
  List<HStoreFile> previous = compactedfiles;
  compactedfiles = ImmutableList.of();
  return previous;
}
@Override
public ImmutableCollection<HStoreFile> clearFiles() {
  // Reset the store-file list to empty; the caller receives the old immutable snapshot.
  ImmutableList<HStoreFile> previous = storefiles;
  storefiles = ImmutableList.of();
  return previous;
}
@Parameters(name = "{index}: serialPeer={0}")
public static List<Boolean> parameters() {
  // Each test runs twice: once with a serial replication peer, once without.
  return ImmutableList.of(Boolean.TRUE, Boolean.FALSE);
}
@Parameters(name = "{index}: serialPeer={0}, syncPeer={1}")
public static List<Object[]> parameters() {
  // Full cartesian product of {serialPeer} x {syncPeer}.
  return ImmutableList.of(
      new Object[] { false, false },
      new Object[] { false, true },
      new Object[] { true, false },
      new Object[] { true, true });
}
@Parameters(name = "{index}: serialPeer={0}")
public static List<Boolean> parameters() {
  // Run the suite with the serial-peer flag both on and off.
  return ImmutableList.of(Boolean.TRUE, Boolean.FALSE);
}
@Before public void setUp() throws Exception { this.subject = Mockito.spy(new MultiTableSnapshotInputFormatImpl()); // mock out restoreSnapshot // TODO: this is kind of meh; it'd be much nicer to just inject the RestoreSnapshotHelper // dependency into the // input format. However, we need a new RestoreSnapshotHelper per snapshot in the current // design, and it *also* // feels weird to introduce a RestoreSnapshotHelperFactory and inject that, which would // probably be the more "pure" // way of doing things. This is the lesser of two evils, perhaps? doNothing().when(this.subject). restoreSnapshot(any(), any(), any(), any(), any()); this.conf = new Configuration(); this.rootDir = new Path("file:///test-root-dir"); FSUtils.setRootDir(conf, rootDir); this.snapshotScans = ImmutableMap.<String, Collection<Scan>>of("snapshot1", ImmutableList.of(new Scan(Bytes.toBytes("1"), Bytes.toBytes("2"))), "snapshot2", ImmutableList.of(new Scan(Bytes.toBytes("3"), Bytes.toBytes("4")), new Scan(Bytes.toBytes("5"), Bytes.toBytes("6")))); this.restoreDir = new Path(FSUtils.getRootDir(conf), "restore-dir"); }
@Test
@SuppressWarnings({ "deprecation", "unchecked" })
public void shouldCreateNewKeyAlthoughExtraKey() throws Exception {
  GroupingTableMap gTableMap = null;
  try {
    Result result = mock(Result.class);
    Reporter reporter = mock(Reporter.class);
    gTableMap = new GroupingTableMap();
    Configuration cfg = new Configuration();
    cfg.set(GroupingTableMap.GROUP_COLUMNS, "familyA:qualifierA familyB:qualifierB");
    JobConf jobConf = new JobConf(cfg);
    gTableMap.configure(jobConf);

    byte[] row = {};
    // Use Bytes.toBytes(String) instead of String.getBytes(): the latter encodes with the
    // platform default charset, while Bytes.toBytes is always UTF-8 and matches the value
    // encoding already used below.
    List<Cell> keyValues = ImmutableList.<Cell>of(
        new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"),
            Bytes.toBytes("1111")),
        new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"),
            Bytes.toBytes("2222")),
        new KeyValue(row, Bytes.toBytes("familyC"), Bytes.toBytes("qualifierC"),
            Bytes.toBytes("3333")));
    when(result.listCells()).thenReturn(keyValues);

    OutputCollector<ImmutableBytesWritable, Result> outputCollectorMock =
        mock(OutputCollector.class);
    // Both grouping columns are present, so exactly one pair must be emitted even though
    // the row carries an extra (familyC) cell.
    gTableMap.map(null, result, outputCollectorMock, reporter);
    verify(result).listCells();
    verify(outputCollectorMock, times(1)).collect(any(), any());
    verifyNoMoreInteractions(outputCollectorMock);
  } finally {
    if (gTableMap != null) {
      gTableMap.close();
    }
  }
}
@Test
@SuppressWarnings({ "deprecation", "unchecked" })
public void shouldNotCallCollectonSinceFindUniqueKeyValueMoreThanOnes() throws Exception {
  GroupingTableMap gTableMap = null;
  try {
    Result result = mock(Result.class);
    Reporter reporter = mock(Reporter.class);
    gTableMap = new GroupingTableMap();
    Configuration cfg = new Configuration();
    cfg.set(GroupingTableMap.GROUP_COLUMNS, "familyA:qualifierA familyB:qualifierB");
    JobConf jobConf = new JobConf(cfg);
    gTableMap.configure(jobConf);

    byte[] row = {};
    // Use Bytes.toBytes(String) instead of String.getBytes(): the latter depends on the
    // platform default charset, while Bytes.toBytes is always UTF-8 and matches the value
    // encoding already used below.
    List<Cell> keyValues = ImmutableList.<Cell>of(
        new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"),
            Bytes.toBytes("1111")),
        new KeyValue(row, Bytes.toBytes("familyA"), Bytes.toBytes("qualifierA"),
            Bytes.toBytes("2222")),
        new KeyValue(row, Bytes.toBytes("familyB"), Bytes.toBytes("qualifierB"),
            Bytes.toBytes("3333")));
    when(result.listCells()).thenReturn(keyValues);

    OutputCollector<ImmutableBytesWritable, Result> outputCollectorMock =
        mock(OutputCollector.class);
    // familyA:qualifierA appears twice, so no unique grouping key exists and nothing may
    // be collected.
    gTableMap.map(null, result, outputCollectorMock, reporter);
    verify(result).listCells();
    verifyZeroInteractions(outputCollectorMock);
  } finally {
    if (gTableMap != null) {
      gTableMap.close();
    }
  }
}
@Override public synchronized List<ServerName> getRegionServers() { // Return multiple server names for endpoint parallel replication. return new ArrayList<>( ImmutableList.of(ServerName.valueOf("www.example.com", 12016, 1525245876026L), ServerName.valueOf("www.example2.com", 12016, 1525245876026L), ServerName.valueOf("www.example3.com", 12016, 1525245876026L), ServerName.valueOf("www.example4.com", 12016, 1525245876026L), ServerName.valueOf("www.example4.com", 12016, 1525245876026L))); } }
final byte[] secondPartKeyValue = Bytes.toBytes("35245142671437"); byte[] row = {}; List<Cell> cells = ImmutableList.<Cell>of( new KeyValue(row, "familyA".getBytes(), "qualifierA".getBytes(), firstPartKeyValue), new KeyValue(row, "familyB".getBytes(), "qualifierB".getBytes(), secondPartKeyValue));
CallRunner priorityTask = createMockTask(); CallRunner replicationTask = createMockTask(); List<CallRunner> tasks = ImmutableList.of( generalTask, priorityTask,
@Test
public void testSetDataWithVersion() throws Exception {
  final String path = "/s1/s2/s3";
  ZKUtil.createWithParents(ZKW, path);
  // A freshly created znode starts at data version 0.
  assertEquals(0, getZNodeDataVersion(path));

  // A plain setData bumps the version to 1.
  ZKUtil.setData(ZKW, path, Bytes.toBytes(12L));
  int versionAfterSet = getZNodeDataVersion(path);
  assertEquals(1, versionAfterSet);

  // A version-checked multi update must bump it again.
  ZKUtil.multiOrSequential(ZKW,
      ImmutableList.of(ZKUtilOp.setData(path, Bytes.toBytes(13L), versionAfterSet)), false);
  assertEquals(2, getZNodeDataVersion(path));
}
@Before public void setUpBase() throws Exception { if (!peerExist(PEER_ID2)) { ReplicationPeerConfigBuilder builder = ReplicationPeerConfig.newBuilder() .setClusterKey(utility2.getClusterKey()).setSerial(isSerialPeer()); if (isSyncPeer()) { FileSystem fs2 = utility2.getTestFileSystem(); // The remote wal dir is not important as we do not use it in DA state, here we only need to // confirm that a sync peer in DA state can still replicate data to remote cluster // asynchronously. builder.setReplicateAllUserTables(false) .setTableCFsMap(ImmutableMap.of(tableName, ImmutableList.of())) .setRemoteWALDir(new Path("/RemoteWAL") .makeQualified(fs2.getUri(), fs2.getWorkingDirectory()).toUri().toString()); } hbaseAdmin.addReplicationPeer(PEER_ID2, builder.build()); } }
@BeforeClass public static void setUpSnapshots() throws Exception { TEST_UTIL.enableDebug(MultiTableSnapshotInputFormat.class); TEST_UTIL.enableDebug(MultiTableSnapshotInputFormatImpl.class); // take a snapshot of every table we have. for (String tableName : TABLES) { SnapshotTestingUtils .createSnapshotAndValidate(TEST_UTIL.getAdmin(), TableName.valueOf(tableName), ImmutableList.of(INPUT_FAMILY), null, snapshotNameForTable(tableName), FSUtils.getRootDir(TEST_UTIL.getConfiguration()), TEST_UTIL.getTestFileSystem(), true); } }
@Test
public void testGetDoesntSeekWithNoHint() throws IOException {
  StoreFileScanner.instrument();
  prepareRegion();

  Get get = new Get(RK_BYTES);
  // TimestampsFilter built without the seek-hint flag, so no hint-driven seeks happen.
  get.setFilter(new TimestampsFilter(ImmutableList.of(5L)));

  final long seeksBefore = StoreFileScanner.getSeekCount();
  region.get(get);
  final long seeksAfter = StoreFileScanner.getSeekCount();

  // The get may seek to reach the row, but without a hint must add fewer than 3 seeks.
  assertTrue(seeksAfter >= seeksBefore);
  assertTrue(seeksAfter < seeksBefore + 3);
}
/** Tests that the rpc scheduler is called when requests arrive. */ @Test public void testRpcMaxRequestSize() throws IOException, ServiceException { Configuration conf = new Configuration(CONF); conf.setInt(RpcServer.MAX_REQUEST_SIZE, 1000); RpcServer rpcServer = createRpcServer(null, "testRpcServer", Lists.newArrayList(new RpcServer.BlockingServiceAndInterface( SERVICE, null)), new InetSocketAddress("localhost", 0), conf, new FifoRpcScheduler(conf, 1)); try (AbstractRpcClient<?> client = createRpcClient(conf)) { rpcServer.start(); BlockingInterface stub = newBlockingStub(client, rpcServer.getListenerAddress()); StringBuilder message = new StringBuilder(1200); for (int i = 0; i < 200; i++) { message.append("hello."); } // set total RPC size bigger than 100 bytes EchoRequestProto param = EchoRequestProto.newBuilder().setMessage(message.toString()).build(); stub.echo( new HBaseRpcControllerImpl(CellUtil.createCellScanner(ImmutableList.<Cell> of(CELL))), param); fail("RPC should have failed because it exceeds max request size"); } catch (ServiceException e) { LOG.info("Caught expected exception: " + e); assertTrue(e.toString(), StringUtils.stringifyException(e).contains("RequestTooBigException")); } finally { rpcServer.stop(); } }
@Test
public void testGetSeek() throws IOException {
  StoreFileScanner.instrument();
  prepareRegion();

  Get get = new Get(RK_BYTES);
  // Second constructor arg enables seek-next-using-hint behavior in the filter.
  get.setFilter(new TimestampsFilter(ImmutableList.of(5L), true));

  final long seeksBefore = StoreFileScanner.getSeekCount();
  region.get(get);
  final long seeksAfter = StoreFileScanner.getSeekCount();

  // Expect multiple extra seeks: reaching the row plus hint-driven timestamp navigation.
  assertTrue(seeksAfter >= seeksBefore + 3);
}
protected void compactEquals(long now, ArrayList<HStoreFile> candidates,
    long[] expectedFileSizes, long[] expectedBoundaries, boolean isMajor, boolean toCompact)
    throws IOException {
  // Pin the clock so the policy's tier boundaries are deterministic for this test.
  ManualEnvironmentEdge timeMachine = new ManualEnvironmentEdge();
  EnvironmentEdgeManager.injectEdge(timeMachine);
  timeMachine.setValue(now);

  DateTieredCompactionPolicy policy =
      (DateTieredCompactionPolicy) store.storeEngine.getCompactionPolicy();
  DateTieredCompactionRequest request;
  if (isMajor) {
    for (HStoreFile file : candidates) {
      ((MockHStoreFile) file).setIsMajor(true);
    }
    assertEquals(toCompact, policy.shouldPerformMajorCompaction(candidates));
    request = (DateTieredCompactionRequest) policy.selectMajorCompaction(candidates);
  } else {
    assertEquals(toCompact, policy.needsCompaction(candidates, ImmutableList.of()));
    request =
        (DateTieredCompactionRequest) policy.selectMinorCompaction(candidates, false, false);
  }

  // Compare via Arrays.toString so a mismatch prints both arrays in full.
  List<HStoreFile> chosen = Lists.newArrayList(request.getFiles());
  assertEquals(Arrays.toString(expectedFileSizes), Arrays.toString(getSizes(chosen)));
  assertEquals(Arrays.toString(expectedBoundaries),
      Arrays.toString(request.getBoundaries().toArray()));
}
}
.setClusterKey(getZKClusterKey()).setReplicationEndpointImpl(TestEndpoint.class.getName()) .setReplicateAllUserTables(false).setSerial(isSerial) .setTableCFsMap(ImmutableMap.of(tableName, ImmutableList.of())).build(); admin.addReplicationPeer(peerId, peerConfig);