@Test
public void textPlain_withAutocrypt() throws Exception {
    MimeMessage message = new MimeMessage();
    message.setUid("msguid");
    message.setHeader("Content-Type", "text/plain");
    when(autocryptOperations.hasAutocryptHeader(message)).thenReturn(true);
    when(autocryptOperations.addAutocryptPeerUpdateToIntentIfPresent(same(message), any(Intent.class))).thenReturn(true);

    MessageCryptoCallback messageCryptoCallback = mock(MessageCryptoCallback.class);
    messageCryptoHelper.asyncStartOrResumeProcessingMessage(message, messageCryptoCallback, null, false);

    ArgumentCaptor<MessageCryptoAnnotations> captor = ArgumentCaptor.forClass(MessageCryptoAnnotations.class);
    verify(messageCryptoCallback).onCryptoOperationsFinished(captor.capture());
    MessageCryptoAnnotations annotations = captor.getValue();
    assertTrue(annotations.isEmpty());
    verifyNoMoreInteractions(messageCryptoCallback);

    ArgumentCaptor<Intent> intentCaptor = ArgumentCaptor.forClass(Intent.class);
    verify(autocryptOperations).addAutocryptPeerUpdateToIntentIfPresent(same(message), intentCaptor.capture());
    verify(openPgpApi).executeApiAsync(same(intentCaptor.getValue()), same((InputStream) null),
            same((OutputStream) null), any(IOpenPgpCallback.class));
}
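Every snippet in this listing relies on Mockito's same() matcher, which matches by reference identity (==) rather than by equals(). A minimal, self-contained sketch of the difference; the Repository interface here is hypothetical, not taken from any snippet above:

import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.ArgumentMatchers.same;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

interface Repository {
    void store(String key);
}

public class SameVsEqSketch {
    public static void main(String[] args) {
        Repository repo = mock(Repository.class);
        String key = new String("k");
        repo.store(key);

        verify(repo).store(eq(new String("k"))); // passes: compared with equals()
        verify(repo).store(same(key));           // passes: the exact same instance
        // verify(repo).store(same(new String("k"))); // would fail: equal but not identical
    }
}

same() is the right choice when a test must prove that a collaborator received a specific instance (a shared scope, a captured Intent) and not merely an equal copy.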
@Test(expected = IOException.class)
public void shouldThrowExceptionOnDistcpFailure() throws Exception {
    Path destination = mock(Path.class);
    Path source = mock(Path.class);
    FileSystem fs = mock(FileSystem.class);
    List<Path> srcPaths = Arrays.asList(source, source);
    HiveConf conf = mock(HiveConf.class);
    CopyUtils copyUtils = Mockito.spy(new CopyUtils(null, conf));

    mockStatic(FileUtils.class);
    mockStatic(Utils.class);
    when(destination.getFileSystem(same(conf))).thenReturn(fs);
    when(source.getFileSystem(same(conf))).thenReturn(fs);
    when(FileUtils.distCp(same(fs), anyListOf(Path.class), same(destination),
            anyBoolean(), eq(null), same(conf), same(ShimLoader.getHadoopShims())))
            .thenReturn(false);
    when(Utils.getUGI()).thenReturn(mock(UserGroupInformation.class));
    doReturn(false).when(copyUtils).regularCopy(same(fs), same(fs), anyListOf(ReplChangeManager.FileInfo.class));

    copyUtils.doCopy(destination, srcPaths);
}
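Note that mockStatic above, and the verifyStatic call in the later Hive snippet, come from PowerMock(ito), not plain Mockito; they need the PowerMock runner and a @PrepareForTest declaration. A minimal sketch of the static-stubbing pattern, with a hypothetical PathUtils class standing in for FileUtils/Utils:

import static org.junit.Assert.assertFalse;
import static org.powermock.api.mockito.PowerMockito.mockStatic;
import static org.powermock.api.mockito.PowerMockito.when;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.powermock.core.classloader.annotations.PrepareForTest;
import org.powermock.modules.junit4.PowerMockRunner;

// Hypothetical class with a static method, for illustration only.
class PathUtils {
    static boolean copy(String src, String dst) { return true; }
}

@RunWith(PowerMockRunner.class)
@PrepareForTest(PathUtils.class)
public class StaticStubSketch {

    @Test
    public void stubbedStaticReturnsFalse() {
        mockStatic(PathUtils.class);
        when(PathUtils.copy("a", "b")).thenReturn(false);

        // The stub replaces the real static implementation.
        assertFalse(PathUtils.copy("a", "b"));
    }
}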
@Test
public void registerProvider_shouldAttachProviderInfo() {
    ContentProvider mock = mock(ContentProvider.class);

    ProviderInfo providerInfo0 = new ProviderInfo();
    providerInfo0.authority = "the-authority"; // todo: support multiple authorities
    providerInfo0.grantUriPermissions = true;
    mock.attachInfo(ApplicationProvider.getApplicationContext(), providerInfo0);
    mock.onCreate();

    ArgumentCaptor<ProviderInfo> captor = ArgumentCaptor.forClass(ProviderInfo.class);
    verify(mock)
        .attachInfo(same((Application) ApplicationProvider.getApplicationContext()), captor.capture());
    ProviderInfo providerInfo = captor.getValue();

    assertThat(providerInfo.authority).isEqualTo("the-authority");
    assertThat(providerInfo.grantUriPermissions).isEqualTo(true);
}
@Test
public void testSelectStatementWithStatementsCache() {
    setUpPoolAndDatabase(1, 10 /* statement cache is enabled */);
    ConnectionProvider cp = sessionFactory().getServiceRegistry().getService(ConnectionProvider.class);
    ViburDBCPDataSource ds = ((ViburDBCPConnectionProvider) cp).getDataSource();
    ConcurrentMap<StatementMethod, StatementHolder> mockedStatementCache = mockStatementCache(ds);

    doInHibernate(this::sessionFactory, ViburDBCPConnectionProviderTest::executeAndVerifySelect);
    // poolMaxSize is set to 1 above, so the second session gets and uses the same underlying connection.
    doInHibernate(this::sessionFactory, ViburDBCPConnectionProviderTest::executeAndVerifySelect);

    InOrder inOrder = inOrder(mockedStatementCache);
    inOrder.verify(mockedStatementCache).get(key1.capture());
    inOrder.verify(mockedStatementCache).putIfAbsent(same(key1.getValue()), val1.capture());
    inOrder.verify(mockedStatementCache).get(key2.capture());

    assertEquals(1, mockedStatementCache.size());
    assertTrue(mockedStatementCache.containsKey(key1.getValue()));
    assertEquals(key1.getValue(), key2.getValue());
    assertEquals(AVAILABLE, val1.getValue().state().get());
}
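The InOrder verification above checks the cache protocol, not just that each call happened: a miss, then an insert, then a hit, in exactly that sequence. A minimal sketch of the pattern, using a hypothetical Cache interface:

import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;

import org.mockito.InOrder;

interface Cache {
    Object get(String key);
    void put(String key, Object value);
}

public class InOrderSketch {
    public static void main(String[] args) {
        Cache cache = mock(Cache.class);
        cache.get("k");          // miss
        cache.put("k", "value"); // populate

        // Fails if the interactions happened in any other order.
        InOrder inOrder = inOrder(cache);
        inOrder.verify(cache).get("k");
        inOrder.verify(cache).put("k", "value");
    }
}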
@Test
public void testContentChangeListener() throws Exception {
    jobMeta.setChanged();
    jobMeta.setChanged( true );
    verify( listener, times( 2 ) ).contentChanged( same( jobMeta ) );

    jobMeta.clearChanged();
    jobMeta.setChanged( false );
    verify( listener, times( 2 ) ).contentSafe( same( jobMeta ) );

    jobMeta.removeContentChangedListener( listener );
    jobMeta.setChanged();
    jobMeta.setChanged( true );

    verifyNoMoreInteractions( listener );
}
@Test
public void empty() throws ConnectionException {

    final MvccLogEntrySerializationStrategy logEntrySerializationStrategy =
        mock( MvccLogEntrySerializationStrategy.class );

    final ApplicationScope scope = new ApplicationScopeImpl( new SimpleId( "application" ) );
    final Id entityId = new SimpleId( "entity" );
    final int pageSize = 100;

    //set the start version, it should be discarded
    UUID start = UUIDGenerator.newTimeUUID();

    when( logEntrySerializationStrategy.load( same( scope ), same( entityId ), same( start ), same( pageSize ) ) )
        .thenReturn( new ArrayList<MvccLogEntry>() );

    //now iterate; we should get everything
    MinMaxLogEntryIterator itr =
        new MinMaxLogEntryIterator( logEntrySerializationStrategy, scope, entityId, pageSize );

    assertFalse( itr.hasNext() );
}
@Test
public void testContentChangeListener() throws Exception {
    ContentChangedListener listener = mock( ContentChangedListener.class );
    transMeta.addContentChangedListener( listener );

    transMeta.setChanged();
    transMeta.setChanged( true );
    verify( listener, times( 2 ) ).contentChanged( same( transMeta ) );

    transMeta.clearChanged();
    transMeta.setChanged( false );
    verify( listener, times( 2 ) ).contentSafe( same( transMeta ) );

    transMeta.removeContentChangedListener( listener );
    transMeta.setChanged();
    transMeta.setChanged( true );

    verifyNoMoreInteractions( listener );
}
@Test
public void invalidCountNoShards() {

    when( edgeShardSerialization.writeShardMeta( same( scope ), shardValue.capture(), same( targetEdgeMeta ) ) )
        .thenReturn( mock( MutationBatch.class ) );

    // The when( … ) wrapper and receiver of this stubbing were truncated in the
    // excerpt; shardedEdgeSerialization is an assumed field name.
    when( shardedEdgeSerialization
        .getEdgesFromSourceByTargetType( same( edgeColumnFamilies ), same( scope ), any( SearchByIdType.class ),
            any( Collection.class ) ) ).thenReturn( edgeIterator );

    // … remainder of the test truncated in the excerpt
@Test
public void noShardsReturns() throws ConnectionException {

    // The when( … ) wrappers and receivers were truncated in the excerpt;
    // edgeShardSerialization is an assumed field name.
    when( edgeShardSerialization
        .getShardMetaData( same( scope ), any( Optional.class ), same( directedEdgeMeta ) ) )
        .thenReturn( Collections.<Shard>emptyList().iterator() );

    when( edgeShardSerialization
        .writeShardMeta( same( scope ), shardArgumentCaptor.capture(), same( directedEdgeMeta ) ) )
        .thenReturn( batch );

    // … remainder of the test truncated in the excerpt
@Test
public void existingSingleShard() {

    final ApplicationScope scope = new ApplicationScopeImpl( IdGenerator.createId( "application" ) );
    final DirectedEdgeMeta directedEdgeMeta = DirectedEdgeMeta.fromSourceNode( IdGenerator.createId( "source" ), "test" );
    final ShardGroupCompaction shardGroupCompaction = mock( ShardGroupCompaction.class );

    final Shard minShard = new Shard( 0, 0, true );
    final long delta = 10000;
    final Iterator<Shard> noShards = Collections.singleton( minShard ).iterator();

    ShardEntryGroupIterator entryGroupIterator =
        new ShardEntryGroupIterator( noShards, delta, shardGroupCompaction, scope, directedEdgeMeta );

    assertTrue( "Root shard always present", entryGroupIterator.hasNext() );

    ShardEntryGroup group = entryGroupIterator.next();
    assertNotNull( "Group returned", group );

    //verify we ran our compaction check
    verify( shardGroupCompaction ).evaluateShardGroup( same( scope ), same( directedEdgeMeta ), eq( group ) );

    Collection<Shard> readShards = group.getReadShards();
    assertEquals( "Min shard present", 1, readShards.size() );
    assertTrue( "Min shard present", readShards.contains( minShard ) );

    Collection<Shard> writeShards = group.getWriteShards( 0 );
    assertEquals( "Min shard present", 1, writeShards.size() );
    assertTrue( "Min shard present", writeShards.contains( minShard ) );

    writeShards = group.getWriteShards( Long.MAX_VALUE );
    assertEquals( "Min shard present", 1, writeShards.size() );
    assertTrue( "Min shard present", writeShards.contains( minShard ) );
}
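The verification above mixes same() and eq(). Mockito requires this: once any argument uses a matcher, every argument must, so plain values are wrapped in eq() rather than passed raw. A minimal sketch with a hypothetical Evaluator interface:

import static org.mockito.ArgumentMatchers.eq;
import static org.mockito.ArgumentMatchers.same;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

interface Evaluator {
    void evaluate(Object scope, String name);
}

public class MixedMatchersSketch {
    public static void main(String[] args) {
        Evaluator evaluator = mock(Evaluator.class);
        Object scope = new Object();
        evaluator.evaluate(scope, "test");

        // verify(evaluator).evaluate(same(scope), "test"); // would throw InvalidUseOfMatchersException
        verify(evaluator).evaluate(same(scope), eq("test"));
    }
}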
@Test(expected = TestException.class)
public void removeDBPropertyToPreventRenameWhenBootstrapDumpOfTableFails() throws Exception {
    List<String> tableList = Arrays.asList("a1", "a2");

    when(Utils.matchesDb(same(hive), eq("default")))
        .thenReturn(Collections.singletonList("default"));
    when(Utils.getAllTables(same(hive), eq("default"))).thenReturn(tableList);
    when(Utils.setDbBootstrapDumpState(same(hive), eq("default"))).thenReturn(dbRandomKey);
    when(Utils.matchesTbl(same(hive), eq("default"), anyString())).thenReturn(tableList);

    // … the excerpt elides the try block, presumably the dump call that throws TestException …
    } finally {
        verifyStatic();
        Utils.resetDbBootstrapDumpState(same(hive), eq("default"), eq(dbRandomKey));
    // … remainder of the test truncated in the excerpt
@Test
public void testDeletion() throws ExecutionException, InterruptedException, ConnectionException {

    final long createTime = 10000;
    final long currentTime = createTime * 2;

    final Shard shard0 = new Shard( 1000, createTime, true );

    //set a delta for way in the future
    final ShardEntryGroup group = new ShardEntryGroup( 1 );
    group.addShard( shard0 );

    assertFalse( "this should return false for our test to succeed", group.isCompactionPending() );
    assertFalse( "this should return false for our test to succeed", group.isNew( currentTime ) );

    final DirectedEdgeMeta directedEdgeMeta = getDirectedEdgeMeta();

    //mock up returning a mutation
    final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
    final MutationBatch batch = mock( MutationBatch.class );

    when( edgeShardSerialization.removeShardMeta( same( scope ), same( shard0 ), same( directedEdgeMeta ) ) )
        .thenReturn( batch );

    final TimeService timeService = mock( TimeService.class );
    when( timeService.getCurrentTime() ).thenReturn( currentTime );

    initExecutor( 1, 1 );

    final ShardGroupDeletionImpl shardGroupDeletion =
        new ShardGroupDeletionImpl( asyncTaskExecutor, edgeShardSerialization, timeService );

    final ListenableFuture<ShardGroupDeletion.DeleteResult> future =
        shardGroupDeletion.maybeDeleteShard( this.scope, directedEdgeMeta, group, Collections.emptyIterator() );

    final ShardGroupDeletion.DeleteResult result = future.get();

    assertEquals( "should delete", ShardGroupDeletion.DeleteResult.DELETED, result );

    verify( batch ).execute();
}
@Test
public void equalCountFutureShard() {

    when( edgeShardSerialization.writeShardMeta( same( scope ), shardValue.capture(), same( targetEdgeMeta ) ) )
        .thenReturn( mock( MutationBatch.class ) );

    // The when( … ) wrapper and receiver of this stubbing were truncated in the
    // excerpt; shardedEdgeSerialization is an assumed field name.
    when( shardedEdgeSerialization
        .getEdgesFromSourceByTargetType( same( edgeColumnFamilies ), same( scope ), any( SearchByIdType.class ),
            any( Collection.class ) ) ).thenReturn( edgeIterator );

    // … remainder of the test truncated in the excerpt
@Test
public void futureCountShardCleanup() {

    // The when( … ) wrappers and receivers were truncated in the excerpt;
    // edgeShardSerialization is an assumed field name.
    when( edgeShardSerialization
        .getShardMetaData( same( scope ), any( Optional.class ), same( directedEdgeMeta ) ) )
        .thenReturn( Arrays.asList( futureShard3, futureShard2, futureShard1, compactedShard, minShard ).iterator() );

    when( edgeShardSerialization
        .removeShardMeta( same( scope ), newLongValue.capture(), same( directedEdgeMeta ) ) )
        .thenReturn( mock( MutationBatch.class ) );

    // … remainder of the test truncated in the excerpt
@Test
public void overAllocatedShard() {

    final ShardGroupCompaction shardGroupCompaction = mock( ShardGroupCompaction.class );

    // The when( … ) wrapper and receiver of this stubbing were truncated in the
    // excerpt; shardedEdgeSerialization is an assumed field name.
    when( shardedEdgeSerialization
        .getEdgesFromSourceByTargetType( same( edgeColumnFamilies ), same( scope ), any( SearchByIdType.class ),
            any( Collection.class ) ) ).thenReturn( edgeIterator );

    when( edgeShardSerialization.writeShardMeta( same( scope ), shardValue.capture(), same( targetEdgeMeta ) ) )
        .thenReturn( mock( MutationBatch.class ) );

    // … remainder of the test truncated in the excerpt
@Test
public void boundedShardSets() {

    // Only the repeated verifications survived in the excerpt; the setup and the
    // code between them were truncated.
    verify( shardGroupCompaction ).evaluateShardGroup( same( scope ), same( directedEdgeMeta ), eq( group ) );

    verify( shardGroupCompaction ).evaluateShardGroup( same( scope ), same( directedEdgeMeta ), eq( group ) );

    verify( shardGroupCompaction ).evaluateShardGroup( same( scope ), same( directedEdgeMeta ), eq( group ) );

    // … remainder of the test truncated in the excerpt