Refine search
/**
 * Per-test fixture: after the superclass setup, stubs the account API so
 * that any account lookup resolves to a mock {@code Account} pinned to UTC.
 */
@Override
@BeforeMethod(groups = "fast")
public void beforeMethod() throws Exception {
    super.beforeMethod();

    // Minimal account stub — only the time zone is consulted by these tests.
    final Account utcAccount = Mockito.mock(Account.class);
    Mockito.when(utcAccount.getTimeZone()).thenReturn(DateTimeZone.UTC);

    // Any (id, tenant context) pair resolves to the stubbed UTC account.
    Mockito.when(accountApi.getAccountById(Mockito.<UUID>any(), Mockito.<InternalTenantContext>any()))
            .thenReturn(utcAccount);
}
/**
 * Creates fresh daemon-thread pools before each test so no threads (or
 * queued work) leak from one test method into the next.
 */
@BeforeMethod
public void setUp() {
    // The two pools are independent; creation order is irrelevant.
    scheduledExecutor = newScheduledThreadPool(2, daemonThreadsNamed("test-scheduledExecutor-%s"));
    executor = newCachedThreadPool(daemonThreadsNamed("test-executor-%s"));
}
/**
 * Builds the JdbcPublisher under test with mocked JDBC collaborators:
 * a mocked commands factory/connection pair and a single work unit whose
 * staging table is pre-configured.
 *
 * <p>Made {@code public}: TestNG configuration methods are conventionally
 * public (as every other {@code @BeforeMethod} in this codebase is), and
 * non-public configuration methods are fragile across TestNG versions.
 */
@BeforeMethod
public void setup() {
    state = new State();
    state.setProp(JdbcPublisher.JDBC_PUBLISHER_FINAL_TABLE_NAME, destinationTable);
    state.setProp(JdbcPublisher.JDBC_PUBLISHER_DATABASE_NAME, database);

    commands = mock(JdbcWriterCommands.class);
    factory = mock(JdbcWriterCommandsFactory.class);
    conn = mock(Connection.class);
    when(factory.newInstance(state, conn)).thenReturn(commands);

    workUnitStates = new ArrayList<>();
    workUnitState = mock(WorkUnitState.class);
    when(workUnitState.getProp(ConfigurationKeys.WRITER_STAGING_TABLE)).thenReturn(stagingTable);
    workUnitStates.add(workUnitState);

    // Spy so createConnection() can be intercepted to hand back the mock
    // connection instead of opening a real one. (Was: two-step assignment
    // publisher = new ...; publisher = spy(publisher);)
    publisher = spy(new JdbcPublisher(state, factory));
    doReturn(conn).when(publisher).createConnection();
}
/**
 * Mocks the Helix manager and pre-computes a fixed pool of fake
 * consuming-server names ("ConsumingServer_0" .. "ConsumingServer_19").
 */
@BeforeMethod
public void setUp() throws Exception {
    _mockHelixManager = mock(HelixManager.class);

    final int maxInstances = 20;
    consumingServerNames = new String[maxInstances];
    int index = 0;
    while (index < maxInstances) {
        consumingServerNames[index] = "ConsumingServer_" + index;
        index++;
    }
}
/**
 * Mocks the file system so that {@code makeQualified()} acts as the
 * identity function: whatever path is passed in comes straight back out.
 */
@BeforeMethod
public void setUp() throws Exception {
    this.fs = Mockito.mock(FileSystem.class);
    // Answer is a single-method interface, so a lambda replaces the
    // anonymous class: echo the first argument back as the result.
    Mockito.when(fs.makeQualified(Mockito.any(Path.class)))
            .thenAnswer(invocation -> (Path) invocation.getArguments()[0]);
}
/**
 * Returns the expiry mock to a clean state before each test, then stubs
 * all three expiry callbacks to report a fixed one-minute duration.
 */
@BeforeMethod
public void setup() {
    // reset() must come first so stale stubbing/invocations from the
    // previous test cannot bleed through; the three stubs themselves
    // are independent of one another.
    Mockito.reset(expiry);
    when(expiry.expireAfterRead(anyInt(), anyInt(), anyLong(), anyLong())).thenReturn(ONE_MINUTE);
    when(expiry.expireAfterUpdate(anyInt(), anyInt(), anyLong(), anyLong())).thenReturn(ONE_MINUTE);
    when(expiry.expireAfterCreate(anyInt(), anyInt(), anyLong())).thenReturn(ONE_MINUTE);
}
/**
 * Builds the JdbcWriterInitializer under test against a MySQL-flavored
 * state and mocked JDBC collaborators; the initializer is spied so that
 * {@code createConnection()} hands back the mock connection instead of
 * opening a real one.
 *
 * <p>Made {@code public}: TestNG configuration methods are conventionally
 * public (as every other {@code @BeforeMethod} in this codebase is), and
 * non-public configuration methods are fragile across TestNG versions.
 *
 * @throws SQLException declared for the mocked JDBC stubbing calls
 */
@BeforeMethod
public void setup() throws SQLException {
    this.state = new State();
    this.state.setProp(ConfigurationKeys.WRITER_DESTINATION_TYPE_KEY, DestinationType.MYSQL.name());
    this.state.setProp(JdbcPublisher.JDBC_PUBLISHER_DATABASE_NAME, DB);
    this.state.setProp(JdbcPublisher.JDBC_PUBLISHER_FINAL_TABLE_NAME, DEST_TABLE);

    this.workUnit = WorkUnit.createEmpty();
    this.workUnits = Lists.newArrayList();
    this.workUnits.add(this.workUnit);

    this.factory = mock(JdbcWriterCommandsFactory.class);
    this.commands = mock(JdbcWriterCommands.class);
    this.conn = mock(Connection.class);
    doReturn(this.commands).when(this.factory).newInstance(any(Destination.class), eq(this.conn));

    // branches=1, branchId=0: single-branch initializer.
    this.initializer = new JdbcWriterInitializer(this.state, this.workUnits, this.factory, 1, 0);
    this.initializer = spy(this.initializer);
    doReturn(this.conn).when(this.initializer).createConnection();
}
/**
 * Allocates a per-test executor and two temporary spill directories, each
 * paired with a cleanup action in {@code closer}.
 *
 * <p>NOTE(review): Guava's Closer closes registered resources in reverse
 * registration order, so the statement order here determines teardown
 * order (spillPath2, spillPath1, then executor shutdown) — do not reorder.
 */
@BeforeMethod public void setUp() { executor = MoreExecutors.listeningDecorator(Executors.newCachedThreadPool()); closer.register(() -> executor.shutdownNow()); spillPath1 = Files.createTempDir(); closer.register(() -> deleteRecursively(spillPath1.toPath(), ALLOW_INSECURE)); spillPath2 = Files.createTempDir(); closer.register(() -> deleteRecursively(spillPath2.toPath(), ALLOW_INSECURE)); }
/**
 * Allocates a fresh temporary spill directory for each test.
 *
 * <p>NOTE(review): no matching cleanup is visible in this chunk —
 * presumably an {@code @AfterMethod} elsewhere deletes the directory;
 * confirm.
 */
@BeforeMethod
public void setUp() {
    spillPath = Files.createTempDir();
}
/**
 * Builds a fresh engine per test, backed by a scheduled pool sized to the
 * machine's available-processor count plus one; the same pool serves as
 * both the task executor and the timer scheduler.
 */
@BeforeMethod
public void setUp() throws Exception {
    _scheduler = Executors.newScheduledThreadPool(Runtime.getRuntime().availableProcessors() + 1);
    _engine = new EngineBuilder()
            .setTaskExecutor(_scheduler)
            .setTimerScheduler(_scheduler)
            .build();
}
/**
 * Boots the embedded service stack for each test: resets the in-memory
 * metastore, points both configs at it, then starts ZooKeeper and
 * BookKeeper and creates the root znode for managed-ledger metadata.
 *
 * <p>NOTE(review): the start order (ZK, then BK, then the znode create via
 * {@code zkc}) appears deliberate — presumably BK and the znode creation
 * depend on ZK being up; do not reorder without confirming.
 *
 * @throws Exception if any service fails to start; the failure is logged
 *         and rethrown so the test aborts early
 */
@BeforeMethod public void setUp() throws Exception {
    executor = Executors.newCachedThreadPool();
    // Clear state left behind by any previous test method.
    InMemoryMetaStore.reset();
    // Both server-side and client-side configs use the in-memory metastore.
    setMetastoreImplClass(baseConf);
    setMetastoreImplClass(baseClientConf);
    try {
        // start zookeeper service
        startZKCluster();
        // start bookkeeper service
        startBKCluster();
        // Root znode under which managed ledgers keep their metadata.
        zkc.create("/managed-ledgers", new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
    } catch (Exception e) {
        LOG.error("Error setting up", e);
        throw e;
    }
}
/**
 * Wires a CachingHiveMetastore over a mock Hive cluster: 5-minute cache
 * TTL, 1-minute refresh interval, 1000-entry capacity. Thrift-level call
 * statistics are captured for assertion by the tests.
 */
@BeforeMethod
public void setUp() {
    mockClient = new MockHiveMetastoreClient();
    MockHiveCluster cluster = new MockHiveCluster(mockClient);

    ListeningExecutorService refreshExecutor =
            listeningDecorator(newCachedThreadPool(daemonThreadsNamed("test-%s")));

    ThriftHiveMetastore thriftHiveMetastore = new ThriftHiveMetastore(cluster);
    metastore = new CachingHiveMetastore(
            new BridgingHiveMetastore(thriftHiveMetastore),
            refreshExecutor,
            new Duration(5, TimeUnit.MINUTES),
            new Duration(1, TimeUnit.MINUTES),
            1000);
    stats = thriftHiveMetastore.getStats();
}
@BeforeMethod public void setUp() { // Before/AfterMethod is chosen here because the executor needs to be shutdown // after every single test case to terminate outstanding threads, if any. // The line below is the same as newCachedThreadPool(daemonThreadsNamed(...)) except RejectionExecutionHandler. // RejectionExecutionHandler is set to DiscardPolicy (instead of the default AbortPolicy) here. // Otherwise, a large number of RejectedExecutionException will flood logging, resulting in Travis failure. executor = new ThreadPoolExecutor( 0, Integer.MAX_VALUE, 60L, SECONDS, new SynchronousQueue<Runnable>(), daemonThreadsNamed("test-executor-%s"), new ThreadPoolExecutor.DiscardPolicy()); scheduledExecutor = newScheduledThreadPool(2, daemonThreadsNamed("test-scheduledExecutor-%s")); }
/** Gives each test its own single-threaded scheduler instance. */
@BeforeMethod
public void setUp() throws Exception {
    _scheduler = Executors.newSingleThreadScheduledExecutor();
}
/**
 * Assembles the fixture for merge-operator tests: a testing HTTP exchange
 * backed by loadable task buffers, an exchange client factory over it,
 * plus serde and ordering-compiler helpers.
 */
@BeforeMethod
public void setUp() {
    // Standalone helpers first (no dependencies between them).
    serdeFactory = new TestingPagesSerdeFactory();
    orderingCompiler = new OrderingCompiler();
    executor = newSingleThreadScheduledExecutor(daemonThreadsNamed("test-merge-operator-%s"));

    // Buffers feed the fake HTTP handler; the client factory sits on top.
    taskBuffers = CacheBuilder.newBuilder().build(CacheLoader.from(TestingTaskBuffer::new));
    httpClient = new TestingHttpClient(new TestingExchangeHttpClientHandler(taskBuffers), executor);
    exchangeClientFactory = new ExchangeClientFactory(new ExchangeClientConfig(), httpClient, executor);
}
@BeforeMethod public void setUp() { memoryPool = new MemoryPool(GENERAL_POOL, new DataSize(10, BYTE)); TaskExecutor taskExecutor = new TaskExecutor(8, 16, 3, 4, Ticker.systemTicker()); taskExecutor.start(); // Must be single threaded executor = newScheduledThreadPool(1, threadsNamed("task-notification-%s")); scheduledExecutor = newScheduledThreadPool(2, threadsNamed("task-notification-%s")); LocalExecutionPlanner planner = createTestingPlanner(); sqlTaskExecutionFactory = new SqlTaskExecutionFactory( executor, taskExecutor, planner, createTestSplitMonitor(), new TaskManagerConfig()); allOperatorContexts = null; }
/**
 * Delegates to the configured {@link TestContextManager} to
 * {@linkplain TestContextManager#beforeTestMethod(Object, Method) pre-process}
 * the test method before the actual test is executed.
 * @param testMethod the test method which is about to be executed
 * @throws Exception allows all exceptions to propagate
 */
@BeforeMethod(alwaysRun = true)
protected void springTestContextBeforeTestMethod(Method testMethod) throws Exception {
    this.testContextManager.beforeTestMethod(this, testMethod);
}
/**
 * Stubs the partition mock so its complete name is the fixed test constant.
 */
@BeforeMethod public void initialize() {
    // The doReturn(...).when(...) form stubs without invoking the real
    // method — NOTE(review): this suggests {@code partition} may be a spy
    // rather than a plain mock; confirm before converting to when/thenReturn.
    Mockito.doReturn(PARTITION_NAME).when(this.partition).getCompleteName();
}
/**
 * Creates a temp file seeded with the content "initial", wraps it in a
 * {@code FileBasedStoreObjectAccessor}, and starts the app before each test.
 *
 * @throws Exception if temp-file creation, the seed write, or app startup fails
 */
@BeforeMethod(alwaysRun=true)
@Override
public void setUp() throws Exception {
    super.setUp();
    file = File.createTempFile("fileBasedStoreObject", ".txt");
    // Fix: no cleanup for the temp file is visible in this method, so
    // register JVM-exit deletion to keep files from accumulating per run.
    file.deleteOnExit();
    Files.write("initial", file, Charsets.UTF_8);
    fileAccessor = new FileBasedStoreObjectAccessor(file, "mytmpextension");
    app.start(ImmutableList.of(loc));
}
/**
 * Returns every schema-change listener mock — and the control-connection
 * mock for the schema-disabled cluster — to a pristine state before each
 * test, discarding recorded interactions and stubbing.
 */
@BeforeMethod(groups = "short")
public void resetListeners() {
    for (SchemaChangeListener registered : listeners) {
        reset(registered);
    }
    reset(schemaDisabledControlConnection);
}