/**
 * Builds a {@link TemporaryFolder} instance using the values in this builder.
 *
 * @return a new {@link TemporaryFolder} configured from this builder's state
 */
public TemporaryFolder build() {
    return new TemporaryFolder(this);
}
// NOTE(review): closing brace of the enclosing builder class; the class header
// is outside this view.
}
// JUnit 4 rule: creates a fresh temporary directory before each test and
// recursively deletes it afterwards.
@Rule public TemporaryFolder folder = new TemporaryFolder();
public class FolderRuleOrderingTest {
    // Kept private (not itself a @Rule) so the same instance can be shared:
    // it is handed to the server below and also exposed as the 'folder' rule.
    private TemporaryFolder privateFolder = new TemporaryFolder();

    @Rule public MyNumberServer server = new MyNumberServer(privateFolder);

    // Exposes the shared folder as a rule so JUnit manages create/delete.
    // NOTE(review): JUnit 4 does not guarantee ordering between same-level
    // @Rule fields unless RuleChain is used -- TODO confirm intended ordering.
    @Rule public TemporaryFolder folder = privateFolder;

    @Test
    public void testMyNumberServer() throws IOException {
        server.storeNumber(10);
        assertEquals(10, server.getNumber());
    }
    ... // remainder elided in the original snippet
}
// Creates the service with a TemporaryFolder rooted in the default OS temp directory.
public TestDiskResourceService() {
    this.folder = new TemporaryFolder();
}
// Creates the service with a TemporaryFolder rooted under the given parent directory.
public TestDiskResourceService(File folder) {
    this.folder = new TemporaryFolder(folder);
}
// Class-level fixture: the TemporaryFolder is used outside the @Rule lifecycle,
// so create() must be invoked manually. Symmetric cleanup (delete() in an
// @AfterClass) is not visible in this snippet -- TODO confirm it exists.
@BeforeClass
public static void beforeClass() throws IOException {
    temporaryFolder = new TemporaryFolder();
    temporaryFolder.create();
}
/**
 * Creates and starts an embedded Kafka broker.
 *
 * <p>The broker's log directory lives in a manually managed
 * {@link TemporaryFolder} (not a JUnit {@code @Rule}), so {@code create()} is
 * called here; presumably a stop/close method elsewhere deletes it --
 * TODO confirm cleanup on shutdown.
 *
 * @param config Broker configuration settings. Used to modify, for example, the listeners
 *               the broker should use. Note that you cannot change some settings such as
 *               `log.dirs`.
 * @throws IOException if the temporary log directory cannot be created
 */
KafkaEmbedded(final Properties config) throws IOException {
    this.tmpFolder = new TemporaryFolder();
    this.tmpFolder.create();
    // log.dirs is always forced to this fresh folder, which is why callers
    // cannot override it through 'config'.
    this.logDir = tmpFolder.newFolder();
    this.effectiveConfig = effectiveConfigFrom(config, logDir);
    final KafkaConfig kafkaConfig = new KafkaConfig(effectiveConfig, true);
    log.debug("Starting embedded Kafka broker (with log.dirs={} and ZK ensemble at {}) ...",
        logDir, zookeeperConnect());
    kafka = TestUtils.createServer(kafkaConfig, new SystemTime());
    log.debug("Startup of embedded Kafka broker at {} completed (with ZK ensemble at {}) ...",
        brokerList(), zookeeperConnect());
}
protected Configuration createClusterConfig() throws IOException { TemporaryFolder temporaryFolder = new TemporaryFolder(); temporaryFolder.create(); final File haDir = temporaryFolder.newFolder(); Configuration config = new Configuration(); config.setString(TaskManagerOptions.MANAGED_MEMORY_SIZE, "48m"); // the default network buffers size (10% of heap max =~ 150MB) seems to much for this test case config.setString(TaskManagerOptions.NETWORK_BUFFERS_MEMORY_MAX, String.valueOf(80L << 20)); // 80 MB config.setString(AkkaOptions.FRAMESIZE, String.valueOf(MAX_MEM_STATE_SIZE) + "b"); if (zkServer != null) { config.setString(HighAvailabilityOptions.HA_MODE, "ZOOKEEPER"); config.setString(HighAvailabilityOptions.HA_ZOOKEEPER_QUORUM, zkServer.getConnectString()); config.setString(HighAvailabilityOptions.HA_STORAGE_PATH, haDir.toURI().toString()); } return config; }
/**
 * Per-test setup: points the master's root UFS mount at a fresh temporary
 * directory, tightens persistence timing so related behavior shows up quickly,
 * and starts the services under test.
 *
 * <p>NOTE(review): the TemporaryFolder is method-local and never delete()d --
 * TODO confirm cleanup, otherwise each test leaks a directory.
 */
@Before
public void before() throws Exception {
    AuthenticatedClientUser.set(LoginUser.get(ServerConfiguration.global()).getName());
    TemporaryFolder tmpFolder = new TemporaryFolder();
    tmpFolder.create();
    File ufsRoot = tmpFolder.newFolder();
    ServerConfiguration.set(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS, ufsRoot.getAbsolutePath());
    // Aggressive persistence intervals keep the test fast.
    ServerConfiguration.set(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS, 0);
    ServerConfiguration.set(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS, 1000);
    ServerConfiguration.set(PropertyKey.MASTER_PERSISTENCE_INITIAL_WAIT_TIME_MS, 0);
    ServerConfiguration.set(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS, 1000);
    mJournalFolder = tmpFolder.newFolder();
    mSafeModeManager = new DefaultSafeModeManager();
    mStartTimeMs = System.currentTimeMillis();
    mPort = ServerConfiguration.getInt(PropertyKey.MASTER_RPC_PORT);
    startServices();
}
// Rule-managed scratch directory, created before and deleted after each test.
@Rule public TemporaryFolder folder = new TemporaryFolder();

/**
 * Verifies that TestClass.xyz writes output identical to the checked-in
 * reference file "xyz.txt".
 *
 * <p>Fix: {@code TemporaryFolder.newFile(String)} and
 * {@code FileUtils.readLines(File)} both declare the checked
 * {@link IOException}, so the test method must declare it too -- the original
 * snippet did not compile.
 */
@Test
public void testXYZ() throws IOException {
    final File expected = new File("xyz.txt");
    final File output = folder.newFile("xyz.txt");
    TestClass.xyz(output);
    Assert.assertEquals(FileUtils.readLines(expected), FileUtils.readLines(output));
}
/**
 * Demonstrates the {@link TemporaryFolder} rule: a directory created through
 * the rule exists on disk for the duration of the test.
 */
public class JunitRuleTest {

    // Rule-managed scratch directory; JUnit creates it before and removes it
    // (recursively) after each test.
    @Rule
    public TemporaryFolder tempFolder = new TemporaryFolder();

    @Test
    public void testRule() throws IOException {
        // A folder name containing a space is legal and must be created as-is.
        final File created = tempFolder.newFolder("Temp Folder");
        assertTrue(created.exists());
    }
}
// Scratch directory for test data. NOTE(review): not annotated @Rule in this
// snippet, so create()/delete() are presumably driven by surrounding
// setup/teardown code -- TODO confirm.
public TemporaryFolder temporaryFolder = new TemporaryFolder();
// Walker over the segments loaded for a test; initialized later by setup code.
public SpecificSegmentsQuerySegmentWalker walker = null;
public QueryLogHook queryLogHook;
// Test-only factory that always returns a TestFirehose wrapping the given
// parser; the temporary directory argument is ignored.
private static class TestFirehoseFactory implements FirehoseFactory<InputRowParser> {
    public TestFirehoseFactory() {
    }

    @Override
    @SuppressWarnings("unchecked")
    public Firehose connect(InputRowParser parser, File temporaryDirectory) throws ParseException {
        // temporaryDirectory is unused: this firehose needs no spill space.
        return new TestFirehose(parser);
    }
}
public class DummyFileClassTest {

    // Per-test scratch directory, created and deleted automatically by JUnit.
    @Rule public TemporaryFolder folder = new TemporaryFolder();

    @Test
    public void someMethod() {
        // given
        // NOTE(review): newFile(String) declares IOException but this snippet
        // shows no 'throws' clause -- TODO confirm against the full source.
        final File file1 = folder.newFile("myfile1.txt");
        final File file2 = folder.newFile("myfile2.txt");
        ... etc... // remainder elided in the original snippet
    }
}
// Manually managed temporary folder (not a @Rule): create() must be paired
// with a later delete() to avoid leaking the directory -- TODO confirm cleanup
// happens elsewhere.
final TemporaryFolder temporaryFolder = new TemporaryFolder();
temporaryFolder.create();
/**
 * Builds the Camus job configuration against a fresh temporary working
 * directory, with map/reduce forced to run in-process.
 *
 * <p>NOTE(review): folder.create() has no visible matching delete() --
 * TODO confirm an @After cleans it up.
 *
 * @throws IOException if the temporary folder cannot be created
 */
@Before
public void before() throws IOException, NoSuchFieldException, IllegalAccessException {
    folder = new TemporaryFolder();
    folder.create();
    String path = folder.getRoot().getAbsolutePath();
    destinationPath = path + DESTINATION_PATH;

    props = new Properties();
    props.setProperty(EtlMultiOutputFormat.ETL_DESTINATION_PATH, destinationPath);
    props.setProperty(CamusJob.ETL_EXECUTION_BASE_PATH, path + EXECUTION_BASE_PATH);
    props.setProperty(CamusJob.ETL_EXECUTION_HISTORY_PATH, path + EXECUTION_HISTORY_PATH);
    props.setProperty(EtlInputFormat.CAMUS_MESSAGE_DECODER_CLASS, JsonStringMessageDecoder.class.getName());
    props.setProperty(EtlMultiOutputFormat.ETL_RECORD_WRITER_PROVIDER_CLASS,
        SequenceFileRecordWriterProvider.class.getName());
    props.setProperty(EtlMultiOutputFormat.ETL_RUN_TRACKING_POST, Boolean.toString(false));
    props.setProperty(CamusJob.KAFKA_CLIENT_NAME, KAFKA_CLIENT_ID);
    props.setProperty(CamusJob.KAFKA_TIMEOUT_VALUE, Integer.toString(KAFKA_TIMEOUT_VALUE));
    props.setProperty(CamusJob.KAFKA_FETCH_BUFFER_SIZE, Integer.toString(KAFKA_BUFFER_SIZE));
    props.setProperty(CamusJob.KAFKA_BROKERS, KAFKA_HOST + ":" + KAFKA_PORT);
    // Run Map/Reduce tests in process for hadoop2
    props.setProperty("mapreduce.framework.name", "local");
    // Run M/R for Hadoop1
    props.setProperty("mapreduce.jobtracker.address", "local");
    job = new CamusJob(props);
}
/**
 * Resets Camus state, then builds the job configuration against a fresh
 * temporary working directory, reusing the test cluster's properties and
 * its advertised broker list; map/reduce is forced to run in-process.
 *
 * <p>NOTE(review): folder.create() has no visible matching delete() --
 * TODO confirm an @After cleans it up.
 *
 * @throws IOException if the temporary folder cannot be created
 */
@Before
public void before() throws IOException, NoSuchFieldException, IllegalAccessException {
    resetCamus();
    folder = new TemporaryFolder();
    folder.create();
    String path = folder.getRoot().getAbsolutePath();
    destinationPath = path + DESTINATION_PATH;

    props = cluster.getProps();
    props.setProperty(EtlMultiOutputFormat.ETL_DESTINATION_PATH, destinationPath);
    props.setProperty(CamusJob.ETL_EXECUTION_BASE_PATH, path + EXECUTION_BASE_PATH);
    props.setProperty(CamusJob.ETL_EXECUTION_HISTORY_PATH, path + EXECUTION_HISTORY_PATH);
    props.setProperty(EtlInputFormat.CAMUS_MESSAGE_DECODER_CLASS, JsonStringMessageDecoder.class.getName());
    props.setProperty(EtlMultiOutputFormat.ETL_RECORD_WRITER_PROVIDER_CLASS,
        SequenceFileRecordWriterProvider.class.getName());
    props.setProperty(EtlMultiOutputFormat.ETL_RUN_TRACKING_POST, Boolean.toString(false));
    props.setProperty(CamusJob.KAFKA_CLIENT_NAME, "Camus");
    // Point the job at the brokers the embedded cluster actually started.
    props.setProperty(CamusJob.KAFKA_BROKERS, props.getProperty("metadata.broker.list"));
    // Run Map/Reduce tests in process for hadoop2
    props.setProperty("mapreduce.framework.name", "local");
    // Run M/R for Hadoop1
    props.setProperty("mapreduce.jobtracker.address", "local");
    job = new CamusJob(props);
}
/**
 * Verifies that rendering tables for an index-hotspotting schema fails with
 * {@link IllegalStateException}.
 *
 * <p>Fix: the original called {@code getRoot()} on a {@code TemporaryFolder}
 * that was never {@code create()}d; JUnit's {@code getRoot()} itself throws
 * {@code IllegalStateException} in that state, so the test passed without
 * ever invoking {@code renderTables()}. Creating the folder first ensures the
 * expected exception really comes from the code under test.
 *
 * @throws IOException if the temporary folder cannot be created
 */
@Test(expected = IllegalStateException.class)
public void testFailToGenerateIndexHotspottingSchema() throws IOException {
    TemporaryFolder folder = new TemporaryFolder();
    folder.create();
    try {
        getIndexHotspottingSchema().renderTables(folder.getRoot());
    } finally {
        folder.delete();
    }
}
/**
 * Verifies that rendering tables for a table-hotspotting schema fails with
 * {@link IllegalStateException}.
 *
 * <p>Fix: the original called {@code getRoot()} on a {@code TemporaryFolder}
 * that was never {@code create()}d; JUnit's {@code getRoot()} itself throws
 * {@code IllegalStateException} in that state, so the test passed without
 * ever invoking {@code renderTables()}. Creating the folder first ensures the
 * expected exception really comes from the code under test.
 *
 * @throws IOException if the temporary folder cannot be created
 */
@Test(expected = IllegalStateException.class)
public void testFailToGenerateTableHotspottingSchema() throws IOException {
    TemporaryFolder folder = new TemporaryFolder();
    folder.create();
    try {
        getHotspottingSchema().renderTables(folder.getRoot());
    } finally {
        folder.delete();
    }
}
/**
 * Creates and starts an embedded Kafka broker.
 *
 * <p>The broker's log directory lives in a manually managed
 * {@link TemporaryFolder} (not a JUnit {@code @Rule}), so {@code create()} is
 * called here; presumably a stop/close method elsewhere deletes it --
 * TODO confirm cleanup on shutdown.
 *
 * @param config Broker configuration settings. Used to modify, for example, on which port the
 *               broker should listen to. Note that you cannot change some settings such as
 *               `log.dirs`, `port`.
 * @throws IOException if the temporary log directory cannot be created
 */
public KafkaEmbedded(Properties config) throws IOException {
    tmpFolder = new TemporaryFolder();
    tmpFolder.create();
    // log.dirs is always forced to this fresh directory, hence not overridable
    // through 'config'.
    logDir = tmpFolder.newFolder();
    effectiveConfig = effectiveConfigFrom(config);
    boolean loggingEnabled = true;
    KafkaConfig kafkaConfig = new KafkaConfig(effectiveConfig, loggingEnabled);
    log.debug("Starting embedded Kafka broker (with log.dirs={} and ZK ensemble at {}) ...",
        logDir, zookeeperConnect());
    kafka = TestUtils.createServer(kafkaConfig, Time.SYSTEM);
    log.debug("Startup of embedded Kafka broker at {} completed (with ZK ensemble at {}) ...",
        brokerList(), zookeeperConnect());
}