private void setupConf() {
  for (String f : LlapDaemonConfiguration.DAEMON_CONFIGS) {
    conf.addResource(f);
  }
  conf.reloadConfiguration();
  // Setup timeouts for various services.
  // Once we move to a Hadoop-2.8 dependency, the following parameter can be used.
  // conf.set(YarnConfiguration.TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETRY_POLICY_SPEC);
  conf.set("yarn.timeline-service.entity-group-fs-store.retry-policy-spec",
      conf.get(CONFIG_TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETRY_POLICY_SPEC,
          CONFIG_TIMELINE_SERVICE_ENTITYGROUP_FS_STORE_RETRY_POLICY_SPEC_DEFAULT));
  conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS,
      conf.getLong(CONFIG_YARN_RM_TIMEOUT_MAX_WAIT_MS, CONFIG_YARN_RM_TIMEOUT_MAX_WAIT_MS_DEFAULT));
  conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS,
      conf.getLong(CONFIG_YARN_RM_RETRY_INTERVAL_MS, CONFIG_YARN_RM_RETRY_INTERVAL_MS_DEFAULT));
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
      conf.getInt(CONFIG_IPC_CLIENT_CONNECT_MAX_RETRIES, CONFIG_IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT));
  conf.setLong(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY,
      conf.getLong(CONFIG_IPC_CLIENT_CONNECT_RETRY_INTERVAL_MS,
          CONFIG_IPC_CLIENT_CONNECT_RETRY_INTERVAL_MS_DEFAULT));
  HiveConf.setVar(conf, HiveConf.ConfVars.HIVE_ZOOKEEPER_SESSION_TIMEOUT,
      (conf.getLong(CONFIG_LLAP_ZK_REGISTRY_TIMEOUT_MS, CONFIG_LLAP_ZK_REGISTRY_TIMEOUT_MS_DEFAULT) + "ms"));
  llapRegistryConf = new Configuration(conf);
}
protected Configuration initSplit() {
  // Always compact if there is more than one store file.
  CONF.setInt("hbase.hstore.compactionThreshold", 2);
  CONF.setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 10 * 1000);
  // Increase the amount of time between client retries
  CONF.setLong("hbase.client.pause", 15 * 1000);
  // This size should make it so we always split using the addContent
  // below. After adding all data, the first region is 1.3M
  CONF.setLong(HConstants.HREGION_MAX_FILESIZE, 1024 * 128);
  return CONF;
}
@Test
public void testRegionReplicasOnMidClusterHighReplication() {
  conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 4000000L);
  conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 1000); // 120 sec
  loadBalancer.setConf(conf);
  int numNodes = 80;
  int numRegions = 6 * numNodes;
  int replication = 80; // 80 replicas per region, one for each server
  int numRegionsPerServer = 5;
  int numTables = 10;
  testWithCluster(numNodes, numRegions, numRegionsPerServer, replication, numTables, false, true);
}
}
@Test
public void testHFileCleaning() throws Exception {
  final EnvironmentEdge originalEdge = EnvironmentEdgeManager.getDelegate();
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
      "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner");
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  Path archivedHfileDir =
      new Path(UTIL.getDataTestDirOnTestFS(), HConstants.HFILE_ARCHIVE_DIRECTORY);
@Test
public void testWhenSizeGivenByHeapTunerGoesOutsideRange() throws Exception {
  BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4));
  MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4));
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.7f);
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.1f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.1f);
  conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000);
  conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
  conf.setClass(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_CLASS,
      CustomHeapMemoryTuner.class, HeapMemoryTuner.class);
  HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher,
      new RegionServerStub(conf), new RegionServerAccountingStub(conf));
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
  heapMemoryManager.start(choreService);
  CustomHeapMemoryTuner.memstoreSize = 0.78f;
  CustomHeapMemoryTuner.blockCacheSize = 0.02f;
  Thread.sleep(1500); // Allow the tuner to run once and do the necessary memory tuning
  // Even if the tuner says to set the memstore to 78%, HBase caps it at 70%, the configured
  // upper bound. Likewise the block cache is raised to 10%, the configured lower bound.
  assertHeapSpace(0.7f, memStoreFlusher.memstoreSize);
  assertHeapSpace(0.1f, blockCache.maxSize);
}
@Test
public void testRegionReplicationOnMidClusterWithRacks() {
  conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 10000000L);
  conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f);
  conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 1000); // 120 sec
  loadBalancer.setConf(conf);
  int numNodes = 30;
  int numRegions = numNodes * 30;
  int replication = 3; // 3 replicas per region
  int numRegionsPerServer = 28;
  int numTables = 10;
  int numRacks = 4; // all replicas should be on a different rack
  Map<ServerName, List<RegionInfo>> serverMap =
      createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables);
  RackManager rm = new ForTestRackManager(numRacks);
  testWithCluster(serverMap, rm, false, true);
}
}
@Test
public void testNonDefaultConfigurationProperties() {
  final Configuration conf = getDefaultHBaseConfiguration();
  final HRegionServer rs = mockRegionServer(conf);
  final int period = RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_PERIOD_DEFAULT + 1;
  final long delay = RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_DELAY_DEFAULT + 1L;
  final String timeUnit = TimeUnit.SECONDS.name();
  conf.setInt(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_PERIOD_KEY, period);
  conf.setLong(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_DELAY_KEY, delay);
  conf.set(RegionSizeReportingChore.REGION_SIZE_REPORTING_CHORE_TIMEUNIT_KEY, timeUnit);
  RegionSizeReportingChore chore = new RegionSizeReportingChore(rs);
  assertEquals(delay, chore.getInitialDelay());
  assertEquals(period, chore.getPeriod());
  assertEquals(TimeUnit.valueOf(timeUnit), chore.getTimeUnit());
}
@Test
public void testCallQueueTooBigExceptionDoesntTriggerPffe() throws Exception {
  Admin admin = TEST_UTIL.getAdmin();
  conf.setLong(HConstants.HBASE_CLIENT_OPERATION_TIMEOUT, 100);
  conf.setInt(HConstants.HBASE_CLIENT_PAUSE, 500);
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 1);
  conf.setLong(HConstants.HBASE_CLIENT_FAST_FAIL_THREASHOLD_MS, 0);
  conf.setClass(HConstants.HBASE_CLIENT_FAST_FAIL_INTERCEPTOR_IMPL,
      CallQueueTooBigPffeInterceptor.class, PreemptiveFastFailInterceptor.class);
  TEST_UTIL.getHBaseCluster().getRegionServer(0).getRpcServer().getScheduler();
  Configuration newConf = HBaseConfiguration.create(TEST_UTIL.getConfiguration());
  newConf.setInt("hbase.ipc.server.max.callqueue.length", 0);
  srs.onConfigurationChange(newConf);
@Test
public void testBusyRegionSplitPolicy() throws Exception {
  conf.set(HConstants.HBASE_REGION_SPLIT_POLICY_KEY, BusyRegionSplitPolicy.class.getName());
  conf.setLong("hbase.busy.policy.minAge", 1000000L);
  conf.setFloat("hbase.busy.policy.blockedRequests", 0.1f);
  conf.setLong("hbase.busy.policy.minAge", 0L);
  conf.setLong("hbase.busy.policy.aggWindow", 500L);
  policy = (BusyRegionSplitPolicy) RegionSplitPolicy.create(mockRegion, conf);
@Test
public void testConfigurationPreserved() throws Throwable {
  Configuration conf = new Configuration();
  conf.setBoolean("_ENABLED", false);
  conf.setLong("_PRIORITY", 10);
  HTableDescriptor desc = new HTableDescriptor(TableName.valueOf(name.getMethodName()));
  Constraints.add(desc, AlsoWorks.class, conf);
  Constraints.add(desc, WorksConstraint.class);
  assertFalse(Constraints.enabled(desc, AlsoWorks.class));
  List<? extends Constraint> constraints =
      Constraints.getConstraints(desc, this.getClass().getClassLoader());
  for (Constraint c : constraints) {
    Configuration storedConf = c.getConf();
    if (c instanceof AlsoWorks) {
      assertEquals(10, storedConf.getLong("_PRIORITY", -1));
    } else {
      // it's just a WorksConstraint
      assertEquals(2, storedConf.getLong("_PRIORITY", -1));
    }
  }
}
config.set("fs.s3.impl", PrestoS3FileSystem.class.getName()); config.set("fs.s3a.impl", PrestoS3FileSystem.class.getName()); config.set("fs.s3n.impl", PrestoS3FileSystem.class.getName()); config.set(S3_SSE_KMS_KEY_ID, sseKmsKeyId); config.setInt(S3_MAX_CLIENT_RETRIES, maxClientRetries); config.setInt(S3_MAX_ERROR_RETRIES, maxErrorRetries); config.set(S3_MAX_BACKOFF_TIME, maxBackoffTime.toString()); config.set(S3_MAX_RETRY_TIME, maxRetryTime.toString()); config.set(S3_SOCKET_TIMEOUT, socketTimeout.toString()); config.set(S3_STAGING_DIRECTORY, stagingDirectory.toString()); config.setInt(S3_MAX_CONNECTIONS, maxConnections); config.setLong(S3_MULTIPART_MIN_FILE_SIZE, multipartMinFileSize.toBytes()); config.setLong(S3_MULTIPART_MIN_PART_SIZE, multipartMinPartSize.toBytes()); config.setBoolean(S3_PIN_CLIENT_TO_CURRENT_REGION, pinClientToCurrentRegion); config.set(S3_USER_AGENT_PREFIX, userAgentPrefix);
BackupManager.decorateMasterConfiguration(conf1);
BackupManager.decorateRegionServerConfiguration(conf1);
conf1.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/1");
conf1.setLong(TimeToLiveLogCleaner.TTL_CONF_KEY, 1000);
conf1.setLong(LogCleaner.OLD_WALS_CLEANER_THREAD_TIMEOUT_MSEC, 1000);
conf1.set(WALFactory.WAL_PROVIDER, provider);
TEST_UTIL.startMiniCluster();
conf2.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "/2");
TEST_UTIL2 = new HBaseTestingUtility(conf2);
TEST_UTIL2.setZkCluster(TEST_UTIL.getZkCluster());
private void setupAutoReducerParallelism(TezEdgeProperty edgeProp, Vertex v) throws IOException {
  if (edgeProp.isAutoReduce()) {
    Configuration pluginConf = new Configuration(false);
    VertexManagerPluginDescriptor desc =
        VertexManagerPluginDescriptor.create(ShuffleVertexManager.class.getName());
    pluginConf.setBoolean(
        ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_ENABLE_AUTO_PARALLEL, true);
    pluginConf.setInt(ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_MIN_TASK_PARALLELISM,
        edgeProp.getMinReducer());
    pluginConf.setLong(
        ShuffleVertexManager.TEZ_SHUFFLE_VERTEX_MANAGER_DESIRED_TASK_INPUT_SIZE,
        edgeProp.getInputSizePerReducer());
    UserPayload payload = TezUtils.createUserPayloadFromConf(pluginConf);
    desc.setUserPayload(payload);
    v.setVertexManagerPlugin(desc);
  }
}
@Override
public void setConf(Configuration conf) {
  conf.setIfUnset(
      String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_REGION_REPLICATION),
      String.valueOf(DEFAULT_REGION_REPLICATION));
  conf.setIfUnset(
      String.format("%s.%s", TEST_NAME, LoadTestTool.OPT_COLUMN_FAMILIES),
      StringUtils.join(",", DEFAULT_COLUMN_FAMILIES));
  conf.setBoolean("hbase.table.sanity.checks", true);
  // enable async wal replication to region replicas for unit tests
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setLong(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024L * 1024 * 4); // flush every 4 MB
  conf.setInt("hbase.hstore.blockingStoreFiles", 100);
  super.setConf(conf);
}
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setStrings(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
      RowProcessorEndpoint.class.getName());
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 2);
  conf.setLong("hbase.hregion.row.processor.timeout", 1000L);
  conf.setLong(RpcScheduler.IPC_SERVER_MAX_CALLQUEUE_LENGTH, 2048);
  util.startMiniCluster();
}
/**
 * Setup config values necessary for store.
 */
protected void config() {
  this.conf = TEST_UTIL.getConfiguration();
  this.conf.setLong(HConstants.MAJOR_COMPACTION_PERIOD, 0);
  this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_KEY, minFiles);
  this.conf.setInt(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_KEY, maxFiles);
  this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MIN_SIZE_KEY, minSize);
  this.conf.setLong(CompactionConfiguration.HBASE_HSTORE_COMPACTION_MAX_SIZE_KEY, maxSize);
  this.conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 1.0F);
}
@Test
public void testSetPeakHourOutsideCurrentSelection() {
  conf.setLong(CompactionConfiguration.HBASE_HSTORE_OFFPEAK_START_HOUR, hourMinusTwo);
  conf.setLong(CompactionConfiguration.HBASE_HSTORE_OFFPEAK_END_HOUR, hourMinusOne);
  OffPeakHours target = OffPeakHours.getInstance(conf);
  assertFalse(target.isOffPeakHour(hourOfDay));
}
}
@Test
public void testWALRecordReader() throws Exception {
  final WALFactory walfactory = new WALFactory(conf, getName());
  jobConf.set("mapreduce.input.fileinputformat.inputdir", logDir.toString());
  jobConf.setLong(WALInputFormat.END_TIME_KEY, secondTs - 1);
  splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));
  assertEquals(1, splits.size());
  jobConf.setLong(WALInputFormat.END_TIME_KEY, Long.MAX_VALUE);
  jobConf.setLong(WALInputFormat.START_TIME_KEY, thirdTs);
  splits = input.getSplits(MapreduceTestingShim.createJobContext(jobConf));