/**
 * Returns the modification time of the region's {@code .regioninfo} file.
 * @param r region whose info-file timestamp is wanted
 * @return modification time of the single REGION_INFO_FILE entry
 * @throws IOException if the file system listing fails
 */
long getModTime(final HRegion r) throws IOException {
  final HRegionFileSystem regionFs = r.getRegionFileSystem();
  final Path infoFile = new Path(regionFs.getRegionDir(), HRegionFileSystem.REGION_INFO_FILE);
  FileStatus[] statuses = regionFs.getFileSystem().listStatus(infoFile);
  // Exactly one .regioninfo file is expected per region directory.
  assertTrue(statuses != null && statuses.length == 1);
  return statuses[0].getModificationTime();
}
/**
 * Fetches the last-modified timestamp of the {@code .regioninfo} file for the given region.
 * @param r the region to inspect
 * @return modification time of the region-info file
 * @throws IOException on file system errors
 */
long getModTime(final HRegion r) throws IOException {
  Path regionInfoPath =
      new Path(r.getRegionFileSystem().getRegionDir(), HRegionFileSystem.REGION_INFO_FILE);
  FileStatus[] listing = r.getRegionFileSystem().getFileSystem().listStatus(regionInfoPath);
  // There must be a single status entry for the .regioninfo file.
  assertTrue(listing != null && listing.length == 1);
  return listing[0].getModificationTime();
}
/**
 * Verifies that replayRecoveredEditsIfAny skips recovered-edits files that either contain
 * garbage (not valid WAL entries) or are named with a sequence id at/below the stores'
 * max sequence id, returning the stores' minimum sequence id unchanged.
 */
@Test
public void testSkipRecoveredEditsReplayAllIgnored() throws Exception {
  byte[] family = Bytes.toBytes("family");
  this.region = initHRegion(tableName, method, CONF, family);
  Path regiondir = region.getRegionFileSystem().getRegionDir();
  FileSystem fs = region.getRegionFileSystem().getFileSystem();
  Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regiondir);
  // Write bogus recovered.edits files whose contents are not valid WAL entries;
  // replay should ignore all of them.
  for (int i = 1000; i < 1050; i += 10) {
    Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", i));
    // try-with-resources: the original leaked the stream if writeInt threw.
    try (FSDataOutputStream dos = fs.create(recoveredEdits)) {
      dos.writeInt(i);
    }
  }
  long minSeqId = 2000;
  Path recoveredEdits = new Path(recoveredEditsDir, String.format("%019d", minSeqId - 1));
  // An empty edits file named below the stores' max seq id must also be skipped.
  try (FSDataOutputStream dos = fs.create(recoveredEdits)) {
    // intentionally left empty
  }
  Map<byte[], Long> maxSeqIdInStores = new TreeMap<>(Bytes.BYTES_COMPARATOR);
  for (HStore store : region.getStores()) {
    maxSeqIdInStores.put(Bytes.toBytes(store.getColumnFamilyName()), minSeqId);
  }
  long seqId = region.replayRecoveredEditsIfAny(maxSeqIdInStores, null, null);
  // Nothing was replayed, so the returned seq id equals the stores' minimum.
  assertEquals(minSeqId, seqId);
}
/**
 * Useful when reopening a closed region (normally for unit tests)
 * @param other original object
 * @param reporter An interface we can report progress against.
 * @return new HRegion
 */
public static HRegion openHRegion(final HRegion other, final CancelableProgressable reporter)
    throws IOException {
  final HRegionFileSystem fs = other.getRegionFileSystem();
  // Build a fresh HRegion over the same table dir, WAL, and descriptors as the original.
  final HRegion reopened = newHRegion(fs.getTableDir(), other.getWAL(), fs.getFileSystem(),
      other.baseConf, other.getRegionInfo(), other.getTableDescriptor(), null);
  return reopened.openHRegion(reporter);
}
// Log the freshly created empty "container" region and the overlapping regions it will absorb.
LOG.info("[" + thread + "] Created new empty container region: " + newRegion
    + " to contain regions: " + Joiner.on(",").join(overlap));
// Dump the container region's directory listing for debugging before merging begins.
debugLsr(region.getRegionFileSystem().getRegionDir());
Path target = region.getRegionFileSystem().getRegionDir();
// Merge each overlapping region into the container region's directory.
for (HbckInfo contained : overlap) {
  LOG.info("[" + thread + "] Merging " + contained + " into " + target );
/**
 * Setting up a Store
 * @throws IOException with error
 */
protected void initialize() throws IOException {
  Path basedir = new Path(DIR);
  String logName = "logs";
  Path logdir = new Path(DIR, logName);
  HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
  FileSystem fs = FileSystem.get(conf);
  // Start from a clean WAL directory so stale logs don't interfere.
  fs.delete(logdir, true);
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table")));
  htd.addFamily(hcd);
  HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
  hlog = new FSHLog(fs, basedir, logName, conf);
  hlog.init();
  // MemStoreLAB chunk pool must be initialized before any region/store is created.
  ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
  // Create the region on disk, then close it and reopen it via the constructor so the
  // store can be built directly against the on-disk layout.
  region = HRegion.createHRegion(info, basedir, conf, htd, hlog);
  region.close();
  Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
  region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
  store = new HStore(region, hcd, conf);
  // Pre-create a temp file inside the region's temp dir for the tests to use.
  TEST_FILE = region.getRegionFileSystem().createTempName();
  fs.createNewFile(TEST_FILE);
}
Path regionDir = region.getRegionFileSystem().getRegionDir();
FileSystem fs = region.getRegionFileSystem().getFileSystem();
// NOTE(review): closeRegionAndWAL is invoked repeatedly with the region dir re-checked in
// between — presumably an idempotency check that closing does not move/alter the region
// directory; confirm against the enclosing test's intent.
HBaseTestingUtility.closeRegionAndWAL(region);
assertEquals(regionDir, region.getRegionFileSystem().getRegionDir());
HBaseTestingUtility.closeRegionAndWAL(region);
assertEquals(regionDir, region.getRegionFileSystem().getRegionDir());
HBaseTestingUtility.closeRegionAndWAL(region);
/**
 * Helper method to get the store archive directory for the specified region
 * @param conf {@link Configuration} to check for the name of the archive directory
 * @param region region that is being archived
 * @param store store that is archiving files
 * @return {@link Path} to the store archive directory for the given region
 */
public static Path getStoreArchivePath(Configuration conf, HRegion region, Store store)
    throws IOException {
  Path tableDir = region.getRegionFileSystem().getTableDir();
  byte[] familyName = store.getColumnFamilyDescriptor().getName();
  return HFileArchiveUtil.getStoreArchivePath(conf, region.getRegionInfo(), tableDir, familyName);
}
assertFalse(region.hasReferences());
// Splitting a store file in family "f" should yield no reference file here,
// while the same split against family "i_f" should produce one.
Path referencePath = region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(), "f",
    storefiles.iterator().next(), Bytes.toBytes("row1"), false, region.getSplitPolicy());
assertNull(referencePath);
referencePath = region.getRegionFileSystem().splitStoreFile(region.getRegionInfo(), "i_f",
    storefiles.iterator().next(), Bytes.toBytes("row1"), false, region.getSplitPolicy());
assertNotNull(referencePath);
// Take the first serving region and grab its backing file system for later use.
HRegion region = servingRegions.get(0);
FileSystem fs = region.getRegionFileSystem().getFileSystem();
// Remove the store files for the first family, then verify none remain on disk.
primaryRegion.getRegionFileSystem().removeStoreFiles(Bytes.toString(families[0]), storeFiles);
Collection<StoreFileInfo> storeFileInfos = primaryRegion.getRegionFileSystem()
    .getStoreFiles(families[0]);
Assert.assertTrue(storeFileInfos == null || storeFileInfos.isEmpty());
// Build a WAL factory for this test method and capture the region's dir,
// file system, and encoded name for the WAL operations that follow.
final WALFactory wals = new WALFactory(CONF, method);
try {
  Path regiondir = region.getRegionFileSystem().getRegionDir();
  FileSystem fs = region.getRegionFileSystem().getFileSystem();
  byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
// Create a per-test WAL factory and gather the region directory, file system,
// and encoded region name needed by the subsequent WAL replay steps.
final WALFactory wals = new WALFactory(CONF, method);
try {
  Path regiondir = region.getRegionFileSystem().getRegionDir();
  FileSystem fs = region.getRegionFileSystem().getFileSystem();
  byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
// Flip the test-double's failure switch so replica1's file system operations start failing.
((FailingHRegionFileSystem)replica1.getRegionFileSystem()).fail = true;
// Wait until exactly one file appears in the family's store directory.
FileSystem fs = r.getRegionFileSystem().getFileSystem();
Path path = r.getRegionFileSystem().getStoreDir(famStr);
waitUntilFilesShowup(fs, path, 1);
// Derive the HBase root dir (parent of the table dir) and resolve the region dir from it.
Path rootDir = region.getRegionFileSystem().getTableDir().getParent();
Path regionDir = HRegion.getRegionDir(rootDir, region.getRegionInfo());
// Create an (error-named) store file in the family directory — presumably to simulate
// a corrupt/unexpected file for the test; confirm against the enclosing test.
HRegionFileSystem regionFS = region.getRegionFileSystem();
Path errFile = regionFS.getStoreFilePath(Bytes.toString(fam), ERROR_FILE);
FSDataOutputStream out = regionFS.getFileSystem().create(errFile);
@Test public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException { HBaseTestingUtility htu = new HBaseTestingUtility(); HRegionInfo hri = HRegionInfo.FIRST_META_REGIONINFO; Path basedir = htu.getDataTestDir(); // Create a region. That'll write the .regioninfo file. FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration()); HRegion r = HBaseTestingUtility.createRegionAndWAL(hri, basedir, htu.getConfiguration(), fsTableDescriptors.get(TableName.META_TABLE_NAME)); // Get modtime on the file. long modtime = getModTime(r); HBaseTestingUtility.closeRegionAndWAL(r); Thread.sleep(1001); r = HRegion.openHRegion(basedir, hri, fsTableDescriptors.get(TableName.META_TABLE_NAME), null, htu.getConfiguration()); // Ensure the file is not written for a second time. long modtime2 = getModTime(r); assertEquals(modtime, modtime2); // Now load the file. org.apache.hadoop.hbase.client.RegionInfo deserializedHri = HRegionFileSystem.loadRegionInfoFileContent( r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir()); assertTrue(org.apache.hadoop.hbase.client.RegionInfo.COMPARATOR.compare(hri, deserializedHri) == 0); HBaseTestingUtility.closeRegionAndWAL(r); }
// Set up a WAL factory and collect the region dir, file system, encoded region name,
// and the table's column family names for the WAL work inside the try block.
final WALFactory wals = new WALFactory(CONF, method);
try {
  Path regiondir = region.getRegionFileSystem().getRegionDir();
  FileSystem fs = region.getRegionFileSystem().getFileSystem();
  byte[] regionName = region.getRegionInfo().getEncodedNameAsBytes();
  byte[][] columns = region.getTableDescriptor().getColumnFamilyNames().toArray(new byte[0][]);
@Test public void testReadAndWriteRegionInfoFile() throws IOException, InterruptedException { HBaseTestingUtility htu = new HBaseTestingUtility(); RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO; Path basedir = htu.getDataTestDir(); // Create a region. That'll write the .regioninfo file. FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration()); HRegion r = HBaseTestingUtility.createRegionAndWAL(convert(ri), basedir, htu.getConfiguration(), fsTableDescriptors.get(TableName.META_TABLE_NAME)); // Get modtime on the file. long modtime = getModTime(r); HBaseTestingUtility.closeRegionAndWAL(r); Thread.sleep(1001); r = HRegion.openHRegion(basedir, convert(ri), fsTableDescriptors.get(TableName.META_TABLE_NAME), null, htu.getConfiguration()); // Ensure the file is not written for a second time. long modtime2 = getModTime(r); assertEquals(modtime, modtime2); // Now load the file. RegionInfo deserializedRi = HRegionFileSystem.loadRegionInfoFileContent( r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir()); HBaseTestingUtility.closeRegionAndWAL(r); }