/**
 * Returns the store files available for the family.
 * Delegates to the two-argument overload with the second argument set to {@code true}
 * (presumably enabling validation/filtering of store files — see the byte[] overload's doc).
 * @param familyName Column Family Name
 * @return a set of {@link StoreFileInfo} for the specified family.
 * @throws IOException if the family directory cannot be listed
 */
public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException { return getStoreFiles(familyName, true); }
/**
 * Returns the store files available for the given family.
 * This method performs the filtering based on the valid store files.
 * @param familyName Column Family Name as raw bytes
 * @return a set of {@link StoreFileInfo} for the specified family.
 * @throws IOException if the family directory cannot be listed
 */
public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
  // Convert to the canonical String form and reuse the String overload.
  String family = Bytes.toString(familyName);
  return getStoreFiles(family);
}
/**
 * Test override: simulates a filesystem failure when {@code fail} is set,
 * otherwise delegates to the real implementation.
 */
@Override
public Collection<StoreFileInfo> getStoreFiles(String familyName) throws IOException {
  if (!fail) {
    return super.getStoreFiles(familyName);
  }
  throw new IOException("simulating FS failure");
}
}
/**
 * Re-lists the store files on the filesystem for this store's column family and
 * hands the fresh listing to {@code refreshStoreFilesInternal} to reconcile the
 * in-memory view (open new files, drop readers for removed ones).
 * @throws IOException if listing the store files fails
 */
@Override public void refreshStoreFiles() throws IOException { Collection<StoreFileInfo> newFiles = fs.getStoreFiles(getColumnFamilyName()); refreshStoreFilesInternal(newFiles); }
/**
 * Creates an unsorted list of HStoreFile loaded in parallel from the store's directory.
 * @return the opened store files for this column family
 * @throws IOException if listing or opening the store files fails
 */
private List<HStoreFile> loadStoreFiles() throws IOException {
  // List the current files for this family and open them in one pass.
  return openStoreFiles(fs.getStoreFiles(getColumnFamilyName()));
}
/**
 * Builds a mocked {@link HRegionFileSystem} that reports the given store files and,
 * when requested, a reference file whose link status carries the supplied timestamp.
 */
private HRegionFileSystem mockFileSystem(RegionInfo info, boolean hasReferenceFiles,
    List<StoreFileInfo> storeFiles, long referenceFileTimestamp) throws IOException {
  FileSystem mockFs = mock(FileSystem.class);
  if (hasReferenceFiles) {
    // Stub the link status so reference-file timestamp checks see the requested value.
    FileStatus linkStatus = mock(FileStatus.class);
    doReturn(referenceFileTimestamp).when(linkStatus).getModificationTime();
    doReturn(linkStatus).when(mockFs).getFileLinkStatus(isA(Path.class));
  }
  HRegionFileSystem regionFileSystem = mock(HRegionFileSystem.class);
  doReturn(info).when(regionFileSystem).getRegionInfo();
  doReturn(regionStoreDir).when(regionFileSystem).getStoreDir(FAMILY);
  doReturn(hasReferenceFiles).when(regionFileSystem).hasReferences(anyString());
  doReturn(storeFiles).when(regionFileSystem).getStoreFiles(anyString());
  doReturn(mockFs).when(regionFileSystem).getFileSystem();
  return regionFileSystem;
}
// NOTE(review): fragment — this HashMap expression continues an assignment that begins
// above this view, and the loop body is truncated below. Families whose listing is null
// are skipped via continue.
new HashMap<String, Collection<StoreFileInfo>>(regionFs.getFamilies().size()); for (String family: regionFs.getFamilies()) { Collection<StoreFileInfo> sfis = regionFs.getStoreFiles(family); if (sfis == null) continue; Collection<StoreFileInfo> filteredSfis = null;
// NOTE(review): fragment — the LOG.info concatenation and the loop body are truncated
// beyond this view. A family with a null store-file listing is logged as excluded.
for (String family : requestedStores) { Collection<StoreFileInfo> storeFiles = fileSystem.getStoreFiles(family); if (storeFiles == null) { LOG.info("Excluding store: " + family + " for compaction for region: " + fileSystem
// NOTE(review): fragment — truncated beyond this view. Uses the two-argument overload
// with validation disabled (false); a null listing is logged at debug level.
if (familyNames != null) { for (String familyName: familyNames) { Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName, false); if (storeFiles == null) { LOG.debug("No files under family: " + familyName);
// NOTE(review): fragment — truncated beyond this view. Propagates any pending monitor
// exception before listing; the null-listing branch appears to log at debug level.
monitor.rethrowException(); Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName); if (storeFiles == null) { if (LOG.isDebugEnabled()) {
// Each listing below must be empty; getStoreFiles may return null instead of an
// empty collection, so both cases count as zero files.
Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(familyName);
assertEquals(0, storeFiles != null ? storeFiles.size() : 0);
storeFiles = regionFs.getStoreFiles(familyName);
assertEquals(0, storeFiles != null ? storeFiles.size() : 0);
storeFiles = regionFs.getStoreFiles(familyName);
assertEquals(0, storeFiles != null ? storeFiles.size() : 0);
// The build path should no longer exist on the filesystem.
assertFalse(fs.exists(buildPath));
/** * Create reference file(s) of merging regions under the merged directory * @param env MasterProcedureEnv * @param regionFs region file system * @param mergedDir the temp directory of merged region */ private void mergeStoreFiles( final MasterProcedureEnv env, final HRegionFileSystem regionFs, final Path mergedDir) throws IOException { final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem(); final Configuration conf = env.getMasterConfiguration(); final TableDescriptor htd = env.getMasterServices().getTableDescriptors().get(getTableName()); for (String family : regionFs.getFamilies()) { final ColumnFamilyDescriptor hcd = htd.getColumnFamily(Bytes.toBytes(family)); final Collection<StoreFileInfo> storeFiles = regionFs.getStoreFiles(family); if (storeFiles != null && storeFiles.size() > 0) { for (StoreFileInfo storeFileInfo : storeFiles) { // Create reference file(s) of the region in mergedDir. // As this procedure is running on master, use CacheConfig.DISABLED means // don't cache any block. regionFs.mergeStoreFile(mergedRegion, family, new HStoreFile(mfs.getFileSystem(), storeFileInfo, conf, CacheConfig.DISABLED, hcd.getBloomFilterType(), true), mergedDir); } } } }
// Exercises a flush against a fault-injecting filesystem: the flush must fail and
// leave no store files behind.
@Override
public Object run() throws Exception {
  // Make sure it worked (above is sensitive to caching details in hadoop core)
  FileSystem fs = FileSystem.get(conf);
  assertEquals(FaultyFileSystem.class, fs.getClass());
  // Initialize region
  init(name.getMethodName(), conf);
  LOG.info("Adding some data");
  store.add(new KeyValue(row, family, qf1, 1, (byte[])null), null);
  store.add(new KeyValue(row, family, qf2, 1, (byte[])null), null);
  store.add(new KeyValue(row, family, qf3, 1, (byte[])null), null);
  LOG.info("Before flush, we should have no files");
  // getStoreFiles may return null; null and empty both mean "no files".
  Collection<StoreFileInfo> files =
    store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
  assertEquals(0, files != null ? files.size() : 0);
  //flush
  try {
    LOG.info("Flushing");
    flush(1);
    // The faulty filesystem should have injected an IOException during the flush.
    fail("Didn't bubble up IOE!");
  } catch (IOException ioe) {
    assertTrue(ioe.getMessage().contains("Fault injected"));
  }
  LOG.info("After failed flush, we should still have no files!");
  files = store.getRegionFileSystem().getStoreFiles(store.getColumnFamilyName());
  assertEquals(0, files != null ? files.size() : 0);
  // Close the WAL so the region shuts down cleanly after the injected failure.
  store.getHRegion().getWAL().close();
  return null;
}
});
// Remove all store files of the first family, then verify the listing reports none left.
primaryRegion.getRegionFileSystem().removeStoreFiles(Bytes.toString(families[0]), storeFiles);
// getStoreFiles may return null instead of an empty collection — accept either.
Collection<StoreFileInfo> storeFileInfos = primaryRegion.getRegionFileSystem()
  .getStoreFiles(families[0]);
Assert.assertTrue(storeFileInfos == null || storeFileInfos.isEmpty());
// NOTE(review): garbled fragment — loop/brace structure is truncated and unbalanced in
// this view. It tallies per-family store-file counts, polling until a timeout elapses.
int count = 0; for(ColumnFamilyDescriptor colFamily : columnFamilies) { count += hrfs.getStoreFiles(colFamily.getName()).size(); while (System.currentTimeMillis() < timeout) { for(ColumnFamilyDescriptor colFamily : columnFamilies) { newcount += hrfs.getStoreFiles(colFamily.getName()).size(); int newcount1 = 0; for(ColumnFamilyDescriptor colFamily : columnFamilies) { newcount1 += hrfs.getStoreFiles(colFamily.getName()).size();
/**
 * Returns the store files available for the family.
 * Forwards to the two-argument overload, passing {@code true} as the second argument
 * (presumably a validate flag — TODO confirm against the overload's declaration).
 * @param familyName Column Family Name
 * @return a set of {@link StoreFileInfo} for the specified family.
 * @throws IOException if the family directory cannot be listed
 */
public Collection<StoreFileInfo> getStoreFiles(final String familyName) throws IOException { return getStoreFiles(familyName, true); }
/**
 * Returns the store files available for the family identified by raw bytes.
 * This method performs the filtering based on the valid store files.
 * @param familyName Column Family Name
 * @return a set of {@link StoreFileInfo} for the specified family.
 * @throws IOException if the family directory cannot be listed
 */
public Collection<StoreFileInfo> getStoreFiles(final byte[] familyName) throws IOException {
  // Decode the family name and delegate to the String-based overload.
  final String familyAsString = Bytes.toString(familyName);
  return getStoreFiles(familyAsString);
}
/**
 * Test hook: when the {@code fail} flag is set, pretends the filesystem is broken;
 * otherwise behaves exactly like the parent implementation.
 */
@Override
public Collection<StoreFileInfo> getStoreFiles(String familyName) throws IOException {
  if (!fail) {
    return super.getStoreFiles(familyName);
  }
  throw new IOException("simulating FS failure");
}
}
/**
 * Checks the underlying store files, and opens the files that have not been opened,
 * and removes the store file readers for store files no longer available. Mainly used
 * by secondary region replicas to keep up to date with the primary region files.
 * @throws IOException if listing the store files fails
 */
@Override
public void refreshStoreFiles() throws IOException {
  // List the family's current files and reconcile the in-memory view in one step.
  refreshStoreFilesInternal(fs.getStoreFiles(getColumnFamilyName()));
}
/**
 * Creates an unsorted list of StoreFile loaded in parallel from this store's directory.
 * @return the opened store files for this column family
 * @throws IOException if listing or opening the store files fails
 */
private List<StoreFile> loadStoreFiles() throws IOException {
  // Fetch the current listing and open every file it contains.
  Collection<StoreFileInfo> currentFiles = fs.getStoreFiles(getColumnFamilyName());
  return openStoreFiles(currentFiles);
}