/** * Creates a writer for a new file in a temporary directory. * @param fd The file details. * @return Writer for a new StoreFile in the tmp dir. * @throws IOException if creation failed */ protected final StoreFileWriter createTmpWriter(FileDetails fd, boolean shouldDropBehind) throws IOException { // When all MVCC readpoints are 0, don't write them. // See HBASE-8166, HBASE-12600, and HBASE-13389. return store.createWriterInTmp(fd.maxKeyCount, this.compactionCompression, true, fd.maxMVCCReadpoint > 0, fd.maxTagsLength > 0, shouldDropBehind); }
/**
 * Creates the compaction output writer in the store's tmp directory.
 * Note: the closing "};" below terminates the enclosing anonymous class,
 * whose declaration is outside this view.
 */
@Override
public StoreFileWriter createWriter(InternalScanner scanner,
    org.apache.hadoop.hbase.regionserver.compactions.Compactor.FileDetails fd,
    boolean shouldDropBehind) throws IOException {
  // make this writer with tags always because of possible new cells with tags.
  return store.createWriterInTmp(fd.maxKeyCount, compactionCompression, true, true, true,
    shouldDropBehind);
}
};
/**
 * Creates a writer for {@code kvCount} cells in the store's tmp directory, using the column
 * family's configured compression. MVCC readpoints and tags are always written
 * (includeMVCCReadpoint=true, includesTag=true); drop-behind is disabled.
 * Note: the closing "};" below terminates the enclosing anonymous class,
 * whose declaration is outside this view.
 */
@Override
public StoreFileWriter createWriter() throws IOException {
  StoreFileWriter writer = store.createWriterInTmp(kvCount,
    store.getColumnFamilyDescriptor().getCompressionType(), false, true, true, false);
  return writer;
}
};
/**
 * Builds a StripeCompactor backed by mocks: the mocked HStore records every writer it is asked
 * to create (via the {@code writers} capture), and both createScanner overrides are stubbed to
 * return a fixed Scanner so no real store files are read.
 */
private StripeCompactor createCompactor() throws Exception {
  HColumnDescriptor col = new HColumnDescriptor(Bytes.toBytes("foo"));
  StoreFileWritersCapture writers = new StoreFileWritersCapture();
  HStore store = mock(HStore.class);
  HRegionInfo info = mock(HRegionInfo.class);
  when(info.getRegionNameAsString()).thenReturn("testRegion");
  when(store.getColumnFamilyDescriptor()).thenReturn(col);
  when(store.getRegionInfo()).thenReturn(info);
  // Any tmp-dir writer the compactor requests is answered (and recorded) by 'writers'.
  when(
    store.createWriterInTmp(anyLong(), any(), anyBoolean(), anyBoolean(), anyBoolean(),
      anyBoolean())).thenAnswer(writers);
  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
  final Scanner scanner = new Scanner();
  return new StripeCompactor(conf, store) {
    // Both scanner factories return the canned scanner above instead of reading files.
    @Override
    protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
        List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs,
        byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
      return scanner;
    }

    @Override
    protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
        List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint,
        long earliestPutTs) throws IOException {
      return scanner;
    }
  };
}
// Stub the pieces of HStore the code under test touches: a mock filesystem, a region for
// TABLE_NAME, and a writer factory whose created writers are captured/answered by 'writers'.
// NOTE(review): fragment of a larger setup method — 'store' and 'writers' are defined above
// this view.
when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
when(store.createWriterInTmp(anyLong(), any(), anyBoolean(), anyBoolean(), anyBoolean(),
  anyBoolean())).thenAnswer(writers);
when(store.getComparator()).thenReturn(CellComparatorImpl.COMPARATOR);
// Stub the pieces of HStore the code under test touches: a mock filesystem, a region for
// TABLE_NAME, and a writer factory whose created writers are captured/answered by 'writers'.
// NOTE(review): fragment of a larger setup method — 'store' and 'writers' are defined above
// this view.
when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
when(store.createWriterInTmp(anyLong(), any(), anyBoolean(), anyBoolean(), anyBoolean(),
  anyBoolean())).thenAnswer(writers);
when(store.getComparator()).thenReturn(CellComparatorImpl.COMPARATOR);
@Test public void testCacheOnWriteInSchema() throws IOException { // Write some random data into the store StoreFileWriter writer = store.createWriterInTmp(Integer.MAX_VALUE, HFile.DEFAULT_COMPRESSION_ALGORITHM, false, true, false, false); writeStoreFile(writer); writer.close(); // Verify the block types of interest were cached on write readStoreFile(writer.getPath()); }
// NOTE(review): fragment of a larger flush method — 'status', 'store', 'writer' and
// 'cellsCount' are defined outside this view.
status.setStatus("Flushing " + store + ": creating writer");
// Create the flush output in the store's tmp directory with the family's compression;
// MVCC readpoints and tags are always written here (both flags true), drop-behind disabled.
writer = store.createWriterInTmp(cellsCount,
  store.getColumnFamilyDescriptor().getCompressionType(), false, true, true, false);
IOException e = null;
// NOTE(review): fragment of a larger flush method — 'status', 'store', 'writer', 'cellsCount'
// and 'snapshot' are defined outside this view.
status.setStatus("Flushing " + store + ": creating writer");
// Create the flush output in the store's tmp directory; tags are only written when the
// snapshot actually contains tagged cells.
writer = store.createWriterInTmp(cellsCount,
  store.getColumnFamilyDescriptor().getCompressionType(), false, true,
  snapshot.isTagsPresent(), false);
/** * Verify that compression and data block encoding are respected by the * Store.createWriterInTmp() method, used on store flush. */ @Test public void testCreateWriter() throws Exception { Configuration conf = HBaseConfiguration.create(); FileSystem fs = FileSystem.get(conf); ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(family) .setCompressionType(Compression.Algorithm.GZ).setDataBlockEncoding(DataBlockEncoding.DIFF) .build(); init(name.getMethodName(), conf, hcd); // Test createWriterInTmp() StoreFileWriter writer = store.createWriterInTmp(4, hcd.getCompressionType(), false, true, false, false); Path path = writer.getPath(); writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1))); writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2))); writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3))); writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4))); writer.close(); // Verify that compression and encoding settings are respected HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), true, conf); assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm()); assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding()); reader.close(); }
/**
 * Convenience overload: delegates to the six-argument {@code createWriterInTmp}, passing
 * {@code false} for the final boolean (drop-behind, per the callers visible in this file).
 *
 * @param maxKeyCount estimated maximum number of keys to be written
 * @param compression compression algorithm for the new file
 * @param isCompaction whether the writer is for a compaction output
 * @param includeMVCCReadpoint whether MVCC readpoints should be written
 * @param includesTag whether cell tags should be written
 * @throws IOException if the underlying writer creation fails
 */
@Override
public StoreFile.Writer createWriterInTmp(long maxKeyCount, Compression.Algorithm compression,
    boolean isCompaction, boolean includeMVCCReadpoint, boolean includesTag) throws IOException {
  return createWriterInTmp(maxKeyCount, compression, isCompaction, includeMVCCReadpoint,
    includesTag, false);
}
/**
 * Builds a StripeCompactor backed by mocks: the mocked HStore records every writer it is asked
 * to create (via the {@code writers} capture), and both createScanner overrides are stubbed to
 * return a fixed Scanner so no real store files are read.
 */
private StripeCompactor createCompactor() throws Exception {
  HColumnDescriptor col = new HColumnDescriptor(Bytes.toBytes("foo"));
  StoreFileWritersCapture writers = new StoreFileWritersCapture();
  HStore store = mock(HStore.class);
  HRegionInfo info = mock(HRegionInfo.class);
  when(info.getRegionNameAsString()).thenReturn("testRegion");
  when(store.getColumnFamilyDescriptor()).thenReturn(col);
  when(store.getRegionInfo()).thenReturn(info);
  // Any tmp-dir writer the compactor requests is answered (and recorded) by 'writers'.
  when(
    store.createWriterInTmp(anyLong(), any(), anyBoolean(), anyBoolean(), anyBoolean(),
      anyBoolean())).thenAnswer(writers);
  Configuration conf = HBaseConfiguration.create();
  conf.setBoolean("hbase.regionserver.compaction.private.readers", usePrivateReaders);
  final Scanner scanner = new Scanner();
  return new StripeCompactor(conf, store) {
    // Both scanner factories return the canned scanner above instead of reading files.
    @Override
    protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
        List<StoreFileScanner> scanners, long smallestReadPoint, long earliestPutTs,
        byte[] dropDeletesFromRow, byte[] dropDeletesToRow) throws IOException {
      return scanner;
    }

    @Override
    protected InternalScanner createScanner(HStore store, ScanInfo scanInfo,
        List<StoreFileScanner> scanners, ScanType scanType, long smallestReadPoint,
        long earliestPutTs) throws IOException {
      return scanner;
    }
  };
}
// Stub the pieces of HStore the code under test touches: a mock filesystem, a region for
// TABLE_NAME, and a writer factory whose created writers are captured/answered by 'writers'.
// NOTE(review): fragment of a larger setup method — 'store' and 'writers' are defined above
// this view.
when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
when(store.createWriterInTmp(anyLong(), any(), anyBoolean(), anyBoolean(), anyBoolean(),
  anyBoolean())).thenAnswer(writers);
when(store.getComparator()).thenReturn(CellComparatorImpl.COMPARATOR);
// Stub the pieces of HStore the code under test touches: a mock filesystem, a region for
// TABLE_NAME, and a writer factory whose created writers are captured/answered by 'writers'.
// NOTE(review): fragment of a larger setup method — 'store' and 'writers' are defined above
// this view.
when(store.getFileSystem()).thenReturn(mock(FileSystem.class));
when(store.getRegionInfo()).thenReturn(new HRegionInfo(TABLE_NAME));
when(store.createWriterInTmp(anyLong(), any(), anyBoolean(), anyBoolean(), anyBoolean(),
  anyBoolean())).thenAnswer(writers);
when(store.getComparator()).thenReturn(CellComparatorImpl.COMPARATOR);
@Test public void testCacheOnWriteInSchema() throws IOException { // Write some random data into the store StoreFileWriter writer = store.createWriterInTmp(Integer.MAX_VALUE, HFile.DEFAULT_COMPRESSION_ALGORITHM, false, true, false, false); writeStoreFile(writer); writer.close(); // Verify the block types of interest were cached on write readStoreFile(writer.getPath()); }
/** * Verify that compression and data block encoding are respected by the * Store.createWriterInTmp() method, used on store flush. */ @Test public void testCreateWriter() throws Exception { Configuration conf = HBaseConfiguration.create(); FileSystem fs = FileSystem.get(conf); ColumnFamilyDescriptor hcd = ColumnFamilyDescriptorBuilder.newBuilder(family) .setCompressionType(Compression.Algorithm.GZ).setDataBlockEncoding(DataBlockEncoding.DIFF) .build(); init(name.getMethodName(), conf, hcd); // Test createWriterInTmp() StoreFileWriter writer = store.createWriterInTmp(4, hcd.getCompressionType(), false, true, false, false); Path path = writer.getPath(); writer.append(new KeyValue(row, family, qf1, Bytes.toBytes(1))); writer.append(new KeyValue(row, family, qf2, Bytes.toBytes(2))); writer.append(new KeyValue(row2, family, qf1, Bytes.toBytes(3))); writer.append(new KeyValue(row2, family, qf2, Bytes.toBytes(4))); writer.close(); // Verify that compression and encoding settings are respected HFile.Reader reader = HFile.createReader(fs, path, new CacheConfig(conf), true, conf); assertEquals(hcd.getCompressionType(), reader.getCompressionAlgorithm()); assertEquals(hcd.getDataBlockEncoding(), reader.getDataBlockEncoding()); reader.close(); }