/**
 * Releases resources held by this instance. The event loop group is shut
 * down only when this instance owns it ({@code shutdownGroupWhenClose});
 * a shared group is left running for other users.
 */
@Override
protected void closeInternal() {
  if (!shutdownGroupWhenClose) {
    return;
  }
  group.shutdownGracefully();
}
}
Supplier<Boolean> hasConsumerTask; if (conf.getBoolean(ASYNC_WAL_USE_SHARED_EVENT_LOOP, DEFAULT_ASYNC_WAL_USE_SHARED_EVENT_LOOP)) { this.consumeExecutor = eventLoopGroup.next(); if (consumeExecutor instanceof SingleThreadEventExecutor) { try {
// Handles a new inbound connection handed up from the acceptor loop: the
// message is the freshly accepted child Channel. The child's pipeline,
// options and attributes are configured BEFORE it is registered with the
// child event loop group, so handlers never observe a half-configured
// channel. Registration failures — whether reported asynchronously via the
// listener or thrown synchronously by register() — force-close the child so
// the accepted connection is not leaked.
// The unchecked suppression covers the AttributeKey<Object> cast, which is
// safe because the key/value pairs were captured together.
@Override @SuppressWarnings("unchecked") public void channelRead(ChannelHandlerContext ctx, Object msg) { final Channel child = (Channel) msg; child.pipeline().addLast(childHandler); setChannelOptions(child, childOptions, logger); for (Entry<AttributeKey<?>, Object> e: childAttrs) { child.attr((AttributeKey<Object>) e.getKey()).set(e.getValue()); } try { childGroup.register(child).addListener(new ChannelFutureListener() { @Override public void operationComplete(ChannelFuture future) throws Exception { if (!future.isSuccess()) { forceClose(child, future.cause()); } } }); } catch (Throwable t) { forceClose(child, t); } }
ChannelFuture regFuture = config().group().register(channel); if (regFuture.cause() != null) { if (channel.isRegistered()) {
/**
 * Shuts this instance down: closes the open channel (if any), clears the
 * reference so it cannot be reused, then gracefully stops the event loop
 * group.
 */
@Override
public void close() {
  Channel ch = channel;
  if (ch != null) {
    channel = null;
    ch.close();
  }
  group.shutdownGracefully();
}
/**
 * Creates a fan-out one-block output for {@code file} on a shared event
 * loop, writes test data through it and verifies the bytes round-trip.
 */
private void test(Path file) throws IOException, InterruptedException, ExecutionException {
  EventLoop loop = EVENT_LOOP_GROUP.next();
  FanOutOneBlockAsyncDFSOutput output = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, file,
    true, false, (short) 3, FS.getDefaultBlockSize(), loop, CHANNEL_CLASS);
  TestFanOutOneBlockAsyncDFSOutput.writeAndVerify(FS, file, output);
}
/**
 * Shuts this instance down: closes the channel if one is open and
 * gracefully stops the event loop group.
 */
@Override
public void close() {
  if (channel != null) {
    channel.close();
    // Clear the reference so a repeated close() is a no-op instead of a
    // double-close, matching the sibling close() implementation that nulls
    // the field.
    channel = null;
  }
  group.shutdownGracefully();
}
}
@Test public void testHeartbeat() throws IOException, InterruptedException, ExecutionException { Path f = new Path("/" + name.getMethodName()); EventLoop eventLoop = EVENT_LOOP_GROUP.next(); FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS); Thread.sleep(READ_TIMEOUT_MS * 2); // the connection to datanode should still alive. writeAndVerify(FS, f, out); }
/**
 * Runs the shared superclass cleanup, then shuts down the event loop group.
 */
@AfterClass
public static void tearDownAfterClass() throws Exception {
  AbstractTestWALReplay.tearDownAfterClass();
  // Wait for the event loop threads to actually terminate so they cannot
  // leak into subsequent test classes.
  GROUP.shutdownGracefully().syncUninterruptibly();
}
/**
 * Builds a WAL writer backed by an async writer running on one of the
 * shared event loops, adapted to the synchronous Writer interface.
 */
@Override
protected Writer createWriter(Path path) throws IOException {
  AsyncFSWALProvider.AsyncWriter asyncWriter = AsyncFSWALProvider.createAsyncWriter(
    TEST_UTIL.getConfiguration(), fs, path, false, EVENT_LOOP_GROUP.next(), CHANNEL_CLASS);
  return new WriterOverAsyncWriter(asyncWriter);
}
}
/**
 * Runs the shared superclass cleanup, then shuts down the event loop group.
 */
@AfterClass
public static void tearDownAfterClass() throws Exception {
  AbstractTestFSWAL.tearDownAfterClass();
  // Wait for the event loop threads to actually terminate so they cannot
  // leak into subsequent test classes.
  GROUP.shutdownGracefully().syncUninterruptibly();
}
/**
 * Smoke test: creates a fan-out output on a shared event loop, writes data
 * and verifies the round trip.
 */
@Test
public void test() throws IOException, InterruptedException, ExecutionException {
  Path f = new Path("/" + name.getMethodName());
  EventLoop loop = EVENT_LOOP_GROUP.next();
  FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true,
    false, (short) 3, FS.getDefaultBlockSize(), loop, CHANNEL_CLASS);
  writeAndVerify(FS, f, out);
}
/**
 * Cleans up the test directory, then shuts down the event loop group.
 */
@AfterClass
public static void tearDownAfterClass() throws IOException {
  TEST_UTIL.cleanupTestDir();
  // Wait for the event loop threads to actually terminate so they cannot
  // leak into subsequent test classes.
  GROUP.shutdownGracefully().syncUninterruptibly();
}
/**
 * Creating an output must fail when the parent directory does not exist.
 * This is important for fencing when recovering from a region server crash.
 */
@Test
public void testCreateParentFailed() throws IOException {
  Path f = new Path("/" + name.getMethodName() + "/test");
  EventLoop loop = EVENT_LOOP_GROUP.next();
  try {
    FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3,
      FS.getDefaultBlockSize(), loop, CHANNEL_CLASS);
    fail("should fail with parent does not exist");
  } catch (RemoteException e) {
    LOG.info("expected exception caught", e);
    assertThat(e.unwrapRemoteException(), instanceOf(FileNotFoundException.class));
  }
}
// Runs the shared superclass cleanup first, then shuts down the event loop
// group, blocking (uninterruptibly) until its threads have terminated so
// they cannot leak into later test classes.
@AfterClass public static void tearDownAfterClass() throws Exception { AbstractTestProtobufLog.tearDownAfterClass(); EVENT_LOOP_GROUP.shutdownGracefully().syncUninterruptibly(); }
@Test public void testConnectToDatanodeFailed() throws IOException, ClassNotFoundException, NoSuchMethodException, IllegalAccessException, InvocationTargetException, InterruptedException, NoSuchFieldException { Field xceiverServerDaemonField = DataNode.class.getDeclaredField("dataXceiverServer"); xceiverServerDaemonField.setAccessible(true); Class<?> xceiverServerClass = Class.forName("org.apache.hadoop.hdfs.server.datanode.DataXceiverServer"); Method numPeersMethod = xceiverServerClass.getDeclaredMethod("getNumPeers"); numPeersMethod.setAccessible(true); // make one datanode broken DataNodeProperties dnProp = TEST_UTIL.getDFSCluster().stopDataNode(0); Path f = new Path("/test"); EventLoop eventLoop = EVENT_LOOP_GROUP.next(); try (FanOutOneBlockAsyncDFSOutput output = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true, false, (short) 3, FS.getDefaultBlockSize(), eventLoop, CHANNEL_CLASS)) { // should exclude the dead dn when retry so here we only have 2 DNs in pipeline assertEquals(2, output.getPipeline().length); } finally { TEST_UTIL.getDFSCluster().restartDataNode(dnProp); } }
// Waits for the event loop group (if one was created) to shut down fully,
// then stops the mini DFS cluster. The sync() ensures the loop threads are
// gone before cluster teardown proceeds.
@AfterClass public static void tearDown() throws IOException, InterruptedException { if (EVENT_LOOP_GROUP != null) { EVENT_LOOP_GROUP.shutdownGracefully().sync(); } TEST_UTIL.shutdownMiniDFSCluster(); }
// Writes one batch of WAL entries through a CombinedAsyncWriter that fans
// out to two underlying async writers (path1 and path2), then reads each
// file back independently and verifies both contain the complete record
// set. withTrailer controls whether a WAL trailer is written and expected
// on read-back. The try-with-resources order matters: the combined writer
// is declared last so it closes first, before the writers it wraps.
private void doTest(boolean withTrailer) throws IOException { int columnCount = 5; int recordCount = 5; TableName tableName = TableName.valueOf("tablename"); byte[] row = Bytes.toBytes("row"); long timestamp = System.currentTimeMillis(); Path path1 = getPath(1); Path path2 = getPath(2); FileSystem fs = UTIL.getTestFileSystem(); Configuration conf = UTIL.getConfiguration(); try ( AsyncWriter writer1 = AsyncFSWALProvider.createAsyncWriter(conf, fs, path1, false, EVENT_LOOP_GROUP.next(), CHANNEL_CLASS); AsyncWriter writer2 = AsyncFSWALProvider.createAsyncWriter(conf, fs, path2, false, EVENT_LOOP_GROUP.next(), CHANNEL_CLASS); CombinedAsyncWriter writer = CombinedAsyncWriter.create(writer1, writer2)) { ProtobufLogTestHelper.doWrite(new WriterOverAsyncWriter(writer), withTrailer, tableName, columnCount, recordCount, row, timestamp); try (ProtobufLogReader reader = (ProtobufLogReader) WALS.createReader(fs, path1)) { ProtobufLogTestHelper.doRead(reader, withTrailer, tableName, columnCount, recordCount, row, timestamp); } try (ProtobufLogReader reader = (ProtobufLogReader) WALS.createReader(fs, path2)) { ProtobufLogTestHelper.doRead(reader, withTrailer, tableName, columnCount, recordCount, row, timestamp); } } } }
// Waits for the event loop group (if one was created) to shut down fully,
// then stops the test KDC if one was started. Both references are
// null-checked because setup may have failed part-way.
@AfterClass public static void tearDownAfterClass() throws IOException, InterruptedException { if (EVENT_LOOP_GROUP != null) { EVENT_LOOP_GROUP.shutdownGracefully().sync(); } if (KDC != null) { KDC.stop(); } }
/**
 * Writes a 50MB random chunk through the fan-out output (with a 1GB block
 * size so it stays in one block), confirms the flushed length, then reads
 * the file back and verifies the bytes match.
 */
@Test
public void testWriteLargeChunk() throws IOException, InterruptedException, ExecutionException {
  Path f = new Path("/" + name.getMethodName());
  EventLoop loop = EVENT_LOOP_GROUP.next();
  FanOutOneBlockAsyncDFSOutput out = FanOutOneBlockAsyncDFSOutputHelper.createOutput(FS, f, true,
    false, (short) 3, 1024 * 1024 * 1024, loop, CHANNEL_CLASS);
  byte[] expected = new byte[50 * 1024 * 1024];
  ThreadLocalRandom.current().nextBytes(expected);
  out.write(expected);
  out.flush(false);
  assertEquals(expected.length, out.flush(false).get().longValue());
  out.close();
  assertEquals(expected.length, FS.getFileStatus(f).getLen());
  byte[] actual = new byte[expected.length];
  try (FSDataInputStream in = FS.open(f)) {
    in.readFully(actual);
  }
  assertArrayEquals(expected, actual);
}
}