/**
 * Best-effort close of the underlying connection. Does nothing if no
 * connection is set; any exception raised while closing is logged and
 * suppressed so callers can always call close() safely.
 */
public void close() {
  if (connection != null) {
    try {
      connection.close();
    } catch (Exception ex) {
      // Pass the throwable as its own argument so the full stack trace is
      // logged, instead of concatenating only its toString() into the message.
      LOG.warn("Could not close the connection", ex);
    }
  }
}
@Override protected void doStop() { if (outputSink != null) { try { outputSink.finishWritingAndClose(); } catch (IOException ex) { LOG.warn("Got exception while trying to close OutputSink", ex); } } if (this.pool != null) { this.pool.shutdownNow(); try { // wait for 10 sec boolean shutdown = this.pool.awaitTermination(10000, TimeUnit.MILLISECONDS); if (!shutdown) { LOG.warn("Failed to shutdown the thread pool after 10 seconds"); } } catch (InterruptedException e) { LOG.warn("Got interrupted while waiting for the thread pool to shut down" + e); } } if (connection != null) { try { connection.close(); } catch (IOException ex) { LOG.warn("Got exception closing connection :" + ex); } } super.doStop(); }
@Override protected void doStop() { disconnect(); // don't call super.doStop() if (this.conn != null) { try { this.conn.close(); this.conn = null; } catch (IOException e) { LOG.warn("Failed to close the connection"); } } // Allow currently running replication tasks to finish exec.shutdown(); try { exec.awaitTermination(maxTerminationWait, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { } // Abort if the tasks did not terminate in time if (!exec.isTerminated()) { String errMsg = "HBaseInterClusterReplicationEndpoint termination failed. The " + "ThreadPoolExecutor failed to finish all tasks within " + maxTerminationWait + "ms. " + "Aborting to prevent Replication from deadlocking. See HBASE-16081."; abortable.abort(errMsg, new IOException(errMsg)); } notifyStopped(); }
/**
 * Cleans up after the test class: closes the shared connection and shuts down
 * the mini ZooKeeper cluster. The two steps are isolated in separate try
 * blocks so a failure closing the connection cannot skip the ZK shutdown.
 */
@AfterClass
public static void cleanupTest() throws Exception {
  try {
    CONNECTION.close();
  } catch (Exception e) {
    LOG.warn("problem closing connection", e);
  }
  try {
    UTIL.shutdownMiniZKCluster();
  } catch (Exception e) {
    LOG.warn("problem shutting down cluster", e);
  }
}
this.clusterConnection.close();
private ClusterConnection getMockedConnection(final Configuration conf) throws IOException, org.apache.hbase.thirdparty.com.google.protobuf.ServiceException { ClusterConnection c = Mockito.mock(ClusterConnection.class); Mockito.when(c.getConfiguration()).thenReturn(conf); Mockito.doNothing().when(c).close(); // Make it so we return a particular location when asked. final HRegionLocation loc = new HRegionLocation(RegionInfoBuilder.FIRST_META_REGIONINFO, ServerName.valueOf("example.org", 1234, 0)); Mockito.when( c.getRegionLocation((TableName) Mockito.any(), (byte[]) Mockito.any(), Mockito.anyBoolean())) .thenReturn(loc); Mockito.when(c.locateRegion((TableName) Mockito.any(), (byte[]) Mockito.any())).thenReturn(loc); ClientProtos.ClientService.BlockingInterface hri = Mockito.mock(ClientProtos.ClientService.BlockingInterface.class); Mockito .when( hri.bulkLoadHFile((RpcController) Mockito.any(), (BulkLoadHFileRequest) Mockito.any())) .thenThrow(new ServiceException(new IOException("injecting bulk load error"))); Mockito.when(c.getClient(Mockito.any())).thenReturn(hri); return c; }
/** * This test checks that one can connect to the cluster with only the * ZooKeeper quorum set. Other stuff like master address will be read * from ZK by the client. */ @Test public void testConnection() throws Exception{ // We create an empty config and add the ZK address. Configuration c = new Configuration(); c.set(HConstants.ZOOKEEPER_QUORUM, TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_QUORUM)); c.set(HConstants.ZOOKEEPER_CLIENT_PORT, TEST_UTIL.getConfiguration().get(HConstants.ZOOKEEPER_CLIENT_PORT)); // This should be enough to connect ClusterConnection conn = (ClusterConnection) ConnectionFactory.createConnection(c); assertTrue(conn.isMasterRunning()); conn.close(); }
} finally { admin.close(); if (connection != null) connection.close();
scan((RpcController)Mockito.any(), (ScanRequest)Mockito.any()); } finally { if (connection != null && !connection.isClosed()) connection.close(); zkw.close();
this.clusterConnection.close(); } catch (IOException e) {
connection.close();
@Test public void testReplayCallable() throws Exception { // tests replaying the edits to a secondary region replica using the Callable directly openRegion(HTU, rs0, hriSecondary); ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(HTU.getConfiguration()); //load some data to primary HTU.loadNumericRows(table, f, 0, 1000); Assert.assertEquals(1000, entries.size()); // replay the edits to the secondary using replay callable replicateUsingCallable(connection, entries); Region region = rs0.getRegion(hriSecondary.getEncodedName()); HTU.verifyNumericRows(region, f, 0, 1000); HTU.deleteNumericRows(table, f, 0, 1000); closeRegion(HTU, rs0, hriSecondary); connection.close(); }
@Test public void testReplayCallableWithRegionMove() throws Exception { // tests replaying the edits to a secondary region replica using the Callable directly while // the region is moved to another location.It tests handling of RME. openRegion(HTU, rs0, hriSecondary); ClusterConnection connection = (ClusterConnection) ConnectionFactory.createConnection(HTU.getConfiguration()); //load some data to primary HTU.loadNumericRows(table, f, 0, 1000); Assert.assertEquals(1000, entries.size()); // replay the edits to the secondary using replay callable replicateUsingCallable(connection, entries); Region region = rs0.getRegion(hriSecondary.getEncodedName()); HTU.verifyNumericRows(region, f, 0, 1000); HTU.loadNumericRows(table, f, 1000, 2000); // load some more data to primary // move the secondary region from RS0 to RS1 closeRegion(HTU, rs0, hriSecondary); openRegion(HTU, rs1, hriSecondary); // replicate the new data replicateUsingCallable(connection, entries); region = rs1.getRegion(hriSecondary.getEncodedName()); // verify the new data. old data may or may not be there HTU.verifyNumericRows(region, f, 1000, 2000); HTU.deleteNumericRows(table, f, 0, 2000); closeRegion(HTU, rs1, hriSecondary); connection.close(); }
/**
 * Closes the internal {@link Connection}. Does nothing if the {@link Connection} has already
 * been closed.
 * @throws IOException If there is an error closing the connection.
 */
@SuppressWarnings("deprecation")
public synchronized void close() throws IOException {
  Connection conn = getConnection();
  if (conn.isClosed()) {
    // Already closed elsewhere; closing again would be a no-op at best.
    return;
  }
  conn.close();
}
@SuppressWarnings("deprecation") @Test public void testClosingAlreadyClosedConnection() throws IOException { doCallRealMethod().when(mockMultiplexer).close(); // If the connection is already closed when(mockConnection.isClosed()).thenReturn(true); mockMultiplexer.close(); // We should not close it again verify(mockConnection, times(0)).close(); } }
@SuppressWarnings("deprecation") @Test public void testConnectionClosing() throws IOException { doCallRealMethod().when(mockMultiplexer).close(); // If the connection is not closed when(mockConnection.isClosed()).thenReturn(false); mockMultiplexer.close(); // We should close it verify(mockConnection).close(); }
if (hcon != null) { try { hcon.close(); } catch (IOException ignored) {
/**
 * Best-effort close of the underlying connection. Does nothing if no
 * connection is set; any exception raised while closing is logged and
 * suppressed so callers can always call close() safely.
 */
public void close() {
  if (connection != null) {
    try {
      connection.close();
    } catch (Exception ex) {
      // Pass the throwable as its own argument so the full stack trace is
      // logged, instead of concatenating only its toString() into the message.
      LOG.warn("Could not close the connection", ex);
    }
  }
}
/**
 * Closes the internal {@link Connection}. Does nothing if the {@link Connection} has already
 * been closed.
 * @throws IOException If there is an error closing the connection.
 */
@SuppressWarnings("deprecation")
public synchronized void close() throws IOException {
  Connection conn = getConnection();
  if (conn.isClosed()) {
    // Nothing to do; the connection was closed elsewhere.
    return;
  }
  conn.close();
}
/**
 * Cleans up after the test class: closes the shared connection and shuts down
 * the mini ZooKeeper cluster. The two steps are isolated in separate try
 * blocks so a failure closing the connection cannot skip the ZK shutdown.
 */
@AfterClass
public static void cleanupTest() throws Exception {
  try {
    CONNECTION.close();
  } catch (Exception e) {
    LOG.warn("problem closing connection", e);
  }
  try {
    UTIL.shutdownMiniZKCluster();
  } catch (Exception e) {
    LOG.warn("problem shutting down cluster", e);
  }
}