/**
 * Releases the session, logging (not propagating) any failure, and then
 * unconditionally releases the client in the finally block.
 */
public void cleanup() {
    try {
        session.close();
    } catch (Exception e) {
        LOG.warn("Error occurred while closing Session", e);
    } finally {
        // The client must be released even when the session refuses to close.
        client.close();
    }
}
/**
 * Clears the committed-checkpoint cache and tears down the Cassandra
 * connection. Session and cluster are closed independently so a failure in
 * one never prevents releasing the other; failures are logged, not thrown.
 */
@Override
public void close() throws Exception {
    this.lastCommittedCheckpoints.clear();
    try {
        // Null guard keeps this consistent with the other close() methods
        // and avoids logging a spurious NPE when no session was ever opened.
        if (session != null) {
            session.close();
        }
    } catch (Exception e) {
        LOG.error("Error while closing session.", e);
    }
    try {
        if (cluster != null) {
            cluster.close();
        }
    } catch (Exception e) {
        LOG.error("Error while closing cluster.", e);
    }
}
/**
 * Closes all resources used. Each resource is released in its own
 * try/catch so one failure cannot block the other; errors are only logged.
 */
@Override
public void close() {
    try {
        if (session != null) {
            session.close();
        }
    } catch (Exception e) {
        logger.error("Error while closing session.", e);
    }
    try {
        if (cluster != null) {
            cluster.close();
        }
    } catch (Exception e) {
        logger.error("Error while closing cluster.", e);
    }
}
/**
 * Closes all resources used. Session and cluster are shut down
 * independently; any failure is logged and swallowed rather than thrown.
 */
@Override
public void close() throws IOException {
    try {
        if (session != null) {
            session.close();
        }
    } catch (Exception e) {
        LOG.error("Error while closing session.", e);
    }
    try {
        if (cluster != null) {
            cluster.close();
        }
    } catch (Exception e) {
        LOG.error("Error while closing cluster.", e);
    }
}
}
/**
 * Closes all resources used. Drops the mapper reference first, then
 * releases session and cluster independently, logging any close failures.
 */
@Override
public void close() {
    mapper = null;
    try {
        if (session != null) {
            session.close();
        }
    } catch (Exception e) {
        LOG.error("Error while closing session.", e);
    }
    try {
        if (cluster != null) {
            cluster.close();
        }
    } catch (Exception e) {
        LOG.error("Error while closing cluster.", e);
    }
}
}
/**
 * Closes the Cassandra session and client. The client is closed in a
 * finally block so it is released even if closing the session throws —
 * the original sequential form leaked the client on a session-close failure.
 */
public void close() {
    try {
        m_session.close();
    } finally {
        m_cassandraClient.close();
    }
}
/**
 * Closes this instance. Superclass cleanup runs first; the Cassandra
 * session and cluster are released in a finally block so they are freed
 * even when {@code super.close()} throws (previously they leaked).
 */
@Override
public void close() throws Exception {
    try {
        super.close();
    } finally {
        try {
            if (session != null) {
                session.close();
            }
        } catch (Exception e) {
            LOG.error("Error while closing session.", e);
        }
        try {
            if (cluster != null) {
                cluster.close();
            }
        } catch (Exception e) {
            LOG.error("Error while closing cluster.", e);
        }
    }
}
/**
 * Closes this instance. Superclass cleanup runs first; the Cassandra
 * session and cluster are released in a finally block so they are freed
 * even when {@code super.close()} throws (previously they leaked).
 */
@Override
public void close() throws Exception {
    try {
        super.close();
    } finally {
        try {
            if (session != null) {
                session.close();
            }
        } catch (Exception e) {
            LOG.error("Error while closing session.", e);
        }
        try {
            if (cluster != null) {
                cluster.close();
            }
        } catch (Exception e) {
            LOG.error("Error while closing cluster.", e);
        }
    }
}
/**
 * Generates the necessary tables to store information.
 *
 * <p>The DDL statements are wrapped in try/finally so that the session and
 * cluster are always released — previously a failing {@code execute} leaked
 * both connections.
 *
 * @throws Exception if connecting or executing the DDL fails
 */
@Override
public void createResource() throws Exception {
    cluster = builder.getCluster();
    session = cluster.connect();
    try {
        session.execute(String.format("CREATE KEYSPACE IF NOT EXISTS %s with replication={'class':'SimpleStrategy', 'replication_factor':1};", keySpace));
        session.execute(String.format("CREATE TABLE IF NOT EXISTS %s.%s (sink_id text, sub_id int, checkpoint_id bigint, PRIMARY KEY (sink_id, sub_id));", keySpace, table));
    } finally {
        try {
            session.close();
        } catch (Exception e) {
            LOG.error("Error while closing session.", e);
        }
        try {
            cluster.close();
        } catch (Exception e) {
            LOG.error("Error while closing cluster.", e);
        }
    }
}
/**
 * {@inheritDoc}
 *
 * <p>Closes the session and cluster when the cluster is still open. The
 * session is null-guarded: a cluster can exist without a session having
 * been opened, and the original code would NPE in that case.
 */
@Override
public void close() {
    if (cluster != null && !cluster.isClosed()) {
        LOG.info("Try to close connection to cluster: {}", cluster.getClusterName());
        if (session != null) {
            session.close();
        }
        cluster.close();
    }
}
/** * Cleanup any state for this DB. Called once per DB instance; there is one DB * instance per client thread. */ @Override public void cleanup() throws DBException { synchronized (INIT_COUNT) { final int curInitCount = INIT_COUNT.decrementAndGet(); if (curInitCount <= 0) { readStmts.clear(); scanStmts.clear(); insertStmts.clear(); updateStmts.clear(); readAllStmt.set(null); scanAllStmt.set(null); deleteStmt.set(null); session.close(); cluster.close(); cluster = null; session = null; } if (curInitCount < 0) { // This should never happen. throw new DBException( String.format("initCount is negative: %d", curInitCount)); } } }
public void stop(ProcessContext context) { // We don't want to close the connection when using 'Cassandra Connection Provider' // because each time @OnUnscheduled/@OnShutdown annotated method is triggered on a // processor, the connection would be closed which is not ideal for a centralized // connection provider controller service if (!context.getProperty(CONNECTION_PROVIDER_SERVICE).isSet()) { if (cassandraSession.get() != null) { cassandraSession.get().close(); cassandraSession.set(null); } if (cluster.get() != null) { cluster.get().close(); cluster.set(null); } } }
/** Releases the Cassandra session and cluster when the component stops. */
@OnStopped
public void onStopped() {
    if (cassandraSession != null) {
        cassandraSession.close();
    }
    if (cluster != null) {
        cluster.close();
    }
}
/** Releases the Cassandra session and cluster when the service is disabled. */
@OnDisabled
public void onDisabled() {
    if (cassandraSession != null) {
        cassandraSession.close();
    }
    if (cluster != null) {
        cluster.close();
    }
}
/**
 * Flushes pending writes, surfacing any asynchronous errors seen before or
 * after the flush, then tears down the session and cluster. The teardown
 * lives in a finally block so it runs even when flushing fails; close
 * failures themselves are only logged.
 */
@Override
public void close() throws Exception {
    try {
        checkAsyncErrors();
        flush();
        checkAsyncErrors();
    } finally {
        try {
            if (session != null) {
                session.close();
            }
        } catch (Exception e) {
            log.error("Error while closing session.", e);
        }
        try {
            if (cluster != null) {
                cluster.close();
            }
        } catch (Exception e) {
            log.error("Error while closing cluster.", e);
        }
    }
}
/**
 * Runs transformations.
 *
 * <p>The finally block null-guards the session: if {@code cluster.connect}
 * throws, the original code dereferenced a null session and the NPE masked
 * the real failure. The nested finally also guarantees the cluster is
 * closed even when closing the session throws.
 */
public void transform() {
    if (!Options.DEFAULT_NO_SQL.equalsIgnoreCase(nosql)) {
        try {
            cassandraSession = cluster.connect(dbName);
            addEndpointSpecificConfigurationTable();
            alterEndpointProfileTable();
        } finally {
            try {
                if (cassandraSession != null) {
                    cassandraSession.close();
                }
            } finally {
                cluster.close();
            }
        }
    }
}
/** * Add field use_raw_configuration_schema to endpointProfile that used to support devices using * SDK version 0.9.0 */ public void transform() { //mongo MongoClient client = new MongoClient(host); MongoDatabase database = client.getDatabase(dbName); MongoCollection<Document> endpointProfile = database.getCollection("endpoint_profile"); endpointProfile.updateMany(new Document(), eq("$set", eq("use_raw_schema", false))); //cassandra Cluster cluster = Cluster.builder().addContactPoint(host).build(); Session session = cluster.connect(dbName); session.execute("ALTER TABLE ep_profile ADD use_raw_schema boolean"); session.close(); cluster.close(); } }
/**
 * Starts the dependency, verifies the running Cassandra version (when a
 * minimum is required), and creates the test keyspace. Session creation is
 * moved inside the cluster's try/finally and the two closes are nested so
 * that the cluster is released even if {@code newSession} or
 * {@code session.close()} throws — previously the cluster leaked in both
 * cases.
 */
@Override
protected void before() throws Throwable {
    dependency.before();
    Cluster cluster = Cluster.builder().addContactPoint(getHost()).withPort(getPort())
        .withNettyOptions(new NettyOptions() {
            @Override
            public void onClusterClose(EventLoopGroup eventLoopGroup) {
                // Shut the event loop down immediately so tests don't linger.
                eventLoopGroup.shutdownGracefully(0, 0, TimeUnit.MILLISECONDS).syncUninterruptibly();
            }
        }).build();
    try {
        Session session = cluster.newSession();
        try {
            if (requiredVersion != null) {
                Version cassandraReleaseVersion = CassandraVersion.getReleaseVersion(session);
                if (cassandraReleaseVersion.isLessThan(requiredVersion)) {
                    throw new AssumptionViolatedException(
                        String.format("Cassandra at %s:%s runs in Version %s but we require at least %s",
                            getHost(), getPort(), cassandraReleaseVersion, requiredVersion));
                }
            }
            session.execute(String.format("CREATE KEYSPACE IF NOT EXISTS %s \n"
                + "WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 };", keyspaceName));
        } finally {
            session.close();
        }
    } finally {
        cluster.close();
    }
}
@Override public void destroy() { try { logger.info( "dropping application keyspace" ); dataStaxCluster.getClusterSession() .execute("DROP KEYSPACE "+ CQLUtils.quote(cassandraConfig.getApplicationKeyspace())); dataStaxCluster.waitForSchemaAgreement(); logger.info( "dropping application local keyspace" ); dataStaxCluster.getClusterSession() .execute("DROP KEYSPACE "+ CQLUtils.quote(cassandraConfig.getApplicationLocalKeyspace())); dataStaxCluster.waitForSchemaAgreement(); dataStaxCluster.getClusterSession().close(); // close session so it's meta will get refreshed } catch ( Exception e ) { logger.error("Error dropping application keyspaces: {} error: {}", cassandraConfig.getApplicationKeyspace(), e); } logger.info( "keyspaces dropped" ); logger.info( "dropping indices" ); final EsProvider provider = SpringResource.getInstance().getBean( Injector.class ).getInstance( EsProvider.class ); provider.getClient().admin().indices().prepareDelete( "_all" ).execute().actionGet(); } }