/**
 * Starts the sink: verifies no stale client exists, starts the sink counter,
 * and initializes the HBase client.
 *
 * @throws IllegalArgumentException if {@code stop()} was not called on a
 *         previously started instance.
 */
@Override
public void start() {
  Preconditions.checkArgument(client == null, "Please call stop " + "before calling start on an old instance.");
  sinkCounter.start();
  client = initHBaseClient();
  // Count the connection only after the client has actually been created;
  // incrementing before initHBaseClient() records a created connection even
  // when initialization fails.
  sinkCounter.incrementConnectionCreatedCount();
  super.start();
}
/**
 * Locates the writer that has not been used for the longest time and retires
 * it, closing its Hive connection to stay under the connection cap.
 *
 * @throws InterruptedException if interrupted while closing the writer.
 */
private void closeEldestWriter() throws InterruptedException {
  long oldestTimeStamp = System.currentTimeMillis();
  HiveEndPoint eldest = null;
  // Scan all writers for the smallest lastUsed timestamp.
  for (Entry<HiveEndPoint,HiveWriter> entry : allWriters.entrySet()) {
    if (entry.getValue().getLastUsed() < oldestTimeStamp) {
      eldest = entry.getKey();
      oldestTimeStamp = entry.getValue().getLastUsed();
    }
  }
  // Guard: with no writers, eldest stays null and remove(null).close()
  // would throw NullPointerException.
  if (eldest == null) {
    return;
  }
  try {
    // We are closing a connection here, so record it as closed (the
    // original incremented the *created* count, skewing both metrics).
    sinkCounter.incrementConnectionClosedCount();
    LOG.info(getName() + ": Closing least used Writer to Hive EndPoint : " + eldest);
    allWriters.remove(eldest).close();
  } catch (InterruptedException e) {
    LOG.warn(getName() + ": Interrupted when attempting to close writer for end point: " + eldest, e);
    throw e;
  }
}
/**
 * Returns the cached {@link HiveWriter} for the given end point, creating
 * and caching a new one if none exists yet. Newly created writers count
 * toward the connection-created metric, and creation may trigger eviction
 * of idle or least-recently-used writers to honor {@code maxOpenConnections}.
 *
 * NOTE(review): the create -> count -> evict -> register ordering appears
 * deliberate (the new writer is not in allWriters while eviction runs);
 * confirm before reordering.
 *
 * @param activeWriters writers used in the current batch; the returned
 *        writer is registered here as a side effect.
 * @param endPoint Hive end point the caller wants to write to.
 * @throws HiveWriter.ConnectException if a new writer cannot connect
 *         (also bumps the connection-failed metric).
 * @throws InterruptedException if interrupted while evicting old writers.
 */
private HiveWriter getOrCreateWriter(Map<HiveEndPoint, HiveWriter> activeWriters,
    HiveEndPoint endPoint)
    throws HiveWriter.ConnectException, InterruptedException {
  try {
    HiveWriter writer = allWriters.get( endPoint );
    if (writer == null) {
      LOG.info(getName() + ": Creating Writer to Hive end point : " + endPoint);
      writer = new HiveWriter(endPoint, txnsPerBatchAsk, autoCreatePartitions,
          callTimeout, callTimeoutPool, proxyUser, serializer, sinkCounter);
      sinkCounter.incrementConnectionCreatedCount();
      // Enforce the connection cap: prefer closing idle writers; only if
      // none were idle, retire the least-recently-used one.
      if (allWriters.size() > maxOpenConnections) {
        int retired = closeIdleWriters();
        if (retired == 0) {
          closeEldestWriter();
        }
      }
      allWriters.put(endPoint, writer);
      activeWriters.put(endPoint, writer);
    } else {
      // Writer already cached; make sure it is marked active for this batch.
      if (activeWriters.get(endPoint) == null) {
        activeWriters.put(endPoint,writer);
      }
    }
    return writer;
  } catch (HiveWriter.ConnectException e) {
    sinkCounter.incrementConnectionFailedCount();
    throw e;
  }
}
// NOTE(review): fragment — records a newly created connection, then resets
// counters; presumably part of a connection-reset path. Confirm that
// resetCounters() is not intended to run before the increment.
sinkCounter.incrementConnectionCreatedCount(); resetCounters();
// NOTE(review): fragment — verifies the RPC client was initialized, counts the
// connection, then (when cxnResetInterval > 0) schedules a periodic
// connection-reset task. The Runnable body lies outside this view.
Preconditions.checkNotNull(client, "Rpc Client could not be " + "initialized. " + getName() + " could not be started"); sinkCounter.incrementConnectionCreatedCount(); if (cxnResetInterval > 0) { cxnResetExecutor.schedule(new Runnable() {
// NOTE(review): fragment — records a newly created connection, then resets
// counters; presumably part of a connection-reset path. Confirm that
// resetCounters() is not intended to run before the increment.
sinkCounter.incrementConnectionCreatedCount(); resetCounters();
/**
 * Starts a grid and initializes an event transformer.
 *
 * @throws FlumeException if the grid or the event transformer cannot be
 *         started (also bumps the connection-failed metric).
 */
@SuppressWarnings("unchecked")
@Override
public synchronized void start() {
  A.notNull(springCfgPath, "Ignite config file");
  A.notNull(cacheName, "Cache name");
  A.notNull(eventTransformerCls, "Event transformer class");
  sinkCounter.start();
  try {
    if (ignite == null)
      ignite = Ignition.start(springCfgPath);
    if (eventTransformerCls != null && !eventTransformerCls.isEmpty()) {
      Class<? extends EventTransformer> clazz =
          (Class<? extends EventTransformer<Event, Object, Object>>)Class.forName(eventTransformerCls);
      // getDeclaredConstructor().newInstance() instead of the deprecated
      // Class.newInstance(); any reflective failure is still handled by the
      // catch (Exception) below.
      eventTransformer = clazz.getDeclaredConstructor().newInstance();
    }
  }
  catch (Exception e) {
    log.error("Failed to start grid", e);
    sinkCounter.incrementConnectionFailedCount();
    throw new FlumeException("Failed to start grid", e);
  }
  sinkCounter.incrementConnectionCreatedCount();
  super.start();
}
/**
 * Starts the RocketMQ producer backing this sink.
 *
 * @throws FlumeException if the producer fails to start (also bumps the
 *         connection-failed metric).
 */
@Override
public synchronized void start() {
  // Start the counter first so the connection-created / connection-failed
  // events below are recorded against a running counter — the original
  // called sinkCounter.start() last, so it never ran on the failure path.
  sinkCounter.start();
  producer = new DefaultMQProducer(producerGroup);
  producer.setNamesrvAddr(nameServer);
  try {
    producer.start();
  } catch (MQClientException e) {
    sinkCounter.incrementConnectionFailedCount();
    log.error("RocketMQ producer start failed", e);
    throw new FlumeException("Failed to start RocketMQ producer", e);
  }
  sinkCounter.incrementConnectionCreatedCount();
  super.start();
}
/**
 * Starts the sink: builds the ElasticSearch client (local or remote) and
 * records the connection with the sink counter. A client-creation failure is
 * logged and counted but does not abort startup (best-effort, as before).
 */
@Override
public void start() {
  ElasticSearchClientFactory clientFactory = new ElasticSearchClientFactory();
  // Fixed: the original logged the raw "{}" pattern with no argument.
  logger.info("ElasticSearch sink {} starting", getName());
  sinkCounter.start();
  try {
    if (isLocal) {
      client = clientFactory.getLocalClient(
          clientType, eventSerializer, indexRequestFactory);
    } else {
      client = clientFactory.getClient(clientType, serverAddresses,
          clusterName, eventSerializer, indexRequestFactory);
      client.configure(elasticSearchClientContext);
    }
    sinkCounter.incrementConnectionCreatedCount();
  } catch (Exception ex) {
    // Log through the logger (stack trace included) instead of printStackTrace().
    logger.error("Failed to create ElasticSearch client for sink " + getName(), ex);
    sinkCounter.incrementConnectionFailedCount();
    if (client != null) {
      client.close();
      sinkCounter.incrementConnectionClosedCount();
    }
  }
  super.start();
}
// NOTE(review): fragment — the connection-created count is incremented before
// sinkCounter.start(); other sinks in this file start the counter first.
// Confirm increments made before start() are not discarded.
sinkCounter.incrementConnectionCreatedCount(); sinkCounter.start();
// NOTE(review): fragment — the connection-created count is incremented before
// sinkCounter.start(); other sinks in this file start the counter first.
// Confirm increments made before start() are not discarded.
sinkCounter.incrementConnectionCreatedCount(); sinkCounter.start();
// NOTE(review): fragment — builds a serializer over outputStream, runs its
// afterCreate() hook, and counts the connection; an IOException during setup
// is recorded as a failed connection. The enclosing try and the call whose
// arguments open this fragment are outside this view.
serializerType, serializerContext, outputStream); serializer.afterCreate(); sinkCounter.incrementConnectionCreatedCount(); } catch (IOException e) { sinkCounter.incrementConnectionFailedCount();
/**
 * Starts the sink by initializing its serializer.
 *
 * A serializer failure is counted as a failed connection and then propagated
 * (unchecked) to the caller; on success the connection-created metric is
 * bumped before the superclass transitions the lifecycle state.
 */
@Override
public void start() {
  logger.info("Starting sink {} ",this.getName());
  sinkCounter.start();
  try {
    serializer.initialize();
  } catch(Exception ex) {
    sinkCounter.incrementConnectionFailedCount();
    logger.error("Error {} in initializing the serializer.",ex.getMessage());
    Throwables.propagate(ex);
  }
  // Reached only when initialize() succeeded: propagate() above always throws.
  sinkCounter.incrementConnectionCreatedCount();
  super.start();
}
/**
 * Starts the sink by initializing its serializer.
 *
 * A serializer failure is counted as a failed connection and then propagated
 * (unchecked) to the caller; on success the connection-created metric is
 * bumped before the superclass transitions the lifecycle state.
 */
@Override
public void start() {
  logger.info("Starting sink {} ",this.getName());
  sinkCounter.start();
  try {
    serializer.initialize();
  } catch(Exception ex) {
    sinkCounter.incrementConnectionFailedCount();
    logger.error("Error {} in initializing the serializer.",ex.getMessage());
    Throwables.propagate(ex);
  }
  // Reached only when initialize() succeeded: propagate() above always throws.
  sinkCounter.incrementConnectionCreatedCount();
  super.start();
}
/**
 * Starts the sink: verifies no stale client exists, starts the sink counter,
 * and initializes the HBase client.
 *
 * @throws IllegalArgumentException if {@code stop()} was not called on a
 *         previously started instance.
 */
@Override
public void start() {
  Preconditions.checkArgument(client == null, "Please call stop " + "before calling start on an old instance.");
  sinkCounter.start();
  client = initHBaseClient();
  // Count the connection only after the client has actually been created;
  // incrementing before initHBaseClient() records a created connection even
  // when initialization fails.
  sinkCounter.incrementConnectionCreatedCount();
  super.start();
}
/**
 * Locates the writer that has not been used for the longest time and retires
 * it, closing its Hive connection to stay under the connection cap.
 *
 * @throws InterruptedException if interrupted while closing the writer.
 */
private void closeEldestWriter() throws InterruptedException {
  long oldestTimeStamp = System.currentTimeMillis();
  HiveEndPoint eldest = null;
  // Scan all writers for the smallest lastUsed timestamp.
  for (Entry<HiveEndPoint,HiveWriter> entry : allWriters.entrySet()) {
    if (entry.getValue().getLastUsed() < oldestTimeStamp) {
      eldest = entry.getKey();
      oldestTimeStamp = entry.getValue().getLastUsed();
    }
  }
  // Guard: with no writers, eldest stays null and remove(null).close()
  // would throw NullPointerException.
  if (eldest == null) {
    return;
  }
  try {
    // We are closing a connection here, so record it as closed (the
    // original incremented the *created* count, skewing both metrics).
    sinkCounter.incrementConnectionClosedCount();
    LOG.info(getName() + ": Closing least used Writer to Hive EndPoint : " + eldest);
    allWriters.remove(eldest).close();
  } catch (InterruptedException e) {
    LOG.warn(getName() + ": Interrupted when attempting to close writer for end point: " + eldest, e);
    throw e;
  }
}
/**
 * Returns the cached {@link HiveWriter} for the given end point, creating
 * and caching a new one if none exists yet. Newly created writers count
 * toward the connection-created metric, and creation may trigger eviction
 * of idle or least-recently-used writers to honor {@code maxOpenConnections}.
 *
 * NOTE(review): the create -> count -> evict -> register ordering appears
 * deliberate (the new writer is not in allWriters while eviction runs);
 * confirm before reordering.
 *
 * @param activeWriters writers used in the current batch; the returned
 *        writer is registered here as a side effect.
 * @param endPoint Hive end point the caller wants to write to.
 * @throws HiveWriter.ConnectException if a new writer cannot connect
 *         (also bumps the connection-failed metric).
 * @throws InterruptedException if interrupted while evicting old writers.
 */
private HiveWriter getOrCreateWriter(Map<HiveEndPoint, HiveWriter> activeWriters,
    HiveEndPoint endPoint)
    throws HiveWriter.ConnectException, InterruptedException {
  try {
    HiveWriter writer = allWriters.get( endPoint );
    if (writer == null) {
      LOG.info(getName() + ": Creating Writer to Hive end point : " + endPoint);
      writer = new HiveWriter(endPoint, txnsPerBatchAsk, autoCreatePartitions,
          callTimeout, callTimeoutPool, proxyUser, serializer, sinkCounter);
      sinkCounter.incrementConnectionCreatedCount();
      // Enforce the connection cap: prefer closing idle writers; only if
      // none were idle, retire the least-recently-used one.
      if (allWriters.size() > maxOpenConnections) {
        int retired = closeIdleWriters();
        if (retired == 0) {
          closeEldestWriter();
        }
      }
      allWriters.put(endPoint, writer);
      activeWriters.put(endPoint, writer);
    } else {
      // Writer already cached; make sure it is marked active for this batch.
      if (activeWriters.get(endPoint) == null) {
        activeWriters.put(endPoint,writer);
      }
    }
    return writer;
  } catch (HiveWriter.ConnectException e) {
    sinkCounter.incrementConnectionFailedCount();
    throw e;
  }
}
// NOTE(review): fragment — verifies the RPC client was initialized, counts the
// connection, then (when cxnResetInterval > 0) schedules a periodic
// connection-reset task. The Runnable body lies outside this view.
Preconditions.checkNotNull(client, "Rpc Client could not be " + "initialized. " + getName() + " could not be started"); sinkCounter.incrementConnectionCreatedCount(); if (cxnResetInterval > 0) { cxnResetExecutor.schedule(new Runnable() {
/**
 * Starts the sink: builds the ElasticSearch client (local or remote) and
 * records the connection with the sink counter. A client-creation failure is
 * logged and counted but does not abort startup (best-effort, as before).
 */
@Override
public void start() {
  ElasticSearchClientFactory clientFactory = new ElasticSearchClientFactory();
  // Fixed: the original logged the raw "{}" pattern with no argument.
  logger.info("ElasticSearch sink {} starting", getName());
  sinkCounter.start();
  try {
    if (isLocal) {
      client = clientFactory.getLocalClient(
          clientType, eventSerializer, indexRequestFactory);
    } else {
      client = clientFactory.getClient(clientType, serverAddresses,
          clusterName, eventSerializer, indexRequestFactory);
      client.configure(elasticSearchClientContext);
    }
    sinkCounter.incrementConnectionCreatedCount();
  } catch (Exception ex) {
    // Log through the logger (stack trace included) instead of printStackTrace().
    logger.error("Failed to create ElasticSearch client for sink " + getName(), ex);
    sinkCounter.incrementConnectionFailedCount();
    if (client != null) {
      client.close();
      sinkCounter.incrementConnectionClosedCount();
    }
  }
  super.start();
}
/**
 * Starts the sink: builds the ElasticSearch client (local or remote) and
 * records the connection with the sink counter. A client-creation failure is
 * logged and counted but does not abort startup (best-effort, as before).
 */
@Override
public void start() {
  ElasticSearchClientFactory clientFactory = new ElasticSearchClientFactory();
  // Fixed: the original logged the raw "{}" pattern with no argument.
  logger.info("ElasticSearch sink {} starting", getName());
  sinkCounter.start();
  try {
    if (isLocal) {
      client = clientFactory.getLocalClient(
          clientType, eventSerializer, indexRequestFactory);
    } else {
      client = clientFactory.getClient(clientType, serverAddresses,
          clusterName, eventSerializer, indexRequestFactory);
      client.configure(elasticSearchClientContext);
    }
    sinkCounter.incrementConnectionCreatedCount();
  } catch (Exception ex) {
    // Log through the logger (stack trace included) instead of printStackTrace().
    logger.error("Failed to create ElasticSearch client for sink " + getName(), ex);
    sinkCounter.incrementConnectionFailedCount();
    if (client != null) {
      client.close();
      sinkCounter.incrementConnectionClosedCount();
    }
  }
  super.start();
}