sinkCounter.incrementConnectionFailedCount(); throw new IOException("Callable timed out after " + callTimeout + " ms" + " on file: " + bucketPath, eT); } catch (ExecutionException e1) { sinkCounter.incrementConnectionFailedCount(); Throwable cause = e1.getCause(); if (cause instanceof IOException) {
sinkCounter.incrementConnectionFailedCount(); throw new IOException("Callable timed out after " + callTimeout + " ms", ex); } catch (ExecutionException e1) { sinkCounter.incrementConnectionFailedCount(); Throwable cause = e1.getCause(); if (cause instanceof IOException) {
    sinkCounter.incrementConnectionFailedCount();
    throw eT;
  } catch (ExecutionException e1) {
    sinkCounter.incrementConnectionFailedCount();
    Throwable cause = e1.getCause();
    if (cause instanceof IOException) {
LOG.warn("Unsuccessfully attempted to close " + path + " " + maxRetries + " times. Initializing lease recovery."); sinkCounter.incrementConnectionFailedCount(); recoverLease();
  private void destroyConnection() {
    if (client != null) {
      logger.debug("Rpc sink {} closing Rpc client: {}", getName(), client);
      try {
        client.close();
        sinkCounter.incrementConnectionClosedCount();
      } catch (FlumeException e) {
        sinkCounter.incrementConnectionFailedCount();
        logger.error("Rpc sink " + getName() + ": Attempt to close Rpc " +
            "client failed. Exception follows.", e);
      }
    }
    client = null;
  }
  private HiveWriter getOrCreateWriter(Map<HiveEndPoint, HiveWriter> activeWriters,
      HiveEndPoint endPoint) throws HiveWriter.ConnectException, InterruptedException {
    try {
      HiveWriter writer = allWriters.get(endPoint);
      if (writer == null) {
        LOG.info(getName() + ": Creating Writer to Hive end point : " + endPoint);
        writer = new HiveWriter(endPoint, txnsPerBatchAsk, autoCreatePartitions,
            callTimeout, callTimeoutPool, proxyUser, serializer, sinkCounter);
        sinkCounter.incrementConnectionCreatedCount();
        if (allWriters.size() > maxOpenConnections) {
          int retired = closeIdleWriters();
          if (retired == 0) {
            closeEldestWriter();
          }
        }
        allWriters.put(endPoint, writer);
        activeWriters.put(endPoint, writer);
      } else if (activeWriters.get(endPoint) == null) {
        activeWriters.put(endPoint, writer);
      }
      return writer;
    } catch (HiveWriter.ConnectException e) {
      sinkCounter.incrementConnectionFailedCount();
      throw e;
    }
  }
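getOrCreateWriter bounds the writer cache: once allWriters grows past maxOpenConnections it retires idle writers first and only falls back to closing the eldest when nothing is idle. A self-contained sketch of that eviction order over a plain LinkedHashMap; BoundedWriterCache and its isIdle predicate are hypothetical stand-ins, not the Flume HiveWriter API:

import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.function.Predicate;

// Hypothetical illustration of the idle-first, eldest-second eviction policy above.
final class BoundedWriterCache<K, W> {
  private final int maxOpen;
  // LinkedHashMap preserves insertion order, so the first entry is the eldest.
  private final LinkedHashMap<K, W> writers = new LinkedHashMap<>();

  BoundedWriterCache(int maxOpen) {
    this.maxOpen = maxOpen;
  }

  void put(K key, W writer, Predicate<W> isIdle) {
    writers.put(key, writer);
    if (writers.size() <= maxOpen) {
      return;
    }
    // First pass: retire every idle writer, mirroring closeIdleWriters().
    int retired = 0;
    for (Iterator<Map.Entry<K, W>> it = writers.entrySet().iterator(); it.hasNext(); ) {
      if (isIdle.test(it.next().getValue())) {
        it.remove();
        retired++;
      }
    }
    // Fallback: nothing was idle, so drop the eldest, mirroring closeEldestWriter().
    if (retired == 0) {
      Iterator<K> eldest = writers.keySet().iterator();
      eldest.next();
      eldest.remove();
    }
  }
}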
      writer.open(currentBucket);
    } catch (Exception ex) {
      sinkCounter.incrementConnectionFailedCount();
      if (ex instanceof IOException) {
        throw (IOException) ex;
LOG.warn("failed to rename() file (" + bucketPath + "). Exception follows.", e); sinkCounter.incrementConnectionFailedCount(); final Callable<Void> scheduledRename = new ScheduledRenameCallable(); timedRollerPool.schedule(scheduledRename, retryInterval, TimeUnit.SECONDS);
        + currentBucket + "). Exception follows.", ex);
    sinkCounter.incrementConnectionFailedCount();
logger.info("callback received"); } catch (InterruptedException e) { sinkCounter.incrementConnectionFailedCount(); throw new FlumeException( "Interrupted while waiting for Hbase Callbacks", e); sinkCounter.incrementConnectionFailedCount(); if (client != null) { shutdownHBaseClient();
    sinkCounter.incrementConnectionFailedCount();
    if (ex instanceof FlumeException) {
      throw (FlumeException) ex;
      FlumeAuthenticationUtil.getAuthenticator(kerberosPrincipal, kerberosKeytab);
    } catch (Exception ex) {
      sinkCounter.incrementConnectionFailedCount();
      throw new FlumeException("Failed to login to HBase using " +
          "provided credentials.", ex);
    }
    // ... (separate call site in the same class)
    sinkCounter.incrementConnectionFailedCount();
    logger.error("Could not load table, " + tableName + " from HBase", e);
    // ... (separate call site in the same class)
    sinkCounter.incrementConnectionFailedCount();
    throw new FlumeException("Error getting column family from HBase. " +
        "Please verify that the table " + tableName + " and Column Family, "
      sinkCounter.incrementConnectionClosedCount();
    } catch (IOException e) {
      sinkCounter.incrementConnectionFailedCount();
      logger.error("Unable to close output stream. Exception follows.", e);
    } finally {
    sinkCounter.incrementConnectionFailedCount();
    if (ex instanceof IOException) {
      throw (IOException) ex;
  /**
   * Starts a grid and initializes an event transformer.
   */
  @SuppressWarnings("unchecked")
  @Override
  public synchronized void start() {
    A.notNull(springCfgPath, "Ignite config file");
    A.notNull(cacheName, "Cache name");
    A.notNull(eventTransformerCls, "Event transformer class");

    sinkCounter.start();

    try {
      if (ignite == null) {
        ignite = Ignition.start(springCfgPath);
      }
      if (eventTransformerCls != null && !eventTransformerCls.isEmpty()) {
        Class<? extends EventTransformer> clazz =
            (Class<? extends EventTransformer<Event, Object, Object>>)Class.forName(eventTransformerCls);
        eventTransformer = clazz.newInstance();
      }
    } catch (Exception e) {
      log.error("Failed to start grid", e);
      sinkCounter.incrementConnectionFailedCount();
      throw new FlumeException("Failed to start grid", e);
    }

    sinkCounter.incrementConnectionCreatedCount();
    super.start();
  }
  @Override
  public synchronized void start() {
    producer = new DefaultMQProducer(producerGroup);
    producer.setNamesrvAddr(nameServer);
    try {
      producer.start();
    } catch (MQClientException e) {
      sinkCounter.incrementConnectionFailedCount();
      log.error("RocketMQ producer start failed", e);
      throw new FlumeException("Failed to start RocketMQ producer", e);
    }
    sinkCounter.incrementConnectionCreatedCount();
    sinkCounter.start();
    super.start();
  }
  @Override
  public void start() {
    ElasticSearchClientFactory clientFactory = new ElasticSearchClientFactory();
    logger.info("ElasticSearch sink {} started", getName());
    sinkCounter.start();
    try {
      if (isLocal) {
        client = clientFactory.getLocalClient(clientType, eventSerializer,
            indexRequestFactory);
      } else {
        client = clientFactory.getClient(clientType, serverAddresses, clusterName,
            eventSerializer, indexRequestFactory);
        client.configure(elasticSearchClientContext);
      }
      sinkCounter.incrementConnectionCreatedCount();
    } catch (Exception ex) {
      logger.error("ElasticSearch client initialization failed", ex);
      sinkCounter.incrementConnectionFailedCount();
      if (client != null) {
        client.close();
        sinkCounter.incrementConnectionClosedCount();
      }
    }
    super.start();
  }
      shouldRotate = false;
    } catch (IOException e) {
      sinkCounter.incrementConnectionFailedCount();
      throw new EventDeliveryException("Unable to rotate file " +
          pathController.getCurrentFile() + " while delivering event", e);
    }
    // ... (separate call site in the same class)
      sinkCounter.incrementConnectionCreatedCount();
    } catch (IOException e) {
      sinkCounter.incrementConnectionFailedCount();
      throw new EventDeliveryException("Failed to open file " +
          pathController.getCurrentFile() + " while delivering event", e);
@Override public void start() { logger.info("Starting sink {} ",this.getName()); sinkCounter.start(); try { serializer.initialize(); sinkCounter.incrementConnectionCreatedCount(); } catch(Exception ex) { sinkCounter.incrementConnectionFailedCount(); logger.error("Error {} in initializing the serializer.",ex.getMessage()); Throwables.propagate(ex); } super.start(); }