/**
 * Returns the shared record reader, rewound to the beginning.
 * A failure during rewind is logged and rethrown as an unchecked exception.
 */
@Override
public RecordReader getRecordReader() {
  try {
    _recordReader.rewind();
  } catch (Exception e) {
    LOGGER.error("Caught exception while rewinding record reader", e);
    Utils.rethrowException(e);
  }
  return _recordReader;
}
}
/**
 * Merge all HLLs in list to the first HLL in the list, the list must contain at least one element
 * @param resultList non-empty list of HLLs; element 0 is mutated in place and returned
 * @return the first HLL with all subsequent HLLs merged into it
 */
public static HyperLogLog mergeHLLResultsToFirstInList(List<HyperLogLog> resultList) {
  HyperLogLog merged = resultList.get(0);
  // Fold every remaining HLL into the first one; a merge failure is rethrown unchecked.
  for (HyperLogLog other : resultList.subList(1, resultList.size())) {
    try {
      merged.addAll(other);
    } catch (CardinalityMergeException e) {
      Utils.rethrowException(e);
    }
  }
  return merged;
}
/**
 * Loads the configured keystore file and builds key managers from it.
 *
 * @return the key managers from the keystore, or {@code null} when no keystore file is
 *         configured (or, unreachably, after a rethrown failure)
 */
private KeyManager[] setupKeyManagers() {
  if (_keyStoreFile == null) {
    return null;
  }
  try {
    KeyStore keyStore = KeyStore.getInstance(KEYSTORE_TYPE);
    LOGGER.info("Setting up keystore with file {}", _keyStoreFile);
    // try-with-resources: the original opened this stream and never closed it,
    // leaking a file descriptor on every call.
    try (FileInputStream keyStoreStream = new FileInputStream(new File(_keyStoreFile))) {
      keyStore.load(keyStoreStream, _keyStorePassword.toCharArray());
    }
    KeyManagerFactory kmf = KeyManagerFactory.getInstance(KEYMANAGER_FACTORY_ALGORITHM);
    kmf.init(keyStore, _keyStorePassword.toCharArray());
    LOGGER.info("Successfully initialized keystore");
    return kmf.getKeyManagers();
  } catch (Exception e) {
    Utils.rethrowException(e);
  }
  return null;
}
}
/**
 * Builds an {@link SSLContext} from the configured trust and key material.
 * Any setup failure is rethrown unchecked; the trailing return is then unreachable.
 */
public SSLContext generate() {
  SSLContext context = null;
  try {
    TrustManager[] trust = setupTrustManagers();
    KeyManager[] keys = setupKeyManagers();
    context = SSLContext.getInstance(SECURITY_ALGORITHM);
    context.init(keys, trust, null);
  } catch (Exception e) {
    Utils.rethrowException(e);
  }
  return context;
}
/**
 * Posts a PQL query (with optional trace flag) to the given URL and returns the raw response.
 * Failures are logged and rethrown unchecked; the AssertionError marks the unreachable path.
 */
public String sendPQLRaw(String url, String pqlRequest, String traceEnabled) {
  try {
    final long startTimeMs = System.currentTimeMillis();
    ObjectNode requestJson = JsonUtils.newObjectNode().put("pql", pqlRequest);
    boolean hasTrace = traceEnabled != null && !traceEnabled.isEmpty();
    if (hasTrace) {
      requestJson.put("trace", traceEnabled);
    }
    final String response = sendPostRaw(url, requestJson.toString(), null);
    final long elapsedMs = System.currentTimeMillis() - startTimeMs;
    LOGGER.info("BQL: " + pqlRequest + " Time: " + elapsedMs);
    return response;
  } catch (final Exception ex) {
    LOGGER.error("Caught exception in sendPQLRaw", ex);
    Utils.rethrowException(ex);
    throw new AssertionError("Should not reach this");
  }
}
}
/**
 * Constructs the {@link StreamConsumerFactory} using the {@link StreamConfig::getConsumerFactoryClassName()} property and initializes it
 * @param streamConfig stream configuration carrying the factory class name
 * @return the initialized factory (a reflection failure is rethrown unchecked)
 */
public static StreamConsumerFactory create(StreamConfig streamConfig) {
  StreamConsumerFactory factory = null;
  try {
    factory = (StreamConsumerFactory) Class.forName(streamConfig.getConsumerFactoryClassName()).newInstance();
    // init moved inside the try: the original called it after the catch, which would
    // NPE on a null factory if the rethrow helper ever returned; this also matches the
    // sibling StreamMessageDecoder factory.
    factory.init(streamConfig);
  } catch (Exception e) {
    Utils.rethrowException(e);
  }
  return factory;
}
}
/**
 * Handles a property-store change notification: refreshes watchers and, when this node
 * is the leader, triggers realtime segment assignment for relevant paths.
 */
private void processPropertyStoreChange(String path) {
  try {
    LOGGER.info("Processing change notification for path: {}", path);
    refreshWatchers(path);
    // Only the leader acts on segment/table-config/leader-change paths.
    if (!isLeader()) {
      LOGGER.info("Not the leader of this cluster, ignoring realtime segment property store change.");
      return;
    }
    boolean triggersAssignment = path.matches(REALTIME_SEGMENT_PROPERTY_STORE_PATH_PATTERN)
        || path.matches(REALTIME_TABLE_CONFIG_PROPERTY_STORE_PATH_PATTERN)
        || path.equals(CONTROLLER_LEADER_CHANGE);
    if (triggersAssignment) {
      assignRealtimeSegmentsToServerInstancesIfNecessary();
    }
  } catch (Exception e) {
    LOGGER.error("Caught exception while processing change for path {}", path, e);
    Utils.rethrowException(e);
  }
}
/** CONSUMING -> DROPPED is implemented as CONSUMING -> OFFLINE -> DROPPED. */
@Transition(from = "CONSUMING", to = "DROPPED")
public void onBecomeDroppedFromConsuming(Message message, NotificationContext context) {
  _logger.info("SegmentOnlineOfflineStateModel.onBecomeDroppedFromConsuming() : " + message);
  try {
    // Chain the two existing transitions rather than duplicating their logic.
    onBecomeOfflineFromConsuming(message, context);
    onBecomeDroppedFromOffline(message, context);
  } catch (final Exception e) {
    _logger.error("Caught exception on CONSUMING -> DROPPED state transition", e);
    Utils.rethrowException(e);
  }
}
/** ONLINE -> DROPPED is implemented as ONLINE -> OFFLINE -> DROPPED. */
@Transition(from = "ONLINE", to = "DROPPED")
public void onBecomeDroppedFromOnline(Message message, NotificationContext context) {
  _logger.info("SegmentOnlineOfflineStateModel.onBecomeDroppedFromOnline() : " + message);
  try {
    // Chain the two existing transitions rather than duplicating their logic.
    onBecomeOfflineFromOnline(message, context);
    onBecomeDroppedFromOffline(message, context);
  } catch (final Exception e) {
    _logger.error("Caught exception on ONLINE -> DROPPED state transition", e);
    Utils.rethrowException(e);
  }
}
/** Resets this instance by closing it and re-initializing; an I/O failure is rethrown unchecked. */
@Override
public void clear() {
  try {
    close();
    init();
  } catch (IOException e) {
    Utils.rethrowException(e);
  }
}
/**
 * Packs a directory into a gzipped tar file, appending the .tar.gz extension when missing.
 *
 * @param directoryPath directory to archive
 * @param tarGzPath target archive path (extension appended if absent)
 * @param entryPrefix prefix for entry names inside the archive
 * @return the actual archive path written (with extension)
 * @throws IOException declared for compatibility; failures are rethrown unchecked
 */
public static String createTarGzOfDirectory(String directoryPath, String tarGzPath, String entryPrefix)
    throws IOException {
  if (!tarGzPath.endsWith(TAR_GZ_FILE_EXTENSION)) {
    tarGzPath = tarGzPath + TAR_GZ_FILE_EXTENSION;
  }
  // The stream chain is closed innermost-first by try-with-resources.
  try (FileOutputStream fileOut = new FileOutputStream(new File(tarGzPath));
      BufferedOutputStream bufferedOut = new BufferedOutputStream(fileOut);
      GzipCompressorOutputStream gzipOut = new GzipCompressorOutputStream(bufferedOut);
      TarArchiveOutputStream tarOut = new TarArchiveOutputStream(gzipOut)) {
    // GNU long-file mode so entry names longer than 100 chars are not truncated.
    tarOut.setLongFileMode(TarArchiveOutputStream.LONGFILE_GNU);
    addFileToTarGz(tarOut, directoryPath, entryPrefix);
  } catch (IOException e) {
    LOGGER.error("Failed to create tar.gz file for {} at path: {}", directoryPath, tarGzPath, e);
    Utils.rethrowException(e);
  }
  return tarGzPath;
}
/** ONLINE -> OFFLINE: take the table's routing offline and drop its query quota. */
@Transition(from = "ONLINE", to = "OFFLINE")
public void onBecomeOfflineFromOnline(Message message, NotificationContext context) {
  try {
    LOGGER.info("BrokerResourceOnlineOfflineStateModel.onBecomeOfflineFromOnline() : " + message);
    String tableName = message.getPartitionName();
    _helixExternalViewBasedRouting.markDataResourceOffline(tableName);
    _tableQueryQuotaManager.dropTableQueryQuota(tableName);
  } catch (Exception e) {
    LOGGER.error("Caught exception during ONLINE -> OFFLINE transition", e);
    Utils.rethrowException(e);
    throw new AssertionError("Should not reach this");
  }
}
/** OFFLINE -> DROPPED: take the table's routing offline and drop its query quota. */
@Transition(from = "OFFLINE", to = "DROPPED")
public void onBecomeDroppedFromOffline(Message message, NotificationContext context) {
  try {
    LOGGER.info("BrokerResourceOnlineOfflineStateModel.onBecomeDroppedFromOffline() : " + message);
    String tableName = message.getPartitionName();
    _helixExternalViewBasedRouting.markDataResourceOffline(tableName);
    _tableQueryQuotaManager.dropTableQueryQuota(tableName);
  } catch (Exception e) {
    LOGGER.error("Caught exception during OFFLINE -> DROPPED transition", e);
    Utils.rethrowException(e);
    throw new AssertionError("Should not reach this");
  }
}
/** ONLINE -> DROPPED: take the table's routing offline and drop its query quota. */
@Transition(from = "ONLINE", to = "DROPPED")
public void onBecomeDroppedFromOnline(Message message, NotificationContext context) {
  try {
    LOGGER.info("BrokerResourceOnlineOfflineStateModel.onBecomeDroppedFromOnline() : " + message);
    String tableName = message.getPartitionName();
    _helixExternalViewBasedRouting.markDataResourceOffline(tableName);
    _tableQueryQuotaManager.dropTableQueryQuota(tableName);
  } catch (Exception e) {
    LOGGER.error("Caught exception during ONLINE -> DROPPED transition", e);
    Utils.rethrowException(e);
    throw new AssertionError("Should not reach this");
  }
}
@Transition(from = "OFFLINE", to = "DROPPED") public void onBecomeDroppedFromOffline(Message message, NotificationContext context) { _logger.info("SegmentOnlineOfflineStateModel.onBecomeDroppedFromOffline() : " + message); String tableNameWithType = message.getResourceName(); String segmentName = message.getPartitionName(); // This method might modify the file on disk. Use segment lock to prevent race condition Lock segmentLock = SegmentLocks.getSegmentLock(tableNameWithType, segmentName); try { segmentLock.lock(); final File segmentDir = new File(_fetcherAndLoader.getSegmentLocalDirectory(tableNameWithType, segmentName)); if (segmentDir.exists()) { FileUtils.deleteQuietly(segmentDir); _logger.info("Deleted segment directory {}", segmentDir); } } catch (final Exception e) { _logger.error("Cannot delete the segment : " + segmentName + " from local directory!\n" + e.getMessage(), e); Utils.rethrowException(e); } finally { segmentLock.unlock(); } }
/** CONSUMING -> OFFLINE: remove the consuming segment from the instance data manager. */
@Transition(from = "CONSUMING", to = "OFFLINE")
public void onBecomeOfflineFromConsuming(Message message, NotificationContext context) {
  _logger.info("SegmentOnlineOfflineStateModel.onBecomeOfflineFromConsuming() : " + message);
  String realtimeTableName = message.getResourceName();
  String segmentName = message.getPartitionName();
  try {
    _instanceDataManager.removeSegment(realtimeTableName, segmentName);
  } catch (Exception e) {
    _logger.error("Caught exception in state transition from CONSUMING -> OFFLINE for resource: {}, partition: {}",
        realtimeTableName, segmentName, e);
    Utils.rethrowException(e);
  }
}
/** ONLINE -> OFFLINE: remove the segment from the instance data manager. */
@Transition(from = "ONLINE", to = "OFFLINE")
public void onBecomeOfflineFromOnline(Message message, NotificationContext context) {
  _logger.info("SegmentOnlineOfflineStateModel.onBecomeOfflineFromOnline() : " + message);
  String tableNameWithType = message.getResourceName();
  String segmentName = message.getPartitionName();
  try {
    _instanceDataManager.removeSegment(tableNameWithType, segmentName);
  } catch (Exception e) {
    _logger.error("Caught exception in state transition from ONLINE -> OFFLINE for resource: {}, partition: {}",
        tableNameWithType, segmentName, e);
    Utils.rethrowException(e);
  }
}
/**
 * Constructs a {@link StreamMessageDecoder} using properties in {@link StreamConfig} and initializes it
 * @param streamConfig stream configuration carrying the decoder class and properties
 * @param schema schema the decoder is initialized against
 * @return the initialized decoder (a reflection/init failure is rethrown unchecked)
 */
public static StreamMessageDecoder create(StreamConfig streamConfig, Schema schema) {
  String decoderClassName = streamConfig.getDecoderClass();
  Map<String, String> decoderProps = streamConfig.getDecoderProperties();
  StreamMessageDecoder decoder = null;
  try {
    decoder = (StreamMessageDecoder) Class.forName(decoderClassName).newInstance();
    decoder.init(decoderProps, schema, streamConfig.getTopicName());
  } catch (Exception e) {
    Utils.rethrowException(e);
  }
  return decoder;
}
}
@Override public HelixTaskResult handleMessage() throws InterruptedException { HelixTaskResult result = new HelixTaskResult(); _logger.info("Handling message: {}", _message); try { acquireSema(_segmentName, LOGGER); // The number of retry times depends on the retry count in SegmentOperations. _fetcherAndLoader.addOrReplaceOfflineSegment(_tableNameWithType, _segmentName); result.setSuccess(true); } catch (Exception e) { _metrics.addMeteredTableValue(_tableNameWithType, ServerMeter.REFRESH_FAILURES, 1); Utils.rethrowException(e); } finally { releaseSema(); } return result; }
/**
 * OFFLINE -> ONLINE: loads the segment. Offline tables fetch-and-load the segment;
 * realtime tables register it with the instance data manager.
 */
@Transition(from = "OFFLINE", to = "ONLINE")
public void onBecomeOnlineFromOffline(Message message, NotificationContext context) {
  _logger.info("SegmentOnlineOfflineStateModel.onBecomeOnlineFromOffline() : " + message);
  String tableNameWithType = message.getResourceName();
  String segmentName = message.getPartitionName();
  try {
    TableType tableType = TableNameBuilder.getTableTypeFromTableName(message.getResourceName());
    Preconditions.checkNotNull(tableType);
    switch (tableType) {
      case OFFLINE:
        _fetcherAndLoader.addOrReplaceOfflineSegment(tableNameWithType, segmentName);
        break;
      default:
        _instanceDataManager.addRealtimeSegment(tableNameWithType, segmentName);
        break;
    }
  } catch (Exception e) {
    _logger.error("Caught exception in state transition from OFFLINE -> ONLINE for resource: {}, partition: {}",
        tableNameWithType, segmentName, e);
    Utils.rethrowException(e);
  }
}