/**
 * Closes the given {@link Closeable} if it is non-null, suppressing any
 * {@link IOException} thrown by {@code close()}. A suppressed exception is
 * logged at WARN level rather than propagated.
 *
 * @param c the closeable to close; may be null, in which case this is a no-op
 */
public static void closeAndSwallowIOExceptions(Closeable c) {
  if (c != null) {
    try {
      c.close();
    } catch (IOException e) {
      LOG.warn("Encountered exception closing closeable", e);
    }
  }
}
}
/**
 * Closes a (potentially null) closeable, swallowing any IOExceptions thrown by
 * c.close(). The exception will be logged.
 *
 * @param c can be null
 */
public static void closeAndSwallowIOExceptions(Closeable c) {
  if (c == null) {
    return;
  }
  try {
    c.close();
  } catch (IOException e) {
    // Deliberately swallowed: this helper is meant for cleanup paths where a
    // failed close() must not mask the primary outcome. Logged for visibility.
    LOG.warn("Encountered exception closing closeable", e);
  }
}
}
/**
 * Writes the Parquet summary (_metadata) file for the output under
 * {@code outputPath}, if job summaries are enabled via
 * {@code ParquetOutputFormat.ENABLE_JOB_SUMMARY} (default: enabled).
 * Best-effort: all failures are logged at WARN and never rethrown.
 *
 * @param configuration Hadoop configuration, consulted for the summary flag
 * @param outputPath directory containing the job's Parquet output files
 */
public static void writeMetaDataFile(Configuration configuration, Path outputPath) {
  if (configuration.getBoolean(ParquetOutputFormat.ENABLE_JOB_SUMMARY, true)) {
    try {
      final FileSystem fileSystem = outputPath.getFileSystem(configuration);
      FileStatus outputStatus = fileSystem.getFileStatus(outputPath);
      List<Footer> footers = ParquetFileReader.readAllFootersInParallel(configuration, outputStatus);
      try {
        ParquetFileWriter.writeMetadataFile(configuration, outputPath, footers);
      } catch (Exception e) {
        // Writing may have failed partway through: delete any partial
        // _metadata file so readers do not pick up a corrupt summary.
        LOG.warn("could not write summary file for " + outputPath, e);
        final Path metadataPath = new Path(outputPath, ParquetFileWriter.PARQUET_METADATA_FILE);
        if (fileSystem.exists(metadataPath)) {
          fileSystem.delete(metadataPath, true);
        }
      }
    } catch (Exception e) {
      // Covers failures before writing starts (stat/footer reads) and any
      // failure of the cleanup above.
      LOG.warn("could not write summary file for " + outputPath, e);
    }
  }
}
/**
 * Builds the full human-readable version string, e.g.
 * {@code "parquet-mr version 1.2.3 (build <sha>)"}. The git SHA is read from
 * the jar's manifest; any failure is logged and the SHA portion is omitted.
 *
 * @return the version string; never null
 */
private static String readFullVersion() {
  String sha = null;
  try {
    String jarPath = getJarPath();
    if (jarPath != null) {
      URL manifestUrl = getResourceFromJar(jarPath, "META-INF/MANIFEST.MF");
      if (manifestUrl != null) {
        // Close the stream explicitly — the original code leaked it.
        java.io.InputStream in = manifestUrl.openStream();
        try {
          Manifest manifest = new Manifest(in);
          sha = manifest.getMainAttributes().getValue("git-SHA-1");
        } finally {
          in.close();
        }
      }
    }
  } catch (Exception e) {
    LOG.warn("can't read from META-INF", e);
  }
  return "parquet-mr"
      + (VERSION_NUMBER != null ? " version " + VERSION_NUMBER : "")
      + (sha != null ? " (build " + sha + ")" : "");
}
/**
 * Builds the full human-readable version string, e.g.
 * {@code "parquet-mr version 1.2.3 (build <sha>)"}. The git SHA is read from
 * the jar's manifest; any failure is logged and the SHA portion is omitted.
 *
 * @return the version string; never null
 */
private static String readFullVersion() {
  String sha = null;
  try {
    String jarPath = getJarPath();
    if (jarPath != null) {
      URL manifestUrl = getResourceFromJar(jarPath, "META-INF/MANIFEST.MF");
      if (manifestUrl != null) {
        // Close the stream explicitly — the original code leaked it.
        java.io.InputStream in = manifestUrl.openStream();
        try {
          Manifest manifest = new Manifest(in);
          sha = manifest.getMainAttributes().getValue("git-SHA-1");
        } finally {
          in.close();
        }
      }
    }
  } catch (Exception e) {
    LOG.warn("can't read from META-INF", e);
  }
  return "parquet-mr"
      + (VERSION_NUMBER != null ? " version " + VERSION_NUMBER : "")
      + (sha != null ? " (build " + sha + ")" : "");
}
/**
 * Reads the artifact version from the parquet-column pom.properties packaged
 * inside this jar. Any failure is logged and null is returned.
 *
 * @return the version string, or null if it could not be determined
 */
private static String readVersionNumber() {
  String version = null;
  try {
    String jarPath = getJarPath();
    if (jarPath != null) {
      URL pomPropertiesUrl = getResourceFromJar(jarPath, "META-INF/maven/com.twitter/parquet-column/pom.properties");
      if (pomPropertiesUrl != null) {
        Properties properties = new Properties();
        // Close the stream explicitly — the original code leaked it.
        java.io.InputStream in = pomPropertiesUrl.openStream();
        try {
          properties.load(in);
        } finally {
          in.close();
        }
        version = properties.getProperty("version");
      }
    }
  } catch (Exception e) {
    LOG.warn("can't read from META-INF", e);
  }
  return version;
}
/**
 * Reads the artifact version from the parquet-column pom.properties packaged
 * inside this jar. Any failure is logged and null is returned.
 *
 * @return the version string, or null if it could not be determined
 */
private static String readVersionNumber() {
  String version = null;
  try {
    String jarPath = getJarPath();
    if (jarPath != null) {
      URL pomPropertiesUrl = getResourceFromJar(jarPath, "META-INF/maven/com.twitter/parquet-column/pom.properties");
      if (pomPropertiesUrl != null) {
        Properties properties = new Properties();
        // Close the stream explicitly — the original code leaked it.
        java.io.InputStream in = pomPropertiesUrl.openStream();
        try {
          properties.load(in);
        } finally {
          in.close();
        }
        version = properties.getProperty("version");
      }
    }
  } catch (Exception e) {
    LOG.warn("can't read from META-INF", e);
  }
  return version;
}
private CompressionCodecName getHadoopCompressionCodec() { CompressionCodecName codec; try { // find the right codec Class<?> codecClass = getHadoopOutputCompressorClass(CompressionCodecName.UNCOMPRESSED.getHadoopCompressionCodecClass()); if (INFO) LOG.info("Compression set through hadoop codec: " + codecClass.getName()); codec = CompressionCodecName.fromCompressionCodec(codecClass); } catch (CompressionCodecNotSupportedException e) { if (WARN) LOG.warn("codec defined in hadoop config is not supported by parquet [" + e.getCodecClass().getName() + "] and will use UNCOMPRESSED", e); codec = CompressionCodecName.UNCOMPRESSED; } catch (IllegalArgumentException e) { if (WARN) LOG.warn("codec class not found: " + e.getMessage(), e); codec = CompressionCodecName.UNCOMPRESSED; } return codec; }
} else { scale = (double) totalMemoryPool / totalAllocations; LOG.warn(String.format( "Total allocation exceeds %.2f%% (%,d bytes) of heap memory\n" + "Scaling row group sizes to %.2f%% for %d writers",
if (newValue == null || !newValue.isCurrent(key)) { if (Log.WARN) { LOG.warn("Ignoring new cache entry for '" + key + "' because it is " + (newValue == null ? "null" : "not current")); if (oldValue != null && oldValue.isNewerThan(newValue)) { if (Log.WARN) { LOG.warn("Ignoring new cache entry for '" + key + "' because " + "existing cache entry is newer");
private void flushStore() throws IOException { LOG.info(format("Flushing mem store to file. allocated memory: %,d", store.allocatedSize())); if (store.allocatedSize() > 3 * blockSize) { LOG.warn("Too much memory used: " + store.memUsageString()); } w.startBlock(recordCount); store.flush(); pageStore.flushToFileWriter(w); recordCount = 0; w.endBlock(); store = null; pageStore = null; } }
/**
 * Flushes the buffered column store to the file writer as one row group
 * (only if any records were buffered), then releases the stores so their
 * buffers can be reclaimed.
 *
 * @throws IOException if the underlying file writer fails
 */
private void flushRowGroupToStore() throws IOException {
  LOG.info(format("Flushing mem columnStore to file. allocated memory: %,d", columnStore.getAllocatedSize()));
  // (long) cast keeps the multiplication in long arithmetic so a large
  // threshold cannot overflow int.
  if (columnStore.getAllocatedSize() > 3 * (long)rowGroupSizeThreshold) {
    LOG.warn("Too much memory used: " + columnStore.memUsageString());
  }
  if (recordCount > 0) {
    parquetFileWriter.startBlock(recordCount);
    columnStore.flush();
    pageStore.flushToFileWriter(parquetFileWriter);
    recordCount = 0;
    parquetFileWriter.endBlock();
  }
  // Drop references so the buffered data becomes eligible for GC.
  columnStore = null;
  pageStore = null;
}
@Override public ResourceStatistics getStatistics(String location, Job job) throws IOException { if (DEBUG) LOG.debug("LoadMetadata.getStatistics(" + location + ", " + job + ")"); /* We need to call setInput since setLocation is not guaranteed to be called before this */ setInput(location, job); long length = 0; try { for (InputSplit split : getParquetInputFormat().getSplits(job)) { length += split.getLength(); } } catch (InterruptedException e) { LOG.warn("Interrupted: ", e); return null; } ResourceStatistics stats = new ResourceStatistics(); // TODO use pig-0.12 setBytes api when its available stats.setmBytes(length / 1024 / 1024); return stats; }
memoryManager = new MemoryManager(maxLoad, minAllocation); } else if (memoryManager.getMemoryPoolRatio() != maxLoad) { LOG.warn("The configuration " + MEMORY_POOL_RATIO + " has been set. It should not " + "be reset by the new value: " + maxLoad);