public void updateConfiguration(Configuration config)
{
    copy(resourcesConfiguration, config);

    // this is to prevent dfs client from doing reverse DNS lookups to determine whether nodes are rack local
    config.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, NoOpDNSToSwitchMapping.class, DNSToSwitchMapping.class);

    if (socksProxy != null) {
        config.setClass(HADOOP_RPC_SOCKET_FACTORY_CLASS_DEFAULT_KEY, SocksSocketFactory.class, SocketFactory.class);
        config.set(HADOOP_SOCKS_SERVER_KEY, socksProxy.toString());
    }

    if (domainSocketPath != null) {
        config.setStrings(DFS_DOMAIN_SOCKET_PATH_KEY, domainSocketPath);
    }

    // only enable short circuit reads if domain socket path is properly configured
    if (!config.get(DFS_DOMAIN_SOCKET_PATH_KEY, "").trim().isEmpty()) {
        config.setBooleanIfUnset(DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
    }

    config.setInt(DFS_CLIENT_SOCKET_TIMEOUT_KEY, toIntExact(dfsTimeout.toMillis()));
    config.setInt(IPC_PING_INTERVAL_KEY, toIntExact(ipcPingInterval.toMillis()));
    config.setInt(IPC_CLIENT_CONNECT_TIMEOUT_KEY, toIntExact(dfsConnectTimeout.toMillis()));
    config.setInt(IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, dfsConnectMaxRetries);

    if (isHdfsWireEncryptionEnabled) {
        config.set(HADOOP_RPC_PROTECTION, "privacy");
        config.setBoolean("dfs.encrypt.data.transfer", true);
    }

    config.setInt("fs.cache.max-size", fileSystemMaxCacheSize);

    config.setInt(LineRecordReader.MAX_LINE_LENGTH, textMaxLineLength);

    configureCompression(config, compressionCodec);

    s3ConfigurationUpdater.updateConfiguration(config);
}
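// Every call site in this listing delegates to configureCompression(config, compressionCodec),
// whose body is not shown here. The method below is a minimal sketch, assuming the helper simply
// maps a HiveCompressionCodec onto standard Hadoop/Hive output-compression properties; it accepts
// a Configuration so the JobConf call sites (JobConf extends Configuration) work as well. The
// accessor getHadoopCodecClassName() is a hypothetical name used only for illustration.
public static void configureCompression(Configuration config, HiveCompressionCodec compressionCodec)
{
    boolean compressed = compressionCodec != HiveCompressionCodec.NONE;

    // toggle output compression for Hive and for MapReduce-based record writers
    config.setBoolean("hive.exec.compress.output", compressed);
    config.setBoolean("mapreduce.output.fileoutputformat.compress", compressed);

    if (compressed) {
        // point text/sequence-file writers at the chosen Hadoop codec class,
        // e.g. org.apache.hadoop.io.compress.GzipCodec
        config.set("mapreduce.output.fileoutputformat.compress.codec", compressionCodec.getHadoopCodecClassName());
        config.set("mapred.output.compression.codec", compressionCodec.getHadoopCodecClassName());
    }
}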
public RecordFormatWriter(File targetFile, List<String> columnNames, List<Type> columnTypes, HiveCompressionCodec compressionCodec, HiveStorageFormat format, ConnectorSession session)
{
    JobConf config = new JobConf(conf);
    configureCompression(config, compressionCodec);

    recordWriter = new RecordFileWriter(
            new Path(targetFile.toURI()),
            columnNames,
            fromHiveStorageFormat(format),
            createSchema(format, columnNames, columnTypes),
            format.getEstimatedWriterSystemMemoryUsage(),
            config,
            TYPE_MANAGER,
            session);
}
configureCompression(jobConf, compressionCodec);
configureCompression(jobConf, compressionCodec);
configureCompression(config, compressionCodec);
public RecordFormatWriter(File targetFile, List<String> columnNames, List<Type> columnTypes, HiveCompressionCodec compressionCodec, HiveStorageFormat format)
{
    JobConf config = new JobConf(conf);
    configureCompression(config, compressionCodec);

    // build the Hive column metadata (name, Presto type, derived Hive type) for each column
    List<DataColumn> dataColumns = new ArrayList<>(columnNames.size());
    for (int i = 0; i < columnNames.size(); i++) {
        dataColumns.add(new DataColumn(columnNames.get(i), columnTypes.get(i), HiveType.toHiveType(columnTypes.get(i))));
    }

    // write through HiveRecordWriter using the storage format's output format and SerDe
    recordWriter = new HiveRecordWriter(
            "test_schema",
            "test_table",
            null,
            compressionCodec != HiveCompressionCodec.NONE,
            true,
            dataColumns,
            format.getOutputFormat(),
            format.getSerDe(),
            createSchema(format, columnNames, columnTypes),
            targetFile.getName(),
            targetFile.getParent(),
            targetFile.toString(),
            TYPE_MANAGER,
            config);
}
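// Example call pattern for the test writer above; a minimal sketch assuming a test-provided
// temporary directory (tempDir), Guava's ImmutableList, and the BIGINT/VARCHAR type constants.
// The column names, codec, and storage format are illustrative values; only the constructor
// shown above is exercised, no write/close calls are assumed here.
File targetFile = new File(tempDir, "compression_test.rc");
RecordFormatWriter writer = new RecordFormatWriter(
        targetFile,
        ImmutableList.of("id", "name"),
        ImmutableList.of(BIGINT, VARCHAR),
        HiveCompressionCodec.GZIP,
        HiveStorageFormat.RCBINARY);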
configureCompression(jobConf, compressionCodec);
configureCompression(jobConf, compressionCodec);