        .withHeader()
        .withDelimiter(delimiter)
        .withQuote(quote)
        .withRecordSeparator(recordSeparator);
try (final CSVParser parser = new CSVParser(reader, format)) {
    ThreadMethods.throttledExecution(
            StreamMethods.enumerate(StreamMethods.stream(parser.spliterator(), false)).skip(skip).limit(limit),
            e -> {
        Integer rId = e.getKey();
        CSVRecord row = e.getValue();
        if (!row.isConsistent()) {
            logger.warn("WARNING: Skipping row {} because its size does not match the header size.", row.getRecordNumber());
        }
        TypeInference.DataType dataType = entry.getValue();
        Object value = TypeInference.DataType.parse(row.get(column), dataType); // parse the string value according to the DataType
        if (yVariable != null && yVariable.equals(column)) {
            y = value;
public static String[] parseAsCsv(String key, String value, Function<String, String> valueProcessor) {
    String cleanValue = MultivalueProperty.trimFieldsAndRemoveEmptyFields(value);
    List<String> result = new ArrayList<>();
    try (CSVParser csvParser = CSVFormat.RFC4180
            .withHeader((String) null)
            .withIgnoreEmptyLines()
            .withIgnoreSurroundingSpaces()
            .parse(new StringReader(cleanValue))) {
        List<CSVRecord> records = csvParser.getRecords();
        if (records.isEmpty()) {
            return ArrayUtils.EMPTY_STRING_ARRAY;
        }
        processRecords(result, records, valueProcessor);
        return result.toArray(new String[result.size()]);
    } catch (IOException e) {
        throw new IllegalStateException("Property: '" + key + "' doesn't contain a valid CSV value: '" + value + "'", e);
    }
}
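For comparison, a self-contained sketch of the same RFC 4180 splitting idea, with the project-specific pieces (MultivalueProperty.trimFieldsAndRemoveEmptyFields, processRecords, the valueProcessor callback) replaced by a plain field walk; the class and method names below are illustrative, not from the snippet above:

import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class CsvPropertySplitter {

    // Split a multivalue property such as "a, b, \"c,d\"" into its fields
    // using RFC 4180 quoting rules.
    public static String[] split(String value) {
        List<String> result = new ArrayList<>();
        try (CSVParser parser = CSVFormat.RFC4180
                .withIgnoreEmptyLines()
                .withIgnoreSurroundingSpaces()
                .parse(new StringReader(value))) {
            for (CSVRecord record : parser) {
                for (String field : record) {
                    result.add(field);
                }
            }
        } catch (IOException e) {
            throw new IllegalStateException("Not a valid CSV value: '" + value + "'", e);
        }
        return result.toArray(new String[0]);
    }
}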
private static String[] doParseDelimited(String delimited, CSVFormat format) {
    try (CSVParser parser = CSVParser.parse(delimited, format)) {
        Iterator<CSVRecord> records = parser.iterator();
        return records.hasNext()
                ? StreamSupport.stream(records.next().spliterator(), false).toArray(String[]::new)
                : EMPTY_STRING;
    } catch (IOException e) {
        throw new IllegalStateException(e); // Can't happen
    }
}
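A hedged usage note for the helper above: the format argument carries the delimiter, so a semicolon-separated line can be split without changing the method. This is a fragment, not from the original project, and it assumes EMPTY_STRING is a shared zero-length String[] constant in the surrounding class:

// Hypothetical call site:
String[] cells = doParseDelimited("a;b;c", CSVFormat.DEFAULT.withDelimiter(';'));
// cells now holds ["a", "b", "c"]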
@Override
public List<Object> deserialize(ByteBuffer ser) {
    try {
        String data = new String(Utils.toByteArray(ser), StandardCharsets.UTF_8);
        CSVParser parser = CSVParser.parse(data, CSVFormat.RFC4180);
        CSVRecord record = parser.getRecords().get(0);
        Preconditions.checkArgument(record.size() == fieldNames.size(), "Invalid schema");
        ArrayList<Object> list = new ArrayList<>(fieldNames.size());
        for (int i = 0; i < record.size(); i++) {
            list.add(record.get(i));
        }
        return list;
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}
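The snippet above only covers the read side. A minimal sketch of what a matching write side could look like, assuming the same one-record-per-buffer, RFC 4180 layout; the class name and example values are illustrative, not taken from the original project:

import java.io.IOException;
import java.io.StringWriter;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.List;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVPrinter;

public class CsvRowSerializer {

    // Encode one row as a single RFC 4180 CSV record in UTF-8 bytes.
    public static ByteBuffer serialize(List<Object> values) {
        StringWriter sw = new StringWriter();
        try (CSVPrinter printer = new CSVPrinter(sw, CSVFormat.RFC4180)) {
            printer.printRecord(values);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        return ByteBuffer.wrap(sw.toString().getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        ByteBuffer buf = serialize(Arrays.<Object>asList("alice", 42, "engineer"));
        // Feeding buf to a deserializer like the one above would yield the list
        // ["alice", "42", "engineer"]: every value comes back as a String.
    }
}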
@Override
public RecordSchema getSchema(Map<String, String> variables, final InputStream contentStream, final RecordSchema readSchema) throws SchemaNotFoundException {
    if (this.context == null) {
        throw new SchemaNotFoundException("Schema Access Strategy intended only for validation purposes and cannot obtain schema");
    }
    try {
        final CSVFormat csvFormat = CSVUtils.createCSVFormat(context).withFirstRecordAsHeader();
        try (final Reader reader = new InputStreamReader(new BOMInputStream(contentStream));
             final CSVParser csvParser = new CSVParser(reader, csvFormat)) {
            final List<RecordField> fields = new ArrayList<>();
            for (final String columnName : csvParser.getHeaderMap().keySet()) {
                fields.add(new RecordField(columnName, RecordFieldType.STRING.getDataType(), true));
            }
            return new SimpleRecordSchema(fields);
        }
    } catch (final Exception e) {
        throw new SchemaNotFoundException("Failed to read Header line from CSV", e);
    }
}
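Stripped of the NiFi-specific types (RecordSchema, RecordField, CSVUtils, BOMInputStream), the header-discovery step boils down to reading the parser's header map. A minimal plain-Java sketch, assuming UTF-8 input and the default format; class and method names are illustrative:

import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;

public class CsvHeaderReader {

    // Return the column names found on the first line of the stream,
    // in the order they appear.
    public static List<String> readHeader(InputStream in) throws IOException {
        CSVFormat format = CSVFormat.DEFAULT.withFirstRecordAsHeader();
        try (Reader reader = new InputStreamReader(in, StandardCharsets.UTF_8);
             CSVParser parser = new CSVParser(reader, format)) {
            return new ArrayList<>(parser.getHeaderMap().keySet());
        }
    }
}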
        .unwrap(PhoenixConnection.class);
PhoenixRuntime.executeStatements(conn, new StringReader(statements), null);
        ENCAPSULATED_CHARS_TABLE, Collections.<String> emptyList(), true);
csvUtil.upsert(new StringReader(CSV_VALUES_ENCAPSULATED_CONTROL_CHARS_WITH_HEADER));
        + ENCAPSULATED_CHARS_TABLE);
ResultSet phoenixResultSet = statement.executeQuery();
parser = new CSVParser(new StringReader(CSV_VALUES_ENCAPSULATED_CONTROL_CHARS_WITH_HEADER), csvUtil.getFormat());
} finally {
    if (parser != null) parser.close();
    if (conn != null) conn.close();
        .unwrap(PhoenixConnection.class);
PhoenixRuntime.executeStatements(conn, new StringReader(statements), null);
        true);
try {
    csvUtil.upsert(new StringReader(CSV_VALUES_BAD_ENCAPSULATED_CONTROL_CHARS_WITH_HEADER));
    fail();
parser.close();
if (conn != null) conn.close();
public CSVRecord parse(String input) throws IOException {
    CSVParser csvParser = new CSVParser(new StringReader(input), this.csvFormat);
    return Iterables.getFirst(csvParser, null);
}
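A hypothetical call site for the single-line parser above (fragment only; lineParser stands for an instance of the enclosing class built with CSVFormat.DEFAULT). Note that the CSVParser created inside parse(...) is never closed; for a single in-memory line that is harmless, but try-with-resources would be tidier:

// Hypothetical usage:
CSVRecord first = lineParser.parse("red,green,blue");
if (first != null) {
    System.out.println(first.get(1)); // prints "green"
}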
try (CSVParser parser = new CSVParser(new StringReader(content), getCSVFormat().withHeader())) {
    Set<String> columns = parser.getHeaderMap().keySet();
    Map<String, Field> fieldMap = getFieldMap(objectType);
    for (String column : columns) {
        Field field = fieldMap.get(caseSensitiveFieldNames ? column : column.toLowerCase());
        String value = record.get(column);
        Object object = objectFromString(value, field.getType());
        field.set(o, object);
String contents = IOUtils.toString(reader);
ourLog.info("File contents for: {}\n{}", nextFilename, contents);

reader = new StringReader(contents);
CSVFormat format = CSVFormat.newFormat(theDelimiter).withFirstRecordAsHeader();
if (theQuoteMode != null) {
    format = format.withQuote('"').withQuoteMode(theQuoteMode);
}
parsed = new CSVParser(reader, format);
Iterator<CSVRecord> iter = parsed.iterator();
ourLog.debug("Header map: {}", parsed.getHeaderMap());

while (iter.hasNext()) {
    CSVRecord nextRecord = iter.next();
    if (!nextRecord.isConsistent()) {
        continue;
@Test
public void testParseCustomNullValues() throws IOException {
    final StringWriter sw = new StringWriter();
    final CSVFormat format = CSVFormat.DEFAULT.withNullString("NULL");
    try (final CSVPrinter printer = new CSVPrinter(sw, format)) {
        printer.printRecord("a", null, "b");
    }

    final String csvString = sw.toString();
    assertEquals("a,NULL,b" + recordSeparator, csvString);

    try (final CSVParser iterable = format.parse(new StringReader(csvString))) {
        final Iterator<CSVRecord> iterator = iterable.iterator();
        final CSVRecord record = iterator.next();
        assertEquals("a", record.get(0));
        assertEquals(null, record.get(1));
        assertEquals("b", record.get(2));
        assertFalse(iterator.hasNext());
    }
}
        .unwrap(PhoenixConnection.class);
PhoenixRuntime.executeStatements(conn, new StringReader(statements), null);
csvUtil.upsert(new StringReader(STOCK_CSV_VALUES));
        + stockTableName);
ResultSet phoenixResultSet = statement.executeQuery();
parser = new CSVParser(new StringReader(STOCK_CSV_VALUES), csvUtil.getFormat());
for (CSVRecord record : parser) {
    assertTrue(phoenixResultSet.next());
    assertEquals(record.get(0), phoenixResultSet.getString(1));
    assertNull(phoenixResultSet.getString(2));
}
} finally {
    if (parser != null) parser.close();
    if (conn != null) conn.close();
private List<T> loadValues() {
    try {
        final String load = persistService.load(persistKey, "", Optional::of);
        Reader in = new StringReader(load);
        return new CSVParser(in, CSVFormat.DEFAULT).getRecords()
                .stream()
                .map(v -> v.get(0))
                .map(deserializer)
                .collect(toList());
    } catch (IOException e) {
        LOGGER.error("Cant read suggestion from persistence for key: " + persistKey);
        return new ArrayList<>();
    }
}
public List<StringMap> records(String input) {
    try {
        CSVParser parser = CSVFormat.RFC4180
                .withHeader(header.toArray(new String[]{}))
                .parse(new StringReader(input));
        return parser.getRecords().stream()
                .map(record -> new StringMap(record.toMap()))
                .collect(Collectors.toList());
    } catch (IOException e) {
        String message = String.format("OOPS I couldn't parse the csv line! '%s'", input);
        throw new RuntimeException(message);
    }
}
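A hypothetical call for the method above, assuming the header field was initialized to the list ["name", "age"] (fragment only; the input text is illustrative):

List<StringMap> rows = records("alice,30\nbob,25");
// the first StringMap wraps {"name" -> "alice", "age" -> "30"}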
protected CSVRecord getListFileCsvRecord(String listFileLine) {
    try {
        return CSVFormat.EXCEL.parse(new StringReader(listFileLine)).iterator().next();
    } catch (IOException e) {
        throw new RuntimeException("error parsing list file CSV record: " + listFileLine, e);
    }
}
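A hypothetical call for the helper above (fragment only; the line content is made up for illustration):

CSVRecord record = getListFileCsvRecord("report.pdf,1024,2020-01-01");
String fileName = record.get(0); // "report.pdf"
String size = record.get(1);     // "1024"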
try (CSVParser csvParser = new CSVParser(reader, CSVFormat.TDF)) {
    Iterator<CSVRecord> iterator = csvParser.iterator();
    String field = record.get(0);
    if ((field.toUpperCase(Locale.ENGLISH).equals(field)) && (record.size() == 1)) {
        investigationSection = Arrays.asList(sections).contains(field);
        studySection = (studyFileName != null) && (field.equals(studySectionField));
        break;
    }
    String value = record.get(1);
    map.put(field, value);
    studyTarget = (field.equals(studyFileNameField)) && (value.equals(studyFileName));
    InputStream gzipStream = new GZIPInputStream(fileStream);
    Reader decoder = new InputStreamReader(gzipStream);
    csvParser = new CSVParser(decoder, CSVFormat.DEFAULT.withHeader());
} else {
    File csvFile = new File(conf.getString(MacroBaseConf.CSV_INPUT_FILE));
    csvParser = CSVParser.parse(csvFile, Charset.defaultCharset(), CSVFormat.DEFAULT.withHeader());
}
schema = csvParser.getHeaderMap();
Iterator<CSVRecord> rawIterator = csvParser.iterator();
int rowCount = 0;
columnValues.add(new ColumnValue(se.getKey(), record.get(se.getValue())));
@Override
public Map<String, String> next() throws IOException {
    if (csvParser == null) {
        csvParser = CSVFormat.DEFAULT.withHeader().withDelimiter(delimiter).parse(inputStream);
        iterator = csvParser.iterator();
    }
    if (!iterator.hasNext()) {
        return null;
    }
    CSVRecord csvRecord = iterator.next();
    return csvRecord.toMap();
}
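A hedged sketch of a caller for the reader above (fragment only; reader stands for an instance of the enclosing class, and the column names are illustrative). Because next() signals exhaustion with null rather than throwing NoSuchElementException, the caller can drain it with a simple loop:

Map<String, String> row;
while ((row = reader.next()) != null) {
    System.out.println(row.get("id") + " -> " + row.get("value"));
}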
        (double) totalHunspellComputationTime.get() / numMatches.get())));
SuggestionsOrdererConfig.setNgramsPath(args[1]);
try (CSVParser parser = new CSVParser(new FileReader(args[0]), CSVFormat.DEFAULT.withFirstRecordAsHeader())) {
    for (CSVRecord record : parser) {
        String lang = record.get("language");
        String covered = record.get("covered");
        String replacement = record.get("replacement");
        String sentenceStr = record.get("sentence");
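One caveat worth noting about the snippet above: new FileReader(args[0]) decodes the file with the platform default charset. A self-contained sketch of the same header-keyed loop with an explicit charset; the column names reuse the snippet's, while the class name and the printing step are illustrative:

import java.io.IOException;
import java.io.Reader;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.commons.csv.CSVFormat;
import org.apache.commons.csv.CSVParser;
import org.apache.commons.csv.CSVRecord;

public class SuggestionCsvReader {

    public static void main(String[] args) throws IOException {
        // Read the CSV as UTF-8 regardless of the platform default charset.
        try (Reader in = Files.newBufferedReader(Paths.get(args[0]), StandardCharsets.UTF_8);
             CSVParser parser = new CSVParser(in, CSVFormat.DEFAULT.withFirstRecordAsHeader())) {
            for (CSVRecord record : parser) {
                String lang = record.get("language");
                String covered = record.get("covered");
                String replacement = record.get("replacement");
                String sentenceStr = record.get("sentence");
                System.out.printf("%s: %s -> %s (%s)%n", lang, covered, replacement, sentenceStr);
            }
        }
    }
}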