/**
 * Constructs a tokenizer splitting on the specified delimiter character.
 *
 * <p>The input array is used directly and is NOT cloned, so the caller
 * must not modify it after construction.</p>
 *
 * @param input the string which is to be parsed, not cloned
 * @param delim the field delimiter character
 */
public StrTokenizer(final char[] input, final char delim) {
    // Delegate to the char[] constructor, then configure the delimiter.
    this(input);
    setDelimiterChar(delim);
}
/**
 * Constructs a tokenizer splitting on the specified delimiter character.
 *
 * @param input the string which is to be parsed
 * @param delim the field delimiter character
 */
public StrTokenizer(final String input, final char delim) {
    // Delegate to the String constructor, then configure the delimiter.
    this(input);
    setDelimiterChar(delim);
}
/**
 * Tokenizing with a trim matcher and empty tokens ignored: surrounding
 * whitespace is stripped and fields that become empty are dropped entirely,
 * while quoted content (including the escaped quote in "d;""e") survives.
 */
@Test
public void test4() {
    final String input = "a;b; c;\"d;\"\"e\";f; ; ;";
    final StrTokenizer tok = new StrTokenizer(input);
    tok.setDelimiterChar(';');
    tok.setQuoteChar('"');
    tok.setIgnoredMatcher(StrMatcher.trimMatcher());
    tok.setIgnoreEmptyTokens(true);
    // Idiomatic Java array declarations: String[] x, not C-style String x[].
    final String[] tokens = tok.getTokenArray();
    final String[] expected = {"a", "b", "c", "d;\"e", "f"};
    assertEquals(ArrayUtils.toString(tokens), expected.length, tokens.length);
    for (int i = 0; i < expected.length; i++) {
        assertEquals("token[" + i + "] was '" + tokens[i] + "' but was expected to be '" + expected[i] + "'",
                expected[i], tokens[i]);
    }
}
/**
 * Tokenizing with no ignored characters and empty tokens kept: leading
 * whitespace is preserved (" c"), whitespace-only fields come through
 * as-is, and the trailing empty field is reported as "".
 */
@Test
public void test3() {
    final String input = "a;b; c;\"d;\"\"e\";f; ; ;";
    final StrTokenizer tok = new StrTokenizer(input);
    tok.setDelimiterChar(';');
    tok.setQuoteChar('"');
    tok.setIgnoredMatcher(StrMatcher.noneMatcher());
    tok.setIgnoreEmptyTokens(false);
    // Idiomatic Java array declarations: String[] x, not C-style String x[].
    final String[] tokens = tok.getTokenArray();
    final String[] expected = {"a", "b", " c", "d;\"e", "f", " ", " ", ""};
    assertEquals(ArrayUtils.toString(tokens), expected.length, tokens.length);
    for (int i = 0; i < expected.length; i++) {
        assertEquals("token[" + i + "] was '" + tokens[i] + "' but was expected to be '" + expected[i] + "'",
                expected[i], tokens[i]);
    }
}
/**
 * Tokenizing with a trim matcher and empty tokens kept: whitespace-only
 * fields trim down to "" but are still reported, so the token count
 * matches the number of delimited fields.
 */
@Test
public void test1() {
    final String input = "a;b;c;\"d;\"\"e\";f; ; ; ";
    final StrTokenizer tok = new StrTokenizer(input);
    tok.setDelimiterChar(';');
    tok.setQuoteChar('"');
    tok.setIgnoredMatcher(StrMatcher.trimMatcher());
    tok.setIgnoreEmptyTokens(false);
    // Idiomatic Java array declarations: String[] x, not C-style String x[].
    final String[] tokens = tok.getTokenArray();
    final String[] expected = {"a", "b", "c", "d;\"e", "f", "", "", ""};
    assertEquals(ArrayUtils.toString(tokens), expected.length, tokens.length);
    for (int i = 0; i < expected.length; i++) {
        assertEquals("token[" + i + "] was '" + tokens[i] + "' but was expected to be '" + expected[i] + "'",
                expected[i], tokens[i]);
    }
}
/**
 * Tokenizing with no ignored characters and empty tokens kept: trailing
 * whitespace inside a field ("c ") is preserved, as are whitespace-only
 * fields and the trailing empty field.
 */
@Test
public void test2() {
    final String input = "a;b;c ;\"d;\"\"e\";f; ; ;";
    final StrTokenizer tok = new StrTokenizer(input);
    tok.setDelimiterChar(';');
    tok.setQuoteChar('"');
    tok.setIgnoredMatcher(StrMatcher.noneMatcher());
    tok.setIgnoreEmptyTokens(false);
    // Idiomatic Java array declarations: String[] x, not C-style String x[].
    final String[] tokens = tok.getTokenArray();
    final String[] expected = {"a", "b", "c ", "d;\"e", "f", " ", " ", ""};
    assertEquals(ArrayUtils.toString(tokens), expected.length, tokens.length);
    for (int i = 0; i < expected.length; i++) {
        assertEquals("token[" + i + "] was '" + tokens[i] + "' but was expected to be '" + expected[i] + "'",
                expected[i], tokens[i]);
    }
}
/**
 * Tokenizing with a trim matcher, empty tokens kept, and empty-as-null
 * enabled: whitespace-only fields trim to empty and are then reported
 * as {@code null} instead of "".
 */
@Test
public void test5() {
    final String input = "a;b; c;\"d;\"\"e\";f; ; ;";
    final StrTokenizer tok = new StrTokenizer(input);
    tok.setDelimiterChar(';');
    tok.setQuoteChar('"');
    tok.setIgnoredMatcher(StrMatcher.trimMatcher());
    tok.setIgnoreEmptyTokens(false);
    tok.setEmptyTokenAsNull(true);
    // Idiomatic Java array declarations: String[] x, not C-style String x[].
    final String[] tokens = tok.getTokenArray();
    final String[] expected = {"a", "b", "c", "d;\"e", "f", null, null, null};
    assertEquals(ArrayUtils.toString(tokens), expected.length, tokens.length);
    for (int i = 0; i < expected.length; i++) {
        assertEquals("token[" + i + "] was '" + tokens[i] + "' but was expected to be '" + expected[i] + "'",
                expected[i], tokens[i]);
    }
}
@Test public void test6() { final String input = "a;b; c;\"d;\"\"e\";f; ; ;"; final StrTokenizer tok = new StrTokenizer(input); tok.setDelimiterChar(';'); tok.setQuoteChar('"'); tok.setIgnoredMatcher(StrMatcher.trimMatcher()); tok.setIgnoreEmptyTokens(false); // tok.setTreatingEmptyAsNull(true); final String tokens[] = tok.getTokenArray(); final String expected[] = new String[]{"a", "b", " c", "d;\"e", "f", null, null, null,}; int nextCount = 0; while (tok.hasNext()) { tok.next(); nextCount++; } int prevCount = 0; while (tok.hasPrevious()) { tok.previous(); prevCount++; } assertEquals(ArrayUtils.toString(tokens), expected.length, tokens.length); assertTrue("could not cycle through entire token list" + " using the 'hasNext' and 'next' methods", nextCount == expected.length); assertTrue("could not cycle through entire token list" + " using the 'hasPrevious' and 'previous' methods", prevCount == expected.length); }
/**
 * Verifies that every mutator and reset method returns the tokenizer
 * itself, so calls can be fluently chained.
 *
 * <p>Uses {@code assertSame} rather than {@code assertEquals}: the
 * fluent contract is that the SAME instance is returned, and identity
 * is the property actually being tested.</p>
 */
@Test
public void testChaining() {
    final StrTokenizer tok = new StrTokenizer();
    assertSame(tok, tok.reset());
    assertSame(tok, tok.reset(""));
    assertSame(tok, tok.reset(new char[0]));
    assertSame(tok, tok.setDelimiterChar(' '));
    assertSame(tok, tok.setDelimiterString(" "));
    assertSame(tok, tok.setDelimiterMatcher(null));
    assertSame(tok, tok.setQuoteChar(' '));
    assertSame(tok, tok.setQuoteMatcher(null));
    assertSame(tok, tok.setIgnoredChar(' '));
    assertSame(tok, tok.setIgnoredMatcher(null));
    assertSame(tok, tok.setTrimmerMatcher(null));
    assertSame(tok, tok.setEmptyTokenAsNull(false));
    assertSame(tok, tok.setIgnoreEmptyTokens(false));
}
/**
 * Constructs a tokenizer splitting on the specified delimiter character.
 *
 * @param input the string which is to be parsed
 * @param delim the field delimiter character
 */
public StrTokenizer(final String input, final char delim) {
    // Delegate to the String constructor, then configure the delimiter.
    this(input);
    setDelimiterChar(delim);
}
/**
 * Constructs a tokenizer splitting on the specified delimiter character.
 *
 * @param input the string which is to be parsed
 * @param delim the field delimiter character
 */
public StrTokenizer(final String input, final char delim) {
    // Delegate to the String constructor, then configure the delimiter.
    this(input);
    setDelimiterChar(delim);
}
/**
 * Constructs a tokenizer splitting on the specified delimiter character.
 *
 * <p>The input array is used directly and is NOT cloned, so the caller
 * must not modify it after construction.</p>
 *
 * @param input the string which is to be parsed, not cloned
 * @param delim the field delimiter character
 */
public StrTokenizer(final char[] input, final char delim) {
    // Delegate to the char[] constructor, then configure the delimiter.
    this(input);
    setDelimiterChar(delim);
}
/**
 * Constructs a tokenizer splitting on the specified delimiter character.
 *
 * @param input the string which is to be parsed
 * @param delim the field delimiter character
 */
public StrTokenizer(final String input, final char delim) {
    // Delegate to the String constructor, then configure the delimiter.
    this(input);
    setDelimiterChar(delim);
}
/**
 * Constructs a tokenizer splitting on the specified delimiter character.
 *
 * <p>The input array is used directly and is NOT cloned, so the caller
 * must not modify it after construction.</p>
 *
 * @param input the string which is to be parsed, not cloned
 * @param delim the field delimiter character
 */
public StrTokenizer(final char[] input, final char delim) {
    // Delegate to the char[] constructor, then configure the delimiter.
    this(input);
    setDelimiterChar(delim);
}
/**
 * Constructs a tokenizer splitting on the specified delimiter character.
 *
 * <p>The input array is used directly and is NOT cloned, so the caller
 * must not modify it after construction.</p>
 *
 * @param input the string which is to be parsed, not cloned
 * @param delim the field delimiter character
 */
public StrTokenizer(final char[] input, final char delim) {
    // Delegate to the char[] constructor, then configure the delimiter.
    this(input);
    setDelimiterChar(delim);
}
// get a csv instance (which is cloned, so we can customize it) StrTokenizer tokenizer = StrTokenizer.getCSVInstance(); // Set delimiter char tokenizer.setDelimiterChar('|'); Scanner scanner = new Scanner(new File("file.psv")); while (scanner.hasNextLine()) { // set the input on the tokenizer tokenizer.reset(scanner.nextLine()); // get the tokens String toks[] = tokenizer.getTokenArray(); }
private ListMultimap<ProcessKey, LoadProfileEvent> readLoadProfileEvents(final Element testplan) throws IOException { ListMultimap<ProcessKey, LoadProfileEvent> eventsByProcess = ArrayListMultimap.create(); String loadProfile = testplan.elementTextTrim("loadProfile"); // relative to testplan File loadProfileConfigFile = new File(new File(testplanFile.getParentFile(), "loadprofiles"), loadProfile); try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(loadProfileConfigFile), "UTF-8"))) { StrTokenizer st = StrTokenizer.getCSVInstance(); st.setDelimiterChar(';'); for (String line = null; (line = br.readLine()) != null;) { // ignore line that are blank, commented out, or represent markers if (isBlank(line) || startsWith(line, "#") || MARKER_PATTERN.matcher(line).matches()) { continue; } st.reset(line); String[] tokens = st.getTokenArray(); long startTime = Long.parseLong(tokens[0]); String operation = tokens[1]; String target = tokens[2]; int daemonId = Integer.parseInt(tokens[3]); int processId = Integer.parseInt(tokens[4]); eventsByProcess.put(new ProcessKey(daemonId, processId), new LoadProfileEvent(startTime, operation, target, daemonId, processId)); } } return eventsByProcess; }
private ListMultimap<ProcessKey, LoadProfileEvent> readLoadProfileEvents(final Element testplan) throws IOException { ListMultimap<ProcessKey, LoadProfileEvent> eventsByProcess = ArrayListMultimap.create(); String loadProfile = testplan.elementTextTrim("loadProfile"); // relative to testplan File loadProfileConfigFile = new File(new File(testplanFile.getParentFile(), "loadprofiles"), loadProfile); try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(loadProfileConfigFile), "UTF-8"))) { StrTokenizer st = StrTokenizer.getCSVInstance(); st.setDelimiterChar(';'); for (String line = null; (line = br.readLine()) != null;) { // ignore line that are blank, commented out, or represent markers if (isBlank(line) || startsWith(line, "#") || MARKER_PATTERN.matcher(line).matches()) { continue; } st.reset(line); String[] tokens = st.getTokenArray(); long startTime = Long.parseLong(tokens[0]); String operation = tokens[1]; String target = tokens[2]; int daemonId = Integer.parseInt(tokens[3]); int processId = Integer.parseInt(tokens[4]); eventsByProcess.put(new ProcessKey(daemonId, processId), new LoadProfileEvent(startTime, operation, target, daemonId, processId)); } } return eventsByProcess; }
.setDelimiterChar('\n') .setIgnoreEmptyTokens(false) .getTokenArray()
.setDelimiterChar('\n') .setIgnoreEmptyTokens(false) .getTokenArray()