CsvOutputFormat.configureRecordFormat

How to use the configureRecordFormat method in eu.stratosphere.api.java.record.io.CsvOutputFormat

Best Java code snippets using eu.stratosphere.api.java.record.io.CsvOutputFormat.configureRecordFormat (showing selected results out of 315)

origin: stratosphere/stratosphere

  .build();
FileDataSink out = new FileDataSink(new CsvOutputFormat(), output, reducer, "Word Counts");
CsvOutputFormat.configureRecordFormat(out)
  .recordDelimiter('\n')
  .fieldDelimiter(' ')
origin: stratosphere/stratosphere

public Plan getPlan(int numSubTasks, String output) {
  List<Object> tmp = new ArrayList<Object>();
  int pos = 0;
  for (String s : WordCountData.COUNTS.split("\n")) {
    List<Object> tmpInner = new ArrayList<Object>();
    tmpInner.add(pos++);
    tmpInner.add(Integer.parseInt(s.split(" ")[1]));
    tmp.add(tmpInner);
  }
  // test serializable iterator input, the input record is {id, word}
  CollectionDataSource source = new CollectionDataSource(new SerializableIteratorTest(), "test_iterator");
  // test collection input, the input record is {id, count}
  CollectionDataSource source2 = new CollectionDataSource(tmp, "test_collection");
  JoinOperator join = JoinOperator.builder(Join.class, IntValue.class, 0, 0)
    .input1(source).input2(source2).build();
  FileDataSink out = new FileDataSink(new CsvOutputFormat(), output, join, "Collection Join");
  CsvOutputFormat.configureRecordFormat(out)
    .recordDelimiter('\n')
    .fieldDelimiter(' ')
    .field(StringValue.class, 0)
    .field(IntValue.class, 1);
  Plan plan = new Plan(out, "CollectionDataSource");
  plan.setDefaultParallelism(numSubTasks);
  return plan;
}
origin: stratosphere/stratosphere

@Override
public Plan getPlan(String... args) throws IllegalArgumentException {
  // parse program parameters
  int numSubtasks       = (args.length > 0 ? Integer.parseInt(args[0]) : 1);
  String recordsPath    = (args.length > 1 ? args[1] : "");
  String output        = (args.length > 2 ? args[2] : "");
  
  FileDataSource source = new FileDataSource(CsvInputFormat.class, recordsPath);
  source.setDegreeOfParallelism(numSubtasks);
  CsvInputFormat.configureRecordFormat(source)
    .recordDelimiter('\n')
    .fieldDelimiter('|')
    .field(IntValue.class, 0);
  
  FileDataSink sink =
    new FileDataSink(CsvOutputFormat.class, output);
  sink.setDegreeOfParallelism(numSubtasks);
  CsvOutputFormat.configureRecordFormat(sink)
    .recordDelimiter('\n')
    .fieldDelimiter('|')
    .lenient(true)
    .field(IntValue.class, 0);
  
  sink.setGlobalOrder(new Ordering(0, IntValue.class, Order.ASCENDING), new UniformIntegerDistribution(Integer.MIN_VALUE, Integer.MAX_VALUE));
  sink.setInput(source);
  
  return new Plan(sink);
}

origin: stratosphere/stratosphere

@Override
public Plan getPlan(String... args) {
  int numSubTasks = (args.length > 0 ? Integer.parseInt(args[0]) : 1);
  String dataInput = (args.length > 1 ? args[1] : "");
  String output = (args.length > 2 ? args[2] : "");
  FileDataSource source = new FileDataSource(new TextInputFormat(), dataInput, "Input Lines");
  MapOperator mapper = MapOperator.builder(new TokenizeLine()).input(source).name("Tokenize Lines").build();
  
  ReduceOperator reducer = ReduceOperator.builder(CountWords.class, StringValue.class, 0).input(mapper)
      .name("Count Words").build();
  
  FileDataSink out = new FileDataSink(new CsvOutputFormat(), output, reducer, "Word Counts");
  
  CsvOutputFormat.configureRecordFormat(out).recordDelimiter('\n')
      .fieldDelimiter(' ').field(StringValue.class, 0)
      .field(IntValue.class, 1);
  Plan plan = new Plan(out, "WordCount Example");
  plan.setDefaultParallelism(numSubTasks);
  return plan;
}
origin: stratosphere/stratosphere

@Override
public Plan getPlan(String... args) {
  
  // parse job parameters
  int numSubTasks = (args.length > 0 ? Integer.parseInt(args[0]) : 1);
  String dataInput = (args.length > 1 ? args[1] : "");
  String output = (args.length > 2 ? args[2] : "");
  @SuppressWarnings("unchecked")
  CsvInputFormat format = new CsvInputFormat(' ', IntValue.class, IntValue.class);
  FileDataSource input = new FileDataSource(format, dataInput, "Input");
  
  // create the reduce contract and set the key to the first field
  ReduceOperator sorter = ReduceOperator.builder(new IdentityReducer(), IntValue.class, 0)
    .input(input)
    .name("Reducer")
    .build();
  // sets the group sorting to the second field
  sorter.setGroupOrder(new Ordering(1, IntValue.class, Order.ASCENDING));
  // create and configure the output format
  FileDataSink out = new FileDataSink(new CsvOutputFormat(), output, sorter, "Sorted Output");
  CsvOutputFormat.configureRecordFormat(out)
    .recordDelimiter('\n')
    .fieldDelimiter(' ')
    .field(IntValue.class, 0)
    .field(IntValue.class, 1);
  
  Plan plan = new Plan(out, "SecondarySort Example");
  plan.setDefaultParallelism(numSubTasks);
  return plan;
}
origin: stratosphere/stratosphere

CsvOutputFormat.configureRecordFormat(sink)
  .recordDelimiter('\n')
  .fieldDelimiter(',')
origin: stratosphere/stratosphere

public Plan getPlan(int numSubTasks, String dataInput, String output) {
  // input is {word, count} pair
  FileDataSource source = new FileDataSource(new TextInputFormat(), dataInput, "Input Lines");
  // do a selection using cached file
  MapOperator mapper = MapOperator.builder(new TokenizeLine())
    .input(source)
    .name("Tokenize Lines")
    .build();
  FileDataSink out = new FileDataSink(new CsvOutputFormat(), output, mapper, "Selection");
  CsvOutputFormat.configureRecordFormat(out)
    .recordDelimiter('\n')
    .fieldDelimiter(' ')
    .field(StringValue.class, 0)
    .field(IntValue.class, 1);
  Plan plan = new Plan(out, "Distributed Cache");
  plan.setDefaultParallelism(numSubTasks);
  return plan;
}
origin: stratosphere/stratosphere

@Override
protected Plan getTestJob() {
  
  int dop = this.config.getInteger("GroupOrderTest#NumSubtasks", 1);
  
  @SuppressWarnings("unchecked")
  CsvInputFormat format = new CsvInputFormat(',', IntValue.class, IntValue.class);
  FileDataSource source = new FileDataSource(format, this.textPath, "Source");
  
  ReduceOperator reducer = ReduceOperator.builder(CheckingReducer.class)
    .keyField(IntValue.class, 0)
    .input(source)
    .name("Ordered Reducer")
    .build();
  reducer.setGroupOrder(new Ordering(1, IntValue.class, Order.ASCENDING));
  
  FileDataSink sink = new FileDataSink(CsvOutputFormat.class, this.resultPath, reducer, "Sink");
  CsvOutputFormat.configureRecordFormat(sink)
    .recordDelimiter('\n')
    .fieldDelimiter(',')
    .field(IntValue.class, 0)
    .field(IntValue.class, 1);
  
  Plan p = new Plan(sink);
  p.setDefaultParallelism(dop);
  return p;
}
origin: stratosphere/stratosphere

CsvOutputFormat.configureRecordFormat(result)
  .recordDelimiter('\n')
  .fieldDelimiter('|')
origin: stratosphere/stratosphere

private static Plan getTestPlanPlan(int numSubTasks, String input, String output) {
  FileDataSource initialInput = new FileDataSource(TextInputFormat.class, input, "input");
  
  BulkIteration iteration = new BulkIteration("Loop");
  iteration.setInput(initialInput);
  iteration.setMaximumNumberOfIterations(5);
  
  Assert.assertTrue(iteration.getMaximumNumberOfIterations() > 1);
  ReduceOperator sumReduce = ReduceOperator.builder(new PickOneReducer())
      .input(iteration.getPartialSolution())
      .name("Compute sum (Reduce)")
      .build();
  
  iteration.setNextPartialSolution(sumReduce);
  FileDataSink finalResult = new FileDataSink(CsvOutputFormat.class, output, iteration, "Output");
  CsvOutputFormat.configureRecordFormat(finalResult)
    .recordDelimiter('\n')
    .fieldDelimiter(' ')
    .field(StringValue.class, 0);
  Plan plan = new Plan(finalResult, "Iteration with AllReducer (keyless Reducer)");
  
  plan.setDefaultParallelism(numSubTasks);
  Assert.assertTrue(plan.getDefaultParallelism() > 1);
  
  return plan;
}

origin: stratosphere/stratosphere

CsvOutputFormat.configureRecordFormat(triangles)
  .recordDelimiter('\n')
  .fieldDelimiter(' ')
origin: stratosphere/stratosphere

CsvOutputFormat.configureRecordFormat(sink)
  .recordDelimiter('\n')
  .fieldDelimiter(' ')
origin: stratosphere/stratosphere

CsvOutputFormat.configureRecordFormat(result)
  .recordDelimiter('\n')
  .fieldDelimiter(' ')
origin: stratosphere/stratosphere

CsvOutputFormat.configureRecordFormat(finalResult)
  .recordDelimiter('\n')
  .fieldDelimiter(' ')
origin: stratosphere/stratosphere

  .build();
FileDataSink out = new FileDataSink(new CsvOutputFormat(), OUT_FILE, reduceNode, "Word Counts");
CsvOutputFormat.configureRecordFormat(out)
  .recordDelimiter('\n')
  .fieldDelimiter(' ')
origin: stratosphere/stratosphere

eu.stratosphere.api.java.record.io.CsvOutputFormat.configureRecordFormat

Javadoc

Creates a configuration builder that can be used to set the output format's parameters to the config in a fluent fashion.
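
Below is a minimal usage sketch assembled from the snippets above: a parameterless CsvOutputFormat is attached to a FileDataSink, and configureRecordFormat(sink) returns the builder used to set the record delimiter, field delimiter, and output fields fluently. The import paths and the Operator input type are assumptions based on the Stratosphere record API layout and may differ between versions; this is a sketch, not the library's canonical example.

import eu.stratosphere.api.common.Plan;
import eu.stratosphere.api.common.operators.Operator;
import eu.stratosphere.api.java.record.io.CsvOutputFormat;
import eu.stratosphere.api.java.record.operators.FileDataSink;
import eu.stratosphere.types.IntValue;
import eu.stratosphere.types.StringValue;

public class CsvSinkSketch {

  // 'wordCounts' stands for any upstream operator producing {word, count} records.
  public static Plan buildPlan(Operator wordCounts, String outputPath, int numSubTasks) {
    // Attach the output format to a file sink; the format itself is configured afterwards.
    FileDataSink out = new FileDataSink(new CsvOutputFormat(), outputPath, wordCounts, "Word Counts");

    // configureRecordFormat(out) hands back a configuration builder that writes the
    // delimiter and field settings into the sink's configuration in a fluent fashion.
    CsvOutputFormat.configureRecordFormat(out)
      .recordDelimiter('\n')
      .fieldDelimiter(' ')
      .field(StringValue.class, 0)  // first CSV column: the word
      .field(IntValue.class, 1);    // second CSV column: the count

    Plan plan = new Plan(out, "Word Count");
    plan.setDefaultParallelism(numSubTasks);
    return plan;
  }
}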

Popular methods of CsvOutputFormat

  • <init>
    Creates an instance of CsvOutputFormat. The position of the fields in the record is determined by the order in which the field types are specified (see the sketch below).
  • setOutputFilePath
  • setTypes
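
If the field types are known up front, the format can also be configured through its constructor rather than through the fluent builder. The sketch below rests on an assumption drawn from the <init> description above (a varargs constructor over Value types, with field positions following the order of the given types); the exact constructor signatures may differ between Stratosphere versions.

import eu.stratosphere.api.java.record.io.CsvOutputFormat;
import eu.stratosphere.types.IntValue;
import eu.stratosphere.types.StringValue;

public class CsvOutputFormatConstruction {

  // Assumed varargs constructor: CsvOutputFormat(Class<? extends Value>... types).
  // Field 0 is written as a StringValue and field 1 as an IntValue, in that order.
  public static CsvOutputFormat wordCountFormat() {
    return new CsvOutputFormat(StringValue.class, IntValue.class);
  }
}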
