org.datacleaner.server

How to use org.datacleaner.server

Best Java code snippets using org.datacleaner.server (Showing top 18 results out of 315)
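Before the indexed snippets, a minimal usage sketch (imports omitted, as in the snippets below; the server name, namenode URI and file path are placeholder values, not defaults of the library): describe a cluster by its namenode, obtain a Hadoop Configuration from it, and point a HadoopResource at a file on that cluster.

// Sketch only: "namenode", the URIs and the path are placeholders.
final DirectConnectionHadoopClusterInformation cluster =
    new DirectConnectionHadoopClusterInformation("namenode", "direct connection",
        URI.create("hdfs://localhost:9000/"));
final Configuration configuration = cluster.getConfiguration(); // fs.defaultFS points at the namenode
final HadoopResource resource =
    new HadoopResource(URI.create("hdfs://localhost:9000/datacleaner/input.csv"),
        configuration, "namenode");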

origin: datacleaner/DataCleaner

/**
 * Determines whether the configuration directories specified by YARN_CONF_DIR
 * and/or HADOOP_CONF_DIR are set.
 *
 * @return true if at least one Hadoop configuration directory is specified
 */
public static boolean isConfigurationDirectoriesSpecified() {
  return getConfigurationDirectories().length > 0;
}
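As a hedged illustration of how this check is typically used (the name and description strings below are placeholders, imports omitted): guard construction of an environment-based cluster reference so that a missing HADOOP_CONF_DIR/YARN_CONF_DIR fails fast.

if (EnvironmentBasedHadoopClusterInformation.isConfigurationDirectoriesSpecified()) {
  // Both constructor arguments are free-form name/description values (placeholders here).
  final EnvironmentBasedHadoopClusterInformation cluster =
      new EnvironmentBasedHadoopClusterInformation("default", "configured from environment");
  final Configuration configuration = cluster.getConfiguration();
  // ... use the configuration to reach HDFS
} else {
  // Neither YARN_CONF_DIR nor HADOOP_CONF_DIR is set; fall back or report an error.
}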
origin: datacleaner/DataCleaner

public HadoopResource(final URI uri, final HadoopClusterInformation defaultCluster) {
  this(uri, defaultCluster.getConfiguration(), defaultCluster.getName());
}
origin: datacleaner/DataCleaner

  @Override
  public Configuration getConfiguration() {
    try {
      return super.getConfiguration();
    } catch (final IllegalStateException e) {
      if (getDirectories().length == 0) {
        throw new IllegalStateException(
            "None of the standard Hadoop environment variables (HADOOP_CONF_DIR, YARN_CONF_DIR) has been set.",
            e);
      } else {
        throw e;
      }
    }
  }
}
origin: datacleaner/DataCleaner

private ServerInformation createHadoopClusterInformation(final HadoopClusterType hadoopClusterType,
    final String name, final String description) {
  final ServerInformation serverInformation;
  if (hadoopClusterType.getEnvironmentConfigured() != null) {
    serverInformation = new EnvironmentBasedHadoopClusterInformation(name, description);
  } else if (hadoopClusterType.getDirectories() != null) {
    final List<String> directoryList = hadoopClusterType.getDirectories().getDirectory();
    // TODO: Variable-thingy
    final String[] directories = directoryList.toArray(new String[directoryList.size()]);
    serverInformation = new DirectoryBasedHadoopClusterInformation(name, description, directories);
  } else if (hadoopClusterType.getNamenodeUrl() != null) {
    serverInformation = new DirectConnectionHadoopClusterInformation(name, description,
        URI.create(hadoopClusterType.getNamenodeUrl()));
  } else {
    throw new UnsupportedOperationException("Unsupported hadoop cluster configuration method");
  }
  return serverInformation;
}
origin: org.eobjects.datacleaner/DataCleaner-monitor-csv-datastore-wizard

    throw new DCUserInputException("The Hadoop path does not exist");
  final EnvironmentBasedHadoopClusterInformation environmentBasedHadoopClusterInformation = new EnvironmentBasedHadoopClusterInformation(
      "default", HadoopResource.DEFAULT_CLUSTERREFERENCE);
  if (!EnvironmentBasedHadoopClusterInformation.isConfigurationDirectoriesSpecified()) {
    throw new DCUserInputException("HADOOP_CONF_DIR or/and SPARK_CONF_DIR are not defined");
  logger.debug("Environment variable is", environmentBasedHadoopClusterInformation.getDescription());
  resource = new HadoopResource(uri, environmentBasedHadoopClusterInformation.getConfiguration(),
      HadoopResource.DEFAULT_CLUSTERREFERENCE);
} else {
origin: datacleaner/DataCleaner

final Element hadoopClusterElement = getDocument().createElement("hadoop-cluster");
hadoopClusterElement.setAttribute("name", hadoopClusterInformation.getName());
final String description = hadoopClusterInformation.getDescription();
if (!Strings.isNullOrEmpty(description)) {
  hadoopClusterElement.setAttribute("description", description);
}
if (hadoopClusterInformation instanceof DirectConnectionHadoopClusterInformation) {
  appendElement(hadoopClusterElement, "namenode-url",
      ((DirectConnectionHadoopClusterInformation) hadoopClusterInformation).getNameNodeUri().toString());
} else if (hadoopClusterInformation instanceof EnvironmentBasedHadoopClusterInformation) {
  appendElement(hadoopClusterElement, "environment-configured", "");
} else if (hadoopClusterInformation instanceof DirectoryBasedHadoopClusterInformation) {
  final DirectoryBasedHadoopClusterInformation directoryBasedHadoopClusterInformation =
      (DirectoryBasedHadoopClusterInformation) hadoopClusterInformation;
  final Element directoriesElement = getDocument().createElement("directories");
  hadoopClusterElement.appendChild(directoriesElement);
  for (final String directory : directoryBasedHadoopClusterInformation.getDirectories()) {
    appendElement(directoriesElement, "directory", directory);
  }
}
origin: datacleaner/DataCleaner

final URI nameNodeUri = new URI(_fileSystemURITextField.getText().trim());
final DirectConnectionHadoopClusterInformation newServer =
    new DirectConnectionHadoopClusterInformation(_nameTextField.getText(),
        _descriptionTextField.getText(), nameNodeUri);
_savedServer = newServer;

_nameTextField.setText(directConnection.getName());
_nameTextField.setEnabled(false);
_fileSystemURITextField.setText(directConnection.getNameNodeUri().toString());
final String description = directConnection.getDescription();
if (description != null) {
  _descriptionTextField.setText(description);
}
origin: datacleaner/DataCleaner

public ServerInformationCatalogImpl(final ServerInformation... servers) {
  final List<ServerInformation> serversList = new ArrayList<>();
  Collections.addAll(serversList, servers);
  _servers = serversList;
  try {
    if (!containsServer(HadoopResource.DEFAULT_CLUSTERREFERENCE)) {
      final EnvironmentBasedHadoopClusterInformation environmentBasedHadoopClusterInformation =
          new EnvironmentBasedHadoopClusterInformation(HadoopResource.DEFAULT_CLUSTERREFERENCE, null);
      if (environmentBasedHadoopClusterInformation.getDirectories().length > 0) {
        serversList.add(0, environmentBasedHadoopClusterInformation);
      }
    }
  } catch (final IllegalStateException e) {
    logger.info("No Hadoop environment variables, skipping default server");
  }
}
origin: datacleaner/DataCleaner

private List<DirectoryPathPanel> getDirectoriesListPanel(final JPanel parent) {
  _pathPanels = new ArrayList<>();
  if (_server != null) {
    final String[] directories = _server.getDirectories();
    if (directories != null) {
      for (final String directory : directories) {
        final DirectoryPathPanel directoryPanel = new DirectoryPathPanel(new File(directory), parent);
        _pathPanels.add(directoryPanel);
      }
    } else {
      _pathPanels.add(new DirectoryPathPanel(null, parent));
    }
  } else {
    _pathPanels.add(new DirectoryPathPanel(null, parent));
  }
  return _pathPanels;
}
origin: datacleaner/DataCleaner

if (EnvironmentBasedHadoopClusterInformation.isConfigurationDirectoriesSpecified()) {
  selectedServer = serverNames[0];
  final HadoopClusterInformation server =
      (HadoopClusterInformation) serverInformationCatalog.getServer(selectedServer);
  final HdfsResource resource =
      new HadoopResource(selectedFile, server.getConfiguration(), selectedServer);
  final Datastore datastore = createAndAddDatastore(resource);
  _datastoreSelectListener.datastoreSelected(datastore);
}
origin: datacleaner/DataCleaner

private ServerInformationCatalog createServerInformationCatalog(final ServersType serversType,
    final DataCleanerConfigurationImpl temporaryConfiguration,
    final TemporaryMutableDataCleanerEnvironment temporaryEnvironment) {
  if (serversType == null) {
    return temporaryConfiguration.getServerInformationCatalog();
  }
  final Map<String, ServerInformation> servers = new HashMap<>();
  final List<HadoopClusterType> hadoopClusterTypes = serversType.getHadoopClusters().getHadoopCluster();
  for (final HadoopClusterType hadoopClusterType : hadoopClusterTypes) {
    final String name = hadoopClusterType.getName();
    checkName(name, ServerInformation.class, servers);
    final String description = hadoopClusterType.getDescription();
    final ServerInformation serverInformation =
        createHadoopClusterInformation(hadoopClusterType, name, description);
    servers.put(name, serverInformation);
  }
  try {
    servers.put(HadoopResource.DEFAULT_CLUSTERREFERENCE,
        new EnvironmentBasedHadoopClusterInformation(HadoopResource.DEFAULT_CLUSTERREFERENCE, null));
  } catch (final IllegalStateException e) {
    logger.info("No Hadoop environment variables, skipping default server");
  }
  return new ServerInformationCatalogImpl(servers.values());
}
origin: datacleaner/DataCleaner

final String serverName = _server.getName();
if (serverName != null) {
  _nameTextField.setText(serverName);
  _nameTextField.setEnabled(false);
}
final String description = _server.getDescription();
if (description != null) {
  _descriptionTextField.setText(description);
}

final DirectoryBasedHadoopClusterInformation newServer =
    new DirectoryBasedHadoopClusterInformation(_nameTextField.getText(),
        _descriptionTextField.getText(), paths.toArray(new String[paths.size()]));
_serverInformationCatalog.addServerInformation(newServer);
origin: datacleaner/DataCleaner

  public static Configuration getHadoopConfigurationWithTimeout(final HadoopClusterInformation clusterInformation) {
    Configuration configuration = null;

    if (clusterInformation == null) {
      configuration = new Configuration();
    } else {
      configuration = clusterInformation.getConfiguration();
    }

    configuration.set(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
        String.valueOf(1));
    return configuration;
  }
}
origin: datacleaner/DataCleaner

@Override
public Configuration getConfiguration() {
  final Configuration configuration;
  if (SystemProperties.getBoolean(HdfsResource.SYSTEM_PROPERTY_HADOOP_CONF_DIR_ENABLED, false)) {
    configuration = super.getConfiguration();
  } else {
    configuration = new Configuration();
  }
  configuration.set("fs.defaultFS", _nameNodeUri.toString());
  return configuration;
}
origin: datacleaner/DataCleaner

final List<ServerInformation> servers = new ArrayList<>();
servers.add(new EnvironmentBasedHadoopClusterInformation("default", "hadoop conf dir"));
servers.add(new DirectoryBasedHadoopClusterInformation("directory", "directory setup",
    "C:\\Users\\claudiap\\git\\vagrant-vms\\bigdatavm\\hadoop_conf"));
servers.add(new DirectConnectionHadoopClusterInformation("namenode", "directconnection",
    new URI("hdfs://192.168.0.255:9000/")));
final ServerInformationCatalog serverInformationCatalog = new ServerInformationCatalogImpl(servers);
origin: datacleaner/DataCleaner

@Override
public Configuration getConfiguration() {
  final Configuration configuration = new Configuration();
  final Map<String, File> configurationFiles = new HashMap<>();
  // Collect *.xml files from each configured directory; on duplicate file
  // names, the first directory listed takes precedence.
  Arrays.stream(getDirectories()).map(File::new).filter(File::isDirectory).forEach(c -> {
    final File[] array = c.listFiles();
    assert (array != null);
    Arrays.stream(array).filter(File::isFile).filter(f -> !configurationFiles.containsKey(f.getName()))
        .filter(f -> FilenameUtils.getExtension(f.getName()).equalsIgnoreCase("xml"))
        .forEach(f -> configurationFiles.put(f.getName(), f));
  });
  if (configurationFiles.isEmpty()) {
    throw new IllegalStateException("Specified directories do not contain any Hadoop configuration files");
  }
  configurationFiles.values().stream().map(File::toURI).map(Path::new).forEach(configuration::addResource);
  return configuration;
}
origin: datacleaner/DataCleaner

public HadoopResourceBuilder(final ServerInformationCatalog catalog, final String templatedUri) {
  final Matcher matcher = RESOURCE_SCHEME_PATTERN.matcher(templatedUri);
  if (!matcher.matches()) {
    _clusterReferenceName = null;
    final String fixedUri = templatedUri.replace(" ", "%20");
    final HadoopClusterInformation hadoopClusterInformation =
        (HadoopClusterInformation) catalog.getServer(HadoopResource.DEFAULT_CLUSTERREFERENCE);
    if (hadoopClusterInformation != null) {
      _configuration = hadoopClusterInformation.getConfiguration();
    } else {
      _configuration = new Configuration();
    }
    _configuration.set("fs.defaultFS", fixedUri);
    _uri = URI.create(fixedUri);
  } else {
    _clusterReferenceName = matcher.group(2);
    final HadoopClusterInformation hadoopClusterInformation =
        (HadoopClusterInformation) catalog.getServer(_clusterReferenceName);
    _configuration = hadoopClusterInformation.getConfiguration();
    _uri = URI.create(matcher.group(3).replace(" ", "%20"));
  }
}
origin: datacleaner/DataCleaner

public EnvironmentBasedHadoopClusterInformation(final String name, final String description) {
  super(name, description, getConfigurationDirectories());
}
org.datacleaner.server

Most used classes

  • EnvironmentBasedHadoopClusterInformation
    Environment based configuration (HADOOP_CONF_DIR/YARN_CONF_DIR)
  • DirectoryBasedHadoopClusterInformation
    Directory based configuration
  • HadoopClusterInformation
    Represents a connection to a Hadoop cluster, either by namenode URL or by other means (e.g. environment- or directory-based configuration)
  • DirectConnectionHadoopClusterInformation
    Direct namenode connection based configuration
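
Taken together, a hedged sketch of how these classes are typically combined (server names, the directory and the URIs below are placeholders; imports omitted as in the snippets above): register one or more cluster definitions in a ServerInformationCatalog, then look a cluster up by name and point a HadoopResource at a file on it.

final List<ServerInformation> servers = new ArrayList<>();
// May throw IllegalStateException if neither HADOOP_CONF_DIR nor YARN_CONF_DIR is set.
servers.add(new EnvironmentBasedHadoopClusterInformation("default", "from HADOOP_CONF_DIR/YARN_CONF_DIR"));
servers.add(new DirectoryBasedHadoopClusterInformation("directory", "explicit configuration directory",
    "/etc/hadoop/conf"));
servers.add(new DirectConnectionHadoopClusterInformation("namenode", "direct namenode connection",
    URI.create("hdfs://namenode.example.com:9000/")));
final ServerInformationCatalog catalog = new ServerInformationCatalogImpl(servers);

// Resolve a registered cluster by name and read a file from it.
final HadoopClusterInformation cluster = (HadoopClusterInformation) catalog.getServer("namenode");
final HadoopResource resource =
    new HadoopResource(URI.create("hdfs://namenode.example.com:9000/datacleaner/input.csv"), cluster);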