/** * Creates a RestRepository for use with a multi-index resource pattern. The client is left pinned * to the original node that it was pinned to since the shard locations cannot be determined at all. * @param settings Job settings * @param currentInstance Partition number * @param resource Configured write resource * @param log Logger to use * @return The RestRepository to be used by the partition writer */ private static RestRepository initMultiIndices(Settings settings, long currentInstance, Resource resource, Log log) { if (log.isDebugEnabled()) { log.debug(String.format("Resource [%s] resolves as an index pattern", resource)); } // multi-index write - since we don't know before hand what index will be used, use an already selected node String node = SettingsUtils.getPinnedNode(settings); if (log.isDebugEnabled()) { log.debug(String.format("Partition writer instance [%s] assigned to [%s]", currentInstance, node)); } return new RestRepository(settings); }
/**
 * Lazily initializes the RestRepository client for this tap: merges the given runtime
 * properties with the tap's own defaults, applies host/port/resource/query settings,
 * performs initial node discovery and runs final validation before creating the client.
 * No-op when the client already exists.
 * @param props Runtime properties to merge with the tap's defaults
 * @param read true when the client is used for reading, false for writing
 */
private void initClient(Properties props, boolean read) {
    if (client != null) {
        // already initialized - nothing to do
        return;
    }
    Log log = LogFactory.getLog(EsTap.class);
    Settings settings = CascadingUtils.addDefaultsToSettings(props, this.props, log);
    CascadingUtils.init(settings, host, port, resource, query, read);
    CascadingUtils.initialDiscovery(settings, log);
    CascadingUtils.finalValidation(settings, read);
    client = new RestRepository(settings);
}
public static void checkIndexExistence(Settings settings) { // Only open a connection and check if autocreate is disabled if (!settings.getIndexAutoCreate()) { RestRepository repository = new RestRepository(settings); try { doCheckIndexExistence(settings, repository); } finally { repository.close(); } } }
return new RestRepository(settings);
RestRepository repository = new RestRepository(settings); repository = new RestRepository(settings);
InitializationUtils.filterNonIngestNodesIfNeeded(settings, log); RestRepository client = new RestRepository(settings); try { boolean indexExists = client.resourceExists(true);
ValueReader reader = ObjectUtils.instantiate(settings.getSerializerValueReaderClassName(), settings); RestRepository repository = new RestRepository(settings); Mapping fieldMapping = null; if (StringUtils.hasText(partition.getSerializedMapping())) {
RestRepository client = new RestRepository(settings); MappingSet mappings = client.getMappings(); Mapping mapping = mappings.isEmpty() ? null : mappings.getResolvedView();
private static RestRepository randomNodeWrite(Settings settings, int currentInstance, Resource resource, Log log) { // multi-index write - since we don't know before hand what index will be used, pick a random node from the given list List<String> nodes = SettingsUtils.discoveredOrDeclaredNodes(settings); String node = nodes.get(new Random().nextInt(nodes.size())); // override the global settings to communicate directly with the target node SettingsUtils.pinNode(settings, node); if (log.isDebugEnabled()) { log.debug(String.format("Partition writer instance [%s] assigned to [%s]", currentInstance, node)); } return new RestRepository(settings); } }
/** * Creates a RestRepository for use with a multi-index resource pattern. The client is left pinned * to the original node that it was pinned to since the shard locations cannot be determined at all. * @param settings Job settings * @param currentInstance Partition number * @param resource Configured write resource * @param log Logger to use * @return The RestRepository to be used by the partition writer */ private static RestRepository initMultiIndices(Settings settings, long currentInstance, Resource resource, Log log) { if (log.isDebugEnabled()) { log.debug(String.format("Resource [%s] resolves as an index pattern", resource)); } // multi-index write - since we don't know before hand what index will be used, use an already selected node String node = SettingsUtils.getPinnedNode(settings); if (log.isDebugEnabled()) { log.debug(String.format("Partition writer instance [%s] assigned to [%s]", currentInstance, node)); } return new RestRepository(settings); }
/** * Creates a RestRepository for use with a multi-index resource pattern. The client is left pinned * to the original node that it was pinned to since the shard locations cannot be determined at all. * @param settings Job settings * @param currentInstance Partition number * @param resource Configured write resource * @param log Logger to use * @return The RestRepository to be used by the partition writer */ private static RestRepository initMultiIndices(Settings settings, long currentInstance, Resource resource, Log log) { if (log.isDebugEnabled()) { log.debug(String.format("Resource [%s] resolves as an index pattern", resource)); } // multi-index write - since we don't know before hand what index will be used, use an already selected node String node = SettingsUtils.getPinnedNode(settings); if (log.isDebugEnabled()) { log.debug(String.format("Partition writer instance [%s] assigned to [%s]", currentInstance, node)); } return new RestRepository(settings); }
/** * Creates a RestRepository for use with a multi-index resource pattern. The client is left pinned * to the original node that it was pinned to since the shard locations cannot be determined at all. * @param settings Job settings * @param currentInstance Partition number * @param resource Configured write resource * @param log Logger to use * @return The RestRepository to be used by the partition writer */ private static RestRepository initMultiIndices(Settings settings, long currentInstance, Resource resource, Log log) { if (log.isDebugEnabled()) { log.debug(String.format("Resource [%s] resolves as an index pattern", resource)); } // multi-index write - since we don't know before hand what index will be used, use an already selected node String node = SettingsUtils.getPinnedNode(settings); if (log.isDebugEnabled()) { log.debug(String.format("Partition writer instance [%s] assigned to [%s]", currentInstance, node)); } return new RestRepository(settings); }
/**
 * Lazily initializes the RestRepository client for this tap: merges the given runtime
 * properties with the tap's own defaults, applies host/port/resource/query settings,
 * performs initial node discovery and runs final validation before creating the client.
 * No-op when the client already exists.
 * @param props Runtime properties to merge with the tap's defaults
 * @param read true when the client is used for reading, false for writing
 */
private void initClient(Properties props, boolean read) {
    if (client != null) {
        // already initialized - nothing to do
        return;
    }
    Log log = LogFactory.getLog(EsTap.class);
    Settings settings = CascadingUtils.addDefaultsToSettings(props, this.props, log);
    CascadingUtils.init(settings, host, port, resource, query, read);
    CascadingUtils.initialDiscovery(settings, log);
    CascadingUtils.finalValidation(settings, read);
    client = new RestRepository(settings);
}
public static void checkIndexExistence(Settings settings) { // Only open a connection and check if autocreate is disabled if (!settings.getIndexAutoCreate()) { RestRepository repository = new RestRepository(settings); try { doCheckIndexExistence(settings, repository); } finally { repository.close(); } } }
public static void checkIndexExistence(Settings settings) { // Only open a connection and check if autocreate is disabled if (!settings.getIndexAutoCreate()) { RestRepository repository = new RestRepository(settings); try { doCheckIndexExistence(settings, repository); } finally { repository.close(); } } }
public static void checkIndexExistence(Settings settings) { // Only open a connection and check if autocreate is disabled if (!settings.getIndexAutoCreate()) { RestRepository repository = new RestRepository(settings); try { doCheckIndexExistence(settings, repository); } finally { repository.close(); } } }
public static void checkIndexExistence(Settings settings, RestRepository client) { // check index existence if (!settings.getIndexAutoCreate()) { if (client == null) { client = new RestRepository(settings); } if (!client.indexExists(false)) { client.close(); throw new EsHadoopIllegalArgumentException(String.format("Target index [%s] does not exist and auto-creation is disabled [setting '%s' is '%s']", settings.getResourceWrite(), ConfigurationOptions.ES_INDEX_AUTO_CREATE, settings.getIndexAutoCreate())); } } }
/**
 * If index auto-creation is enabled and the target index does not exist yet, creates a
 * mapping for it from the given schema; when no schema writer is available, logs a
 * warning and lets Elasticsearch perform auto-mapping instead. No-op when auto-creation
 * is disabled.
 * @param conf Configuration object the job settings are loaded from
 * @param schemaWriter Writer able to serialize the schema into a mapping; may be null
 * @param schema Schema to derive the mapping from
 * @param log Logger to use
 */
public static <T> void saveSchemaIfNeeded(Object conf, ValueWriter<T> schemaWriter, T schema, Log log) {
    Settings settings = HadoopSettingsManager.loadFrom(conf);
    if (settings.getIndexAutoCreate()) {
        RestRepository client = new RestRepository(settings);
        // fix: close the client even when indexExists/putMapping throws (it leaked on error before)
        try {
            if (!client.indexExists(false)) {
                if (schemaWriter == null) {
                    log.warn(String.format("No mapping found [%s] and no schema found; letting Elasticsearch perform auto-mapping...", settings.getResourceWrite()));
                }
                else {
                    log.info(String.format("No mapping found [%s], creating one based on given schema", settings.getResourceWrite()));
                    ContentBuilder builder = ContentBuilder.generate(schemaWriter).value(schema).flush();
                    BytesArray content = ((FastByteArrayOutputStream) builder.content()).bytes();
                    builder.close();
                    client.putMapping(content);
                    if (log.isDebugEnabled()) {
                        log.debug(String.format("Creating ES mapping [%s] from schema [%s]", content.toString(), schema));
                    }
                }
            }
        } finally {
            client.close();
        }
    }
}
/**
 * If index auto-creation is enabled and the target index does not exist yet, creates a
 * mapping for it from the given schema; when no schema writer is available, logs a
 * warning and lets Elasticsearch perform auto-mapping instead. No-op when auto-creation
 * is disabled.
 * @param conf Configuration object the job settings are loaded from
 * @param schemaWriter Writer able to serialize the schema into a mapping; may be null
 * @param schema Schema to derive the mapping from
 * @param log Logger to use
 */
public static <T> void saveSchemaIfNeeded(Object conf, ValueWriter<T> schemaWriter, T schema, Log log) {
    Settings settings = HadoopSettingsManager.loadFrom(conf);
    if (settings.getIndexAutoCreate()) {
        RestRepository client = new RestRepository(settings);
        // fix: close the client even when indexExists/putMapping throws (it leaked on error before)
        try {
            if (!client.indexExists(false)) {
                if (schemaWriter == null) {
                    log.warn(String.format("No mapping found [%s] and no schema found; letting Elasticsearch perform auto-mapping...", settings.getResourceWrite()));
                }
                else {
                    log.info(String.format("No mapping found [%s], creating one based on given schema", settings.getResourceWrite()));
                    ContentBuilder builder = ContentBuilder.generate(schemaWriter).value(schema).flush();
                    BytesArray content = ((FastByteArrayOutputStream) builder.content()).bytes();
                    builder.close();
                    client.putMapping(content);
                    if (log.isDebugEnabled()) {
                        log.debug(String.format("Creating ES mapping [%s] from schema [%s]", content.toString(), schema));
                    }
                }
            }
        } finally {
            client.close();
        }
    }
}
/**
 * If index auto-creation is enabled and the target index does not exist yet, creates a
 * mapping for it from the given schema; when no schema writer is available, logs a
 * warning and lets Elasticsearch perform auto-mapping instead. No-op when auto-creation
 * is disabled.
 * @param conf Configuration object the job settings are loaded from
 * @param schemaWriter Writer able to serialize the schema into a mapping; may be null
 * @param schema Schema to derive the mapping from
 * @param log Logger to use
 */
public static <T> void saveSchemaIfNeeded(Object conf, ValueWriter<T> schemaWriter, T schema, Log log) {
    Settings settings = HadoopSettingsManager.loadFrom(conf);
    if (settings.getIndexAutoCreate()) {
        RestRepository client = new RestRepository(settings);
        // fix: close the client even when indexExists/putMapping throws (it leaked on error before)
        try {
            if (!client.indexExists(false)) {
                if (schemaWriter == null) {
                    log.warn(String.format("No mapping found [%s] and no schema found; letting Elasticsearch perform auto-mapping...", settings.getResourceWrite()));
                }
                else {
                    log.info(String.format("No mapping found [%s], creating one based on given schema", settings.getResourceWrite()));
                    ContentBuilder builder = ContentBuilder.generate(schemaWriter).value(schema).flush();
                    BytesArray content = ((FastByteArrayOutputStream) builder.content()).bytes();
                    builder.close();
                    client.putMapping(content);
                    if (log.isDebugEnabled()) {
                        log.debug(String.format("Creating ES mapping [%s] from schema [%s]", content.toString(), schema));
                    }
                }
            }
        } finally {
            client.close();
        }
    }
}