@Test
public void testBulkWithIngestPipeline() throws Exception {
    // Configure an ingest pipeline and verify it is reflected only in the bulk endpoint.
    Settings testSettings = new TestSettings();
    testSettings.setProperty(ConfigurationOptions.ES_INGEST_PIPELINE, "ingest-pipeline");

    Resource resource = createResource("pipeline/test", testSettings);

    assertEquals("pipeline/test", resource.toString());
    assertEquals("pipeline/_aliases", resource.aliases());
    // the pipeline must be appended as a query parameter on the bulk endpoint
    assertEquals("pipeline/test/_bulk?pipeline=ingest-pipeline", resource.bulk());
    assertEquals("pipeline/_refresh", resource.refresh());
}
/**
 * Appends a successful bulk-item entry to the generated response.
 *
 * @param operation bulk operation name (e.g. index, create) substituted for the OP token.
 * @param status HTTP status code to report for the item.
 * @return this generator, for chaining.
 */
@Override
public BulkOutputGenerator addSuccess(String operation, int status) {
    Assert.notNull(resource);
    items.add(getSuccess()
            .replace(OP, operation)
            .replace(IDX, resource.index())
            .replace(TYPE, resource.type())
            .replace(ID, UUID.randomUUID().toString())
            .replace(VER, "1")
            // BUGFIX: previously hard-coded to "201", silently ignoring the
            // status argument; now the caller-supplied status is honored
            .replace(STAT, Integer.toString(status))
    );
    return this;
}
/**
 * Rejects read resources whose index name contains a curly-brace field
 * extraction pattern, which is only meaningful for writes.
 */
public static void checkIndexNameForRead(Settings settings) {
    Resource readResource = new Resource(settings, true);
    String indexName = readResource.index();
    if (indexName.contains("{") && indexName.contains("}")) {
        throw new EsHadoopIllegalArgumentException("Cannot read indices that have curly brace field extraction patterns in them: " + indexName);
    }
}
public boolean indexExists(boolean read) { Resource res = (read ? resources.getResourceRead() : resources.getResourceWrite()); // cheap hit boolean exists = client.indexExists(res.index()); if (exists && StringUtils.hasText(res.type())) { exists = client.typeExists(res.index(), res.type()); } // could be a _all or a pattern which is valid for read // try again by asking the mapping - could be expensive if (!exists && read) { try { // make sure the mapping is null since the index might exist but the type might be missing exists = !client.getMapping(res.mapping()).isEmpty(); } catch (EsHadoopInvalidRequest ex) { exists = false; } } return exists; }
/**
 * Lazily builds the read resource from the settings; returns null when no
 * read resource is configured.
 */
public Resource getResourceRead() {
    boolean notYetCreated = (resourceRead == null);
    if (notYetCreated && StringUtils.hasText(resourceSettings.getResourceRead())) {
        resourceRead = new Resource(resourceSettings, true);
    }
    return resourceRead;
}
// NOTE(review): fragment with unbalanced braces — the enclosing method(s) are not visible
// here. It appears to splice together write-resource compilation, singular-index-name
// validation, and an alias lookup from different scopes; left byte-identical pending the
// full context.
Resource resource = new Resource(settings, false); iformat.compile(resource.toString()); } else { if (!StringUtils.isValidSingularIndexName(resource.index())) { throw new EsHadoopIllegalArgumentException("Illegal write index name [" + resource.index() + "]. Write resources must " + "be lowercase singular index names, with no illegal pattern characters except for multi-resource writes."); GetAliasesRequestBuilder.Response response = null; try { response = new GetAliasesRequestBuilder(bootstrap).aliases(resource.index()).execute(); } catch (EsHadoopInvalidRequest remoteException) { resource.index(), remoteException.getMessage()));
@Test
public void testJustIndex() throws Exception {
    // an index paired with _all should round-trip unchanged
    Resource resource = createResource("foo/_all");
    assertEquals("foo/_all", resource.toString());
}
// NOTE(review): fragment of a larger method (signature and closing not visible) that
// computes per-shard document counts when slicing partitions; typed resources count via
// index/type/shard, untyped ones via index/shard. Left byte-identical pending full context.
Integer maxDocsPerPartition = settings.getMaxDocsPerPartition(); Assert.notNull(maxDocsPerPartition, "Attempting to find slice partitions but maximum documents per partition is not set."); Resource readResource = new Resource(settings, true); Mapping resolvedMapping = mappingSet == null ? null : mappingSet.getResolvedView(); if (readResource.isTyped()) { numDocs = client.count(index, readResource.type(), Integer.toString(shardId), query); } else { numDocs = client.countIndexShard(index, Integer.toString(shardId), query);
// NOTE(review): fragment — the builder chain is not terminated within view. Constructs a
// search request for the configured read resource; left byte-identical pending full context.
Resource read = new Resource(settings, true); SearchRequestBuilder queryBuilder = new SearchRequestBuilder(esVersion, settings.getReadMetadata() && settings.getReadMetadataVersion()) .types(read.type()) .indices(read.index()) .query(QueryUtils.parseQuery(settings)) .scroll(settings.getScrollKeepAlive())
/** Touches the write index, returning whether the operation succeeded. */
public boolean touch() {
    String writeIndex = resources.getResourceWrite().index();
    return client.touch(writeIndex);
}
/** Issues a refresh against the given resource's refresh endpoint. */
public void refresh(Resource resource) {
    String refreshEndpoint = resource.refresh();
    execute(POST, refreshEndpoint);
}
/** * Executes a single bulk operation against the provided resource, using the passed data as the request body. * This method will retry bulk requests if the entire bulk request fails, but will not retry singular * document failures. * * @param resource target of the bulk request. * @param data bulk request body. This body will be cleared of entries on any successful bulk request. * @return a BulkActionResponse object that will detail if there were failing documents that should be retried. */ public BulkActionResponse bulk(Resource resource, TrackingBytesArray data) { // NB: dynamically get the stats since the transport can change long start = network.transportStats().netTotalTime; Response response = execute(PUT, resource.bulk(), data); long spent = network.transportStats().netTotalTime - start; stats.bulkTotal++; stats.docsSent += data.entries(); stats.bulkTotalTime += spent; // bytes will be counted by the transport layer return new BulkActionResponse(parseBulkActionResponse(response), response.status(), spent); }
// Fetches the mapping for the read resource and parses it into a Field tree.
public Field getMapping() { return Field.parseField(client.getMapping(resourceR.mapping())); }
public boolean indexExists(boolean read) { Resource res = (read ? resourceR : resourceW); // cheap hit boolean exists = client.exists(res.indexAndType()); // could be a _all or a pattern which is valid for read // try again by asking the mapping - could be expensive if (!exists && read) { try { // make sure the mapping is null since the index might exist but the type might be missing exists = !client.getMapping(res.mapping()).isEmpty(); } catch (EsHadoopInvalidRequest ex) { exists = false; } } return exists; }