public static void setRiverStatus(Client client, String riverName, Status status) {
    logger.info("setRiverStatus called with {} - {}", riverName, status);
    XContentBuilder xb;
    try {
        xb = jsonBuilder().startObject().startObject(MongoDBRiver.TYPE).field(MongoDBRiver.STATUS_FIELD, status)
                .endObject().endObject();
        client.prepareIndex("_river", riverName, MongoDBRiver.STATUS_ID).setSource(xb).get();
    } catch (IOException ioEx) {
        logger.error("setRiverStatus failed for river {}", ioEx, riverName);
    }
}
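A minimal usage sketch (hypothetical call site, not part of this excerpt): assuming a connected Client and that the plugin's Status enum exposes a STOPPED constant, a management action or test could flip a river's state like this:

// Hypothetical caller: ask the river named "mongodb-river" to stop.
MongoDBRiverHelper.setRiverStatus(client, "mongodb-river", Status.STOPPED);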
logger.debug("Encountered oplog entry with a:false, ts:" + item.get("ts")); break; logger.debug("Before waiting for 500 ms"); Thread.sleep(500); } finally { if (cursor != null) { logger.trace("Closing oplog cursor"); cursor.close(); logger.error("Exception in slurper", e); Thread.currentThread().interrupt(); break; } catch (MongoInterruptedException | InterruptedException e) { logger.info("river-mongodb slurper interrupted"); Thread.currentThread().interrupt(); break; } catch (MongoSocketException | MongoTimeoutException | MongoCursorNotFoundException e) { logger.info("Oplog tailing - {} - {}. Will retry.", e.getClass().getSimpleName(), e.getMessage()); logger.debug("Total documents inserted so far by river {}: {}", definition.getRiverName(), totalDocuments.get()); try { Thread.sleep(MongoDBRiver.MONGODB_RETRY_ERROR_DELAY_MS); } catch (InterruptedException iEx) { logger.info("river-mongodb slurper interrupted"); Thread.currentThread().interrupt(); break; logger.error("Exception while looping in cursor", e); Thread.currentThread().interrupt();
private void dropRecreateMapping() throws IOException, InterruptedException {
    try {
        semaphore.acquire();
        logger.trace("dropRecreateMapping index[{}] - type[{}]", index, type);
        client.admin().indices().prepareRefresh(index).get();
        ImmutableOpenMap<String, MappingMetaData> mappings = client.admin().cluster().prepareState().get().getState()
                .getMetaData().index(index).mappings();
        logger.trace("mappings contains type {}: {}", type, mappings.containsKey(type));
        if (mappings.containsKey(type)) {
            if (client.admin().indices().prepareDeleteMapping(index).setType(type).get().isAcknowledged()) {
                PutMappingResponse pmr = client.admin().indices().preparePutMapping(index).setType(type)
                        .setSource(mapping.getSourceAsMap()).get();
                if (!pmr.isAcknowledged()) {
                    logger.error("Failed to put mapping {} / {} / {}.", index, type, mapping.source());
                } else {
                    logger.info("Delete and recreate for index / type [{}] [{}] successfully executed.", index, type);
                }
            } else {
                logger.warn("Delete type[{}] on index[{}] returned acknowledged false", type, index);
            }
        } else {
            logger.info("type[{}] does not exist in index[{}]. No need to remove mapping.", type, index);
        }
    } finally {
        semaphore.release();
    }
}
private void checkBulkProcessorAvailability() {
    while (!isBulkProcessorAvailable()) {
        try {
            if (logger.isDebugEnabled()) {
                logger.debug("Waiting for bulk queue to empty...");
            }
            Thread.sleep(2000);
        } catch (InterruptedException e) {
            logger.warn("checkBulkProcessorAvailability interrupted", e);
            Thread.currentThread().interrupt(); // preserve the interrupt status and stop waiting
            return;
        }
    }
}
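isBulkProcessorAvailable() is not part of this excerpt; a plausible sketch, assuming the indexer tracks in-flight bulk requests with an AtomicInteger against a configured ceiling (both names hypothetical):

// Hypothetical fields and check, for illustration only.
private final AtomicInteger onGoingBulks = new AtomicInteger();
private static final int MAX_ONGOING_BULKS = 10; // assumed limit

private boolean isBulkProcessorAvailable() {
    return onGoingBulks.get() < MAX_ONGOING_BULKS;
}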
private List<ServerAddress> getServerAddressForReplica(DBObject item) {
    String definition = item.get("host").toString();
    if (definition.contains("/")) {
        definition = definition.substring(definition.indexOf("/") + 1);
    }
    if (logger.isDebugEnabled()) {
        logger.debug("getServerAddressForReplica - definition: {}", definition);
    }
    List<ServerAddress> servers = new ArrayList<ServerAddress>();
    for (String server : definition.split(",")) {
        servers.add(new ServerAddress(server));
    }
    return servers;
}
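For orientation, a sketch with hypothetical values: shard entries such as those in MongoDB's config.shards collection carry a host string of the form "rs0/host1:27017,host2:27017", which this method strips of its replica-set prefix and splits on commas:

// Hypothetical example input and result.
DBObject item = new BasicDBObject("host", "rs0/mongo1:27017,mongo2:27017");
List<ServerAddress> servers = getServerAddressForReplica(item);
// servers -> [mongo1:27017, mongo2:27017]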
@Test
public void dynamicAddingRemovingQueries() throws Exception {
    logger.info("--> Add dummy doc");
    client.admin().indices().prepareDelete("_all").execute().actionGet();
    client.prepareIndex("test", "type", "1").setSource("field1", "value").execute().actionGet();

    logger.info("--> register a query 1");
    client.prepareIndex("test", BatchPercolatorService.TYPE_NAME, "kuku")
            .setSource(getSource(termQuery("field1", "value1")))
            .setRefresh(true)
            .execute().actionGet();

    // Percolate a document that matches query 1. The request builder wrapping
    // this document was truncated in the source; only the document remains:
    jsonBuilder()
            .startObject()
            .field("_id", "docId")
            .field("field1", "value1")
            .endObject()))) // wrapper truncated in source
            .execute().actionGet();

    logger.info("--> deleting query 1");
    client.prepareDelete("test", BatchPercolatorService.TYPE_NAME, "kuku").setRefresh(true).execute().actionGet();

    // Percolate the same document again; query 1 should no longer match.
    // The source breaks off mid-builder here:
    jsonBuilder()
            .startObject()
            .field("_id", "docId")
private boolean isStarted() {
    // Refresh index before querying it.
    client.admin().indices().prepareRefresh("_river").execute().actionGet();
    GetResponse isStartedGetResponse = client.prepareGet("_river", riverName().name(), "_s3status").execute().actionGet();
    try {
        if (!isStartedGetResponse.isExists()) {
            XContentBuilder xb = jsonBuilder().startObject()
                    .startObject("amazon-s3")
                    .field("feedname", feedDefinition.getFeedname())
                    .field("status", "STARTED").endObject()
                    .endObject();
            client.prepareIndex("_river", riverName.name(), "_s3status").setSource(xb).execute();
            return true;
        } else {
            String status = (String) XContentMapValues.extractValue("amazon-s3.status", isStartedGetResponse.getSourceAsMap());
            if ("STOPPED".equals(status)) {
                return false;
            }
        }
    } catch (Exception e) {
        logger.warn("failed to get status for " + riverName().name() + ", throttling....", e);
    }
    return true;
}
@Override
public void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception {
    if (logger.isDebugEnabled()) {
        logger.debug("REST S3ManageAction called");
    }
    // (Extraction of rivername and status from the request was truncated in the source.)
    try {
        XContentBuilder xb = jsonBuilder()
                .startObject()
                .startObject("amazon-s3")
                .field("feedname", rivername)
                .field("status", status)
                .endObject()
                .endObject();
        client.prepareIndex("_river", rivername, "_s3status").setSource(xb).execute().actionGet();

        XContentBuilder builder = jsonBuilder();
        builder
                .startObject()
                .field(new XContentBuilderString("ok"), true)
                .endObject();
        channel.sendResponse(new BytesRestResponse(RestStatus.OK, builder));
    } catch (IOException e) {
        onFailure(request, channel, e);
    }
}
private XContentBuilder getGridFSMapping() throws IOException {
    XContentBuilder mapping = jsonBuilder()
            .startObject()
            .startObject(definition.getTypeName())
            .startObject("properties")
            .startObject("content").field("type", "attachment").endObject()
            .startObject("filename").field("type", "string").endObject()
            .startObject("contentType").field("type", "string").endObject()
            .startObject("md5").field("type", "string").endObject()
            .startObject("length").field("type", "long").endObject()
            .startObject("chunkSize").field("type", "long").endObject()
            .endObject()
            .endObject()
            .endObject();
    logger.info("GridFS Mapping: {}", mapping.string());
    return mapping;
}
private void badRequest(RestChannel channel, String message) {
    try {
        XContentBuilder builder = channel.newErrorBuilder();
        channel.sendResponse(new BytesRestResponse(BAD_REQUEST, builder.startObject().field("error", message).endObject()));
    } catch (IOException e) {
        logger.warn("Failed to send response", e);
    }
}
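A typical call site (hypothetical handler code, mirroring the delete handler below): validate a required parameter and bail out early:

// Hypothetical guard in a REST handler.
String river = request.param("river");
if (river == null || river.isEmpty()) {
    badRequest(channel, "Parameter 'river' is required");
    return;
}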
@Test
public void testJsonResponse() throws Exception {
    facetOnTags();
    CacheStatsPerFieldResponse r = client.admin().cluster()
            .execute(CacheStatsPerFieldAction.INSTANCE, new CacheStatsPerFieldRequest())
            .get();
    XContentBuilder builder = jsonBuilder();
    builder.startObject();
    r.toXContent(builder, ToXContent.EMPTY_PARAMS);
    builder.endObject();
    builder.close();
    String json = builder.bytes().toUtf8();
    logger.info("JSON: {}", json);
    // Example output:
    // {"cluster_name":"test-cluster-boazmbp.fritz.box",
    //  "nodes":{"qpCAo38-Rm2epLPs0FJu8w":
    //    {"setTimestamp":1353104133685,"name":"node0",
    //     "transport_address":"inet[/192.168.1.107:9300]","hostname":"boazmbp.fritz.box",
    //     "fields":{"tag":{"size":144}}}}}
}
private void waitForCluster(final ClusterHealthStatus status, final TimeValue timeout) throws IOException {
    try {
        logger.debug("waiting for cluster state {}", status.name());
        final ClusterHealthResponse healthResponse = client.admin().cluster().prepareHealth().setWaitForStatus(status)
                .setTimeout(timeout).execute().actionGet();
        if (healthResponse.isTimedOut()) {
            throw new IOException("cluster state is " + healthResponse.getStatus().name() + " and not " + status.name()
                    + ", cowardly refusing to continue with operations");
        } else {
            logger.debug("... cluster state ok");
        }
    } catch (final ElasticsearchTimeoutException e) {
        throw new IOException("timeout, cluster does not respond to health request, cowardly refusing to continue with operations");
    }
}
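A typical call site (timeout value illustrative): block until the cluster reaches at least YELLOW health before issuing bulk requests:

// Wait up to 30 seconds for YELLOW health; throws IOException on timeout.
waitForCluster(ClusterHealthStatus.YELLOW, TimeValue.timeValueSeconds(30));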
private void delete(RestRequest request, RestChannel channel, Client esClient) {
    String river = request.param("river");
    if (river == null || river.isEmpty()) {
        respondError(request, channel, "Parameter 'river' is required", RestStatus.BAD_REQUEST);
        return;
    }
    logger.info("Delete river: {}", river);
    if (esClient.admin().indices().prepareTypesExists(riverIndexName).setTypes(river).get().isExists()) {
        esClient.admin().indices().prepareDeleteMapping(riverIndexName).setType(river).get();
    }
    respondSuccess(request, channel, RestStatus.OK);
}
private XContentBuilder createPublishMessage(RestRequest request) {
    try {
        Map<String, Object> map = null;
        String message = request.content().toUtf8();
        XContentParser parser = null;
        try {
            parser = XContentFactory.xContent(message).createParser(message);
            map = parser.map();
        } catch (Exception e) {
            logger.warn("unable to parse {}", message);
        } finally {
            if (parser != null) {
                parser.close(); // release the parser instead of just dropping the reference
            }
        }
        return jsonBuilder().startObject()
                .field("timestamp", request.param("timestamp", Long.toString(System.currentTimeMillis())))
                .field("message", map)
                .endObject();
    } catch (IOException e) {
        return null;
    }
}
/** Update river last changes id value. */
private void updateRiver(String lastScanTimeField, Long lastScanTime) throws Exception {
    if (logger.isDebugEnabled()) {
        logger.debug("Updating {}: {}", lastScanTimeField, lastScanTime);
    }
    // We store the last update date and some stats
    XContentBuilder xb = jsonBuilder()
            .startObject()
            .startObject("amazon-s3")
            .field("feedname", feedDefinition.getFeedname())
            .field(lastScanTimeField, lastScanTime)
            .endObject()
            .endObject();
    esIndex("_river", riverName.name(), lastScanTimeField, xb);
}
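A hypothetical call site (the field name is illustrative, not taken from the plugin):

// Record when the current scan pass finished.
updateRiver("_lastScanTime", System.currentTimeMillis());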
logger.info("Starting"); logger.info( "MongoDB options: secondaryreadpreference [{}], drop_collection [{}], include_collection [{}], throttlesize [{}], gridfs [{}], filter [{}], db [{}], collection [{}], script [{}], indexing to [{}]/[{}]", definition.isMongoSecondaryReadPreference(), definition.isDropCollection(), definition.getIncludeCollection(), logger.debug("Using MongoDB server(s): host [{}], port [{}]", server.getHost(), server.getPort()); logger.error("failed to create index [{}], disabling river...", e, definition.getIndexName()); return; if (logger.isDebugEnabled()) { logger.debug("Set explicit attachment mapping."); esClient.admin().indices().preparePutMapping(definition.getIndexName()).setType(definition.getTypeName()) .setSource(getGridFSMapping()).get(); } catch (Exception e) { logger.warn("Failed to set explicit mapping (attachment): {}", e); logger.trace("Initial import already completed."); logger.info("Skip initial import from collection {}", definition.getMongoCollection()); logger.info("Started"); } catch (Throwable t) { logger.warn("Failed to start", t); MongoDBRiverHelper.setRiverStatus(esClient, definition.getRiverName(), Status.START_FAILED); context.setStatus(Status.START_FAILED);
/**
 * Checks the request parameters against enabled settings for error trace support
 * @return true if the request does not have any parameters that conflict with system settings
 */
boolean checkRequestParameters(final RestRequest request, final RestChannel channel) {
    // error_trace cannot be used when we disable detailed errors
    if (channel.detailedErrorsEnabled() == false && request.paramAsBoolean("error_trace", false)) {
        try {
            XContentBuilder builder = channel.newErrorBuilder();
            builder.startObject().field("error", "error traces in responses are disabled.").endObject().string();
            RestResponse response = new BytesRestResponse(BAD_REQUEST, builder);
            response.addHeader("Content-Type", "application/json");
            channel.sendResponse(response);
        } catch (IOException e) {
            logger.warn("Failed to send response", e);
        }
        return false;
    }
    return true;
}
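A sketch of how a handler would use the guard (handler body hypothetical): the check sends the error response itself, so callers only need to stop processing:

// Hypothetical handler using the guard above.
@Override
public void handleRequest(RestRequest request, RestChannel channel, Client client) throws Exception {
    if (!checkRequestParameters(request, channel)) {
        return; // 400 response already sent
    }
    // ... normal request handling ...
}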
@Override
public void start() {
    // create the index explicitly so we can use the whitespace tokenizer
    // because there are usernames like "user-name" and we want those
    // to be treated as just one term
    try {
        Settings indexSettings = ImmutableSettings.settingsBuilder()
                .put("analysis.analyzer.default.tokenizer", "whitespace").build();
        client.admin().indices().prepareCreate(index).setSettings(indexSettings).execute().actionGet();
        logger.info("Created index.");
    } catch (IndexAlreadyExistsException e) {
        logger.info("Index already created");
    } catch (Exception e) {
        logger.error("Exception creating index.", e);
    }
    dataStream = new DataStream();
    dataStream.start();
    logger.info("Started GitHub river.");
}
public static void send(final RestChannel channel, final RestStatus status, final String arg) {
    try {
        final XContentBuilder builder = channel.newBuilder();
        builder.startObject();
        builder.field("status", status.getStatus());
        if (arg != null && !arg.isEmpty()) {
            builder.field("message", arg);
        }
        builder.endObject();
        channel.sendResponse(new BytesRestResponse(status, builder));
    } catch (final Exception e) {
        log.error("Failed to send a response.", e);
        try {
            channel.sendResponse(new BytesRestResponse(channel, e));
        } catch (final IOException e1) {
            log.error("Failed to send a failure response.", e1);
        }
    }
}
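An example call (message text illustrative): report success from a REST handler:

// Sends {"status":200,"message":"river started"} to the client.
send(channel, RestStatus.OK, "river started");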
private void waitForESReady() {
    if (!this.esClient.admin().indices().prepareExists(this.index).execute().actionGet().exists()) {
        return;
    }
    for (final ShardStatus status : this.esClient.admin().indices().prepareStatus(this.index).execute().actionGet().getShards()) {
        if (status.getState() != IndexShardState.STARTED) {
            try {
                Thread.sleep(1000);
            } catch (InterruptedException e) {
                this.logger.trace("HBase thread has been interrupted while waiting for the database to be reachable");
            }
            this.logger.trace("Waiting...");
            waitForESReady();
            break;
        }
    }
}