/**
 * Creates the MongoDB river: wires the injected services, parses the river
 * definition from the settings, and prepares the (possibly throttled) stream
 * queue shared with the worker threads. The river starts in STOPPED state.
 */
@Inject
public MongoDBRiver(RiverName riverName, RiverSettings settings, @RiverIndexName String riverIndexName, Client esClient,
        ScriptService scriptService, MongoClientService mongoClientService) {
    super(riverName, settings);
    if (logger.isTraceEnabled()) {
        logger.trace("Initializing");
    }
    this.esClient = esClient;
    this.scriptService = scriptService;
    this.mongoClientService = mongoClientService;
    this.definition = MongoDBRiverDefinition.parseSettings(riverName.name(), riverIndexName, settings, scriptService);

    // A throttle size of -1 means "unbounded": use a LinkedTransferQueue;
    // otherwise cap the in-flight entries with a fixed-capacity queue.
    final BlockingQueue<QueueEntry> stream;
    if (definition.getThrottleSize() == -1) {
        stream = new LinkedTransferQueue<QueueEntry>();
    } else {
        stream = new ArrayBlockingQueue<QueueEntry>(definition.getThrottleSize());
    }
    this.context = new SharedContext(stream, Status.STOPPED);
}
@Override public void start() { // http://stackoverflow.com/questions/5270611/read-maven-properties-file-inside-jar-war-file logger.info("{} - {}", DESCRIPTION, MongoDBHelper.getRiverVersion()); Status status = MongoDBRiverHelper.getRiverStatus(esClient, riverName.getName()); if (status == Status.IMPORT_FAILED || status == Status.INITIAL_IMPORT_FAILED || status == Status.SCRIPT_IMPORT_FAILED || status == Status.START_FAILED) { logger.error("Cannot start. Current status is {}", status); return; } if (status == Status.STOPPED) { // Leave the current status of the river alone, but set the context status to 'stopped'. // Enabling the river via REST will trigger the actual start. context.setStatus(Status.STOPPED); logger.info("River is currently disabled and will not be started"); } else { // Mark the current status as "waiting for full start" context.setStatus(Status.START_PENDING); // Request start of the river in the next iteration of the status thread MongoDBRiverHelper.setRiverStatus(esClient, riverName.getName(), Status.RUNNING); logger.info("Startup pending"); } statusThread = EsExecutors.daemonThreadFactory(settings.globalSettings(), "mongodb_river_status:" + definition.getIndexName()).newThread( new StatusChecker(this, definition, context)); statusThread.start(); }
// Read the optional "index" settings element, falling back to defaults
// (index named after the river, default issue type name).
if (settings.containsKey("index")) {
    indexSettings = (Map<String, Object>) settings.get("index");
    indexName = XContentMapValues.nodeStringValue(indexSettings.get("index"), riverName.name());
    typeName = XContentMapValues.nodeStringValue(indexSettings.get("type"), INDEX_ISSUE_TYPE_NAME_DEFAULT);
} else {
    indexName = riverName.name();
    typeName = INDEX_ISSUE_TYPE_NAME_DEFAULT;
}
// FIX(review): the original fragment invoked `.info(` with no receiver and
// left the if/else braces unbalanced — restored `logger` and brace closure.
logger.info(
        "Configured JIRA River '{}' for JIRA API base URL '{}', jira user '{}', JQL timezone '{}'. Search index name '{}', document type for issues '{}'.",
        riverName.getName(), jiraClient.getJiraAPIUrlBase(), jiraUser, jiraJqlTimezone, indexName, typeName);
if (activityLogIndexName != null) {
    logger.info(
            "Activity log for JIRA River '{}' is enabled. Search index name '{}', document type for index updates '{}'.",
            riverName.getName(), activityLogIndexName, activityLogTypeName);
}
// Wire the request handler and set up local indexing parameters.
this.requestHandler = qt; // NOTE(review): 'qt' is declared outside this fragment — confirm its type/meaning
String index = riverName.type(); // presumably the target index is named after the river type — verify against callers
String type = "import";
int maxConcurrentBulk = 10; // upper bound on concurrently executing bulk requests
// Fragment of the email-indexing loop: adds each message to the bulk request
// (with or without an explicit document id), then checkpoints skip_count into
// the river's own document in the "_river" metadata index.
// NOTE(review): braces here are unbalanced as seen — the enclosing if/try is
// outside this view.
bulk.add(indexRequest(indexName).type(typeName).source(EmailToJson.toJson(message[i], riverName.getName(),config)));
}else{
// A custom id field is configured: read it from the message headers.
String[] ids=message[i].getHeader(config.getIdField()); // NOTE(review): 'ids' is never used while '.id(id)' below uses a different variable — confirm this is not a typo for ids[0]
bulk.add(indexRequest(indexName).type(typeName).id(id).source(EmailToJson.toJson(message[i], riverName.getName(),config)));
// Persist the skip_count checkpoint so a restart can resume where it left off.
bulk.add(indexRequest("_river").type(riverName.name()).id(String.valueOf(config.getHashId()))
.source(jsonBuilder().startObject().startObject("email").field("skip_count", skipCount).endObject().endObject()));
} catch (IOException e) {
/**
 * Deletes the river's mapping from the "_river" meta index.
 *
 * @throws EsUtilSystemException if the delete request fails or is not
 *         acknowledged by the cluster.
 */
public static void delete(final Client client, final RiverName riverName) {
    final DeleteMappingResponse response;
    try {
        response = client.admin().indices()
                .prepareDeleteMapping("_river")
                .setType(riverName.name())
                .execute().actionGet();
    } catch (final ElasticsearchException e) {
        throw new EsUtilSystemException("Failed to delete " + riverName.name(), e);
    }
    // An unacknowledged response means the mapping may still exist — fail loudly.
    if (!response.isAcknowledged()) {
        throw new EsUtilSystemException("Failed to delete " + riverName.name() + ". " + response.toString());
    }
}
/**
 * Registers a running jira river instance, keyed by its river name, so REST
 * management operations can look it up later.
 *
 * @param jiraRiver instance to register
 * @see #getRunningInstances()
 * @see #getRunningInstance(String)
 */
public static void addRunningInstance(IJiraRiverMgm jiraRiver) {
    final String name = jiraRiver.riverName().getName();
    riverInstances.put(name, jiraRiver);
}
/**
 * Reads the persisted "lastUpdatedTimestamp" document from the river index.
 *
 * @return the stored timestamp as a string, or {@code null} when the document
 *         does not exist or carries no such field.
 */
private String getLastUpdatedTimestamp() {
    GetResponse response = client.prepareGet()
            .setIndex(riverIndexName)
            .setType(riverName.name())
            .setId("lastUpdatedTimestamp")
            .execute().actionGet();
    if (!response.isExists()) {
        return null;
    }
    if (response.getSource().containsKey("lastUpdatedTimestamp")) {
        return response.getSource().get("lastUpdatedTimestamp").toString();
    }
    return null;
}
}
@Override public synchronized void close() { logger.info("closing JIRA River on this node"); closed = true; if (coordinatorThread != null) { coordinatorThread.interrupt(); } // free instances created in #start() coordinatorThread = null; coordinatorInstance = null; synchronized (riverInstances) { riverInstances.remove(riverName().getName()); } }
/** Update river last changes id value.*/ private void updateRiver(String lastScanTimeField, Long lastScanTime) throws Exception{ if (logger.isDebugEnabled()){ logger.debug("Updating lastScanTimeField: {}", lastScanTime); } // We store the lastupdate date and some stats XContentBuilder xb = jsonBuilder() .startObject() .startObject("amazon-s3") .field("feedname", feedDefinition.getFeedname()) .field(lastScanTimeField, lastScanTime) .endObject() .endObject(); esIndex("_river", riverName.name(), lastScanTimeField, xb); }
/**
 * Write indexing info into activity log if enabled.
 * <p>
 * Failures are logged and swallowed on purpose: an audit-log write must not
 * break the indexing run itself.
 *
 * @param indexingInfo to write
 */
protected void writeActivityLogRecord(ProjectIndexingInfo indexingInfo) {
    if (activityLogIndexName != null) {
        try {
            client.prepareIndex(activityLogIndexName, activityLogTypeName)
                    .setSource(indexingInfo.buildDocument(jsonBuilder(), riverName().getName(), true, true)).execute()
                    .actionGet();
        } catch (Exception e) {
            // FIX(review): pass the Throwable to the logger so the stack trace
            // is preserved (ESLogger's (msg, throwable, params) overload);
            // previously only e.getMessage() was recorded.
            logger.error("Error during index update result writing to the audit log {}", e, e.getMessage());
        }
    }
}
// Returns whether the river should run. Reads the "_s3status" control document
// from "_river"; creates it with status STARTED on first run. Only an explicit
// "STOPPED" status returns false — any error is logged and treated as started.
private boolean isStarted(){
// Refresh index before querying it.
client.admin().indices().prepareRefresh("_river").execute().actionGet();
GetResponse isStartedGetResponse = client.prepareGet("_river", riverName().name(), "_s3status").execute().actionGet();
try{
if (!isStartedGetResponse.isExists()){
// First run: seed the status document as STARTED.
XContentBuilder xb = jsonBuilder().startObject()
.startObject("amazon-s3")
.field("feedname", feedDefinition.getFeedname())
.field("status", "STARTED").endObject()
.endObject();
// NOTE(review): fire-and-forget — no actionGet(); presumably intentional
// (best-effort seed), but confirm the write is not required to complete here.
client.prepareIndex("_river", riverName.name(), "_s3status").setSource(xb).execute();
return true;
} else {
String status = (String)XContentMapValues.extractValue("amazon-s3.status", isStartedGetResponse.getSourceAsMap());
if ("STOPPED".equals(status)){
return false;
}
}
} catch (Exception e){
// Best-effort: on any failure, warn and keep the river running.
logger.warn("failed to get status for " + riverName().name() + ", throttling....", e);
}
return true;
}
// Constructor-body fragment (signature is outside this view): creates the
// logger via the ES integration facade and captures naming configuration.
super();
logger = esIntegration.createLogger(getClass());
this.riverName = esIntegration.riverName().getName();
this.indexName = indexName;
this.issueTypeName = issueTypeName;
/**
 * Reads the persisted skip_count for the given setting id from the river's
 * state document in "_river".
 *
 * @param settingId id of the river state document to read
 * @return the stored skip_count, or 1 when no state exists or reading fails
 */
@SuppressWarnings("unchecked")
private int getLastStateFromRiver(int settingId) {
    int skipCount = 1;
    try {
        // Refresh so a just-written checkpoint is visible to the get below.
        client.admin().indices().prepareRefresh("_river").execute().actionGet();
        GetResponse lastSeqGetResponse = client.prepareGet("_river", riverName().name(), String.valueOf(settingId)).execute().actionGet();
        if (lastSeqGetResponse.isExists()) {
            Map<String, Object> rssState = (Map<String, Object>) lastSeqGetResponse.getSource().get("email");
            if (rssState != null) {
                Object skip_count = rssState.get("skip_count");
                if (skip_count != null) {
                    skipCount = Integer.parseInt(skip_count.toString());
                }
            }
        } else {
            if (logger.isDebugEnabled())
                logger.debug("{} doesn't exist", settingId);
        }
    } catch (Exception e) {
        // FIX(review): the same exception was logged twice (error + warn);
        // keep the single warn that carries the useful context.
        logger.warn("failed to get last_skip_count, throttling....", e);
    }
    return skipCount;
}
}
// Mid-expression fragment: builds a filtered match-all search that selects the
// most recent indexing-info document for this project and this river
// (sorted by start date descending, size 1).
FilterBuilders.andFilter(
FilterBuilders.termFilter(ProjectIndexingInfo.DOCFIELD_PROJECT_KEY, projectKey),
FilterBuilders.termFilter(ProjectIndexingInfo.DOCFIELD_RIVER_NAME, riverName().getName())))
.setQuery(QueryBuilders.matchAllQuery()).addSort(ProjectIndexingInfo.DOCFIELD_START_DATE, SortOrder.DESC)
.addField("_source").setSize(1).execute().actionGet();
// Fragment: stamp the document with the river name and current date, then
// open the "indexing" sub-object (closed outside this view).
builder.field("river_name", riverName().getName());
builder.field("info_date", currentDate);
builder.startObject("indexing");
// Fragment of index-settings parsing: read name/type/bulk/flush values with
// defaults. The else branch is cut off in this view (flushInterval default
// and closing brace are outside it).
indexName = XContentMapValues.nodeStringValue(indexSettings.get(INDEX_NAME), riverName.name());
typeName = XContentMapValues.nodeStringValue(indexSettings.get(MAPPING_TYPE), "status");
bulkSize = XContentMapValues.nodeIntegerValue(indexSettings.get(BULK_SIZE), 100);
// Flush every 12h unless configured otherwise.
flushInterval = TimeValue.parseTimeValue(XContentMapValues.nodeStringValue(indexSettings.get(FLUSH_INTERVAL), "12h"), FLUSH_12H);
} else {
// Defaults when no index settings block is present.
indexName = riverName.name();
typeName = "status";
bulkSize = 100;
// Fragment: fetch the river's checkpoint document for lastScanTimeField
// from the "_river" meta index; the existence branch continues outside view.
GetResponse lastSeqGetResponse = client.prepareGet("_river", riverName().name(), lastScanTimeField).execute().actionGet();
if (lastSeqGetResponse.isExists()) {
/**
 * Reconfigure jira river from its persisted "_meta" configuration document.
 * The river must be stopped first.
 *
 * @throws IllegalStateException when the river is still running, or when no
 *         configuration document exists.
 */
public synchronized void reconfigure() {
    if (!closed)
        throw new IllegalStateException("Jira River must be stopped to reconfigure it!");

    logger.info("reconfiguring JIRA River");
    String riverIndexName = getRiverIndexName();
    // Make sure the freshest "_meta" document is visible to the get below.
    refreshSearchIndex(riverIndexName);
    GetResponse resp = client.prepareGet(riverIndexName, riverName().name(), "_meta").execute().actionGet();
    if (!resp.isExists()) {
        throw new IllegalStateException("Configuration document not found to reconfigure jira river " + riverName().name());
    }
    if (logger.isDebugEnabled()) {
        logger.debug("Configuration document: {}", resp.getSourceAsString());
    }
    configure(resp.getSource());
}