/**
 * Converts a JSON array node into a list of ObjectIds.
 * Returns null when the node is null or when conversion fails; a conversion
 * failure is logged at debug level (best-effort semantics).
 */
public List<ObjectId> toRefs(JsonNode refsNode) {
    if (refsNode == null) {
        return null;
    }
    try {
        return mapper.convertValue(refsNode, new TypeReference<List<ObjectId>>() {
        });
    } catch (Exception ex) {
        // Malformed refs are treated as absent rather than failing the caller.
        LOG.debug("Failed to get objectIds: " + refsNode, ex);
        return null;
    }
}
LOG.startTimer("processed>" + instanceClass); LOG.startTimer("processed"); if (LOG.isTraceEnabled()) { LOG.trace("PROCESSING TIME:{} {} EVENT-ID:{} EVENT:{}", new Object[]{ threadTimestamp, LOG.inc("processed>" + instanceClass); LOG.inc("processed"); } finally { LOG.stopTimer("processed>" + instanceClass); LOG.startTimer("processed");
@Override public List<V> readViews(List<ViewDescriptor> request) throws IOException { LOG.inc(VIEW_READ_CALL_COUNT); Preconditions.checkArgument(request != null); Map<TenantAndActor, Set<Id>> permisionCheckTheseIds = new HashMap<>(); List<ViewCollectorImpl<V>> viewCollectors = buildViewCollectors(request, permisionCheckTheseIds); LOG.startTimer(VIEW_READ_LATENCY); try { viewValueReader.readViewValues(viewCollectors); } finally { LOG.stopTimer(VIEW_READ_LATENCY); LOG.startTimer(VIEW_PERMISSIONS_LATENCY); try { canViewTheseIds = checkPermissions(permisionCheckTheseIds); } finally { LOG.stopTimer(VIEW_PERMISSIONS_LATENCY); LOG.startTimer(VIEW_MERGE_LATENCY); try { List<V> views = collectViewObject(viewCollectors, canViewTheseIds); LOG.inc(VIEW_READ_VIEW_COUNT, views.size()); return views; } catch (Exception ex) { LOG.error("Failed while loading {}", request); throw new IOException("Failed to load for the following reason.", ex); } finally { LOG.stopTimer(VIEW_MERGE_LATENCY);
/**
 * Emits a trace-level diagnostic describing which of the given ids exist,
 * for a write that was blocked because referenced objects are missing.
 *
 * @param ids         the object ids that were existence-checked
 * @param existence   the subset of {@code ids} that actually exist
 * @param fieldChange the change whose write was blocked
 */
private void traceLogging(Set<ObjectId> ids, Set<ObjectId> existence, ViewFieldChange fieldChange) {
    if (LOG.isTraceEnabled()) {
        StringBuilder msg = new StringBuilder().append(" existence:");
        String sep = "";
        for (ObjectId id : ids) {
            msg.append(sep);
            msg.append(id).append(existence.contains(id) ? " exists" : " does not exist");
            sep = ",";
        }
        msg.append(" change:").append(fieldChange);
        // FIX: removed the redundant nested LOG.isTraceEnabled() re-check — this
        // branch is already guarded by the check above.
        // NOTE(review): "EXISTANCE" is a typo, kept byte-identical in case log
        // scrapers grep for the existing message.
        LOG.trace("WRITE BLOCKED DUE TO LACK OF EXISTANCE:" + msg.toString());
    }
}
}
/**
 * Processes one written event: runs the value traversers (when any are
 * configured), then the reference traversers. Each phase is wrapped in its
 * own "values" / "refs" timer, stopped in a finally block so timing is
 * recorded even when a phase throws.
 */
@Override
public void process(WrittenEventContext batchContext,
                    TenantIdAndCentricId tenantIdAndCentricId,
                    WrittenEvent writtenEvent,
                    long threadTimestamp) throws Exception {
    boolean hasValueTraversers = valueTraversers != null && !valueTraversers.isEmpty();
    if (hasValueTraversers) {
        LOG.startTimer("values");
        try {
            processValues(batchContext, tenantIdAndCentricId, writtenEvent, threadTimestamp);
        } finally {
            LOG.stopTimer("values");
        }
    }
    LOG.startTimer("refs");
    try {
        processRefs(batchContext, tenantIdAndCentricId, writtenEvent, threadTimestamp);
    } finally {
        LOG.stopTimer("refs");
    }
}
List<ViewFieldChange> changes = changeSets.getValue(); TenantIdAndCentricId tenantIdAndCentricId = viewDescriptor.getTenantIdAndCentricId(); LOG.inc("commitChanges", changes.size()); LOG.startTimer("commitChanges"); LOG.inc("commitedChanges", changes.size()); LOG.trace("Committed changes to hbase: {}", changes); LOG.warn("TODO Failed to process isn't implemented."); return failedToProcess;
/**
 * Applies the adds and removes staged in the given transaction to the view
 * value store, counting each batch and tracing individual changes when
 * trace logging is enabled.
 *
 * @param transaction the transaction whose staged changes are flushed
 * @throws IOException if the underlying store operations fail
 */
public void commit(Transaction transaction) throws IOException {
    TenantIdAndCentricId tenantId = transaction.tenantIdAndCentricId;
    List<ViewWriteFieldChange> adds = transaction.takeAdds();
    if (!adds.isEmpty()) {
        LOG.inc("added view paths", adds.size());
        viewValueStore.add(tenantId, adds);
        if (LOG.isTraceEnabled()) {
            for (ViewWriteFieldChange add : adds) {
                LOG.trace(add.toString());
            }
        }
    }
    List<ViewWriteFieldChange> removes = transaction.takeRemoves();
    if (!removes.isEmpty()) {
        // BUG FIX: previously incremented by adds.size() (copy-paste error),
        // which misreported the removed-view-paths metric.
        LOG.inc("removed view paths", removes.size());
        viewValueStore.remove(tenantId, removes);
        if (LOG.isTraceEnabled()) {
            for (ViewWriteFieldChange remove : removes) {
                LOG.trace(remove.toString());
            }
        }
    }
}
warn("Unable to look up model path " + modelPathIdHashCode + " classes for view path with path combination key: " + pathComboKey + " dropping value: " + fieldValue.getValue() + " on the floor."); LOG.warn("failed to load ViewValueBinding for viewValueBindingKey={}, fieldValue={} ", new Object[]{ modelPathIdHashCode, fieldValue }); LOG.debug("Failed to load model path and view path dictionary from column key. Older column key format is likely the case."); try { staleViewFieldStream.stream(viewDescriptor, fieldValue); } catch (Exception x) { LOG.error("Implementer of staleViewFieldStream is failing to handle all exception appropriately. ", x);
long timestamp, List<LinkTo> batchLinks) throws Exception { LOG.inc("batchLink"); LOG.startTimer("batchLink"); LOG.stopTimer("batchLink");
// Streaming callback for the back-reference scan: each non-null column value is
// wrapped as a ReferenceWithTimestamp and collected into `refs`. A trace line is
// emitted per hit, and a warning fires once the accumulated fan-out exceeds
// MAX_FAN_OUT_BEFORE_WARN (TODO in message: break scans into batched scans).
// Returning the value unchanged keeps the underlying scan streaming; the trailing
// `}));` closes the enclosing anonymous class and call, whose head is above this view.
@Override public ColumnValueAndTimestamp<ObjectId, byte[], Long> callback(ColumnValueAndTimestamp<ObjectId, byte[], Long> backRef) throws Exception { if (backRef != null) { ReferenceWithTimestamp reference = new ReferenceWithTimestamp(backRef.getColumn(), fieldName, backRef.getTimestamp()); if (LOG.isTraceEnabled()) { LOG.trace(System.currentTimeMillis() + " |--> {} Got aIds Tenant={} b={} a={} Timestamp={}", new Object[]{ threadTimestamp, tenantIdAndCentricId, aClassAndField_bId, backRef.getColumn(), backRef.getTimestamp()}); } refs.add(reference); if (refs.size() > MAX_FAN_OUT_BEFORE_WARN) { LOG.warn("TODO: streamBackRefs reference fan-out is exceeding comfort level. We need break scans into batched scans."); } } return backRef; } }));
/**
 * Loads (or reloads) the events model for the given tenant.
 * A NULL provider version stores an empty model; otherwise the model is rebuilt
 * only when the cached version differs from the provider's current version.
 *
 * @param tenantId the tenant whose event model should be (re)loaded
 * @throws RuntimeException if any event fails to load into the new model
 */
public void loadModel(TenantId tenantId) {
    ChainedVersion currentVersion = eventsProvider.getCurrentEventsVersion(tenantId);
    if (currentVersion == ChainedVersion.NULL) {
        versionedEventsModels.put(tenantId, new VersionedEventsModel(currentVersion, null));
    } else {
        VersionedEventsModel currentVersionedEventModel = versionedEventsModels.get(tenantId);
        if (currentVersionedEventModel == null
            || !currentVersionedEventModel.getVersion().equals(currentVersion)) {
            final MutableInt errors = new MutableInt();
            final EventsModel newEventsModel = new EventsModel();
            List<ObjectNode> events = eventsProvider.getEvents(new EventsProcessorId(tenantId, "NotBeingUsedYet"));
            for (ObjectNode event : events) {
                try {
                    newEventsModel.addEvent(event);
                } catch (Exception x) {
                    // BUG FIX: the counter was never incremented, so the message
                    // always reported "(0)" failures.
                    errors.increment();
                    LOG.error("Failed to load event for " + event, x);
                    // BUG FIX: preserve the original cause instead of dropping it.
                    throw new RuntimeException("Failed to load (" + errors.longValue() + ") event/s. ", x);
                }
            }
            versionedEventsModels.put(tenantId, new VersionedEventsModel(currentVersion, newEventsModel));
        } else {
            LOG.debug("Didn't reload because event model versions are equal.");
        }
    }
}
// Drains one aggregated batch of back-reference stream requests in a single
// store call (logging the aggregation size when more than one was coalesced).
// On failure, every queued request is individually notified via failure(ex) so
// no caller is left hanging. The trailing `} });` closes the enclosing
// anonymous class and call, whose head is above this view.
@Override public void run() { try { if (requests.size() > 1) { LOG.debug("Request aggregation size: {}", requests.size()); } referenceStore.multiStreamRefs(requests); } catch (Exception ex) { LOG.warn("Failed to process request:" + requests, ex); for (RefStreamRequestContext request : requests) { request.failure(ex); } } } });
/**
 * Mock event write: assigns a fresh event id and an ObjectId to every incoming
 * event without persisting anything. When the thread-local capture list is set,
 * each event is also recorded there for later inspection.
 *
 * @param events  the events to "write"; must not be null
 * @param options unused by this mock implementation
 * @return the generated event ids and object ids, in input order
 * @throws JsonEventWriteException if an event cannot be serialized for debug logging
 */
@Override
public EventWriterResponse write(List<ObjectNode> events, EventWriterOptions options) throws JsonEventWriteException {
    Preconditions.checkNotNull(events);
    ObjectWriter prettyWriter = mapper.writer().withDefaultPrettyPrinter();
    int count = events.size();
    List<Long> generatedEventIds = new ArrayList<>(count);
    List<ObjectId> generatedObjectIds = new ArrayList<>(count);
    List<ObjectNode> captured = this.events.get();
    for (ObjectNode event : events) {
        if (LOG.isDebugEnabled()) {
            try {
                LOG.debug("Mock received event: " + prettyWriter.writeValueAsString(event));
            } catch (IOException e) {
                throw new JsonEventWriteException("Failed to serialize event", e);
            }
        }
        generatedEventIds.add(orderIdProvider.nextId());
        String instanceClassName = jsonEventConventions.getInstanceClassName(event);
        generatedObjectIds.add(jsonEventConventions.getInstanceObjectId(event, instanceClassName));
        if (captured != null) {
            captured.add(event);
        }
    }
    return new EventWriterResponse(generatedEventIds, generatedObjectIds);
}
// Last-resort handler: logs any exception that escapes a worker thread so it is
// not silently dropped by the JVM's default handler. The trailing `})` closes
// the enclosing anonymous class, whose head is above this view.
@Override public void uncaughtException(Thread t, Throwable e) { LOG.error("Thread " + t.getName() + " threw uncaught exception", e); } })
final EventModel ingressEvent = EventModel.builder(eventNode, false).build(); Validated.ValidatedBuilder builder = Validated.build(); LOG.inc("validated_events"); if (eventsModel == null || eventsModel.getEventsModel() == null) { LOG.inc("no_model"); builder.addMessage(failInvalidEvents, "There is no event model to validate against. Please call loadModel()"); } else { LOG.inc("unknown_event>" + ingressEvent.getEventClass()); LOG.inc("unknown_event"); builder.addMessage(failInvalidEvents, "'" + ingressEvent.getEventClass() + "' is not declared in the current event model. Do you need to reload the model?"); ValueType modelFieldType = modelFields.get(ingressFieldName); if (modelFieldType == null) { LOG.inc("unexpected_field>" + ingressFieldName); LOG.inc("unexpected_field"); builder.addMessage(failInvalidEvents, "unexpected field: '" + ingressFieldName + "'"); LOG.inc("unexpected_type>" + ingressFieldName + ">" + ingressFieldType); LOG.inc("unexpected_type"); builder.addMessage(failInvalidEvents, "expected: '" + ingressFieldName + "' of type '" + modelFieldType
final VersionedTasmoViewModel model = tasmoViewModel.getVersionedTasmoViewModel(tenantId); if (model == null) { LOG.error("Cannot process an event until a model has been loaded."); throw new Exception("Cannot process an event until a model has been loaded."); LOG.info("{} millis valuePaths:{} refPaths:{} backRefPaths:{} " + "fanDepth:{} fanBreath:{} value:{} changes:{} DONE PROCESSING {} event:{} instance:{} tenant:{}", new Object[]{ elapse,
/**
 * Streaming callback for back-reference scans. Non-null values are wrapped as
 * ReferenceWithTimestamp and accumulated into the current batch (warning once
 * the fan-out exceeds MAX_FAN_OUT_BEFORE_WARN). A null value marks
 * end-of-stream: any pending batch is flushed to the queue, followed by the
 * NULL sentinel. The value is always returned unchanged to keep the scan going.
 */
@Override
public ColumnValueAndTimestamp<ObjectId, byte[], Long> callback(ColumnValueAndTimestamp<ObjectId, byte[], Long> v) throws Exception {
    if (v != null) {
        batch.add(new ReferenceWithTimestamp(v.getColumn(), referringFieldName, v.getTimestamp()));
        if (batch.size() > MAX_FAN_OUT_BEFORE_WARN) {
            LOG.warn("TODO: streamBackRefs reference fan-out is exceeding comfort level. We need to break scans into batched scans.");
        }
    } else {
        if (!batch.isEmpty()) {
            refStreamQueue.put(batch);
        }
        refStreamQueue.put(NULL);
    }
    return v;
}
/**
 * Logs every collected statistic: first all latency categories (via the
 * per-stat logStats overload), then all tally categories, each group sorted
 * by value before being written to the log.
 */
public void logStats() {
    List<SortableStat> latencyStats = new ArrayList<>();
    // FIX: iterate entrySet() instead of keySet()+get() — one lookup per entry
    // instead of two, and no unchecked null from get().
    for (Map.Entry<String, Map<String, DescriptiveStatistics>> catagoryEntry : latencyCatagories.entrySet()) {
        String catagoryName = catagoryEntry.getKey();
        for (Map.Entry<String, DescriptiveStatistics> statEntry : catagoryEntry.getValue().entrySet()) {
            double sla = 0; // no SLA configured; stats reported as-is
            logStats(sla, "STATS " + catagoryName + " OF " + statEntry.getKey(), statEntry.getValue(), "millis", latencyStats);
        }
    }
    Collections.sort(latencyStats);
    for (SortableStat stat : latencyStats) {
        LOG.info(stat.value + " " + stat.name);
    }
    List<SortableStat> tallis = new ArrayList<>();
    for (Map.Entry<String, Map<String, AtomicLong>> catagoryEntry : tallyCatagories.entrySet()) {
        String catagoryName = catagoryEntry.getKey();
        for (Map.Entry<String, AtomicLong> tallyEntry : catagoryEntry.getValue().entrySet()) {
            tallis.add(new SortableStat(tallyEntry.getValue().get(), "TALLY " + catagoryName + " FOR " + tallyEntry.getKey()));
        }
    }
    Collections.sort(tallis);
    for (SortableStat stat : tallis) {
        LOG.info(stat.value + " " + stat.name);
    }
}
/**
 * Writes the given field changes inside a single view-value-store transaction:
 * adds become set() calls, removes become remove() calls, and the transaction
 * is committed only after every change has been staged.
 *
 * @param tenantIdAndCentricId the tenant/centric scope for the transaction
 * @param fieldChanges         the staged view field changes to apply
 * @throws ViewWriterException on an unknown change type or any store failure
 */
@Override
public void write(TenantIdAndCentricId tenantIdAndCentricId, List<ViewWriteFieldChange> fieldChanges) throws ViewWriterException {
    try {
        Transaction transaction = viewValueStore.begin(tenantIdAndCentricId);
        for (ViewWriteFieldChange fieldChange : fieldChanges) {
            ViewWriteFieldChange.Type type = fieldChange.getType();
            if (type == ViewWriteFieldChange.Type.remove) {
                LOG.trace(" >>>>>> VVS REMOVE:{}", fieldChange);
                transaction.remove(fieldChange.getViewObjectId(), fieldChange.getModelPathIdHashcode(),
                    fieldChange.getModelPathInstanceIds(), fieldChange.getTimestamp());
            } else if (type == ViewWriteFieldChange.Type.add) {
                LOG.trace(" >>>>>> VVS ADD:{}", fieldChange);
                transaction.set(fieldChange.getViewObjectId(), fieldChange.getModelPathIdHashcode(),
                    fieldChange.getModelPathInstanceIds(), fieldChange.getValue(), fieldChange.getTimestamp());
            } else {
                throw new ViewWriterException("Unknown change type." + fieldChange.getType());
            }
        }
        viewValueStore.commit(transaction);
    } catch (IOException | ViewWriterException x) {
        // NOTE(review): a ViewWriterException thrown above is deliberately
        // re-wrapped here, matching the original behavior.
        throw new ViewWriterException("Failed to write view fields changes for tenantIdAndCentricId:" + tenantIdAndCentricId, x);
    }
}
}