generator.writeString(event.collection()); List<SchemaField> fields = event.schema(); for (SchemaField field : fields) { generator.writeFieldName("properties." + field.getName());
public void store(Event event, boolean partitionCheckDone) { GenericRecord record = event.properties(); try (Connection connection = connectionPool.getConnection()) { Schema schema = event.properties().getSchema(); PreparedStatement ps = connection.prepareStatement(getQuery(event.project(), event.collection(), schema)); bindParam(connection, ps, event.schema(), record); ps.executeUpdate(); } catch (SQLException e) { // check_violation -> https://www.postgresql.org/docs/8.2/static/errcodes-appendix.html if (version.getVersion() == PG10 && !partitionCheckDone && "23514".equals(e.getSQLState())) { generateMissingPartitions(event.project(), event.collection(), ImmutableList.of(event), 0); store(event, true); } else { throw new RuntimeException(e); } } catch (Exception e) { throw new RuntimeException(e); } }
Event event = eventsForCollection.get(i); GenericRecord properties = event.properties(); bindParam(connection, ps, lastEvent.schema(), properties); ps.addBatch(); if (i > 0 && i % 5000 == 0) {
/**
 * Stores a batch of events in the cold-storage connector, one
 * {@code INSERT ... SELECT ... UNION ALL} statement per collection.
 * On failure the method backs off for 5 minutes and then throws, surfacing
 * the error to the caller.
 *
 * @param events events to persist; all are assumed to belong to one project
 * @return {@link EventStore#SUCCESSFUL_BATCH} when every collection insert succeeds
 * @throws IllegalStateException when any collection insert fails
 */
@Override
public int[] storeBatch(List<Event> events) {
    // Group events by collection so each collection gets a single bulk INSERT.
    for (Map.Entry<String, List<Event>> collection : events.stream()
            .collect(Collectors.groupingBy(e -> e.collection())).entrySet()) {
        // NOTE(review): the project of events.get(0) is used for every group —
        // confirm callers never mix projects within one batch.
        QueryResult join = queryExecutor.executeRawStatement(String.format("INSERT INTO %s.%s.%s (_shard_time, %s) (%s)",
                config.getColdStorageConnector(),
                checkProject(events.get(0).project(), '"'),
                checkCollection(collection.getKey()),
                collection.getValue().get(0).schema().stream()
                        .map(e -> ValidationUtil.checkCollection(e.getName()))
                        .collect(Collectors.joining(", ")),
                collection.getValue().stream()
                        .map(e -> buildValues(e.properties(), e.schema()))
                        .collect(Collectors.joining(" union all "))))
                .getResult().join();
        if (join.isFailed()) {
            try {
                // Deliberate 5-minute back-off before propagating the failure.
                Thread.sleep(300000);
            } catch (InterruptedException e) {
                // FIX: restore the interrupt status instead of swallowing it
                // with printStackTrace(), so callers can observe cancellation.
                Thread.currentThread().interrupt();
            }
            throw new IllegalStateException(join.getError().message);
        }
    }
    return EventStore.SUCCESSFUL_BATCH;
}
/**
 * Stores a single event in the cold-storage connector with a plain
 * {@code INSERT INTO ... VALUES} statement.
 *
 * @param event the event to persist
 */
@Override
public void store(Event event) {
    // FIX: validate/escape the project and collection identifiers exactly like
    // storeBatch() does — previously they were interpolated into the SQL
    // unchecked, which is an injection vector for untrusted names.
    queryExecutor.executeRawStatement(String.format("INSERT INTO %s.%s.%s VALUES %s",
            config.getColdStorageConnector(),
            checkProject(event.project(), '"'),
            checkCollection(event.collection()),
            buildValues(event.properties(), event.schema())));
}
/**
 * Creates the store and starts a single-threaded background flusher that runs
 * every second: it drains all queued (project, collection) batches and submits
 * each batch to ClickHouse via {@code executeRequest}.
 */
@Inject
public ClickHouseEventStore(ClickHouseConfig config) {
    this.config = config;
    queuedEvents = new ConcurrentHashMap<>();
    currentFutureSingle = new ConcurrentHashMap<>();
    // NOTE(review): the executor is never shut down — it keeps a non-daemon
    // thread alive for the JVM's lifetime; confirm that is intended.
    Executors.newSingleThreadScheduledExecutor().scheduleWithFixedDelay(() -> {
        try {
            Iterator<Map.Entry<ProjectCollection, List<Event>>> iterator = queuedEvents.entrySet().iterator();
            while (iterator.hasNext()) {
                Map.Entry<ProjectCollection, List<Event>> next = iterator.next();
                List<Event> value = next.getValue();
                // NOTE(review): events appended to this list by producers between
                // getValue() and remove() could be dropped — verify the enqueue
                // path synchronizes with this drain.
                iterator.remove();
                // Completing future handed to executeRequest so waiting callers
                // are notified when this batch is flushed.
                CompletableFuture<Void> remove = currentFutureSingle.remove(next.getKey());
                // Schema of the first event is used for the whole batch —
                // presumably all events in one key share a schema; confirm.
                List<SchemaField> schema = value.get(0).schema();
                executeRequest(next.getKey(), schema, next.getValue(), remove, false);
            }
        } catch (Exception e) {
            // Swallowing keeps the scheduled task alive: an exception escaping
            // a scheduleWithFixedDelay task cancels all future runs.
            e.printStackTrace();
        }
    }, 1, 1, TimeUnit.SECONDS);
}
/**
 * Serializes an event into a little-endian binary buffer: a varint field
 * count, the {@code _time} column encoded as a DATE (epoch millis divided by
 * 86400000, i.e. days since epoch; 0 when absent), then each schema field's
 * value in declaration order.
 *
 * @param event the event to serialize
 * @return a ByteBuffer backed by the serialized bytes
 */
private ByteBuffer getBuffer(Event event) {
    GenericRecord props = event.properties();
    // Rough capacity hint: 8 bytes per field.
    SharedByteArrayOutputStream sink =
            new SharedByteArrayOutputStream(event.properties().getSchema().getFields().size() * 8);
    LittleEndianDataOutputStream stream = new LittleEndianDataOutputStream(sink);
    Object timeValue = props.get("_time");
    try {
        int fieldCount = event.schema().size();
        writeVarInt(fieldCount, stream);
        // A missing _time is written as day 0.
        int epochDay = timeValue == null ? 0 : (int) (((long) timeValue) / 86400000);
        writeValue(epochDay, DATE, stream);
        for (int idx = 0; idx < fieldCount; idx++) {
            writeValue(props.get(idx), event.schema().get(idx).getType(), stream);
        }
    } catch (IOException e) {
        throw Throwables.propagate(e);
    }
    return sink.toByteBuffer();
}