/**
 * Adds the given segment to this data source, overwriting any previously stored segment
 * that has the same {@link SegmentId}.
 *
 * @return this instance, to allow call chaining
 */
public DruidDataSource addSegment(DataSegment dataSegment)
{
  final SegmentId segmentId = dataSegment.getId();
  idToSegmentMap.put(segmentId, dataSegment);
  return this;
}
/**
 * Adds the given segment unless a segment with the same {@link SegmentId} is already present.
 *
 * @return true if the segment was added, false if a segment with the same id already existed
 *         in this DruidDataSource
 */
public boolean addSegmentIfAbsent(DataSegment dataSegment)
{
  final DataSegment previous = idToSegmentMap.putIfAbsent(dataSegment.getId(), dataSegment);
  return previous == null;
}
/** Returns the id of the wrapped segment. */
@Override
public SegmentId getId()
{
  return segment.getId();
}
/** Renders this action as a human-readable "DROP: &lt;segment id&gt;" string. */
@Override
public String asString()
{
  final SegmentId segmentId = segment.getId();
  return StringUtils.format("DROP: %s", segmentId);
}
/** Hash is derived solely from the segment id, mirroring identity by {@link #getId()}. */
@Override
public int hashCode()
{
  final SegmentId segmentId = getId();
  return segmentId.hashCode();
}
/**
 * Checks whether the server identified by {@code serverKey} currently serves the given segment.
 * Returns false when the server is unknown.
 */
@Override
public boolean isSegmentLoadedByServer(String serverKey, DataSegment segment)
{
  final DruidServerHolder holder = servers.get(serverKey);
  if (holder == null) {
    return false;
  }
  return holder.druidServer.getSegment(segment.getId()) != null;
}
/**
 * Checks whether the inventory entry for {@code serverKey} currently holds the given segment.
 * Returns false when the server is not present in the inventory.
 */
@Override
public boolean isSegmentLoadedByServer(String serverKey, DataSegment segment)
{
  try {
    final DruidServer server = getInventoryValue(serverKey);
    return server != null && server.getSegment(segment.getId()) != null;
  }
  catch (Exception ex) {
    // Throwables.propagate is deprecated; this is its documented replacement:
    // rethrow unchecked exceptions as-is, wrap checked ones in RuntimeException.
    Throwables.throwIfUnchecked(ex);
    throw new RuntimeException(ex);
  }
}
/** Test callback: releases the drop latch registered for the removed segment's id. */
@Override
public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment)
{
  final SegmentId droppedId = segment.getId();
  segmentDropLatches.get(droppedId).countDown();
  return ServerView.CallbackAction.CONTINUE;
}
/** Test callback: releases the add latch registered for the announced segment's id. */
@Override
public ServerView.CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment)
{
  // NOTE(review): "segmentAddLathces" looks like a typo of "segmentAddLatches";
  // kept as-is because the field is declared elsewhere in this file.
  final SegmentId addedId = segment.getId();
  segmentAddLathces.get(addedId).countDown();
  return ServerView.CallbackAction.CONTINUE;
}
/** Evicts the cache entries keyed by the removed segment's id, then lets other callbacks run. */
@Override
public ServerView.CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment)
{
  final String cacheKey = segment.getId().toString();
  CachingClusteredClient.this.cache.close(cacheKey);
  return ServerView.CallbackAction.CONTINUE;
}
}
/**
 * Loads the queryable index stored under {@code parentDir} and wraps it as a segment
 * carrying the given {@link DataSegment}'s id.
 *
 * @throws SegmentLoadingException if reading the index from disk fails
 */
@Override
public Segment factorize(DataSegment dataSegment, File parentDir) throws SegmentLoadingException
{
  final SegmentId segmentId = dataSegment.getId();
  try {
    return new QueryableIndexSegment(indexIO.loadIndex(parentDir), segmentId);
  }
  catch (IOException e) {
    throw new SegmentLoadingException(e, "%s", e.getMessage());
  }
}
}
/**
 * Deletes the on-disk info file for the given segment from the local info cache directory
 * and asserts that the file is gone afterwards.
 */
private void deleteSegmentFromCache(final DataSegment segment)
{
  final File segmentInfoCacheFile = new File(infoDir, segment.getId().toString());
  if (segmentInfoCacheFile.exists()) {
    // Original code ignored delete()'s return value; asserting on it surfaces a failed
    // delete at the point of failure rather than only via the existence check below.
    Assert.assertTrue(segmentInfoCacheFile.delete());
  }
  // assertFalse is clearer than assertTrue(!...) for the same check.
  Assert.assertFalse(segmentInfoCacheFile.exists());
}
/**
 * Decrements the remaining lifetime of every segment move in flight for the given tier,
 * emitting an alert for any move whose lifetime has run out (i.e. appears stuck).
 */
protected void reduceLifetimes(String tier)
{
  for (BalancerSegmentHolder holder : currentlyMovingSegments.get(tier).values()) {
    holder.reduceLifetime();
    if (holder.getLifetime() > 0) {
      continue;
    }
    log.makeAlert("[%s]: Balancer move segments queue has a segment stuck", tier)
       .addData("segment", holder.getSegment().getId())
       .addData("server", holder.getFromServer().getMetadata())
       .emit();
  }
}
/**
 * Returns true if a row with the given segment's id exists in the segments table.
 * The id is bound as a query parameter; only the table name is interpolated.
 */
private boolean segmentExists(final Handle handle, final DataSegment segment)
{
  final String sql = StringUtils.format(
      "SELECT id FROM %s WHERE id = :identifier",
      dbTables.getSegmentsTable()
  );
  return !handle.createQuery(sql)
                .bind("identifier", segment.getId().toString())
                .map(StringMapper.FIRST)
                .list()
                .isEmpty();
}
/** Deletes the row for the given segment's id from the segments table (id bound as a parameter). */
private void deleteSegment(final Handle handle, final DataSegment segment)
{
  final String sql = StringUtils.format("DELETE from %s WHERE id = :id", dbTables.getSegmentsTable());
  handle.createStatement(sql)
        .bind("id", segment.getId().toString())
        .execute();
}
/**
 * Removes the ZK announcement node for the given segment under the given server's
 * inventory path, using a guaranteed delete.
 */
private void unannounceSegmentForServer(DruidServer druidServer, DataSegment segment) throws Exception
{
  final String segmentPath = ZKPaths.makePath(
      inventoryPath,
      druidServer.getHost(),
      segment.getId().toString()
  );
  curator.delete().guaranteed().forPath(segmentPath);
}
/**
 * Removes the ZK live-segments announcement node for the given segment under the given
 * server's host path, using a guaranteed delete.
 */
protected void unannounceSegmentForServer(DruidServer druidServer, DataSegment segment, ZkPathsConfig zkPathsConfig)
    throws Exception
{
  curator
      .delete()
      .guaranteed()
      .forPath(
          ZKPaths.makePath(
              zkPathsConfig.getLiveSegmentsPath(),
              druidServer.getHost(),
              segment.getId().toString()
          )
      );
}
/**
 * Marks the row for this segment's id as unused in the segments table.
 *
 * @return the number of rows updated
 */
@Override
public Integer withHandle(Handle handle)
{
  final String sql = StringUtils.format(
      "UPDATE %s SET used = false WHERE id = :id",
      derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable()
  );
  return handle.createStatement(sql)
               .bind("id", segment.getId().toString())
               .execute();
}
}