OffsetRequest.<init>

How to use the kafka.javaapi.OffsetRequest constructor

Best Java code snippets using kafka.javaapi.OffsetRequest.<init> (Showing top 20 results out of 369)

  • Common ways to obtain OffsetRequest

    Map requestInfo; String clientName;
    OffsetRequest o = new kafka.javaapi.OffsetRequest(requestInfo, OffsetRequest.CurrentVersion(), clientName);

    Map map; SimpleConsumer simpleConsumer;
    OffsetRequest o = new kafka.javaapi.OffsetRequest(map, OffsetRequest.CurrentVersion(), simpleConsumer.clientId());
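
The results below all follow one shape: build a Map of TopicAndPartition to PartitionOffsetRequestInfo, construct the request with kafka.api.OffsetRequest.CurrentVersion() and a client id, and hand it to SimpleConsumer.getOffsetsBefore. Here is a minimal self-contained sketch of that pattern against the legacy 0.8.x SimpleConsumer API; the broker address, topic, and client id are illustrative assumptions, not values taken from any snippet below.

import java.util.Collections;
import java.util.Map;

import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;
import kafka.javaapi.OffsetRequest;
import kafka.javaapi.OffsetResponse;
import kafka.javaapi.consumer.SimpleConsumer;

public class OffsetRequestSketch {
  public static void main(String[] args) {
    // Illustrative broker and client settings (assumptions, not from the indexed snippets).
    SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 10000, 64 * 1024, "example-client");
    try {
      TopicAndPartition topicAndPartition = new TopicAndPartition("example-topic", 0);

      // Ask for at most one offset at the special LatestTime (-1) position.
      Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = Collections.singletonMap(
          topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));

      // The constructor this page indexes: (requestInfo, versionId, clientId).
      OffsetRequest request =
          new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());

      OffsetResponse response = consumer.getOffsetsBefore(request);
      if (response.hasError()) {
        throw new RuntimeException("Error fetching offsets: " + response.errorCode("example-topic", 0));
      }
      long[] offsets = response.offsets("example-topic", 0);
      System.out.println("Latest offset: " + (offsets.length > 0 ? offsets[0] : -1));
    } finally {
      consumer.close();
    }
  }
}

Swapping LatestTime() for EarliestTime(), or for a millisecond timestamp, returns the earliest retained offset or the last offset before that time, as several of the snippets below do.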
origin: alibaba/jstorm

public long getOffset(String topic, int partition, long startOffsetTime) {
  SimpleConsumer simpleConsumer = findLeaderConsumer(partition);
  if (simpleConsumer == null) {
    LOG.error("Consumer is null when getting offset from partition: " + partition);
    return -1;
  }
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
  OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), simpleConsumer.clientId());
  long[] offsets = simpleConsumer.getOffsetsBefore(request).offsets(topic, partition);
  if (offsets.length > 0) {
    return offsets[0];
  } else {
    return NO_OFFSET;
  }
}
origin: prestodb/presto

private static long[] findAllOffsets(SimpleConsumer consumer, String topicName, int partitionId)
{
  TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);
  // The API implies that this will always return all of the offsets, so it seems a partition cannot have
  // more than Integer.MAX_VALUE - 1 segments.
  //
  // This also assumes that the lowest value returned will be the first segment available, so if segments
  // have been dropped, this value should not be 0.
  PartitionOffsetRequestInfo partitionOffsetRequestInfo = new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), Integer.MAX_VALUE);
  OffsetRequest offsetRequest = new OffsetRequest(ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo), kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
  OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
  if (offsetResponse.hasError()) {
    short errorCode = offsetResponse.errorCode(topicName, partitionId);
    throw new RuntimeException("could not fetch data from Kafka, error code is '" + errorCode + "'");
  }
  return offsetResponse.offsets(topicName, partitionId);
}
origin: apache/flink

OffsetResponse response;
while (true) {
  kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
    partitionToRequestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
  response = consumer.getOffsetsBefore(request);
origin: apache/incubator-gobblin

private long getOffset(KafkaPartition partition, Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo)
  throws KafkaOffsetRetrievalFailureException {
 SimpleConsumer consumer = this.getSimpleConsumer(partition.getLeader().getHostAndPort());
 for (int i = 0; i < this.fetchOffsetRetries; i++) {
  try {
   OffsetResponse offsetResponse =
     consumer.getOffsetsBefore(new OffsetRequest(offsetRequestInfo, kafka.api.OffsetRequest.CurrentVersion(),
       this.clientName));
   if (offsetResponse.hasError()) {
     throw new RuntimeException("offsetResponse has error: "
      + offsetResponse.errorCode(partition.getTopicName(), partition.getId()));
   }
   return offsetResponse.offsets(partition.getTopicName(), partition.getId())[0];
  } catch (Exception e) {
   log.warn(String.format("Fetching offset for partition %s has failed %d time(s). Reason: %s", partition, i + 1,
     e));
   if (i < this.fetchOffsetRetries - 1) {
    try {
     Thread.sleep((long) ((i + Math.random()) * 1000));
    } catch (InterruptedException e2) {
     log.error("Caught interrupted exception between retries of getting latest offsets. " + e2);
    }
   }
  }
 }
 throw new KafkaOffsetRetrievalFailureException(String.format("Fetching offset for partition %s has failed.",
   partition));
}
origin: apache/incubator-gobblin

private long getOffset(KafkaPartition partition,
  Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetRequestInfo)
  throws KafkaOffsetRetrievalFailureException {
 SimpleConsumer consumer = this.getSimpleConsumer(partition.getLeader().getHostAndPort());
 for (int i = 0; i < this.fetchOffsetRetries; i++) {
  try {
   OffsetResponse offsetResponse = consumer.getOffsetsBefore(new OffsetRequest(offsetRequestInfo,
     kafka.api.OffsetRequest.CurrentVersion(), this.clientName));
   if (offsetResponse.hasError()) {
     throw new RuntimeException(
       "offsetResponse has error: " + offsetResponse.errorCode(partition.getTopicName(), partition.getId()));
   }
   return offsetResponse.offsets(partition.getTopicName(), partition.getId())[0];
  } catch (Exception e) {
   LOG.warn(
     String.format("Fetching offset for partition %s has failed %d time(s). Reason: %s", partition, i + 1, e));
   if (i < this.fetchOffsetRetries - 1) {
    try {
     Thread.sleep((long) ((i + Math.random()) * 1000));
    } catch (InterruptedException e2) {
     LOG.error("Caught interrupted exception between retries of getting latest offsets. " + e2);
    }
   }
  }
 }
 throw new KafkaOffsetRetrievalFailureException(
   String.format("Fetching offset for partition %s has failed.", partition));
}
origin: apache/incubator-pinot

OffsetRequest request = new OffsetRequest(Collections.singletonMap(new TopicAndPartition(_topic, _partition),
  new PartitionOffsetRequestInfo(offsetRequestTime, 1)), kafka.api.OffsetRequest.CurrentVersion(), _clientId);
OffsetResponse offsetResponse;
origin: apache/incubator-druid

private long getOffset(boolean earliest) throws InterruptedException
{
 TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partitionId);
 Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
 requestInfo.put(
   topicAndPartition,
   new PartitionOffsetRequestInfo(
     earliest ? kafka.api.OffsetRequest.EarliestTime() : kafka.api.OffsetRequest.LatestTime(), 1
   )
 );
 OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientId);
 OffsetResponse response;
 try {
  response = consumer.getOffsetsBefore(request);
 }
 catch (Exception e) {
  ensureNotInterrupted(e);
  log.error(e, "caught exception in getOffsetsBefore [%s] - [%s]", topic, partitionId);
  return -1;
 }
 if (response.hasError()) {
  log.error(
    "Error fetching offset data from the broker [%s]. Reason: [%s]", leaderBroker.host(),
    response.errorCode(topic, partitionId)
  );
  return -1;
 }
 long[] offsets = response.offsets(topic, partitionId);
 return earliest ? offsets[0] : offsets[offsets.length - 1];
}
origin: pinterest/secor

private long findLastOffset(TopicPartition topicPartition, SimpleConsumer consumer) {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topicPartition.getTopic(),
      topicPartition.getPartition());
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo =
      new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(
      kafka.api.OffsetRequest.LatestTime(), 1));
  final String clientName = getClientName(topicPartition);
  OffsetRequest request = new OffsetRequest(requestInfo,
                       kafka.api.OffsetRequest.CurrentVersion(),
                       clientName);
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if (response.hasError()) {
    throw new RuntimeException("Error fetching offset data. Reason: " +
        response.errorCode(topicPartition.getTopic(), topicPartition.getPartition()));
  }
  long[] offsets = response.offsets(topicPartition.getTopic(),
      topicPartition.getPartition());
  return offsets[0] - 1;
}
origin: linkedin/camus

protected OffsetResponse getLatestOffsetResponse(SimpleConsumer consumer,
  Map<TopicAndPartition, PartitionOffsetRequestInfo> offsetInfo, JobContext context) {
 for (int i = 0; i < NUM_TRIES_FETCH_FROM_LEADER; i++) {
  try {
   OffsetResponse offsetResponse =
     consumer.getOffsetsBefore(new OffsetRequest(offsetInfo, kafka.api.OffsetRequest.CurrentVersion(), CamusJob
       .getKafkaClientName(context)));
   if (offsetResponse.hasError()) {
     throw new RuntimeException("offsetResponse has error.");
   }
   return offsetResponse;
  } catch (Exception e) {
   log.warn("Fetching offset from leader " + consumer.host() + ":" + consumer.port() + " has failed " + (i + 1)
     + " time(s). Reason: " + e.getMessage() + " " + (NUM_TRIES_FETCH_FROM_LEADER - i - 1) + " retries left.");
   if (i < NUM_TRIES_FETCH_FROM_LEADER - 1) {
    try {
     Thread.sleep((long) (Math.random() * (i + 1) * 1000));
    } catch (InterruptedException e1) {
     log.error("Caught interrupted exception between retries of getting latest offsets. " + e1.getMessage());
    }
   }
  }
 }
 return null;
}
origin: uber/chaperone

 private static long getLatestOffset(SimpleConsumer consumer, TopicAndPartition topicAndPartition) {
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
  kafka.javaapi.OffsetRequest request =
    new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
  OffsetResponse response = consumer.getOffsetsBefore(request);

  if (response.hasError()) {
   logger.warn("Failed to fetch offset for {} due to {}", topicAndPartition,
     response.errorCode(topicAndPartition.topic(), topicAndPartition.partition()));
   return -1;
  }

  long[] offsets = response.offsets(topicAndPartition.topic(), topicAndPartition.partition());
  return offsets[0];
 }
}
origin: rakam-io/rakam

private static long[] findAllOffsets(SimpleConsumer consumer, String topicName, int partitionId) {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topicName, partitionId);
  // The API implies that this will always return all of the offsets, so it seems a partition cannot have
  // more than Integer.MAX_VALUE - 1 segments.
  //
  // This also assumes that the lowest value returned will be the first segment available, so if segments
  // have been dropped, this value should not be 0.
  PartitionOffsetRequestInfo partitionOffsetRequestInfo = new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 10000);
  OffsetRequest offsetRequest = new OffsetRequest(ImmutableMap.of(topicAndPartition, partitionOffsetRequestInfo), kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
  OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
  if (offsetResponse.hasError()) {
    short errorCode = offsetResponse.errorCode(topicName, partitionId);
    LOGGER.warn(format("Offset response has error: %d", errorCode));
    throw new RakamException("could not fetch data from Kafka, error code is '" + errorCode + "'", HttpResponseStatus.INTERNAL_SERVER_ERROR);
  }
  long[] offsets = offsetResponse.offsets(topicName, partitionId);
  return offsets;
}
origin: com.github.hackerwin7/jlib-utils

public static long getMaxOffset(SimpleConsumer consumer, String topic, int partition) throws Exception {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfoMap = new HashMap<>();
  requestInfoMap.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
  kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfoMap, kafka.api.OffsetRequest.CurrentVersion(), "find offset " + System.currentTimeMillis());
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if(response.hasError()) {
    throw new Exception("error fetching data offset, reason = " + response.errorCode(topic, partition));
  }
  long[] offsets = response.offsets(topic, partition);
  return offsets[0];
}
origin: com.github.hackerwin7/jlib-utils

public static long getMinOffset(SimpleConsumer consumer, String topic, int partition) throws Exception {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfoMap = new HashMap<>();
  requestInfoMap.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.EarliestTime(), 1));
  kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfoMap, kafka.api.OffsetRequest.CurrentVersion(), "find offset " + System.currentTimeMillis());
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if(response.hasError()) {
    throw new Exception("error fetching data offset, reason = " + response.errorCode(topic, partition));
  }
  long[] offsets = response.offsets(topic, partition);
  return offsets[0];
}
origin: michal-harish/kafka-hadoop-loader

private long getEarliestOffset() {
  // return kafka.api.OffsetRequest.EarliestTime();
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfoMap = new HashMap<>();
  requestInfoMap.put(topicAndPartition, new PartitionOffsetRequestInfo(EARLIEST_TIME, 1));
  OffsetRequest offsetRequest = new OffsetRequest(requestInfoMap, kafka.api.OffsetRequest.CurrentVersion(), CLIENT_ID);
  if (earliestOffset <= 0) {
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
    earliestOffset = offsetResponse.offsets(topicAndPartition.topic(), topicAndPartition.partition())[0];
  }
  return earliestOffset;
}
origin: wurstmeister/storm-kafka-0.8-plus

public static long getOffset(SimpleConsumer consumer, String topic, int partition, long startOffsetTime) {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<TopicAndPartition, PartitionOffsetRequestInfo>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(startOffsetTime, 1));
  OffsetRequest request = new OffsetRequest(
      requestInfo, kafka.api.OffsetRequest.CurrentVersion(), consumer.clientId());
  long[] offsets = consumer.getOffsetsBefore(request).offsets(topic, partition);
  if (offsets.length > 0) {
    return offsets[0];
  } else {
    return NO_OFFSET;
  }
}
origin: michal-harish/kafka-hadoop-loader

private long getLatestOffset() {
  // return kafka.api.OffsetRequest.LatestTime();
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfoMap = new HashMap<>();
  requestInfoMap.put(topicAndPartition, new PartitionOffsetRequestInfo(LATEST_TIME, 1));
  OffsetRequest offsetRequest = new OffsetRequest(requestInfoMap, kafka.api.OffsetRequest.CurrentVersion(), CLIENT_ID);
  if (latestOffset <= 0) {
    OffsetResponse offsetResponse = consumer.getOffsetsBefore(offsetRequest);
    latestOffset = offsetResponse.offsets(topicAndPartition.topic(), topicAndPartition.partition())[0];
  }
  return latestOffset;
}
origin: apache/eagle

public long getLatestOffset(SimpleConsumer consumer, String topic, int partition, String clientName) {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, kafka.api.PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
  kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if (response.hasError()) {
    throw new RuntimeException("Error fetching data offset from the broker. Reason: " + response.errorCode(topic, partition));
  }
  long[] offsets = response.offsets(topic, partition);
  return offsets[0];
}
origin: apache/eagle

public long getLatestOffset(SimpleConsumer consumer, String topic, int partition, String clientName) {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, kafka.api.PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
  kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if (response.hasError()) {
    throw new RuntimeException("Error fetching data offset from the broker. Reason: " + response.errorCode(topic, partition));
  }
  long[] offsets = response.offsets(topic, partition);
  return offsets[0];
}
origin: org.apache.eagle/eagle-metric-collection

public long getLatestOffset(SimpleConsumer consumer, String topic, int partition, String clientName) {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, kafka.api.PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1));
  kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if (response.hasError()) {
    throw new RuntimeException("Error fetching data offset from the broker. Reason: " + response.errorCode(topic, partition));
  }
  long[] offsets = response.offsets(topic, partition);
  return offsets[0];
}
origin: apache/eagle

private long getLastOffset(SimpleConsumer consumer, String topic, int partition, long whichTime, String clientName) {
  TopicAndPartition topicAndPartition = new TopicAndPartition(topic, partition);
  Map<TopicAndPartition, PartitionOffsetRequestInfo> requestInfo = new HashMap<>();
  requestInfo.put(topicAndPartition, new PartitionOffsetRequestInfo(whichTime, 1));
  OffsetRequest request = new OffsetRequest(requestInfo, kafka.api.OffsetRequest.CurrentVersion(), clientName);
  OffsetResponse response = consumer.getOffsetsBefore(request);
  if (response.hasError()) {
    System.out.println("Error fetching offset data from the broker. Reason: " + response.errorCode(topic, partition));
    return 0;
  }
  long[] offsets = response.offsets(topic, partition);
  return offsets[0];
}

Popular methods of OffsetRequest

  • underlying
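
For reference, underlying exposes the Scala-side kafka.api.OffsetRequest that the Java wrapper delegates to. A minimal sketch, assuming an illustrative topic and client id:

import java.util.Collections;

import kafka.api.PartitionOffsetRequestInfo;
import kafka.common.TopicAndPartition;

public class UnderlyingSketch {
  public static void main(String[] args) {
    // Illustrative values; any topic, partition, and client id behave the same way.
    kafka.javaapi.OffsetRequest request = new kafka.javaapi.OffsetRequest(
        Collections.singletonMap(
            new TopicAndPartition("example-topic", 0),
            new PartitionOffsetRequestInfo(kafka.api.OffsetRequest.LatestTime(), 1)),
        kafka.api.OffsetRequest.CurrentVersion(),
        "example-client");

    // underlying() returns the wrapped Scala request (kafka.api.OffsetRequest),
    // which is what SimpleConsumer ultimately serializes onto the wire.
    kafka.api.OffsetRequest scalaRequest = request.underlying();
    System.out.println(scalaRequest);
  }
}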
