/**
 * Converts the stripe list selected by metastore-side predicate pushdown (PPD)
 * into ORC splits, merging runs of consecutive stripe indexes into a single
 * split via {@code generateOrUpdateSplit}.
 *
 * @param ppdResult stripe offsets/lengths/indexes that survived PPD
 * @return one split per contiguous run of selected stripes
 * @throws IOException if split creation fails
 */
private List<OrcSplit> generateSplitsFromPpd(SplitInfos ppdResult) throws IOException {
  OffsetAndLength current = new OffsetAndLength();
  List<OrcSplit> splits = new ArrayList<>(ppdResult.getInfosCount());
  int lastIdx = -1;
  for (Metastore.SplitInfo si : ppdResult.getInfosList()) {
    int index = si.getIndex();
    // A gap in stripe indexes means the pending merged range cannot be
    // extended further; flush it as its own split (offset == -1 marks "no
    // pending range").
    if (lastIdx >= 0 && lastIdx + 1 != index && current.offset != -1) {
      // Create split for the previous unfinished stripe.
      splits.add(createSplit(current.offset, current.length, orcTail));
      current.offset = -1;
    }
    lastIdx = index;
    String debugStr = null;
    if (LOG.isDebugEnabled()) {
      debugStr = current.toString(); // capture pre-update state for the log line below
    }
    current = generateOrUpdateSplit(splits, current, si.getOffset(), si.getLength(), null);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Updated split from {" + index + ": " + si.getOffset() + ", "
          + si.getLength() + "} and "+ debugStr + " to " + current);
    }
  }
  // Flush whatever range is still pending after the last stripe.
  generateLastSplit(splits, current, null);
  return splits;
}
/**
 * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
 *
 * Inserts a new default-valued SplitInfo builder at {@code index} in the
 * repeated "infos" field and returns it for in-place population.
 * NOTE(review): looks like protoc-generated code — avoid hand-editing.
 */
public org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder addInfosBuilder(
    int index) {
  return getInfosFieldBuilder().addBuilder(
      index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance());
}
/**
/**
 * Serializes this SplitInfo to {@code output} in protobuf wire format.
 * Each optional field is written only when its presence bit in
 * {@code bitField0_} is set.
 * NOTE(review): looks like protoc-generated code — avoid hand-editing.
 */
public void writeTo(com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  getSerializedSize(); // forces size memoization before writing
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    output.writeInt64(1, offset_); // field 1: offset
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    output.writeInt64(2, length_); // field 2: length
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    output.writeInt32(3, index_); // field 3: index
  }
  getUnknownFields().writeTo(output);
}
/**
 * Builds a SplitInfo from this builder WITHOUT verifying that required fields
 * are set. Copies offset/length/index and translates the builder's presence
 * bits into the message's {@code bitField0_}.
 * NOTE(review): looks like protoc-generated code — avoid hand-editing.
 */
public org.apache.hadoop.hive.metastore.Metastore.SplitInfo buildPartial() {
  org.apache.hadoop.hive.metastore.Metastore.SplitInfo result =
      new org.apache.hadoop.hive.metastore.Metastore.SplitInfo(this);
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;
  if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
    to_bitField0_ |= 0x00000001; // offset present
  }
  result.offset_ = offset_;
  if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
    to_bitField0_ |= 0x00000002; // length present
  }
  result.length_ = length_;
  if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
    to_bitField0_ |= 0x00000004; // index present
  }
  result.index_ = index_;
  result.bitField0_ = to_bitField0_;
  onBuilt();
  return result;
}
/**
 * Returns the serialized byte size of this message, caching the result in
 * {@code memoizedSerializedSize} (-1 means "not yet computed"). Only fields
 * whose presence bit is set contribute to the size.
 * NOTE(review): looks like protoc-generated code — avoid hand-editing.
 */
public int getSerializedSize() {
  int size = memoizedSerializedSize;
  if (size != -1) return size; // already computed
  size = 0;
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    size += com.google.protobuf.CodedOutputStream
        .computeInt64Size(1, offset_);
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    size += com.google.protobuf.CodedOutputStream
        .computeInt64Size(2, length_);
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    size += com.google.protobuf.CodedOutputStream
        .computeInt32Size(3, index_);
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSerializedSize = size;
  return size;
}
/**
 * Returns true when all required fields (offset, length, index) are present.
 * The answer is memoized in {@code memoizedIsInitialized}
 * (-1 = unknown, 0 = false, 1 = true).
 * NOTE(review): looks like protoc-generated code — avoid hand-editing.
 */
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized != -1) return isInitialized == 1; // cached answer
  if (!hasOffset()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasLength()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasIndex()) {
    memoizedIsInitialized = 0;
    return false;
  }
  memoizedIsInitialized = 1;
  return true;
}
/** Returns a fresh, empty builder for this message type. */
public Builder newBuilderForType() { return newBuilder(); }

// NOTE(review): the following definition is truncated in this chunk — its body
// continues beyond the visible source; left exactly as found.
public static Builder newBuilder(org.apache.hadoop.hive.metastore.Metastore.SplitInfo prototype) {
/** Returns a builder pre-populated with this message's fields. */
public Builder toBuilder() { return newBuilder(this); }
/** Returns the shared default (all-fields-unset) SplitInfo instance. */
public org.apache.hadoop.hive.metastore.Metastore.SplitInfo getDefaultInstanceForType() {
  return org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance();
}
/**
 * Returns a new builder initialized by merging in {@code prototype}'s fields.
 * NOTE(review): looks like protoc-generated code — avoid hand-editing.
 */
public static Builder newBuilder(org.apache.hadoop.hive.metastore.Metastore.SplitInfo prototype) {
  return newBuilder().mergeFrom(prototype);
}
/** Returns a builder pre-populated with this message's fields. */
public Builder toBuilder() { return newBuilder(this); }
/**
 * Serializes this SplitInfo to {@code output} in protobuf wire format.
 * Each optional field is written only when its presence bit in
 * {@code bitField0_} is set.
 * NOTE(review): looks like protoc-generated code — avoid hand-editing.
 */
public void writeTo(com.google.protobuf.CodedOutputStream output)
    throws java.io.IOException {
  getSerializedSize(); // forces size memoization before writing
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    output.writeInt64(1, offset_); // field 1: offset
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    output.writeInt64(2, length_); // field 2: length
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    output.writeInt32(3, index_); // field 3: index
  }
  getUnknownFields().writeTo(output);
}
/**
 * Merges the set fields of {@code other} into this builder; fields unset in
 * {@code other} are left untouched. No-op when {@code other} is the default
 * instance.
 * NOTE(review): looks like protoc-generated code — avoid hand-editing.
 */
public Builder mergeFrom(org.apache.hadoop.hive.metastore.Metastore.SplitInfo other) {
  if (other == org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance()) return this;
  if (other.hasOffset()) {
    setOffset(other.getOffset());
  }
  if (other.hasLength()) {
    setLength(other.getLength());
  }
  if (other.hasIndex()) {
    setIndex(other.getIndex());
  }
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
/**
 * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
 *
 * Inserts a new default-valued SplitInfo builder at {@code index} in the
 * repeated "infos" field and returns it for in-place population.
 * NOTE(review): looks like protoc-generated code — avoid hand-editing.
 */
public org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder addInfosBuilder(
    int index) {
  return getInfosFieldBuilder().addBuilder(
      index, org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance());
}
/**
/**
 * <code>repeated .org.apache.hadoop.hive.metastore.SplitInfo infos = 1;</code>
 *
 * Appends a new default-valued SplitInfo builder to the repeated "infos"
 * field and returns it for in-place population.
 * NOTE(review): looks like protoc-generated code — avoid hand-editing.
 */
public org.apache.hadoop.hive.metastore.Metastore.SplitInfo.Builder addInfosBuilder() {
  return getInfosFieldBuilder().addBuilder(
      org.apache.hadoop.hive.metastore.Metastore.SplitInfo.getDefaultInstance());
}
/**
@Override public SplitInfos applySargToMetadata( SearchArgument sarg, ByteBuffer fileMetadata) throws IOException { // TODO: ideally we should store shortened representation of only the necessary fields // in HBase; it will probably require custom SARG application code. OrcTail orcTail = ReaderImpl.extractFileTail(fileMetadata); OrcProto.Footer footer = orcTail.getFooter(); int stripeCount = footer.getStripesCount(); boolean[] result = OrcInputFormat.pickStripesViaTranslatedSarg( sarg, orcTail.getWriterVersion(), footer.getTypesList(), orcTail.getStripeStatistics(), stripeCount); // For ORC case, send the boundaries of the stripes so we don't have to send the footer. SplitInfos.Builder sb = SplitInfos.newBuilder(); List<StripeInformation> stripes = orcTail.getStripes(); boolean isEliminated = true; for (int i = 0; i < result.length; ++i) { if (result != null && !result[i]) continue; isEliminated = false; StripeInformation si = stripes.get(i); if (LOG.isDebugEnabled()) { LOG.debug("PPD is adding a split " + i + ": " + si.getOffset() + ", " + si.getLength()); } sb.addInfos(SplitInfo.newBuilder().setIndex(i) .setOffset(si.getOffset()).setLength(si.getLength())); } return isEliminated ? null : sb.build(); }
@Override public SplitInfos applySargToMetadata( SearchArgument sarg, ByteBuffer fileMetadata) throws IOException { // TODO: ideally we should store shortened representation of only the necessary fields // in HBase; it will probably require custom SARG application code. OrcTail orcTail = ReaderImpl.extractFileTail(fileMetadata); OrcProto.Footer footer = orcTail.getFooter(); int stripeCount = footer.getStripesCount(); boolean[] result = OrcInputFormat.pickStripesViaTranslatedSarg( sarg, orcTail.getWriterVersion(), footer.getTypesList(), orcTail.getStripeStatistics(), stripeCount); // For ORC case, send the boundaries of the stripes so we don't have to send the footer. SplitInfos.Builder sb = SplitInfos.newBuilder(); List<StripeInformation> stripes = orcTail.getStripes(); boolean isEliminated = true; for (int i = 0; i < result.length; ++i) { if (result != null && !result[i]) continue; isEliminated = false; StripeInformation si = stripes.get(i); if (LOG.isDebugEnabled()) { LOG.debug("PPD is adding a split " + i + ": " + si.getOffset() + ", " + si.getLength()); } sb.addInfos(SplitInfo.newBuilder().setIndex(i) .setOffset(si.getOffset()).setLength(si.getLength())); } return isEliminated ? null : sb.build(); }
private List<OrcSplit> generateSplitsFromPpd(SplitInfos ppdResult) throws IOException { OffsetAndLength current = new OffsetAndLength(); List<OrcSplit> splits = new ArrayList<>(ppdResult.getInfosCount()); int lastIdx = -1; for (Metastore.SplitInfo si : ppdResult.getInfosList()) { int index = si.getIndex(); if (lastIdx >= 0 && lastIdx + 1 != index && current.offset != -1) { // Create split for the previous unfinished stripe. splits.add(createSplit(current.offset, current.length, orcTail)); current.offset = -1; } lastIdx = index; String debugStr = null; if (LOG.isDebugEnabled()) { debugStr = current.toString(); } current = generateOrUpdateSplit(splits, current, si.getOffset(), si.getLength(), null); if (LOG.isDebugEnabled()) { LOG.debug("Updated split from {" + index + ": " + si.getOffset() + ", " + si.getLength() + "} and "+ debugStr + " to " + current); } } generateLastSplit(splits, current, null); return splits; }
/**
 * Builds a SplitInfo from this builder WITHOUT verifying that required fields
 * are set. Copies offset/length/index and translates the builder's presence
 * bits into the message's {@code bitField0_}.
 * NOTE(review): looks like protoc-generated code — avoid hand-editing.
 */
public org.apache.hadoop.hive.metastore.Metastore.SplitInfo buildPartial() {
  org.apache.hadoop.hive.metastore.Metastore.SplitInfo result =
      new org.apache.hadoop.hive.metastore.Metastore.SplitInfo(this);
  int from_bitField0_ = bitField0_;
  int to_bitField0_ = 0;
  if (((from_bitField0_ & 0x00000001) == 0x00000001)) {
    to_bitField0_ |= 0x00000001; // offset present
  }
  result.offset_ = offset_;
  if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
    to_bitField0_ |= 0x00000002; // length present
  }
  result.length_ = length_;
  if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
    to_bitField0_ |= 0x00000004; // index present
  }
  result.index_ = index_;
  result.bitField0_ = to_bitField0_;
  onBuilt();
  return result;
}
/**
 * Returns the serialized byte size of this message, caching the result in
 * {@code memoizedSerializedSize} (-1 means "not yet computed"). Only fields
 * whose presence bit is set contribute to the size.
 * NOTE(review): looks like protoc-generated code — avoid hand-editing.
 */
public int getSerializedSize() {
  int size = memoizedSerializedSize;
  if (size != -1) return size; // already computed
  size = 0;
  if (((bitField0_ & 0x00000001) == 0x00000001)) {
    size += com.google.protobuf.CodedOutputStream
        .computeInt64Size(1, offset_);
  }
  if (((bitField0_ & 0x00000002) == 0x00000002)) {
    size += com.google.protobuf.CodedOutputStream
        .computeInt64Size(2, length_);
  }
  if (((bitField0_ & 0x00000004) == 0x00000004)) {
    size += com.google.protobuf.CodedOutputStream
        .computeInt32Size(3, index_);
  }
  size += getUnknownFields().getSerializedSize();
  memoizedSerializedSize = size;
  return size;
}
/**
 * Returns true when all required fields (offset, length, index) are present.
 * The answer is memoized in {@code memoizedIsInitialized}
 * (-1 = unknown, 0 = false, 1 = true).
 * NOTE(review): looks like protoc-generated code — avoid hand-editing.
 */
public final boolean isInitialized() {
  byte isInitialized = memoizedIsInitialized;
  if (isInitialized != -1) return isInitialized == 1; // cached answer
  if (!hasOffset()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasLength()) {
    memoizedIsInitialized = 0;
    return false;
  }
  if (!hasIndex()) {
    memoizedIsInitialized = 0;
    return false;
  }
  memoizedIsInitialized = 1;
  return true;
}