private static Pair<SubjectType, Subject> setupSubject() { AccessControlContext context = AccessController.getContext(); Subject subject = Subject.getSubject(context); if (subject != null) { if (!subject.getPrincipals(KerberosPrincipal.class).isEmpty()) { LOG.debug("Using caller-provided subject with Kerberos principal {}. " + "Caller is responsible for refreshing credentials.", SecurityUtil.getKerberosPrincipalOrNull(subject)); return new Pair<>(SubjectType.PROVIDED, subject); } LOG.debug("Caller-provided subject {} does not have any Kerberos credentials. " + "Ignoring it.", subject.toString()); } subject = SecurityUtil.getSubjectFromTicketCacheOrNull(); if (subject != null) { return new Pair<>(SubjectType.CREATED, subject); } // If we weren't able to login from a ticket cache when we create the client, // we shouldn't later pick one up. return new Pair<>(SubjectType.NONE, null); }
SecurityContext() { try { Pair<SubjectType, Subject> p = setupSubject(); this.subjectType = p.getFirst(); this.subject = p.getSecond(); this.realUser = System.getProperty("user.name"); this.sslContextWithCert = SSLContext.getInstance("TLS"); sslContextWithCert.init(null, new TrustManager[] { trustManager }, null); this.sslContextTrustAny = SSLContext.getInstance("TLS"); sslContextTrustAny.init(null, new TrustManager[] { new TrustAnyCert() }, null); } catch (Exception e) { throw new RuntimeException(e); } }
/** Removes all partition key ranges through the provided exclusive upper bound. */ public void removePartitionKeyRange(byte[] upperBound) { if (upperBound.length == 0) { rangePartitions.clear(); return; } while (!rangePartitions.isEmpty()) { Pair<byte[], byte[]> range = rangePartitions.getFirst(); if (Bytes.memcmp(upperBound, range.getFirst()) <= 0) { break; } rangePartitions.removeFirst(); if (range.getSecond().length == 0 || Bytes.memcmp(upperBound, range.getSecond()) < 0) { // The upper bound falls in the middle of this range, so add it back // with the restricted bounds. rangePartitions.addFirst(new Pair<>(upperBound, range.getSecond())); break; } } }
/** @return the inclusive lower bound partition key of the next tablet to scan. */
public byte[] nextPartitionKey() {
  Pair<byte[], byte[]> nextRange = rangePartitions.getFirst();
  return nextRange.getFirst();
}
partitionKeyRanges.add(new Pair<>(ByteVec.create(), ByteVec.create())); bucket = hashBuckets.nextSetBit(bucket + 1)) { int bucketUpper = isLast ? bucket + 1 : bucket; ByteVec lower = partitionKeyRange.getFirst().clone(); ByteVec upper = partitionKeyRange.getFirst().clone(); KeyEncoder.encodeHashBucket(bucket, lower); KeyEncoder.encodeHashBucket(bucketUpper, upper); newPartitionKeyRanges.add(new Pair<>(lower, upper)); range.getFirst().append(rangeLowerBound); range.getSecond().append(rangeUpperBound); new ArrayDeque<>(partitionKeyRanges.size()); for (Pair<ByteVec, ByteVec> range : partitionKeyRanges) { byte[] lower = range.getFirst().toArray(); byte[] upper = range.getSecond().toArray(); partitionKeyRangeBytes.add(new Pair<>(lower, upper));
/** * @param partition to prune * @return {@code true} if the partition should be pruned */ boolean shouldPruneForTests(Partition partition) { // The C++ version uses binary search to do this with fewer key comparisons, // but the algorithm isn't easily translatable, so this just uses a linear // search. for (Pair<byte[], byte[]> range : rangePartitions) { // Continue searching the list of ranges if the partition is greater than // the current range. if (range.getSecond().length > 0 && Bytes.memcmp(range.getSecond(), partition.getPartitionKeyStart()) <= 0) { continue; } // If the current range is greater than the partitions, // then the partition should be pruned. return partition.getPartitionKeyEnd().length > 0 && Bytes.memcmp(partition.getPartitionKeyEnd(), range.getFirst()) <= 0; } // The partition is greater than all ranges. return true; }
/**
 * Decodes a partition key into a list of hash buckets and range key
 *
 * @param schema the schema of the table
 * @param partitionSchema the partition schema of the table
 * @param key the encoded partition key
 * @return the decoded buckets and range key
 */
public static Pair<List<Integer>, PartialRow> decodePartitionKey(Schema schema,
                                                                 PartitionSchema partitionSchema,
                                                                 byte[] key) {
  ByteBuffer buf = ByteBuffer.wrap(key).order(ByteOrder.BIG_ENDIAN);

  // One bucket per hash schema component; a truncated key decodes as bucket 0.
  List<HashBucketSchema> hashSchemas = partitionSchema.getHashBucketSchemas();
  List<Integer> buckets = new ArrayList<>(hashSchemas.size());
  for (int i = 0; i < hashSchemas.size(); i++) {
    buckets.add(buf.hasRemaining() ? buf.getInt() : 0);
  }

  // Whatever remains in the buffer is the encoded range key.
  return new Pair<>(buckets, decodeRangePartitionKey(schema, partitionSchema, buf));
}
List<Integer> hashBuckets = lower.getFirst(); if (!hashBuckets.isEmpty()) { sb.append("hash-partition-buckets: "); if (lowerBound.length > 4 * hashBuckets.size()) { sb.append('('); lower.getSecond().appendDebugString(idxs, sb); sb.append(')'); } else { if (upperBound.length > 4 * hashBuckets.size()) { sb.append('('); upper.getSecond().appendDebugString(idxs, sb); sb.append(')'); } else {
@Override protected void map(NullWritable key, RowResult value, Mapper.Context context) throws IOException, InterruptedException { // Add as many heads as we need, then we skip the rest. do { if (headsCache.size() < numUpdatesPerMapper) { value = (RowResult)context.getCurrentValue(); headsCache.add(new Pair<Long, Long>(value.getLong(0), value.getLong(1))); } } while (context.nextKeyValue()); // At this point we've exhausted the scanner and hopefully gathered all the linked list // heads we needed. LOG.info("Processing " + headsCache.size() + " linked lists, out of " + numUpdatesPerMapper); processAllHeads(context); }
/**
 * Parses the master's Ping response protobuf and wraps it in a
 * {@link PingResponse} carrying the RPC's elapsed time; the error slot of the
 * returned pair is always null.
 */
@Override
Pair<PingResponse, Object> deserialize(CallResponse callResponse, String tsUUID)
    throws KuduException {
  final Master.PingResponsePB.Builder respBuilder = Master.PingResponsePB.newBuilder();
  readProtobuf(callResponse.getPBMessage(), respBuilder);
  return new Pair<PingResponse, Object>(
      new PingResponse(deadlineTracker.getElapsedMillis(), tsUUID), null);
}
}
if (decoded.getSecond() instanceof Tserver.TabletServerErrorPB) { Tserver.TabletServerErrorPB error = (Tserver.TabletServerErrorPB) decoded.getSecond(); exception = dispatchTSError(client, connection, rpc, error, traceBuilder); if (exception == null) { } else if (decoded.getSecond() instanceof Master.MasterErrorPB) { Master.MasterErrorPB error = (Master.MasterErrorPB) decoded.getSecond(); exception = dispatchMasterError(client, connection, rpc, error, traceBuilder); if (exception == null) { Preconditions.checkState(!(decoded.getFirst() instanceof Exception)); if (client.isStatisticsEnabled()) { rpc.updateStatistics(client.getStatistics(), decoded.getFirst()); rpc.callback(decoded.getFirst()); } else { if (client.isStatisticsEnabled()) {
Pair<byte[], byte[]> partitionRange = pruner.nextPartitionKeyRange(); List<LocatedTablet> newTablets = table.getTabletsLocations( partitionRange.getFirst().length == 0 ? null : partitionRange.getFirst(), partitionRange.getSecond().length == 0 ? null : partitionRange.getSecond(), timeout); pruner.removePartitionKeyRange(partitionRange.getSecond()); } else { pruner.removePartitionKeyRange(newTablets.get(newTablets.size() - 1)
/**
 * Parses the scanner keep-alive response; the payload is always null and the
 * second pair element is the tablet server error, if the response carried one.
 */
@Override
Pair<Void, Object> deserialize(final CallResponse callResponse, String tsUUID)
    throws KuduException {
  ScannerKeepAliveResponsePB.Builder builder = ScannerKeepAliveResponsePB.newBuilder();
  readProtobuf(callResponse.getPBMessage(), builder);
  ScannerKeepAliveResponsePB resp = builder.build();
  return new Pair<Void, Object>(null, resp.hasError() ? resp.getError() : null);
}
}
/** * Creates a new table with two int columns, c0 and c1. c0 is the primary key. * The table is hash partitioned on c0 into two buckets, and range partitioned * with the provided bounds. */ private KuduTable createTable(List<Pair<Integer, Integer>> bounds) throws KuduException { // Create initial table with single range partition covering the entire key // space, and two hash buckets. ArrayList<ColumnSchema> columns = new ArrayList<>(1); columns.add(new ColumnSchema.ColumnSchemaBuilder("c0", Type.INT32) .nullable(false) .key(true) .build()); columns.add(new ColumnSchema.ColumnSchemaBuilder("c1", Type.INT32) .nullable(false) .build()); Schema schema = new Schema(columns); CreateTableOptions createOptions = new CreateTableOptions().setRangePartitionColumns(ImmutableList.of("c0")) .setNumReplicas(1) .addHashPartitions(ImmutableList.of("c0"), 2); for (Pair<Integer, Integer> bound : bounds) { PartialRow lower = schema.newPartialRow(); PartialRow upper = schema.newPartialRow(); lower.addInt("c0", bound.getFirst()); upper.addInt("c0", bound.getSecond()); createOptions.addRangePartition(lower, upper); } return client.createTable(tableName, schema, createOptions); }
/**
 * Parses a ConnectToMaster response; the second pair element is the embedded
 * error, or null when the response carried none.
 */
private Pair<ConnectToMasterResponsePB, Object> deserializeNewRpc(
    CallResponse callResponse, String tsUUID) {
  final ConnectToMasterResponsePB.Builder respBuilder =
      ConnectToMasterResponsePB.newBuilder();
  readProtobuf(callResponse.getPBMessage(), respBuilder);
  Object error = respBuilder.hasError() ? respBuilder.getError() : null;
  return new Pair<ConnectToMasterResponsePB, Object>(respBuilder.build(), error);
}
/**
 * Parses the GetTableLocations response protobuf; the second pair element is
 * the master error, or null when the response carried none.
 */
@Override
Pair<Master.GetTableLocationsResponsePB, Object> deserialize(
    final CallResponse callResponse, String tsUUID) throws KuduException {
  Master.GetTableLocationsResponsePB.Builder builder =
      Master.GetTableLocationsResponsePB.newBuilder();
  readProtobuf(callResponse.getPBMessage(), builder);
  Master.GetTableLocationsResponsePB resp = builder.build();
  Object error = builder.hasError() ? builder.getError() : null;
  return new Pair<Master.GetTableLocationsResponsePB, Object>(resp, error);
}
/**
 * Parses the DeleteTable response protobuf into a {@link DeleteTableResponse}
 * carrying the RPC's elapsed time; the second pair element is the master
 * error, or null when the response carried none.
 */
@Override
Pair<DeleteTableResponse, Object> deserialize(CallResponse callResponse, String tsUUID)
    throws KuduException {
  final Master.DeleteTableResponsePB.Builder builder =
      Master.DeleteTableResponsePB.newBuilder();
  readProtobuf(callResponse.getPBMessage(), builder);
  DeleteTableResponse response =
      new DeleteTableResponse(deadlineTracker.getElapsedMillis(), tsUUID);
  Object error = builder.hasError() ? builder.getError() : null;
  return new Pair<DeleteTableResponse, Object>(response, error);
}
}
/**
 * Parses the ListTables response protobuf, collecting the table names into a
 * {@link ListTablesResponse} carrying the RPC's elapsed time; the second pair
 * element is the master error, or null when the response carried none.
 */
@Override
Pair<ListTablesResponse, Object> deserialize(CallResponse callResponse, String tsUUID)
    throws KuduException {
  final Master.ListTablesResponsePB.Builder respBuilder =
      Master.ListTablesResponsePB.newBuilder();
  readProtobuf(callResponse.getPBMessage(), respBuilder);
  // Renamed from "serversCount": this is the number of *tables* in the response.
  int tablesCount = respBuilder.getTablesCount();
  List<String> tables = new ArrayList<String>(tablesCount);
  for (Master.ListTablesResponsePB.TableInfo info : respBuilder.getTablesList()) {
    tables.add(info.getName());
  }
  ListTablesResponse response =
      new ListTablesResponse(deadlineTracker.getElapsedMillis(), tsUUID, tables);
  return new Pair<ListTablesResponse, Object>(
      response, respBuilder.hasError() ? respBuilder.getError() : null);
}
}