// NOTE(review): fragment — `table`, `auths`, `location`, and `key` come from the
// enclosing scope that is not visible here; the trailing `if` block is not closed.
String tableId = connector.tableOperations().tableIdMap().get(table);
// Scan the Accumulo metadata table for tablet location ("loc") entries.
Scanner scanner = connector.createScanner("accumulo.metadata", auths);
scanner.fetchColumnFamily(new Text("loc"));
// Row "<tableId><" is the default tablet's metadata row; the range below covers
// every metadata row of this table up to and including that default tablet row.
Key defaultTabletRow = new Key(tableId + '<');
Key start = new Key(tableId);
Key end = defaultTabletRow.followingKey(PartialKey.ROW);
scanner.setRange(new Range(start, end));
Iterator<Entry<Key, Value>> iter = scanner.iterator();
if (iter.hasNext()) {
    // First entry's value is presumably the tablet server location — TODO confirm
    location = Optional.of(iter.next().getValue().toString());
    Text splitCompareKey = new Text();
    key.getRow(splitCompareKey);
    Text scannedCompareKey = new Text();
/** * Creates a BatchWriter with the expected configuration. * * @param table The table to write to */ private BatchWriter createBatchWriter(String table) throws TableNotFoundException { BatchWriterConfig bwc = new BatchWriterConfig(); bwc.setMaxLatency( Long.parseLong(getProperties() .getProperty("accumulo.batchWriterMaxLatency", "30000")), TimeUnit.MILLISECONDS); bwc.setMaxMemory(Long.parseLong( getProperties().getProperty("accumulo.batchWriterSize", "100000"))); final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads"); // Try to saturate the client machine. int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2); if (null != numThreadsValue) { numThreads = Integer.parseInt(numThreadsValue); } System.err.println("Using " + numThreads + " threads to write data"); bwc.setMaxWriteThreads(numThreads); return connector.createBatchWriter(table, bwc); }
private Optional<String> getDefaultTabletLocation(String fulltable) { try { String tableId = connector.tableOperations().tableIdMap().get(fulltable); // Create a scanner over the metadata table, fetching the 'loc' column of the default tablet row Scanner scan = connector.createScanner("accumulo.metadata", connector.securityOperations().getUserAuthorizations(username)); scan.fetchColumnFamily(new Text("loc")); scan.setRange(new Range(tableId + '<')); // scan the entry Optional<String> location = Optional.empty(); for (Entry<Key, Value> entry : scan) { if (location.isPresent()) { throw new PrestoException(FUNCTION_IMPLEMENTATION_ERROR, "Scan for default tablet returned more than one entry"); } location = Optional.of(entry.getValue().toString()); } scan.close(); return location; } catch (Exception e) { // Swallow this exception so the query does not fail due to being unable to locate the tablet server for the default tablet. // This is purely an optimization, but we will want to log the error. LOG.error("Failed to get tablet location, returning dummy location", e); return Optional.empty(); } }
/** * Loads the cardinality for the given Range. Uses a BatchScanner and sums the cardinality for all values that encapsulate the Range. * * @param key Range to get the cardinality for * @return The cardinality of the column, which would be zero if the value does not exist */ @Override public Long load(CacheKey key) throws Exception { LOG.debug("Loading a non-exact range from Accumulo: %s", key); // Get metrics table name and the column family for the scanner String metricsTable = getMetricsTableName(key.getSchema(), key.getTable()); Text columnFamily = new Text(getIndexColumnFamily(key.getFamily().getBytes(UTF_8), key.getQualifier().getBytes(UTF_8)).array()); // Create scanner for querying the range BatchScanner scanner = connector.createBatchScanner(metricsTable, key.auths, 10); scanner.setRanges(connector.tableOperations().splitRangeByTablets(metricsTable, key.range, Integer.MAX_VALUE)); scanner.fetchColumn(columnFamily, CARDINALITY_CQ_AS_TEXT); try { return stream(scanner) .map(Entry::getValue) .map(Value::toString) .mapToLong(Long::parseLong) .sum(); } finally { scanner.close(); } }
/**
 * Writes the given key/value pairs to their target tables, creating any table
 * that does not yet exist.
 *
 * @param connector connector used to create tables and batch writers
 * @param keyValues values to write, grouped by table/key
 * @throws Exception if table creation or writing fails
 */
private static void writeKeyValues(Connector connector, Multimap<BulkIngestKey,Value> keyValues) throws Exception {
    final TableOperations tops = connector.tableOperations();
    final Set<BulkIngestKey> biKeys = keyValues.keySet();
    for (final BulkIngestKey biKey : biKeys) {
        final String tableName = biKey.getTableName().toString();
        if (!tops.exists(tableName))
            tops.create(tableName);

        final BatchWriter writer = connector.createBatchWriter(tableName, new BatchWriterConfig());
        try {
            for (final Value val : keyValues.get(biKey)) {
                final Mutation mutation = new Mutation(biKey.getKey().getRow());
                mutation.put(biKey.getKey().getColumnFamily(), biKey.getKey().getColumnQualifier(),
                        biKey.getKey().getColumnVisibilityParsed(), val);
                writer.addMutation(mutation);
            }
        }
        finally {
            // FIX: the writer was previously leaked if addMutation threw;
            // close it unconditionally so buffered mutations are flushed/released.
            writer.close();
        }
    }
}
// NOTE(review): fragment — `scanner`, `table`, `startkey`, `cfg`, `colFam`,
// `field`, `row`, and `rowData` come from the enclosing scope (not visible
// here); the for-loop is not closed in this view.
scanner = connector.createScanner(table, Authorizations.EMPTY);
// Open-ended range: from startkey to the end of the table.
scanner.setRange(new Range(new Text(startkey), null));
scanner.addScanIterator(cfg);
scanner.fetchColumn(colFam, new Text(field));
final Text cq = new Text();
for (Entry<Key, Value> rowEntry : row.entrySet()) {
    // Reuse `cq` to avoid allocating a new Text per entry.
    rowEntry.getKey().getColumnQualifier(cq);
    rowData.put(cq.toString(), new ByteArrayByteIterator(rowEntry.getValue().get()));
/** Seeds TEST_TABLE with a single empty-column entry, then runs the write tests. */
@Test
public void testRealWrite() throws Exception {
    Connector connector = getConnector();
    connector.tableOperations().create(TEST_TABLE);

    // Insert one entry under row "Key" with empty family/qualifier/value.
    BatchWriter writer = connector.createBatchWriter(TEST_TABLE, new BatchWriterConfig());
    Mutation mutation = new Mutation("Key");
    mutation.put("", "", "");
    writer.addMutation(mutation);
    writer.close();

    handleWriteTests(true);
}
public void writeAndReadData(Connector connector, String tableName) throws AccumuloException, AccumuloSecurityException, TableNotFoundException { // Write some data to the table BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig()); for (String s : rows) { Mutation m = new Mutation(new Text(s)); m.put(EMPTY, EMPTY, EMPTY_VALUE); bw.addMutation(m); } bw.close(); // Write the data to disk, read it back connector.tableOperations().flush(tableName, null, null, true); Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY); int i = 0; for (Entry<Key,Value> entry : scanner) { assertEquals("Data read is not data written", rows[i++], entry.getKey().getRow().toString()); } }
/**
 * Gets a scanner from Accumulo over one row.
 *
 * @param table the table to scan
 * @param row the row to scan
 * @param fields the set of columns to scan, or {@code null} to fetch all columns
 * @return an Accumulo {@link Scanner} bound to the given row and columns
 * @throws TableNotFoundException if the table does not exist
 */
private Scanner getRow(String table, Text row, Set<String> fields) throws TableNotFoundException {
    Scanner rowScanner = connector.createScanner(table, Authorizations.EMPTY);
    rowScanner.setRange(new Range(row));
    if (fields == null) {
        // No column restriction: scan every column of the row.
        return rowScanner;
    }
    for (String field : fields) {
        rowScanner.fetchColumn(colFam, new Text(field));
    }
    return rowScanner;
}
// NOTE(review): fragment — the trailing `};` closes an enclosing anonymous
// class whose declaration is not visible here; `conn`, `tableName`, and `ex`
// come from the surrounding scope.
@Override
public void run() {
    try {
        // Add a single split point "X" to the table.
        TreeSet<Text> splits = new TreeSet<>();
        splits.add(new Text("X"));
        conn.tableOperations().addSplits(tableName, splits);
    } catch (Exception e) {
        // Hand the failure to the observing thread via the shared reference.
        ex.set(e);
    }
} };
/**
 * Writes a single empty-column mutation for {@code row} and then flushes the
 * table so the entry is persisted to disk before returning.
 */
private void writeFlush(Connector conn, String tablename, String row) throws Exception {
    BatchWriter writer = conn.createBatchWriter(tablename, new BatchWriterConfig());
    Mutation mutation = new Mutation(row);
    mutation.put("", "", "");
    writer.addMutation(mutation);
    writer.close();
    // Wait for the flush to complete (last argument) so data is on disk.
    conn.tableOperations().flush(tablename, null, null, true);
}
/**
 * Writes one test mutation (row "1", family "2", qualifier "3", empty value)
 * to {@code tableName} through the given user's connector.
 */
private void writeTestMutation(Connector userC) throws TableNotFoundException, MutationsRejectedException {
    BatchWriter writer = userC.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation mutation = new Mutation("1");
    mutation.put(new Text("2"), new Text("3"), new Value("".getBytes()));
    writer.addMutation(mutation);
    writer.flush();
    writer.close();
}
/**
 * Writes an extra (duplicate) current-location entry for the default tablet of
 * {@code tableNameToModify} directly into {@code table}, simulating an
 * inconsistent metadata state.
 *
 * @param table metadata table to write the fake location entry into
 * @param tableNameToModify table whose default tablet row is targeted
 */
private void addDuplicateLocation(String table, String tableNameToModify) throws TableNotFoundException, MutationsRejectedException {
    String tableIdToModify = getConnector().tableOperations().tableIdMap().get(tableNameToModify);
    // KeyExtent with null end row / prev row addresses the default tablet row.
    Mutation m = new Mutation(new KeyExtent(tableIdToModify, null, null).getMetadataEntry());
    m.put(MetadataSchema.TabletsSection.CurrentLocationColumnFamily.NAME, new Text("1234567"), new Value("fake:9005".getBytes(UTF_8)));
    // FIX: pass an explicit default config instead of null, consistent with the
    // rest of this file and safe on client versions that reject a null config.
    BatchWriter bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
    bw.addMutation(m);
    bw.close();
}
/**
 * Reads the first and last row-id metrics for the given table from its
 * metrics table.
 *
 * @param connector Accumulo connector
 * @param table table whose metrics table is consulted
 * @param auths authorizations for the scan
 * @return pair of (first row id, last row id); either side may be {@code null}
 *         when the corresponding metric entry is absent
 * @throws TableNotFoundException if the metrics table does not exist
 */
public static Pair<byte[], byte[]> getMinMaxRowIds(Connector connector, AccumuloTable table, Authorizations auths)
        throws TableNotFoundException
{
    Scanner scanner = connector.createScanner(table.getMetricsTableName(), auths);
    try {
        scanner.setRange(new Range(new Text(Indexer.METRICS_TABLE_ROW_ID.array())));
        Text family = new Text(Indexer.METRICS_TABLE_ROWS_CF.array());
        Text firstRowQualifier = new Text(Indexer.METRICS_TABLE_FIRST_ROW_CQ.array());
        Text lastRowQualifier = new Text(Indexer.METRICS_TABLE_LAST_ROW_CQ.array());
        scanner.fetchColumn(family, firstRowQualifier);
        scanner.fetchColumn(family, lastRowQualifier);

        byte[] firstRow = null;
        byte[] lastRow = null;
        for (Entry<Key, Value> entry : scanner) {
            if (entry.getKey().compareColumnQualifier(firstRowQualifier) == 0) {
                firstRow = entry.getValue().get();
            }
            if (entry.getKey().compareColumnQualifier(lastRowQualifier) == 0) {
                lastRow = entry.getValue().get();
            }
        }
        return Pair.of(firstRow, lastRow);
    }
    finally {
        // FIX: previously closed only on the success path; close unconditionally.
        scanner.close();
    }
}
/**
 * Seeds {@code tableName} with a single row ("row1") carrying three cells in
 * family "cf1": one labeled BASE, one labeled DEFLABEL, and one unlabeled.
 */
private void insertDefaultData(Connector c, String tableName) throws Exception {
    BatchWriter writer = c.createBatchWriter(tableName, new BatchWriterConfig());
    Mutation row1 = new Mutation(new Text("row1"));
    mput(row1, "cf1", "cq1", "BASE", "v1");
    mput(row1, "cf1", "cq2", "DEFLABEL", "v2");
    mput(row1, "cf1", "cq3", "", "v3");
    writer.addMutation(row1);
    writer.close();
}
/**
 * Creates a record writer configured from the job: log level, simulation mode,
 * table auto-creation, default table name, and (unless simulating) a
 * multi-table batch writer connected with the job's credentials.
 *
 * @param job job configuration to read settings from
 */
protected AccumuloRecordWriter(JobConf job) throws AccumuloException, AccumuloSecurityException, IOException {
    Level l = getLogLevel(job);
    if (l != null)
        // FIX: reuse the already-computed level instead of calling
        // getLogLevel(job) a second time.
        log.setLevel(l);
    this.simulate = getSimulationMode(job);
    this.createTables = canCreateTables(job);

    if (simulate)
        log.info("Simulating output only. No writes to tables will occur");

    this.bws = new HashMap<>();

    String tname = getDefaultTableName(job);
    this.defaultTableName = (tname == null) ? null : new Text(tname);

    if (!simulate) {
        this.conn = getInstance(job).getConnector(getPrincipal(job), getAuthenticationToken(job));
        mtbw = conn.createMultiTableBatchWriter(getBatchWriterOptions(job));
    }
}
/**
 * Returns {@code true} when no tablet of {@code tablename} has a current
 * location recorded in the metadata table, i.e. the table is fully offline.
 *
 * @param tablename table to check
 * @param connector connector used to scan the metadata table
 * @throws TableNotFoundException if the metadata table cannot be scanned
 */
boolean isOffline(String tablename, Connector connector) throws TableNotFoundException {
    String tableId = connector.tableOperations().tableIdMap().get(tablename);
    Scanner scanner = connector.createScanner(MetadataTable.NAME, Authorizations.EMPTY);
    try {
        // All metadata rows for this table sort between "<id>;" and "<id><"
        // (the default tablet row).
        scanner.setRange(new Range(new Text(tableId + ";"), new Text(tableId + "<")));
        scanner.fetchColumnFamily(TabletsSection.CurrentLocationColumnFamily.NAME);
        return Iterators.size(scanner.iterator()) == 0;
    }
    finally {
        // FIX: the scanner was never closed; release scan resources.
        scanner.close();
    }
}
// NOTE(review): fragment — the submitted lambda is not closed in this view;
// `tasks`, `executor`, `connector`, `indexTable`, `auths`, and
// `constraintEntry` come from the enclosing scope.
tasks.add(executor.submit(() -> {
    // Batch-scan the secondary index over every range of this constraint.
    BatchScanner scan = connector.createBatchScanner(indexTable, auths, 10);
    scan.setRanges(constraintEntry.getValue());
    scan.fetchColumnFamily(new Text(Indexer.getIndexColumnFamily(constraintEntry.getKey().getFamily().getBytes(), constraintEntry.getKey().getQualifier().getBytes()).array()));

    // Reused per entry to avoid allocating a Text in the loop.
    Text tmpQualifier = new Text();
    Set<Range> columnRanges = new HashSet<>();
    for (Entry<Key, Value> entry : scan) {
        entry.getKey().getColumnQualifier(tmpQualifier);
// NOTE(review): fragment — `anyKey`, `keys`, `metricsTable`, `rangeToKey`,
// `rangeValues`, and `entry` come from surrounding code not visible here, and
// the try block is not closed in this view.
Text columnFamily = new Text(getIndexColumnFamily(anyKey.getFamily().getBytes(UTF_8), anyKey.getQualifier().getBytes(UTF_8)).array());
BatchScanner scanner = connector.createBatchScanner(metricsTable, anyKey.getAuths(), 10);
try {
    scanner.setRanges(stream(keys).map(CacheKey::getRange).collect(Collectors.toList()));
    // Map each scanned row back to its cache key and record its cardinality.
    rangeValues.put(rangeToKey.get(Range.exact(entry.getKey().getRow())), parseLong(entry.getValue().toString()));
/** Copies the fixture entries into a fresh table and runs the CIFTester mapper over it. */
@Test
public void test() throws Exception {
    conn.tableOperations().create(tableName);

    // Write every fixture entry, preserving visibility and timestamp.
    BatchWriter writer = conn.createBatchWriter(tableName, new BatchWriterConfig());
    for (Entry<Key,Value> fixture : data) {
        Key fixtureKey = fixture.getKey();
        Mutation mutation = new Mutation(fixtureKey.getRow());
        mutation.put(fixtureKey.getColumnFamily(), fixtureKey.getColumnQualifier(),
                new ColumnVisibility(fixtureKey.getColumnVisibility()),
                fixtureKey.getTimestamp(), fixture.getValue());
        writer.addMutation(mutation);
    }
    writer.close();

    assertEquals(0, CIFTester.main(tableName, CIFTester.TestMapper.class.getName()));
    assertEquals(1, assertionErrors.get(tableName).size());
}