@Override
public int execute(final String fullCommand, final CommandLine cl, final Shell shellState)
    throws AccumuloException, AccumuloSecurityException, TableNotFoundException, IOException,
    ConstraintViolationException {
  shellState.checkTableState();

  final Mutation m = new Mutation(new Text(cl.getArgs()[0].getBytes(Shell.CHARSET)));
  final Text colf = new Text(cl.getArgs()[1].getBytes(Shell.CHARSET));
  final Text colq = new Text(cl.getArgs()[2].getBytes(Shell.CHARSET));

  if (cl.hasOption(deleteOptAuths.getOpt())) {
    final ColumnVisibility le = new ColumnVisibility(cl.getOptionValue(deleteOptAuths.getOpt()));
    if (cl.hasOption(timestampOpt.getOpt())) {
      m.putDelete(colf, colq, le, Long.parseLong(cl.getOptionValue(timestampOpt.getOpt())));
    } else {
      m.putDelete(colf, colq, le);
    }
  } else if (cl.hasOption(timestampOpt.getOpt())) {
    m.putDelete(colf, colq, Long.parseLong(cl.getOptionValue(timestampOpt.getOpt())));
  } else {
    m.putDelete(colf, colq);
  }

  final BatchWriter bw = shellState.getAccumuloClient().createBatchWriter(
      shellState.getTableName(),
      new BatchWriterConfig().setMaxMemory(Math.max(m.estimatedMemoryUsed(), 1024))
          .setMaxWriteThreads(1).setTimeout(getTimeout(cl), TimeUnit.MILLISECONDS));
  bw.addMutation(m);
  bw.close();
  return 0;
}
/**
 * Creates a BatchWriter with the expected configuration.
 *
 * @param table The table to write to
 */
private BatchWriter createBatchWriter(String table) throws TableNotFoundException {
  BatchWriterConfig bwc = new BatchWriterConfig();
  bwc.setMaxLatency(
      Long.parseLong(getProperties().getProperty("accumulo.batchWriterMaxLatency", "30000")),
      TimeUnit.MILLISECONDS);
  bwc.setMaxMemory(
      Long.parseLong(getProperties().getProperty("accumulo.batchWriterSize", "100000")));

  final String numThreadsValue = getProperties().getProperty("accumulo.batchWriterThreads");
  // Try to saturate the client machine.
  int numThreads = Math.max(1, Runtime.getRuntime().availableProcessors() / 2);
  if (null != numThreadsValue) {
    numThreads = Integer.parseInt(numThreadsValue);
  }
  System.err.println("Using " + numThreads + " threads to write data");
  bwc.setMaxWriteThreads(numThreads);

  return connector.createBatchWriter(table, bwc);
}
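For reference, the three property keys read above could be supplied like this. The keys come from the snippet itself; the values and the helper name are illustrative assumptions, not part of the source:

import java.util.Properties;

// Illustrative only: keys are the ones read by createBatchWriter() above;
// values are assumptions for a mid-sized client.
static Properties tuningProperties() {
  Properties props = new Properties();
  props.setProperty("accumulo.batchWriterMaxLatency", "10000"); // flush at least every 10 s
  props.setProperty("accumulo.batchWriterSize", "50000000");    // buffer roughly 50 MB
  props.setProperty("accumulo.batchWriterThreads", "4");        // overrides the CPU-based default
  return props;
}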
/**
 * Merge this BatchWriterConfig with another. If a value is set in both configs, preference is
 * given to this config.
 *
 * @param other
 *          Another BatchWriterConfig
 * @return Merged BatchWriterConfig
 * @since 2.0.0
 */
public BatchWriterConfig merge(BatchWriterConfig other) {
  BatchWriterConfig result = new BatchWriterConfig();
  result.maxMemory = merge(this.maxMemory, other.maxMemory);
  result.maxLatency = merge(this.maxLatency, other.maxLatency);
  result.timeout = merge(this.timeout, other.timeout);
  result.maxWriteThreads = merge(this.maxWriteThreads, other.maxWriteThreads);
  if (this.isDurabilitySet) {
    result.durability = this.durability;
  } else if (other.isDurabilitySet) {
    result.durability = other.durability;
  }
  return result;
}
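A minimal sketch of the merge semantics, using the fluent setters seen elsewhere in these results; the sizes and latency are made-up values:

import java.util.concurrent.TimeUnit;
import org.apache.accumulo.core.client.BatchWriterConfig;

// Fields explicitly set on the receiver win; unset fields fall back to the argument.
BatchWriterConfig tableConf = new BatchWriterConfig()
    .setMaxMemory(16 * 1024 * 1024);          // set on both: receiver wins
BatchWriterConfig siteConf = new BatchWriterConfig()
    .setMaxMemory(64 * 1024 * 1024)
    .setMaxLatency(2, TimeUnit.MINUTES);      // set only on the argument: carried over

BatchWriterConfig merged = tableConf.merge(siteConf);
// merged.getMaxMemory() == 16 * 1024 * 1024          (from tableConf, the receiver)
// merged.getMaxLatency(TimeUnit.MINUTES) == 2        (from siteConf)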
private void resetWriter() {
  BatchWriter writer = null;
  try {
    writer = accumuloClient.createBatchWriter(tableName,
        new BatchWriterConfig().setMaxLatency(BATCH_WRITER_MAX_LATENCY, TimeUnit.SECONDS));
  } catch (Exception ex) {
    log.warn("Unable to create a batch writer, will retry. Set log level to"
        + " DEBUG to see stacktrace. cause: " + ex);
    log.debug("batch writer creation failed with exception.", ex);
  } finally {
    /* Trade in the new writer (even if null) for the one we need to close. */
    writer = this.writer.getAndSet(writer);
    try {
      if (writer != null) {
        writer.close();
      }
    } catch (Exception ex) {
      log.warn("Problem closing batch writer. Set log level to DEBUG to see stacktrace. cause: "
          + ex);
      log.debug("batch writer close failed with exception", ex);
    }
  }
}
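A hedged sketch of how a caller might drive resetWriter() after a failed write; the flushWithRetry name and the fixed backoff are assumptions, not part of the snippet:

// Hypothetical caller: on rejection, swap in a fresh writer and retry.
private void flushWithRetry(Mutation m) throws InterruptedException {
  while (true) {
    BatchWriter bw = this.writer.get();   // AtomicReference<BatchWriter>, as above
    try {
      if (bw != null) {
        bw.addMutation(m);
        bw.flush();
        return;
      }
    } catch (MutationsRejectedException ex) {
      log.warn("Write failed, replacing batch writer and retrying. cause: " + ex);
    }
    resetWriter();                        // trade the broken writer for a new one
    Thread.sleep(1000);                   // back off before retrying
  }
}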
private void fillTable(Connector conn, String table) throws Exception {
  BatchWriter bw = conn.createBatchWriter(table, new BatchWriterConfig());
  for (String row : ROWS) {
    Mutation m = new Mutation(row);
    m.put("cf", "cq", "value");
    bw.addMutation(m);
  }
  bw.close();
}
VolumeManager fs = master.getFileSystem();
mbw = master.getContext().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
// ...
if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
  String oldName = new Path(key.getColumnQualifier().toString()).getName();
  String newName = fileNameMappings.get(oldName);
  cq = new Text(bulkDir + "/" + newName);
} else {
  cq = key.getColumnQualifier();
}
// ...
m = new Mutation(metadataRow);
TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m,
    new Value(absolutePath.getBytes(UTF_8)));
// ...
if (!currentRow.equals(metadataRow)) {
  mbw.addMutation(m);
  // ...
}
m.put(key.getColumnFamily(), cq, val);
// ...
mbw.addMutation(m);
break; // it's the last column in the last row
// ...
mbw.close();
public void writeAndReadData(Connector connector, String tableName)
    throws AccumuloException, AccumuloSecurityException, TableNotFoundException {
  // Write some data to the table
  BatchWriter bw = connector.createBatchWriter(tableName, new BatchWriterConfig());
  for (String s : rows) {
    Mutation m = new Mutation(new Text(s));
    m.put(EMPTY, EMPTY, EMPTY_VALUE);
    bw.addMutation(m);
  }
  bw.close();

  // Write the data to disk, read it back
  connector.tableOperations().flush(tableName, null, null, true);
  Scanner scanner = connector.createScanner(tableName, Authorizations.EMPTY);
  int i = 0;
  for (Entry<Key,Value> entry : scanner) {
    assertEquals("Data read is not data written", rows[i++],
        entry.getKey().getRow().toString());
  }
}
shellState.checkTableState();

final Mutation m = new Mutation(new Text(cl.getArgs()[0].getBytes(Shell.CHARSET)));
final Text colf = new Text(cl.getArgs()[1].getBytes(Shell.CHARSET));
// ... (colq, val, and the ColumnVisibility le come from the remaining args and options)
if (cl.hasOption(timestampOpt.getOpt()))
  m.put(colf, colq, le, Long.parseLong(cl.getOptionValue(timestampOpt.getOpt())), val);
else
  m.put(colf, colq, le, val);

final BatchWriterConfig cfg = new BatchWriterConfig()
    .setMaxMemory(Math.max(m.estimatedMemoryUsed(), 1024)).setMaxWriteThreads(1)
    .setTimeout(getTimeout(cl), TimeUnit.MILLISECONDS);
if (cl.hasOption(durabilityOption.getOpt())) {
  String userDurability = cl.getOptionValue(durabilityOption.getOpt());
  switch (userDurability) {
    case "sync":
      cfg.setDurability(Durability.SYNC);
      break;
    case "flush":
      cfg.setDurability(Durability.FLUSH);
      break;
    case "none":
      cfg.setDurability(Durability.NONE);
      break;
    case "log":
      cfg.setDurability(Durability.LOG);
      break;
    default:
public static void removeBulkLoadEntries(Connector conn, String tableId, long tid)
    throws Exception {
  Scanner mscanner =
      new IsolatedScanner(conn.createScanner(Constants.METADATA_TABLE_NAME, Constants.NO_AUTHS));
  mscanner.setRange(new KeyExtent(new Text(tableId), null, null).toMetadataRange());
  mscanner.fetchColumnFamily(Constants.METADATA_BULKFILE_COLUMN_FAMILY);
  BatchWriter bw = conn.createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
  for (Entry<Key,Value> entry : mscanner) {
    log.debug("Looking at entry " + entry + " with tid " + tid);
    if (Long.parseLong(entry.getValue().toString()) == tid) {
      log.debug("deleting entry " + entry);
      Mutation m = new Mutation(entry.getKey().getRow());
      m.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
      bw.addMutation(m);
    }
  }
  bw.close();
}
long recordsClosed = 0;
try {
  bw = client.createBatchWriter(MetadataTable.NAME, new BatchWriterConfig());
  bs = client.createBatchScanner(MetadataTable.NAME, Authorizations.EMPTY, 4);
  bs.setRanges(Collections.singleton(Range.prefix(ReplicationSection.getRowPrefix())));
  bs.fetchColumnFamily(ReplicationSection.COLF);

  Text replFileText = new Text();
  for (Entry<Key,Value> entry : bs) {
    Status status;
    // ...
    String replFile = replFileText.toString();
    boolean isClosed = closedWals.contains(replFile);
    // ...
  }
  bw.close();
} catch (MutationsRejectedException e) {
  log.error("Failed to write delete mutations for replication table", e);
}
try (Scanner ms = new ScannerImpl(context, MetadataTable.ID, Authorizations.EMPTY);
    BatchWriter bw = new BatchWriterImpl(context, MetadataTable.ID,
        new BatchWriterConfig().setMaxMemory(1000000)
            .setMaxLatency(120000L, TimeUnit.MILLISECONDS).setMaxWriteThreads(2))) {
  // ...
  Key key = cell.getKey();
  if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) {
    FileRef ref = new FileRef(context.getVolumeManager(), key);
    bw.addMutation(createDeleteMutation(context, tableId, ref.meta().toString()));
  }
  // ...
  bw.addMutation(createDeleteMutation(context, tableId, cell.getValue().toString()));
  // ...
  bw.flush();
  m = new Mutation(key.getRow());
  if (lock != null)
    putLockID(context, lock, m);
  // ...
  if (key.getRow().compareTo(m.getRow(), 0, m.getRow().length) != 0) {
    bw.addMutation(m);
    m = new Mutation(key.getRow());
  }
Text start = range.getPrevEndRow();
if (start == null) {
  start = new Text();
}
// ...
AccumuloClient client = this.master.getContext();
bw = client.createBatchWriter(targetSystemTable, new BatchWriterConfig());
Scanner scanner = client.createScanner(targetSystemTable, Authorizations.EMPTY);
scanner.setRange(scanRange);
// ...
  maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, value.toString());
} else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) {
  bw.addMutation(MetadataTableUtil.createDeleteMutation(master.getContext(),
      range.getTableId(), entry.getValue().toString()));
}
// ...
TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(maxLogicalTime.getBytes()));
if (!m.getUpdates().isEmpty()) {
  bw.addMutation(m);
}
bw.flush();
private void writeFlush(Connector conn, String tablename, String row) throws Exception {
  BatchWriter bw = conn.createBatchWriter(tablename, new BatchWriterConfig());
  Mutation m = new Mutation(row);
  m.put("", "", "");
  bw.addMutation(m);
  bw.close();
  conn.tableOperations().flush(tablename, null, null, true);
}
@Test
public void testMerge() throws Exception {
  Connector conn = getConnector();
  String tableName = getUniqueNames(1)[0];
  conn.tableOperations().create(tableName);

  BatchWriter bw1 = conn.createBatchWriter(tableName, new BatchWriterConfig());
  bw1.addMutation(createTablet("0", "m", null, "/d1", "/d1/file1"));
  bw1.addMutation(createTablet("0", null, "m", "/d2", "/d2/file2"));
  bw1.flush();

  BatchWriter bw2 = conn.createBatchWriter(tableName, new BatchWriterConfig());
  MetadataTableUtil.initializeClone(tableName, "0", "1", conn, bw2);

  bw1.addMutation(deleteTablet("0", "m", null, "/d1", "/d1/file1"));
  Mutation mut = createTablet("0", null, null, "/d2", "/d2/file2");
  mut.put(DataFileColumnFamily.NAME.toString(), "/d1/file1",
      new DataFileValue(10, 200).encodeAsString());
  bw1.addMutation(mut);
  bw1.flush();

  try {
    MetadataTableUtil.checkClone(tableName, "0", "1", conn, bw2);
    fail("checkClone should have detected the deleted tablet");
  } catch (TabletIterator.TabletDeletedException tde) {}
}
Text start = extent.getPrevEndRow();
if (start == null) {
  start = new Text();
}
// ...
BatchWriter bw = client.createBatchWriter(targetSystemTable, new BatchWriterConfig());
try {
  deleteTablets(info, deleteRange, bw, client);
} finally {
  bw.close();
}
// ...
bw = client.createBatchWriter(targetSystemTable, new BatchWriterConfig());
try {
  Mutation m = new Mutation(followingTablet.getMetadataEntry());
  TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.put(m,
      KeyExtent.encodePrevEndRow(extent.getPrevEndRow()));
  ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(m);
  bw.addMutation(m);
  bw.flush();
} finally {
  bw.close();
}
scanner.setRange(new Range(row));
for (Entry<Key,Value> entry : scanner) {
  if (entry.getKey().getColumnFamily().equals(CurrentLocationColumnFamily.NAME)) {
    assigned.put(entry.getKey(), entry.getValue());
  } else if (entry.getKey().getColumnFamily().equals(FutureLocationColumnFamily.NAME)) {
    future.put(entry.getKey(), entry.getValue());
  }
}
// ...
Master.log.info("Removing entry {}", entry);
BatchWriter bw = this.master.getContext().createBatchWriter(table, new BatchWriterConfig());
Mutation m = new Mutation(entry.getKey().getRow());
m.putDelete(entry.getKey().getColumnFamily(), entry.getKey().getColumnQualifier());
bw.addMutation(m);
bw.close();
return;
  throw new IOException("Error connecting to Accumulo for entity index output", e);
}
// ...
final BatchWriterConfig batchWriterConfig = new BatchWriterConfig();
batchWriterConfig.setMaxMemory(RdfCloudTripleStoreConstants.MAX_MEMORY);
batchWriterConfig.setTimeout(RdfCloudTripleStoreConstants.MAX_TIME, TimeUnit.MILLISECONDS);
batchWriterConfig.setMaxWriteThreads(RdfCloudTripleStoreConstants.NUM_THREADS);
writer = conn.createMultiTableBatchWriter(batchWriterConfig);
entityIndexer.setMultiTableBatchWriter(writer);
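For context, a minimal sketch of using a MultiTableBatchWriter like the one created above; the method, row, and table names are hypothetical:

// One shared buffer feeding several tables; close() flushes writers for all of them.
void writeToIndexes(Connector conn, BatchWriterConfig batchWriterConfig) throws Exception {
  MultiTableBatchWriter mtbw = conn.createMultiTableBatchWriter(batchWriterConfig);
  try {
    Mutation m = new Mutation("subject1");          // hypothetical row
    m.put("cf", "cq", "value");
    mtbw.getBatchWriter("rya_spo").addMutation(m);  // hypothetical table names
    mtbw.getBatchWriter("rya_po").addMutation(m);
  } finally {
    mtbw.close();
  }
}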
/**
 * Because there is only one active Master, and thus one active StatusMaker, the only safe time
 * that we can issue the delete for a Status which is closed is immediately after writing it to
 * the replication table.
 * <p>
 * If we try to defer and delete these entries in another thread/process, we will have no
 * assurance that the Status message was propagated to the replication table. It is easiest, in
 * terms of concurrency, to do this all in one step.
 *
 * @param k
 *          The Key to delete
 */
protected void deleteStatusRecord(Key k) {
  log.debug("Deleting {} from metadata table as it's no longer needed", k.toStringNoTruncate());
  if (metadataWriter == null) {
    try {
      metadataWriter = client.createBatchWriter(sourceTableName, new BatchWriterConfig());
    } catch (TableNotFoundException e) {
      throw new RuntimeException("Metadata table doesn't exist");
    }
  }

  try {
    Mutation m = new Mutation(k.getRow());
    m.putDelete(k.getColumnFamily(), k.getColumnQualifier());
    metadataWriter.addMutation(m);
    metadataWriter.flush();
  } catch (MutationsRejectedException e) {
    log.warn("Failed to delete status mutations for metadata table, will retry", e);
  }
}
BatchWriterConfig bwConfig = new BatchWriterConfig();
bwConfig.setMaxMemory(memoryInBytes);
try {
  bw = context.createBatchWriter(tableName, bwConfig);
  // ...
  Mutation copy = new Mutation(orig.getRow());
  for (ColumnUpdate update : orig.getUpdates()) {
    long timestamp;
    // ...
  }
  // ...
  bw.addMutations(mutationsCopy);
} catch (MutationsRejectedException e) {
  log.error("Could not apply mutations to {}", tableName);
} finally {
  if (bw != null) {
    try {
      bw.close();
    } catch (MutationsRejectedException e) {
      log.error("Could not apply mutations to {}", tableName);
    }
  }
}
conn.tableOperations().create(table.getFullTableName());
conn.tableOperations().create(table.getIndexTableName());
conn.tableOperations().create(table.getMetricsTableName());

Indexer indexer = new Indexer(conn, new Authorizations(), table, new BatchWriterConfig());
indexer.index(m1);
indexer.flush();