private Collection<Mutation> getMetricsMutations() { ImmutableList.Builder<Mutation> mutationBuilder = ImmutableList.builder(); // Mapping of column value to column to number of row IDs that contain that value for (Entry<MetricsKey, AtomicLong> entry : metrics.entrySet()) { // Row ID: Column value // Family: columnfamily_columnqualifier // Qualifier: CARDINALITY_CQ // Visibility: Inherited from indexed Mutation // Value: Cardinality Mutation mut = new Mutation(entry.getKey().row.array()); mut.put(entry.getKey().family.array(), CARDINALITY_CQ, entry.getKey().visibility, ENCODER.encode(entry.getValue().get())); // Add to our list of mutations mutationBuilder.add(mut); } // If the first row and last row are both not null, // which would really be for a brand new table that has zero rows and no indexed elements... // Talk about your edge cases! if (firstRow != null && lastRow != null) { // Add a some columns to the special metrics table row ID for the first/last row. // Note that if the values on the server side are greater/lesser, // the configured iterator will take care of this at scan/compaction time Mutation firstLastMutation = new Mutation(METRICS_TABLE_ROW_ID.array()); firstLastMutation.put(METRICS_TABLE_ROWS_CF.array(), METRICS_TABLE_FIRST_ROW_CQ.array(), firstRow); firstLastMutation.put(METRICS_TABLE_ROWS_CF.array(), METRICS_TABLE_LAST_ROW_CQ.array(), lastRow); mutationBuilder.add(firstLastMutation); } return mutationBuilder.build(); }
private void addIndexMutation(ByteBuffer row, ByteBuffer family, ColumnVisibility visibility, byte[] qualifier) { // Create the mutation and add it to the batch writer Mutation indexMutation = new Mutation(row.array()); indexMutation.put(family.array(), qualifier, visibility, EMPTY_BYTES); try { indexWriter.addMutation(indexMutation); } catch (MutationsRejectedException e) { throw new PrestoException(UNEXPECTED_ACCUMULO_ERROR, "Index mutation rejected by server", e); } // Increment the cardinality metrics for this value of index // metrics is a mapping of row ID to column family MetricsKey key = new MetricsKey(row, family, visibility); AtomicLong count = metrics.get(key); if (count == null) { count = new AtomicLong(0); metrics.put(key, count); } count.incrementAndGet(); }
private static void moveDeleteEntry(ServerContext context, KeyExtent oldExtent, Entry<Key,Value> entry, String rowID, String prefix) { String filename = rowID.substring(prefix.length()); // add the new entry first log.info("Moving {} marker in {}", filename, RootTable.NAME); Mutation m = new Mutation(MetadataSchema.DeletesSection.getRowPrefix() + filename); m.put(EMPTY_BYTES, EMPTY_BYTES, EMPTY_BYTES); update(context, m, RootTable.EXTENT); // then remove the old entry m = new Mutation(entry.getKey().getRow()); m.putDelete(EMPTY_BYTES, EMPTY_BYTES); update(context, m, oldExtent); }
shellState.checkTableState(); final Mutation m = new Mutation(new Text(cl.getArgs()[0].getBytes(Shell.CHARSET))); final Text colf = new Text(cl.getArgs()[1].getBytes(Shell.CHARSET)); final Text colq = new Text(cl.getArgs()[2].getBytes(Shell.CHARSET)); m.put(colf, colq, le, Long.parseLong(cl.getOptionValue(timestampOpt.getOpt())), val); else m.put(colf, colq, le, val); } else if (cl.hasOption(timestampOpt.getOpt())) m.put(colf, colq, Long.parseLong(cl.getOptionValue(timestampOpt.getOpt())), val); else m.put(colf, colq, val); final BatchWriterConfig cfg = new BatchWriterConfig() .setMaxMemory(Math.max(m.estimatedMemoryUsed(), 1024)).setMaxWriteThreads(1) .setTimeout(getTimeout(cl), TimeUnit.MILLISECONDS); bw.addMutation(m); try { bw.close(); } catch (MutationsRejectedException e) { final ArrayList<String> lines = new ArrayList<>();
BatchWriterConfig bwConfig = new BatchWriterConfig(); bwConfig.setMaxMemory(memoryInBytes); try { Mutation copy = new Mutation(orig.getRow()); for (ColumnUpdate update : orig.getUpdates()) { long timestamp; new ColumnVisibility(update.getColumnVisibility()), timestamp); } else { copy.put(update.getColumnFamily(), update.getColumnQualifier(), new ColumnVisibility(update.getColumnVisibility()), timestamp, update.getValue()); if (bw != null) { try { bw.close(); } catch (MutationsRejectedException e) { log.error("Could not apply mutations to {}", tableName);
Mutation m = new Mutation(cloneTablet.getExtent().getMetadataEntry()); bw.addMutation(m); bw.addMutation(createCloneMutation(srcTableId, tableId, st.getKeyValues())); Mutation m = new Mutation(cloneTablet.getExtent().getMetadataEntry()); m.put(ClonedColumnFamily.NAME, new Text(""), new Value("OK".getBytes(UTF_8))); bw.addMutation(m);
VolumeManager fs = master.getFileSystem(); mbw = master.getContext().createBatchWriter(MetadataTable.NAME, new BatchWriterConfig()); m = new Mutation(metadataRow); TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(absolutePath.getBytes(UTF_8))); mbw.addMutation(m); m = new Mutation(metadataRow); TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(absolutePath.getBytes(UTF_8))); m.put(key.getColumnFamily(), cq, val); mbw.addMutation(m); break; // its the last column in the last row mbw.close();
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
  // Look up (or lazily create) the batch writer for the target table.
  BatchWriter writer = null;
  try {
    writer = getWriter(table);
  } catch (TableNotFoundException e) {
    System.err.println("Error opening batch writer to Accumulo table " + table);
    e.printStackTrace();
    return Status.ERROR;
  }

  // Fold every field of the record into a single mutation keyed by the row.
  Mutation mutation = new Mutation(key.getBytes(UTF_8));
  values.forEach((field, value) ->
      mutation.put(colFamBytes, field.getBytes(UTF_8), value.toArray()));

  try {
    writer.addMutation(mutation);
  } catch (MutationsRejectedException e) {
    System.err.println("Error performing update.");
    e.printStackTrace();
    return Status.ERROR;
  }

  // The batch writer flushes asynchronously, so report BATCHED rather than OK.
  return Status.BATCHED_OK;
}
@BeforeClass
public void setupClass()
{
    // Table schema: a row-ID column plus three indexed columns (one array-typed)
    AccumuloColumnHandle idColumn = new AccumuloColumnHandle("id", Optional.empty(), Optional.empty(), VARCHAR, 0, "", false);
    AccumuloColumnHandle ageColumn = new AccumuloColumnHandle("age", Optional.of("cf"), Optional.of("age"), BIGINT, 1, "", true);
    AccumuloColumnHandle firstNameColumn = new AccumuloColumnHandle("firstname", Optional.of("cf"), Optional.of("firstname"), VARCHAR, 2, "", true);
    AccumuloColumnHandle arrayColumn = new AccumuloColumnHandle("arr", Optional.of("cf"), Optional.of("arr"), new ArrayType(VARCHAR), 3, "", true);
    table = new AccumuloTable("default", "index_test_table", ImmutableList.of(idColumn, ageColumn, firstNameColumn, arrayColumn), "id", true, LexicoderRowSerializer.class.getCanonicalName(), null);

    // Fixture mutations without visibility labels
    m1 = new Mutation(M1_ROWID);
    m1.put(CF, AGE, AGE_VALUE);
    m1.put(CF, FIRSTNAME, M1_FNAME_VALUE);
    m1.put(CF, SENDERS, M1_ARR_VALUE);

    m2 = new Mutation(M2_ROWID);
    m2.put(CF, AGE, AGE_VALUE);
    m2.put(CF, FIRSTNAME, M2_FNAME_VALUE);
    m2.put(CF, SENDERS, M2_ARR_VALUE);

    // Fixture mutations carrying column visibility labels
    ColumnVisibility privateVisibility = new ColumnVisibility("private");
    ColumnVisibility morePrivateVisibility = new ColumnVisibility("moreprivate");

    m1v = new Mutation(M1_ROWID);
    m1v.put(CF, AGE, privateVisibility, AGE_VALUE);
    m1v.put(CF, FIRSTNAME, privateVisibility, M1_FNAME_VALUE);
    m1v.put(CF, SENDERS, morePrivateVisibility, M1_ARR_VALUE);

    m2v = new Mutation(M2_ROWID);
    m2v.put(CF, AGE, privateVisibility, AGE_VALUE);
    m2v.put(CF, FIRSTNAME, morePrivateVisibility, M2_FNAME_VALUE);
    m2v.put(CF, SENDERS, morePrivateVisibility, M2_ARR_VALUE);
}
public static void finishSplit(Text metadataEntry, Map<FileRef,DataFileValue> datafileSizes,
    List<FileRef> highDatafilesToRemove, final ServerContext context, ZooLock zooLock) {
  Mutation mutation = new Mutation(metadataEntry);

  // Clear the columns that only exist while a split is in progress
  TabletsSection.TabletColumnFamily.SPLIT_RATIO_COLUMN.putDelete(mutation);
  TabletsSection.TabletColumnFamily.OLD_PREV_ROW_COLUMN.putDelete(mutation);
  ChoppedColumnFamily.CHOPPED_COLUMN.putDelete(mutation);

  // Record the data files that stay with this tablet...
  datafileSizes.forEach((fileRef, dataFileValue) ->
      mutation.put(DataFileColumnFamily.NAME, fileRef.meta(), new Value(dataFileValue.encode())));

  // ...and drop the ones that moved to the high tablet
  for (FileRef pathToRemove : highDatafilesToRemove) {
    mutation.putDelete(DataFileColumnFamily.NAME, pathToRemove.meta());
  }

  update(context, zooLock, mutation, new KeyExtent(metadataEntry, (Text) null));
}
Mutation m = new Mutation(key.getRow()); + Path.SEPARATOR + ServerConstants.TABLE_DIR + Path.SEPARATOR + tableId + Path.SEPARATOR + directory; m.put(key.getColumnFamily(), key.getColumnQualifier(), new Value(newLocation.getBytes(UTF_8))); if (log.isTraceEnabled()) { log.trace("Replacing {} with {}", oldLocation, newLocation); writer.addMutation(m); pool.submit(() -> { try { count++; writer.close(); pool.shutdown(); while (!pool.isTerminated()) {
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
  final BatchWriter batchWriter;
  try {
    batchWriter = getWriter(table);
  } catch (TableNotFoundException e) {
    // Cannot write anywhere without a writer; surface the failure to YCSB.
    System.err.println("Error opening batch writer to Accumulo table " + table);
    e.printStackTrace();
    return Status.ERROR;
  }

  // One mutation per record: each field becomes a column under the shared family.
  final Mutation rowMutation = new Mutation(key.getBytes(UTF_8));
  for (Map.Entry<String, ByteIterator> field : values.entrySet()) {
    byte[] qualifier = field.getKey().getBytes(UTF_8);
    rowMutation.put(colFamBytes, qualifier, field.getValue().toArray());
  }

  try {
    batchWriter.addMutation(rowMutation);
  } catch (MutationsRejectedException e) {
    System.err.println("Error performing update.");
    e.printStackTrace();
    return Status.ERROR;
  }

  // Writes are buffered by the batch writer, hence BATCHED_OK rather than OK.
  return Status.BATCHED_OK;
}
public static void updateTabletVolumes(KeyExtent extent, List<LogEntry> logsToRemove, List<LogEntry> logsToAdd, List<FileRef> filesToRemove, SortedMap<FileRef,DataFileValue> filesToAdd, String newDir, ZooLock zooLock, ServerContext context) { if (extent.isRootTablet()) { if (newDir != null) throw new IllegalArgumentException("newDir not expected for " + extent); if (filesToRemove.size() != 0 || filesToAdd.size() != 0) throw new IllegalArgumentException("files not expected for " + extent); // add before removing in case of process death for (LogEntry logEntry : logsToAdd) addRootLogEntry(context, zooLock, logEntry); removeUnusedWALEntries(context, extent, logsToRemove, zooLock); } else { Mutation m = new Mutation(extent.getMetadataEntry()); for (LogEntry logEntry : logsToRemove) m.putDelete(logEntry.getColumnFamily(), logEntry.getColumnQualifier()); for (LogEntry logEntry : logsToAdd) m.put(logEntry.getColumnFamily(), logEntry.getColumnQualifier(), logEntry.getValue()); for (FileRef fileRef : filesToRemove) m.putDelete(DataFileColumnFamily.NAME, fileRef.meta()); for (Entry<FileRef,DataFileValue> entry : filesToAdd.entrySet()) m.put(DataFileColumnFamily.NAME, entry.getKey().meta(), new Value(entry.getValue().encode())); if (newDir != null) ServerColumnFamily.DIRECTORY_COLUMN.put(m, new Value(newDir.getBytes(UTF_8))); update(context, m, extent); } }
try { for (TabletLocationState tls : tablets) { Mutation m = new Mutation(tls.extent.getMetadataEntry()); if (tls.current != null) { tls.current.clearLocation(m); LogEntry entry = new LogEntry(tls.extent, 0, tls.current.hostPort(), log.toString()); m.put(entry.getColumnFamily(), entry.getColumnQualifier(), entry.getValue()); tls.future.clearFutureLocation(m); writer.addMutation(m); } finally { try { writer.close(); } catch (MutationsRejectedException e) { throw new DistributedStoreException(e);
@Override
public Status update(String table, String key, Map<String, ByteIterator> values) {
  BatchWriter bw;
  try {
    bw = getWriter(table);
  } catch (TableNotFoundException e) {
    System.err.println("Error opening batch writer to Accumulo table " + table);
    e.printStackTrace();
    return Status.ERROR;
  }

  // Build the row mutation: one column per field, all under the common family.
  Mutation mut = new Mutation(key.getBytes(UTF_8));
  values.entrySet().forEach(entry ->
      mut.put(colFamBytes, entry.getKey().getBytes(UTF_8), entry.getValue().toArray()));

  try {
    bw.addMutation(mut);
  } catch (MutationsRejectedException e) {
    System.err.println("Error performing update.");
    e.printStackTrace();
    return Status.ERROR;
  }

  // Mutation is queued, not yet flushed — report it as batched.
  return Status.BATCHED_OK;
}
/**
 * Drives the serializer's deserialize path with the given serialized bytes,
 * presented as the Key/Value entry a scan would produce (row "row",
 * family "a", qualifier "b").
 */
protected void deserializeData(AccumuloRowSerializer serializer, byte[] data) {
    // NOTE: the original version also built a Mutation here that was never
    // used by anything — dead code, removed.
    Key key = new Key(b("row"), b("a"), b("b"), b(), 0, false);
    Value value = new Value(data);

    // Map the column onto family "a" / qualifier "b", then exercise deserialize
    serializer.setMapping(COLUMN_NAME, "a", "b");
    serializer.deserialize(new SimpleImmutableEntry<>(key, value));
}
String address, ZooLock zooLock, Set<String> unusedWalLogs, TServerInstance lastLocation, long flushId) { Mutation m = new Mutation(extent.getMetadataEntry()); m.put(DataFileColumnFamily.NAME, path.meta(), new Value(dfv.encode())); TabletsSection.ServerColumnFamily.TIME_COLUMN.put(m, new Value(time.getBytes(UTF_8))); m.put(ScanFileColumnFamily.NAME, scanFile.meta(), new Value(new byte[0]));
AccumuloClient client = this.master.getContext(); bw = client.createBatchWriter(targetSystemTable, new BatchWriterConfig()); Scanner scanner = client.createScanner(targetSystemTable, Authorizations.EMPTY); scanner.setRange(scanRange); Value value = entry.getValue(); if (key.getColumnFamily().equals(DataFileColumnFamily.NAME)) { m.put(key.getColumnFamily(), key.getColumnQualifier(), value); fileCount++; } else if (TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.hasColumns(key) maxLogicalTime = TabletTime.maxMetadataTime(maxLogicalTime, value.toString()); } else if (TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.hasColumns(key)) { bw.addMutation(MetadataTableUtil.createDeleteMutation(master.getContext(), range.getTableId(), entry.getValue().toString())); bw.addMutation(m); Mutation updatePrevRow = stop.getPrevRowUpdateMutation(); Master.log.debug("Setting the prevRow for last tablet: {}", stop); bw.addMutation(updatePrevRow); bw.flush(); m = new Mutation(stopRow);
/**
 * Create a status record in the replication table.
 *
 * @param file replication file the status applies to (becomes the row)
 * @param tableId source table of the file (becomes the column qualifier)
 * @param v encoded status value to store
 * @return true if the mutation was both accepted and flushed; false if either
 *         the write or the flush was rejected (callers are expected to retry)
 */
protected boolean addStatusRecord(Text file, Table.ID tableId, Value v) {
  try {
    Mutation m = new Mutation(file);
    m.put(StatusSection.NAME, new Text(tableId.getUtf8()), v);
    try {
      replicationWriter.addMutation(m);
    } catch (MutationsRejectedException e) {
      log.warn("Failed to write work mutations for replication, will retry", e);
      return false;
    }
  } finally {
    // Flush unconditionally, even when addMutation failed above. NOTE: a
    // "return false" inside a finally block overrides any return value already
    // in flight, so a failed flush always reports false regardless of the
    // earlier outcome — this is the intended behavior here.
    try {
      replicationWriter.flush();
    } catch (MutationsRejectedException e) {
      log.warn("Failed to write work mutations for replication, will retry", e);
      return false;
    }
  }

  return true;
}