/**
 * Builds the list of atomic increment actions for this event.
 *
 * @return a list containing a single {@link AtomicIncrementRequest} for the
 *     configured (table, incrementRow, cf, incrementColumn) cell, or an empty
 *     list when no increment column is configured.
 */
public List<AtomicIncrementRequest> getIncrements() {
  final List<AtomicIncrementRequest> requests = new ArrayList<AtomicIncrementRequest>();
  // No increment column configured means there is nothing to count.
  if (incrementColumn == null) {
    return requests;
  }
  requests.add(new AtomicIncrementRequest(table, incrementRow, cf, incrementColumn));
  return requests;
}
// NOTE(review): truncated fragment of a read path (braces are unbalanced here,
// so only comments were added; do not reformat without the missing context).
// Visible behavior: selects the table, builds a GetRequest for (key, column
// family), restricts qualifiers when a field set is given, joins the get with
// a timeout, returns Status.NOT_FOUND for a missing/empty row, and copies each
// returned column's qualifier/value into the result map as a
// ByteArrayByteIterator. Debug branches print progress to stdout.
setTable(table); final GetRequest get = new GetRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (fields != null) { get.qualifiers(getQualifierList(fields)); if (debug) { System.out.println("Doing read from HBase columnfamily " + Bytes.pretty(columnFamilyBytes)); System.out.println("Doing read for key: " + key); final ArrayList<KeyValue> row = client.get(get).join(joinTimeout); if (row == null || row.isEmpty()) { return Status.NOT_FOUND; result.put(new String(column.qualifier()), new ByteArrayByteIterator(column.value())); "Result for field: " + Bytes.pretty(column.qualifier()) + " is: " + Bytes.pretty(column.value()));
// NOTE(review): truncated delete-path fragment (unbalanced braces; comments
// only). Visible behavior: builds a DeleteRequest for (key, column family);
// when durability is disabled, marks the delete non-durable and non-bufferable;
// then issues the delete joined with a timeout. On InterruptedException it
// prints a message and re-issues the delete without joining — presumably
// fire-and-forget; the original likely also restores the interrupt flag
// (Thread.currentThread().interrupt()) — TODO confirm against full source.
final DeleteRequest delete = new DeleteRequest( lastTableBytes, key.getBytes(), columnFamilyBytes); if (!durability) { delete.setDurable(false); delete.setBufferable(false); try { client.delete(delete).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); client.delete(delete);
// NOTE(review): truncated scan-path fragment (unbalanced braces; comments
// only). Visible behavior: selects the table, opens a Scanner bounded to the
// column family and start key (UTF-8 bytes of startkey), restricts qualifiers
// to the requested fields, then pages through scanner.nextRows() joined with a
// timeout, converting each row's KeyValues into a HashMap of qualifier ->
// ByteArrayByteIterator. Closes the scanner (joined) and returns Status.OK.
// The trailing `catch (InterruptedException e)` is cut off here.
setTable(table); final Scanner scanner = client.newScanner(lastTableBytes); scanner.setFamily(columnFamilyBytes); scanner.setStartKey(startkey.getBytes(UTF8_CHARSET)); scanner.setQualifiers(getQualifierList(fields)); try { int numResults = 0; while ((rows = scanner.nextRows().join(joinTimeout)) != null) { for (final ArrayList<KeyValue> row : rows) { final HashMap<String, ByteIterator> rowResult = new HashMap<String, ByteIterator>(row.size()); for (final KeyValue column : row) { rowResult.put(new String(column.qualifier()), new ByteArrayByteIterator(column.value())); if (debug) { System.out.println("Got scan result for key: " + Bytes.pretty(column.key())); scanner.close().join(joinTimeout); return Status.OK; } catch (InterruptedException e) {
// NOTE(review): truncated client-initialization fragment (unbalanced braces;
// comments only). Visible behavior: builds an AsyncHBase Config either from
// the workload properties (when no config path is given, each property is
// copied via overrideConfig) or from the file at configPath; creates the
// HBaseClient; verifies the table exists (joined with a timeout, restoring
// the interrupt flag on InterruptedException); then prefetches region metadata
// for the table, with optional debug logging around the prefetch.
final Config config; if (configPath == null || configPath.isEmpty()) { config = new Config(); final Iterator<Entry<Object, Object>> iterator = getProperties() .entrySet().iterator(); while (iterator.hasNext()) { final Entry<Object, Object> property = iterator.next(); config.overrideConfig((String)property.getKey(), (String)property.getValue()); config = new Config(configPath); client = new HBaseClient(config); client.ensureTableExists(table).join(joinTimeout); } catch (InterruptedException e1) { Thread.currentThread().interrupt(); System.out.println("Starting meta prefetch for table " + table); client.prefetchMeta(table).join(joinTimeout); if (debug) { System.out.println("Completed meta prefetch for table " + table);
// NOTE(review): truncated sink-process fragment (unbalanced braces; comments
// only). Visible behavior: issues puts (durability controlled by enableWal)
// with success/failure callbacks; coalesces increments per (key, qualifier)
// CellIdentifier in incrementBuffer — first occurrence is stored, later ones
// add their amounts onto the buffered request — then fires every buffered
// increment with callbacks and flushes the client. A Throwable handler routes
// failures to handleTransactionFailure(txn). The surrounding branch structure
// (buffered vs. unbuffered increments) is cut off in this chunk.
action.setDurable(enableWal); client.put(action).addCallbacks(putSuccessCallback, putFailureCallback); CellIdentifier identifier = new CellIdentifier(increment.key(), increment.qualifier()); AtomicIncrementRequest request = incrementBuffer.get(identifier); incrementBuffer.put(identifier, increment); } else { request.setAmount(request.getAmount() + increment.getAmount()); client.atomicIncrement(increment).addCallbacks( incrementSuccessCallback, incrementFailureCallback); Collection<AtomicIncrementRequest> increments = incrementBuffer.values(); for (AtomicIncrementRequest increment : increments) { client.atomicIncrement(increment).addCallbacks( incrementSuccessCallback, incrementFailureCallback); client.flush(); } catch (Throwable e) { this.handleTransactionFailure(txn);
// NOTE(review): truncated write-path fragment (unbalanced braces; comments
// only). Visible behavior: optionally debug-logs each field/value pair, builds
// a multi-qualifier PutRequest for (key, column family); when durability is
// disabled marks it non-durable and non-bufferable; joins the put with a
// timeout. On InterruptedException it prints a message and re-issues the put
// without joining — presumably fire-and-forget; the original likely restores
// the interrupt flag as well — TODO confirm against full source.
if (debug) { System.out.println("Adding field/value " + entry.getKey() + "/" + Bytes.pretty(entry.getValue().toArray()) + " to put request"); final PutRequest put = new PutRequest(lastTableBytes, key.getBytes(), columnFamilyBytes, qualifiers, byteValues); if (!durability) { put.setDurable(false); put.setBufferable(false); try { client.put(put).join(joinTimeout); } catch (InterruptedException e) { System.err.println("Thread interrupted"); client.put(put);
// NOTE(review): truncated sink client-creation fragment (starts mid-builder
// chain; comments only). Visible behavior: names the callback thread pool
// after the sink, creates an HBaseClient backed by a
// NioClientSocketChannelFactory that reuses sinkCallbackPool for both boss and
// worker threads, verifies the table/column-family exists via callbacks, marks
// the sink open, disables the client's flush interval (immediate writes), and
// returns the client. The callback body is cut off in this chunk.
.setNameFormat(this.getName() + " HBase Call Pool").build()); logger.info("Callback pool created"); client = new HBaseClient(asyncClientConfig, new NioClientSocketChannelFactory(sinkCallbackPool, sinkCallbackPool)); client.ensureTableFamilyExists( tableName.getBytes(Charsets.UTF_8), columnFamily).addCallbacks( new Callback<Object, Object>() { open = true; client.setFlushInterval((short) 0); return client;
/**
 * Releases this thread's reference to the shared client; the last thread to
 * leave shuts the AsyncHBase client down and clears the shared reference.
 *
 * @throws DBException declared for interface compatibility; not thrown here
 *     (shutdown failures are reported to stderr instead).
 */
@Override
public void cleanup() throws DBException {
  synchronized (MUTEX) {
    threadCount--;
    // Only the last thread out is responsible for tearing down the client.
    if (client == null || threadCount >= 1) {
      return;
    }
    try {
      if (debug) {
        System.out.println("Shutting down client");
      }
      client.shutdown().joinUninterruptibly(joinTimeout);
    } catch (Exception e) {
      System.err.println("Failed to shutdown the AsyncHBase client "
          + "properly: " + e.getMessage());
    }
    client = null;
  }
}
// NOTE(review): truncated configuration fragment (comments only). Visible
// behavior: builds a fresh AsyncHBase Config, sets the ZooKeeper quorum and
// base path from the sink's settings, then copies every extra async property
// into the config via overrideConfig. The loop's closing braces are cut off
// in this chunk.
asyncClientConfig = new Config(); asyncClientConfig.overrideConfig( HBaseSinkConfigurationConstants.ASYNC_ZK_QUORUM_KEY, zkQuorum ); asyncClientConfig.overrideConfig( HBaseSinkConfigurationConstants.ASYNC_ZK_BASEPATH_KEY, zkBaseDir ); for (String property: asyncProperties.keySet()) { asyncClientConfig.overrideConfig(property, asyncProperties.get(property));
/**
 * Builds the list of HBase put actions for the current event.
 * <p>
 * When a payload column is configured, a row key is generated according to
 * {@code keyType} (timestamp, nano-timestamp, random, or UUID by default) and
 * a single {@link PutRequest} for (table, rowKey, cf, payloadColumn, payload)
 * is returned; otherwise the list is empty.
 *
 * @return the put actions to execute for this event.
 * @throws FlumeException if row-key generation fails.
 */
@Override
public List<PutRequest> getActions() {
  final List<PutRequest> puts = new ArrayList<PutRequest>();
  // Nothing to write when no payload column is configured.
  if (payloadColumn == null) {
    return puts;
  }
  try {
    final byte[] rowKey;
    switch (keyType) {
      case TS:
        rowKey = SimpleRowKeyGenerator.getTimestampKey(rowPrefix);
        break;
      case TSNANO:
        rowKey = SimpleRowKeyGenerator.getNanoTimestampKey(rowPrefix);
        break;
      case RANDOM:
        rowKey = SimpleRowKeyGenerator.getRandomKey(rowPrefix);
        break;
      default:
        rowKey = SimpleRowKeyGenerator.getUUIDKey(rowPrefix);
        break;
    }
    puts.add(new PutRequest(table, rowKey, cf, payloadColumn, payload));
  } catch (Exception e) {
    throw new FlumeException("Could not get row key!", e);
  }
  return puts;
}
// NOTE(review): callback fragment; the trailing `public String toString() {`
// is cut off in this chunk, so only comments were added. Visible behavior:
// a response callback that converts a Long RPC response into a RowLock for
// the request's region, and throws InvalidResponseException for any other
// response type.
public RowLock call(final Object response) { if (response instanceof Long) { return new RowLock(request.getRegion().name(), (Long) response); } else { throw new InvalidResponseException(Long.class, response); } } public String toString() {
// NOTE(review): anonymous-callback fragment (the enclosing expression is cut
// off; comments only). Visible behavior: once a region has been located, this
// callback opens the scanner using the reverse-scan open request built from
// the located region's start key.
public Object call(final Object arg) { return openScanner(scanner, scanner.getOpenRequestForReverseScan( ((RegionLocation) arg).startKey())); } },
/**
 * Deletes data from HBase.
 * <p>
 * The resulting {@link Deferred} carries no meaningful value on success —
 * think of it as {@code Deferred<Void>} — so you probably want to attach at
 * least an errback to it to handle failures.
 *
 * @param request The {@code delete} request.
 * @return A deferred object that indicates the completion of the request.
 */
public Deferred<Object> delete(final DeleteRequest request) {
  // Bump the client-wide delete counter before dispatching the RPC.
  num_deletes.increment();
  final Deferred<Object> d = sendRpcToRegion(request);
  return d;
}
/**
 * De-serializes the RPC response for this mutation.
 * <p>
 * The response must not carry any cell payload, and the parsed protobuf holds
 * no information the caller needs, so this always returns {@code null}.
 *
 * @param buf the buffer positioned at the response body.
 * @param cell_size size of the trailing cell block; must be zero.
 * @return always {@code null}.
 */
@Override
Object deserialize(final ChannelBuffer buf, int cell_size) {
  HBaseRpc.ensureNoCell(cell_size);
  // Parse (and thereby consume) the protobuf from the buffer; the message
  // itself is discarded — the original bound it to an unused local.
  readProtobuf(buf, MutateResponse.PARSER);
  return null;
}
/**
 * Specifies from which row key to start scanning (inclusive).
 * <p>
 * If this method is never invoked, scanning begins from the first row key in
 * the table.  <strong>This byte array will NOT be copied.</strong>
 *
 * @param start_key The row key to start scanning from.
 * @throws IllegalStateException if scanning already started.
 */
public void setStartKey(final byte[] start_key) {
  // Validate the key before checking scanner state, so an invalid key fails
  // the same way whether or not the scan has begun (order preserved from the
  // original to keep exception precedence identical).
  KeyValue.checkKey(start_key);
  checkScanningNotStarted();
  this.start_key = start_key;
}
/**
 * De-serializes the RPC response for this mutation.
 * <p>
 * The response must not carry any cell payload, and the parsed protobuf holds
 * no information the caller needs, so this always returns {@code null}.
 *
 * @param buf the buffer positioned at the response body.
 * @param cell_size size of the trailing cell block; must be zero.
 * @return always {@code null}.
 */
@Override
Object deserialize(final ChannelBuffer buf, int cell_size) {
  HBaseRpc.ensureNoCell(cell_size);
  // Parse (and thereby consume) the protobuf from the buffer; the message
  // itself is discarded — the original bound it to an unused local.
  readProtobuf(buf, MutateResponse.PARSER);
  return null;
}
/** * Constructor for UTF-8 prefix strings. */ public MultipleColumnPrefixFilter(final String[] prefixes) { this.prefixes = new byte[prefixes.length][]; for (int i = 0; i < prefixes.length; i++) { this.prefixes[i] = Bytes.UTF8(prefixes[i]); } this.prefixesLength = estimatePrefixesLength(); }
private void shutdownHBaseClient() { logger.info("Shutting down HBase Client"); final CountDownLatch waiter = new CountDownLatch(1); try { client.shutdown().addCallback(new Callback<Object, Object>() { @Override public Object call(Object arg) throws Exception { waiter.countDown(); return null; } }).addErrback(new Callback<Object, Object>() { @Override public Object call(Object arg) throws Exception { logger.error("Failed to shutdown HBase client cleanly! HBase cluster might be down"); waiter.countDown(); return null; } }); if (!waiter.await(timeout, TimeUnit.NANOSECONDS)) { logger.error("HBase connection could not be closed within timeout! HBase cluster might " + "be down!"); } } catch (Exception ex) { logger.warn("Error while attempting to close connections to HBase"); } finally { // Dereference the client to force GC to clear up any buffered requests. client = null; } }