/**
 * Advances the iterator to the next row key and loads that row's columns
 * into this HColumnFamily, returning {@code this} for chaining.
 *
 * @return this HColumnFamily, positioned at the next row
 * @throws NoSuchElementException if no rows remain
 */
@Override
public HColumnFamily<K, N> next() {
  if (!hasNext()) {
    throw new NoSuchElementException("No more rows left on this HColumnFamily");
  }
  // Pre-increment and fetch in one step: identical to incrementing then reading.
  K currentKey = _keys.get(++rowIndex);
  applyToRow(currentKey, rows.get(keySerializer.toByteBuffer(currentKey)));
  return this;
}
/**
 * Executes the Thrift multiget_slice for all configured keys through the
 * keyspace's operation pipeline, stores the result in {@code rows}, and then
 * applies the first key's columns to this object.
 */
private void doExecuteMultigetSlice() {
  keyspace.doExecuteOperation(new Operation<Column>(OperationType.READ) {
    @Override
    public Column execute(Cassandra.Client cassandra) throws HectorException {
      try {
        if ( queryLogger.isDebugEnabled() ) {
          queryLogger.debug("---------\nColumnFamily multiget: {} slicePredicate: {}", columnFamilyName, activeSlicePredicate.toString());
        }
        // Fetch slices for every key at once; consistency level is resolved
        // from the policy configured for this operation type.
        rows = cassandra.multiget_slice(keySerializer.toBytesList(_keys), columnParent, activeSlicePredicate.toThrift(), ThriftConverter.consistencyLevel(consistencyLevelPolicy.get(operationType)));
        applyResultStatus(execTime, getCassandraHost());
        if ( queryLogger.isDebugEnabled() ) {
          queryLogger.debug("Execution took {} microseconds on host {}\n----------", lastExecutionTime, lastHostUsed);
        }
      } catch (Exception e) {
        // Boundary catch: translate any transport/Thrift failure into a HectorException.
        throw exceptionsTranslator.translate(e);
      }
      hasValues = true;
      return null; // result is delivered via the 'rows' field, not the Operation return
    }
  });
  // NOTE(review): assumes _keys is non-empty — _keys.get(0) would otherwise
  // throw IndexOutOfBoundsException. Verify callers guarantee at least one key.
  applyToRow(_keys.get(0), rows.get(keySerializer.toByteBuffer(_keys.get(0))));
}
/**
 * Moves this HColumnFamily forward to the next available row key and applies
 * that row's column data to this instance.
 *
 * @return this HColumnFamily for fluent use
 * @throws NoSuchElementException when iteration is exhausted
 */
@Override
public HColumnFamily<K, N> next() {
  if (hasNext()) {
    rowIndex++;
    K nextKey = _keys.get(rowIndex);
    applyToRow(nextKey, rows.get(keySerializer.toByteBuffer(nextKey)));
    return this;
  }
  throw new NoSuchElementException("No more rows left on this HColumnFamily");
}
/**
 * Steps to the next row in the result set and populates this HColumnFamily
 * with that row's columns.
 *
 * @return this instance, now positioned at the next row
 * @throws NoSuchElementException if there is no further row
 */
@Override
public HColumnFamily<K, N> next() {
  if (!hasNext()) {
    throw new NoSuchElementException("No more rows left on this HColumnFamily");
  }
  rowIndex = rowIndex + 1;
  final K k = _keys.get(rowIndex);
  applyToRow(k, rows.get(keySerializer.toByteBuffer(k)));
  return this;
}
/**
 * Runs multiget_slice against Cassandra for every configured key via the
 * keyspace's operation executor; the resulting row map is stored in
 * {@code rows} and the first key's row is applied to this object.
 */
private void doExecuteMultigetSlice() {
  keyspace.doExecuteOperation(new Operation<Column>(OperationType.READ) {
    @Override
    public Column execute(Cassandra.Client cassandra) throws HectorException {
      try {
        if ( queryLogger.isDebugEnabled() ) {
          queryLogger.debug("---------\nColumnFamily multiget: {} slicePredicate: {}", columnFamilyName, activeSlicePredicate.toString());
        }
        // One round trip for all keys; the consistency level comes from the
        // policy keyed by this operation's type (READ).
        rows = cassandra.multiget_slice(keySerializer.toBytesList(_keys), columnParent, activeSlicePredicate.toThrift(), ThriftConverter.consistencyLevel(consistencyLevelPolicy.get(operationType)));
        applyResultStatus(execTime, getCassandraHost());
        if ( queryLogger.isDebugEnabled() ) {
          queryLogger.debug("Execution took {} microseconds on host {}\n----------", lastExecutionTime, lastHostUsed);
        }
      } catch (Exception e) {
        // Translate any low-level failure into the Hector exception hierarchy.
        throw exceptionsTranslator.translate(e);
      }
      hasValues = true;
      return null; // results flow through the 'rows' field, not this return value
    }
  });
  // NOTE(review): presumes at least one key is present — _keys.get(0) throws
  // on an empty list. Confirm callers enforce a non-empty key set.
  applyToRow(_keys.get(0), rows.get(keySerializer.toByteBuffer(_keys.get(0))));
}
/**
 * Issues the Thrift multiget_slice call for all keys through the keyspace
 * operation pipeline, caches the returned row map in {@code rows}, and
 * applies the columns of the first key to this HColumnFamily.
 */
private void doExecuteMultigetSlice() {
  keyspace.doExecuteOperation(new Operation<Column>(OperationType.READ) {
    @Override
    public Column execute(Cassandra.Client cassandra) throws HectorException {
      try {
        if ( queryLogger.isDebugEnabled() ) {
          queryLogger.debug("---------\nColumnFamily multiget: {} slicePredicate: {}", columnFamilyName, activeSlicePredicate.toString());
        }
        // Serialize all keys and query them in a single multiget, using the
        // consistency level resolved for this operation type.
        rows = cassandra.multiget_slice(keySerializer.toBytesList(_keys), columnParent, activeSlicePredicate.toThrift(), ThriftConverter.consistencyLevel(consistencyLevelPolicy.get(operationType)));
        applyResultStatus(execTime, getCassandraHost());
        if ( queryLogger.isDebugEnabled() ) {
          queryLogger.debug("Execution took {} microseconds on host {}\n----------", lastExecutionTime, lastHostUsed);
        }
      } catch (Exception e) {
        // Boundary handler: wrap transport/Thrift errors as HectorException.
        throw exceptionsTranslator.translate(e);
      }
      hasValues = true;
      return null; // the Operation contract needs a return; data lives in 'rows'
    }
  });
  // NOTE(review): looks like _keys must be non-empty here — _keys.get(0)
  // would throw IndexOutOfBoundsException otherwise; verify against callers.
  applyToRow(_keys.get(0), rows.get(keySerializer.toByteBuffer(_keys.get(0))));
}