/** {@inheritDoc} */
@Override public K key() {
    // The key is mandatory for a fully formed entry; a null key means the entry
    // is still being processed and must not be exposed yet.
    assert key != null : "Entry is being improperly processed.";

    // No cache object context is needed to unwrap the key; no copy is made.
    return key.value(null, false);
}
/** {@inheritDoc} */
@Override public int partition() {
    // Partition is derived from the key, so the key must already be set.
    assert key != null;

    return key.partition();
}
/**
 * Calculates the total marshalled size of this entry.
 *
 * @param ctx Cache object context.
 * @return Marshalled size.
 * @throws IgniteCheckedException If failed.
 */
public int marshalledSize(CacheObjectContext ctx) throws IgniteCheckedException {
    // Fixed per-entry overhead plus the marshalled key bytes.
    int total = SIZE_OVERHEAD + key.valueBytes(ctx).length;

    // The value may be absent (e.g. a key-only entry) and then contributes nothing.
    if (val != null)
        total += val.valueBytes(ctx).length;

    return total;
}
addr += row.key().putValue(addr); addr += (2 + mvccInfoSize + cacheIdSize + row.key().valueBytesLength(null));
/** {@inheritDoc} */
@Override public int size() throws IgniteCheckedException {
    // Key + value + version plus 8 bytes for a fixed-width long field
    // (presumably the expire time — TODO confirm against the row layout).
    int size = key().valueBytesLength(null)
        + value().valueBytesLength(null)
        + CacheVersionIO.size(version(), false)
        + 8;

    // Non-zero cache ID is stored explicitly and costs an extra int.
    if (cacheId() != 0)
        size += 4;

    return size;
}
/** {@inheritDoc} */
@Override public int memorySize() throws IgniteCheckedException {
    byte[] keyBytes;
    byte[] valBytes = null;
    int extras;

    // Marshalling must happen under the entry lock so key/value state is stable.
    lockEntry();

    try {
        key.prepareMarshal(cctx.cacheObjectContext());

        keyBytes = key.valueBytes(cctx.cacheObjectContext());

        if (val != null) {
            val.prepareMarshal(cctx.cacheObjectContext());

            valBytes = val.valueBytes(cctx.cacheObjectContext());
        }

        extras = extrasSize();
    }
    finally {
        unlockEntry();
    }

    // A missing value is accounted as a single byte.
    return SIZE_OVERHEAD + extras + keyBytes.length + (valBytes == null ? 1 : valBytes.length);
}
/**
 * Completes unmarshalling of the key after the raw bytes were read.
 *
 * @param ctx Context.
 * @param ldr Class loader.
 * @throws IgniteCheckedException If failed.
 */
public void finishUnmarshal(GridCacheContext ctx, ClassLoader ldr) throws IgniteCheckedException {
    // The key bytes must already be present before unmarshalling can finish.
    assert key != null;

    key.finishUnmarshal(ctx.cacheObjectContext(), ldr);
}
/**
 * Writes a plain (unencrypted) data entry into the given buffer in its
 * fixed binary layout: cache ID, key, value (or {@code -1} length marker
 * when the value is {@code null}), operation ordinal, near XID version,
 * write version, partition ID, partition counter and expire time.
 *
 * @param buf Buffer to write to.
 * @param entry Data entry.
 * @throws IgniteCheckedException If versions could not be written.
 */
void putPlainDataEntry(ByteBuffer buf, DataEntry entry) throws IgniteCheckedException {
    buf.putInt(entry.cacheId());

    // Include the failed object in the error so serialization failures are diagnosable.
    if (!entry.key().putValue(buf))
        throw new AssertionError("Failed to write key to the buffer: " + entry.key());

    if (entry.value() == null)
        buf.putInt(-1); // -1 marks an absent value.
    else if (!entry.value().putValue(buf))
        throw new AssertionError("Failed to write value to the buffer: " + entry.value());

    buf.put((byte)entry.op().ordinal());

    putVersion(buf, entry.nearXidVersion(), true);
    putVersion(buf, entry.writeVersion(), false);

    buf.putInt(entry.partitionId());
    buf.putLong(entry.partitionCounter());
    buf.putLong(entry.expireTime());
}
/** {@inheritDoc} */
@Override public int hashCode() {
    // Standard 31-multiplier combination of the key hash and the cache ID.
    return 31 * key.hashCode() + cacheId;
}
CacheDataRow row = it.nextX(); partHash += row.key().hashCode(); row.key().valueBytes(grpCtx.cacheObjectContext()), row.version(), valHash));
grpCtx.cacheObjectContext(), entryHashRecord.key().cacheObjectType(), entryHashRecord.keyBytes());
addr += row.key().putValue(addr); addr += (2 + mvccInfoSize + cacheIdSize + row.key().valueBytesLength(null));
/** {@inheritDoc} */
@Override protected void writeFragmentData(CacheDataRow row, ByteBuffer buf, int rowOff, int payloadSize)
    throws IgniteCheckedException {
    final int keyLen = row.key().valueBytesLength(null);
    final int valLen = row.value().valueBytesLength(null);

    int off = 0;

    // Row parts must be written in this exact order — it defines the on-page layout.
    off += writeFragment(row, buf, rowOff + off, payloadSize - off, MVCC_INFO, keyLen, valLen);
    off += writeFragment(row, buf, rowOff + off, payloadSize - off, CACHE_ID, keyLen, valLen);
    off += writeFragment(row, buf, rowOff + off, payloadSize - off, KEY, keyLen, valLen);
    off += writeFragment(row, buf, rowOff + off, payloadSize - off, EXPIRE_TIME, keyLen, valLen);
    off += writeFragment(row, buf, rowOff + off, payloadSize - off, VALUE, keyLen, valLen);
    off += writeFragment(row, buf, rowOff + off, payloadSize - off, VERSION, keyLen, valLen);

    // Every byte of the payload must be accounted for by the fragments above.
    assert off == payloadSize;
}
/** {@inheritDoc} */
@Override public KeyCacheObject toCacheKeyObject(CacheObjectContext ctx, @Nullable GridCacheContext cctx,
    Object obj, boolean userObj) {
    // Non-binary caches fall back to the default conversion.
    if (!ctx.binaryEnabled())
        return super.toCacheKeyObject(ctx, cctx, obj, userObj);

    if (obj instanceof KeyCacheObject) {
        KeyCacheObject key = (KeyCacheObject)obj;

        if (key instanceof BinaryObjectImpl) {
            // Need to create a copy because the key can be reused at the application layer after that (IGNITE-3505).
            key = key.copy(partition(ctx, cctx, key));
        }
        else if (key.partition() == -1)
            // Assume others KeyCacheObjects can not be reused for another cache.
            // Partition is assigned in place only when not yet set (-1).
            key.partition(partition(ctx, cctx, key));

        return key;
    }

    // Not a key object yet: convert to binary form first.
    obj = toBinary(obj, false);

    if (obj instanceof BinaryObjectImpl) {
        // Freshly built binary object is not shared, so the partition is set in place.
        ((BinaryObjectImpl)obj).partition(partition(ctx, cctx, obj));

        return (KeyCacheObject)obj;
    }

    // Fallback for objects that did not convert to a binary key.
    return toCacheKeyObject0(ctx, cctx, obj, userObj);
}
/**
 * Finishes unmarshalling of both the key and (when present) the value.
 *
 * @param ctx Context.
 * @throws IgniteCheckedException If failed.
 */
public void unmarshal(CacheObjectContext ctx) throws IgniteCheckedException {
    // Key is mandatory; value may legitimately be absent.
    assert key != null;

    key.finishUnmarshal(ctx, null);

    if (val != null)
        val.finishUnmarshal(ctx, null);
}
/**
 * Puts primitive keys/values into the cache, serializes each entry's key and
 * value into one shared buffer, then reads them back in the same order and
 * verifies round-trip equality.
 *
 * @throws Exception If failed.
 */
@Test
public void testPrimitiveValues() throws Exception {
    IgniteEx ignite = grid(0);

    IgniteCache<Object, Object> cache = ignite.cache(CACHE_NAME);

    // Populate: integer keys mapped to their string representations.
    for (int i = 0; i < 10; i++)
        cache.put(i, String.valueOf(i));

    IgniteCacheObjectProcessor co = ignite.context().cacheObjects();

    GridCacheAdapter<Object, Object> internalCache = ignite.context().cache().internalCache(CACHE_NAME);

    GridCacheContext<Object, Object> cacheCtx = internalCache.context();

    CacheObjectContext coCtx = cacheCtx.cacheObjectContext();

    ByteBuffer buf = ByteBuffer.allocate(2048);

    // Serialize key and value of every entry back-to-back into the buffer.
    for (int i = 0; i < 10; i++) {
        KeyCacheObject cacheKey = co.toCacheKeyObject(coCtx, cacheCtx, i, false);

        GridCacheEntryEx entry = internalCache.peekEx(cacheKey);

        assertNotNull(entry);

        assertTrue(entry.key().putValue(buf));
        assertTrue(entry.valueBytes().putValue(buf));
    }

    buf.flip();

    // Deserialize in the same order and check both key and value survived the round trip.
    for (int i = 0; i < 10; i++) {
        CacheObject keyObj = co.toCacheObject(coCtx, buf);

        assertEquals((Integer)i, keyObj.value(coCtx, false));

        CacheObject valObj = co.toCacheObject(coCtx, buf);

        assertEquals(String.valueOf(i), valObj.value(coCtx, false));
    }
}
/** * Check whether this worker owns particular key. * * @param key Key. * @return {@code True} in case this worker should process this key. */ protected boolean owns(KeyCacheObject key) { assert key != null; // Avoid hash code and remainder calculation in case there is no actual split. return totalCnt == 1 || key.hashCode() % totalCnt == id; }
grpCtx.cacheObjectContext(), entryHashRecord.key().cacheObjectType(), entryHashRecord.keyBytes());
/**
 * Unwraps the key using this entry's cache object context.
 *
 * @param cpy Copy flag.
 * @return Key value.
 */
protected Object keyValue(boolean cpy) {
    CacheObjectContext coCtx = cctx.cacheObjectContext();

    return key.value(coCtx, cpy);
}