/**
 * Returns the data streamer associated with the given cache, lazily creating
 * and configuring one on first request.
 *
 * @param cacheName Cache name.
 * @return Streamer for given cache, or {@code null} if the streamer registry is unavailable.
 */
public IgniteDataStreamer<?, ?> streamerForCache(String cacheName) {
    synchronized (mux) {
        if (streamers == null)
            return null;

        IgniteDataStreamer<?, ?> streamer = streamers.get(cacheName);

        if (streamer == null) {
            // First request for this cache: create a streamer and apply configured tuning.
            streamer = ctx.grid().dataStreamer(cacheName);

            streamer.autoFlushFrequency(streamFlushTimeout);
            streamer.allowOverwrite(streamAllowOverwrite);

            if (streamNodeBufSize > 0)
                streamer.perNodeBufferSize(streamNodeBufSize);

            if (streamNodeParOps > 0)
                streamer.perNodeParallelOperations(streamNodeParOps);

            streamers.put(cacheName, streamer);
        }

        return streamer;
    }
}
/** {@inheritDoc} */
@Override protected void beforeTest() throws Exception {
    stopAllGrids();

    cleanPersistenceDir();

    // Start server nodes in a randomized order to avoid order-dependent behavior.
    List<Integer> srvIdxs = IntStream.range(0, SERVERS).boxed().collect(Collectors.toList());

    Collections.shuffle(srvIdxs);

    for (int idx : srvIdxs)
        startGrid(idx);

    isClient = true;

    client = startGrid(CLIENT_IDX);

    // Preload the default cache from the client node.
    try (IgniteDataStreamer<Integer, Integer> streamer = client.dataStreamer(DEFAULT_CACHE_NAME)) {
        streamer.allowOverwrite(true);

        for (int key = 0; key < CACHE_ENTRIES_CNT; key++)
            streamer.addData(key, key);
    }
}
/**
 * Populates the given cache with {@code ENTRIES} sequential key/value pairs.
 *
 * @param name Cache name.
 */
private void fillCache(String name) {
    try (IgniteDataStreamer<Integer, Value> streamer = grid().dataStreamer(name)) {
        for (int key = 0; key < ENTRIES; key++)
            streamer.addData(key, new Value(key));
    }
}
/**
 * Loads {@code keysCnt} sequential integer keys into the default cache,
 * each mapped to {@code key * multiplier}.
 *
 * @param ignite Ignite.
 * @param keysCnt Keys count.
 * @param multiplier Multiplier applied to each key to produce its value.
 */
private void loadData(IgniteEx ignite, int keysCnt, int multiplier) {
    log.info("Load data: keys=" + keysCnt);

    // Fix: use a parameterized streamer instead of the raw IgniteDataStreamer type,
    // restoring compile-time type checking on addData calls.
    try (IgniteDataStreamer<Integer, Integer> streamer = ignite.dataStreamer(DEFAULT_CACHE_NAME)) {
        streamer.allowOverwrite(true);

        for (int k = 0; k < keysCnt; k++)
            streamer.addData(k, k * multiplier);
    }
}
/**
 * Creates the test cache and streams {@code CACHE_SIZE} string entries into it.
 *
 * @throws Exception If failed.
 */
private void fillCaches() throws Exception {
    grid(0).createCache(CACHE_NAME);

    try (IgniteDataStreamer<Integer, String> streamer = grid(0).dataStreamer(CACHE_NAME)) {
        for (int key = 0; key < CACHE_SIZE; key++)
            streamer.addData(key, "Data " + key);
    }

    awaitPartitionMapExchange();
}
/**
 * Creates organization/person caches and streams linked test data into them.
 *
 * @throws Exception If failed.
 */
protected void fillCaches() throws Exception {
    grid(0).createCache(Organization.class.getSimpleName());
    grid(0).createCache(Person.class.getSimpleName());

    createCacheWithAffinity(OTHER_CACHE_NAME);

    awaitPartitionMapExchange();

    orgIds = new ArrayList<>(ORGS_COUNT_PER_NODE * RESTARTED_NODE_CNT);

    // Collect organization keys that are primary on the nodes that will be restarted.
    for (int nodeIdx = GRID_CNT - RESTARTED_NODE_CNT; nodeIdx < GRID_CNT; nodeIdx++)
        orgIds.addAll(primaryKeys(grid(nodeIdx).cache(Organization.class.getSimpleName()), ORGS_COUNT_PER_NODE));

    try (
        IgniteDataStreamer<Integer, Organization> orgStreamer =
            grid(0).dataStreamer(Organization.class.getSimpleName());
        IgniteDataStreamer<Person.Key, Person> persStreamer =
            grid(0).dataStreamer(Person.class.getSimpleName())
    ) {
        int persId = 0;

        for (int orgId : orgIds) {
            orgStreamer.addData(orgId, new Organization(orgId));

            // Each organization gets PERS_AT_ORG_CNT persons with globally unique ids.
            for (int i = 0; i < PERS_AT_ORG_CNT; i++) {
                Person pers = new Person(persId++, orgId);

                persStreamer.addData(pers.createKey(), pers);
            }
        }
    }

    awaitPartitionMapExchange();
}
/**
 * Runs concurrent streamer puts for up to 30 seconds and reports throughput.
 *
 * @throws Exception If failed.
 */
@SuppressWarnings("BusyWait")
@Test public void testGetPut() throws Exception {
    final AtomicBoolean stop = new AtomicBoolean();
    final LongAdder opsCnt = new LongAdder();

    try (IgniteDataStreamer<Object, Object> streamer = grid(0).dataStreamer(DEFAULT_CACHE_NAME)) {
        IgniteInternalFuture<?> fut = multithreadedAsync(
            new Callable<Object>() {
                @Override public Object call() throws Exception {
                    ThreadLocalRandom rnd = ThreadLocalRandom.current();

                    // Hammer the streamer until the main thread signals stop.
                    while (!stop.get()) {
                        streamer.addData(rnd.nextInt(10000), new TestObject(rnd.nextInt(10000)));

                        opsCnt.add(1);
                    }

                    return null;
                }
            },
            THREAD_CNT
        );

        // Let the workers run for at most 30 seconds, polling for early completion.
        for (int sec = 0; sec < 30 && !fut.isDone(); sec++)
            Thread.sleep(1000);

        stop.set(true);

        fut.get();
    }

    info("Operations in 30 sec: " + opsCnt.sum());
}
/**
 * Streams {@code cnt} 5 KB byte-array entries into the given cache starting at
 * {@code startKey}, then forces a checkpoint so the data reaches disk.
 *
 * @param ig Ignite instance.
 * @param cacheName Cache name.
 * @param startKey Start key range.
 * @param cnt Count.
 */
private void populateCache(IgniteEx ig, String cacheName, int startKey,
    int cnt) throws IgniteCheckedException {
    try (IgniteDataStreamer<Object, Object> streamer = ig.dataStreamer(cacheName)) {
        for (int key = startKey; key < startKey + cnt; key++)
            streamer.addData(key, new byte[5 * 1000]);
    }

    GridCacheDatabaseSharedManager dbMgr =
        (GridCacheDatabaseSharedManager)ig.context().cache().context().database();

    dbMgr.waitForCheckpoint("test");
}
// Preload CACHE_ENTRIES_COUNT random string values into the default cache.
// NOTE(review): snippet is truncated here — the closing brace of the try block is outside this view.
try (IgniteDataStreamer<Integer, String> dataStreamer = igniteEx.dataStreamer(DEFAULT_CACHE_NAME)) { for (int i = 0; i < CACHE_ENTRIES_COUNT; i++) dataStreamer.addData(i, UUID.randomUUID().toString());
// Load cnt sequential keys with their string representations via node 0's streamer.
// NOTE(review): snippet is truncated here — the closing brace of the try block is outside this view.
try (IgniteDataStreamer<Integer, String> ldr = grid(0).dataStreamer(DEFAULT_CACHE_NAME)) { for (int i = 0; i < cnt; i++) ldr.addData(i, Integer.toString(i));
/**
 * Process bulk load COPY command.
 *
 * @param cmd The command.
 * @param qryId Query id.
 * @return The context (which is the result of the first request/response).
 * @throws IgniteCheckedException If something failed.
 */
public FieldsQueryCursor<List<?>> processBulkLoadCommand(SqlBulkLoadCommand cmd, Long qryId)
    throws IgniteCheckedException {
    if (cmd.packetSize() == null)
        cmd.packetSize(BulkLoadAckClientParameters.DFLT_PACKET_SIZE);

    GridH2Table tbl = schemaMgr.dataTable(cmd.schemaName(), cmd.tableName());

    if (tbl == null) {
        throw new IgniteSQLException("Table does not exist: " + cmd.tableName(),
            IgniteQueryErrorCode.TABLE_NOT_FOUND);
    }

    H2Utils.checkAndStartNotStartedCache(ctx, tbl);

    UpdatePlan plan = UpdatePlanBuilder.planForBulkLoad(cmd, tbl);

    IgniteClosureX<List<?>, IgniteBiTuple<?, ?>> dataConverter = new BulkLoadDataConverter(plan);

    IgniteDataStreamer<Object, Object> streamer = ctx.grid().dataStreamer(tbl.cacheName());

    try {
        BulkLoadCacheWriter outputWriter = new BulkLoadStreamerWriter(streamer);

        BulkLoadParser inputParser = BulkLoadParser.createParser(cmd.inputFormat());

        // The processor takes ownership of the streamer (via outputWriter) once constructed.
        BulkLoadProcessor processor = new BulkLoadProcessor(inputParser, dataConverter, outputWriter,
            idx.runningQueryManager(), qryId);

        BulkLoadAckClientParameters params = new BulkLoadAckClientParameters(cmd.localFileName(), cmd.packetSize());

        return new BulkLoadContextCursor(processor, params);
    }
    catch (RuntimeException | Error e) {
        // Fix: previously the streamer leaked if parser or processor construction threw.
        // Cancel-close discards buffered data without flushing.
        streamer.close(true);

        throw e;
    }
}
// Stream KEYS_COUNT IndexedObject values into the cache, logging progress every 1%.
// NOTE(review): snippet is truncated here — the loop body and closing braces are outside this view.
try (IgniteDataStreamer<Integer, IndexedObject> dataLdr = ignite.dataStreamer(cacheName)) { for (int i = 0; i < KEYS_COUNT; ++i) { if (i % (KEYS_COUNT / 100) == 0)
/**
 * Reinit internal cache using passed ignite instance and fill it with data if required.
 *
 * @param ignite Node to get or create cache from.
 * @param fillData Whether the cache should be filled with new data or not.
 */
public void initCache(IgniteEx ignite, boolean fillData) {
    cache = ignite.getOrCreateCache(
        createCacheConfiguration()
            .setAtomicityMode(atomicityMode())
            .setCacheMode(cacheMode())
    );

    if (!fillData)
        return;

    // Overwrite any data left over from a previous initialization.
    try (IgniteDataStreamer<KeyType, ValueType> streamer = ignite.dataStreamer(cache.getName())) {
        streamer.allowOverwrite(true);

        for (int idx = 0; idx < entriesCount(); idx++)
            streamer.addData(createKey(idx), createValue(idx));
    }
}
// Load cnt sequential keys with their string representations via node 1's streamer.
// NOTE(review): snippet is truncated here — the closing brace of the try block is outside this view.
try (IgniteDataStreamer<Integer, String> ldr = grid(1).dataStreamer(DEFAULT_CACHE_NAME)) { for (int i = 0; i < cnt; i++) ldr.addData(i, Integer.toString(i));
// Split ENTRIES across two caches: first half into cacheName0, second half into cacheName1.
// NOTE(review): both try blocks are truncated here — their closing braces are outside this view.
try(IgniteDataStreamer<Integer, Value> streamer = grid().dataStreamer(cacheName0)) { for (int i = 0; i < ENTRIES / 2; ++i) streamer.addData(i, new Value(i)); try(IgniteDataStreamer<Integer, Value> streamer = grid().dataStreamer(cacheName1)) { for (int i = ENTRIES / 2; i < ENTRIES; ++i) streamer.addData(i, new Value(i));
/**
 * Checks that the H2 row cache shrinks once a second batch of entries forces
 * page eviction.
 */
private void checkRowCacheOnPageEviction() {
    grid().getOrCreateCache(cacheConfiguration(CACHE_NAME, true));

    int grpId = grid().cachex(CACHE_NAME).context().groupId();

    // Fix: removed a tautological assertEquals that compared grpId against the very
    // expression it was read from — it could never fail and only obscured the test.

    // First batch: fill the cache so its row cache can be populated.
    try (IgniteDataStreamer<Integer, Value> stream = grid().dataStreamer(CACHE_NAME)) {
        for (int i = 0; i < ENTRIES; ++i)
            stream.addData(i, new Value(i));
    }

    H2RowCache rowCache = rowCache(grid()).forGroup(grpId);

    fillRowCache(CACHE_NAME);

    assertNotNull(rowCache);

    int rowCacheSizeBeforeEvict = rowCache.size();

    // Second batch: load as many entries again to force page eviction.
    try (IgniteDataStreamer<Integer, Value> stream = grid().dataStreamer(CACHE_NAME)) {
        for (int i = ENTRIES; i < 2 * ENTRIES; ++i)
            stream.addData(i, new Value(i));
    }

    assertTrue("rowCache size before evictions: " + rowCacheSizeBeforeEvict +
            ", after evictions: " + rowCache.size(),
        rowCacheSizeBeforeEvict > rowCache.size());
}
// Stream DbValue entries into the default cache, overwriting any existing keys.
// NOTE(review): snippet is truncated here — the loop and closing brace are outside this view.
try (IgniteDataStreamer<Integer, DbValue> st = ig.dataStreamer(DEFAULT_CACHE_NAME)) { st.allowOverwrite(true);
// Stream LARGE_KEYS_COUNT IndexedObject values into the "dyncache" cache, logging progress every 1%.
// NOTE(review): snippet is truncated here — the loop body and closing braces are outside this view.
try (IgniteDataStreamer<Integer, IndexedObject> dataLdr = ignite.dataStreamer("dyncache")) { for (int i = 0; i < LARGE_KEYS_COUNT; ++i) { if (i % (LARGE_KEYS_COUNT / 100) == 0)
// Stream 50 entries into the test cache with overwrite enabled.
// NOTE(review): snippet is truncated here — the loop body and closing braces are outside this view.
try (IgniteDataStreamer<Object, Object> s = ig.dataStreamer(TEST_CACHE_NAME)) { s.allowOverwrite(true); for (int i = 0; i < 50; i++) {
/**
 * Verifies that a data streamer correctly updates size metrics of a replicated
 * transactional cache on both nodes.
 *
 * @throws Exception if failed.
 */
@Test public void testDataStreamerModifiesReplicatedCacheSize() throws Exception {
    startGridsMultiThreaded(2);

    IgniteEx ignite = grid(0);

    ignite.createCache(
        new CacheConfiguration<>("test")
            .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL)
            .setCacheMode(CacheMode.REPLICATED)
    );

    // Add two keys residing in different partitions.
    try (IgniteDataStreamer<Object, Object> streamer = ignite.dataStreamer("test")) {
        streamer.addData(1, "a");
        streamer.addData(keyInDifferentPartition(ignite, "test", 1), "b");
    }

    assertEquals(2, ignite.cache("test").size());

    assertEquals(1, grid(0).cache("test").localSize());
    assertEquals(1, grid(0).cache("test").localSize(BACKUP));

    assertEquals(1, grid(1).cache("test").localSize());
    assertEquals(1, grid(1).cache("test").localSize(BACKUP));
}