/**
 * Returns the next random float, built by masking the next random integer
 * and scaling it down by the class's float divisor.
 */
public float nextFloat() {
    // Mask first, then scale into float range via the divisor field.
    return (float) ((floatMask & nextInt()) / floatDivid);
}
// Fragment of a warm-up + timing routine; the enclosing method (declaring
// queue, warmupSize, testSize, res, random, start, stat) is not visible in this chunk.
// NOTE(review): this copy casts getSeeds().read(queue) to IntBuffer and assigns
// next() to an IntBuffer, while the sibling fragments use Pointer<Integer> —
// looks like a stale pre-Pointer-API variant; confirm which API version is intended.
ParallelRandom demo = new ParallelRandom(queue, warmupSize, System.currentTimeMillis());
println((IntBuffer)demo.getSeeds().read(queue));
IntBuffer b = demo.next();
println(b);
b = demo.next();
println(b);
b = demo.next();
println(b);
demo.doNext();
res |= random.nextInt(); // presumably keeps the JIT from discarding warm-up work — confirm
demo.getQueue().finish(); // drain the queue so the timed run below starts from idle
long time = System.nanoTime() - start;
// Timed run: fresh generator at the real test size, one generation pass.
demo = new ParallelRandom(queue, testSize, System.currentTimeMillis());
b = demo.next();
start = System.nanoTime();
demo.doNext();
demo.getQueue().finish();
time = System.nanoTime() - start;
stat.add(time);
// Monte-Carlo PI estimation test (points in a circle), using nPoints pairs of
// random integers as (x, y) coordinates.
// NOTE(review): this fragment is garbled — the ParallelRandom constructor call is
// cut off mid-argument-list with a declaration fused into it, and the method body
// runs past this chunk. Reconstruct from the original test source before editing.
public void testPICircle() {
    try {
        ParallelRandom random = new ParallelRandom(
            JavaCL.createBestContext().createDefaultQueue(),
            nPoints * 2,
        Pointer<Integer> values = random.next();
        for (int iPoint = 0; iPoint < nPoints; iPoint++) {
            int offset = iPoint * 2;
/** * Returns a direct NIO buffer containing the next {@link ParallelRandom#getParallelSize() } random integers.<br> * This buffer is read only and will only be valid until any of the "next" method is called again. * @return output buffer of capacity ; see {@link ParallelRandom#getParallelSize() } */ public synchronized Pointer<Integer> next() { CLEvent evt = doNext(); //queue.finish(); evt = null; //return outputBuffer; //return (mappedOutputBuffer = output.map(queue, MapFlags.Read, evt)).asReadOnlyBuffer(); return output.read(queue, evt); }
// Ensures at least n not-yet-consumed random ints are available on the host.
// First call: launches a generation pass (unless a preload is already in flight)
// and reads the results back. When fewer than n ints remain in the current batch,
// blocks on the next batch and re-reads. Finally re-arms background preloading
// when the preload flag is set.
// NOTE(review): wraps CLBuildException in a bare RuntimeException with no message.
private synchronized void waitForData(int n) {
    try {
        if (lastData == null) {
            //lastOutputData = NIOUtils.directInts(parallelSize, context.getKernelsDefaultByteOrder());
            // No data ever generated: start a pass unless one is already pending.
            if (preloadEvent == null)
                preloadEvent = randomProgram.gen_numbers(queue, seeds, output, globalWorkSizes, null);
            readLastOutputData();
        }
        if (consumedInts > parallelSize - n) {
            // Current batch cannot satisfy n more ints: wait for the next batch.
            preload().waitFor();
            consumedInts = 0;
            readLastOutputData();
        }
        // Keep a batch computing in the background if preloading is enabled.
        if (preload && preloadEvent == null)
            preload();
    } catch (CLBuildException ex) {
        throw new RuntimeException(ex);
    }
}

// Header only — method body continues beyond this chunk.
private synchronized void readLastOutputData() {
/**
 * Enables or disables background preloading.
 * If true, a new batch of parallel random numbers is automatically precomputed in
 * background as soon as one starts to consume numbers from the current batch
 * (this gives faster random numbers, at the risk of computing more values than needed).
 * @param preload whether batches should be precomputed in the background
 * @throws CLBuildException if enqueuing the generation kernel fails
 */
public synchronized void setPreload(boolean preload) throws CLBuildException {
    this.preload = preload;
    if (preload && preloadEvent == null) {
        if (lastData == null) {
            // Nothing generated yet: launch the first batch immediately.
            preloadEvent = randomProgram.gen_numbers(queue, seeds, output, globalWorkSizes, null);
        } else if (consumedInts > 0) {
            // Current batch already partially consumed: start computing the next one.
            preload();
        }
    }
}

// Header only — method body continues beyond this chunk.
private synchronized CLEvent preload() throws CLBuildException {
// Presumably fills the per-work-item seed buffer from the base seed — confirm
// against initSeeds; the enclosing method is not visible in this chunk.
initSeeds(seedsBuf, seed);
// Fragment of a warm-up + timing routine; the enclosing method (declaring
// queue, warmupSize, testSize, res, random, start, stat) is not visible in this chunk.
// Warm-up: create a small generator, print seeds, and pull a few batches.
ParallelRandom demo = new ParallelRandom(queue, warmupSize, System.currentTimeMillis());
println(demo.getSeeds().read(queue));
Pointer<Integer> b = demo.next();
println(b);
b = demo.next();
println(b);
b = demo.next();
println(b);
demo.doNext();
res |= random.nextInt(); // presumably keeps the JIT from discarding warm-up work — confirm
demo.getQueue().finish(); // drain the queue so the timed run below starts from idle
long time = System.nanoTime() - start;
// Timed run: fresh generator at the real test size, one generation pass.
demo = new ParallelRandom(queue, testSize, System.currentTimeMillis());
b = demo.next();
start = System.nanoTime();
demo.doNext();
demo.getQueue().finish();
time = System.nanoTime() - start;
stat.add(time);
/**
 * Copies the next {@link ParallelRandom#getParallelSize()} random integers into the
 * caller-supplied buffer.
 * @param output destination pointer the freshly generated integers are read into
 */
public synchronized void next(Pointer<Integer> output) {
    // Kick off generation of the next batch, then perform a blocking read
    // that waits on the generation event before copying into the destination.
    final CLEvent generated = doNext();
    this.output.read(queue, output, true, generated);
}
// Ensures at least n not-yet-consumed random ints are available on the host.
// First call: launches a generation pass (unless a preload is already in flight)
// and reads the results back. When fewer than n ints remain in the current batch,
// blocks on the next batch and re-reads. Finally re-arms background preloading
// when the preload flag is set.
// NOTE(review): wraps CLBuildException in a bare RuntimeException with no message.
private synchronized void waitForData(int n) {
    try {
        if (lastData == null) {
            //lastOutputData = NIOUtils.directInts(parallelSize, context.getKernelsDefaultByteOrder());
            // No data ever generated: start a pass unless one is already pending.
            if (preloadEvent == null)
                preloadEvent = randomProgram.gen_numbers(queue, seeds, output, globalWorkSizes, null);
            readLastOutputData();
        }
        if (consumedInts > parallelSize - n) {
            // Current batch cannot satisfy n more ints: wait for the next batch.
            preload().waitFor();
            consumedInts = 0;
            readLastOutputData();
        }
        // Keep a batch computing in the background if preloading is enabled.
        if (preload && preloadEvent == null)
            preload();
    } catch (CLBuildException ex) {
        throw new RuntimeException(ex);
    }
}

// Header only — method body continues beyond this chunk.
private synchronized void readLastOutputData() {
/**
 * Enables or disables background preloading.
 * If true, a new batch of parallel random numbers is automatically precomputed in
 * background as soon as one starts to consume numbers from the current batch
 * (this gives faster random numbers, at the risk of computing more values than needed).
 * @param preload whether batches should be precomputed in the background
 * @throws CLBuildException if enqueuing the generation kernel fails
 */
public synchronized void setPreload(boolean preload) throws CLBuildException {
    this.preload = preload;
    if (preload && preloadEvent == null) {
        if (lastData == null) {
            // Nothing generated yet: launch the first batch immediately.
            preloadEvent = randomProgram.gen_numbers(queue, seeds, output, globalWorkSizes, null);
        } else if (consumedInts > 0) {
            // Current batch already partially consumed: start computing the next one.
            preload();
        }
    }
}

// Header only — method body continues beyond this chunk.
private synchronized CLEvent preload() throws CLBuildException {
// Presumably fills the per-work-item seed buffer from the base seed — confirm
// against initSeeds; the enclosing method is not visible in this chunk.
initSeeds(seedsBuf, seed);
// Fragment of a warm-up + timing routine; the enclosing method (declaring
// queue, warmupSize, testSize, res, random, start, stat) is not visible in this chunk.
// Warm-up: create a small generator, print seeds, and pull a few batches.
ParallelRandom demo = new ParallelRandom(queue, warmupSize, System.currentTimeMillis());
println(demo.getSeeds().read(queue));
Pointer<Integer> b = demo.next();
println(b);
b = demo.next();
println(b);
b = demo.next();
println(b);
demo.doNext();
res |= random.nextInt(); // presumably keeps the JIT from discarding warm-up work — confirm
demo.getQueue().finish(); // drain the queue so the timed run below starts from idle
long time = System.nanoTime() - start;
// Timed run: fresh generator at the real test size, one generation pass.
demo = new ParallelRandom(queue, testSize, System.currentTimeMillis());
b = demo.next();
start = System.nanoTime();
demo.doNext();
demo.getQueue().finish();
time = System.nanoTime() - start;
stat.add(time);
/**
 * Copies the next {@link ParallelRandom#getParallelSize()} random integers into the
 * caller-supplied buffer.
 * @param output destination pointer the freshly generated integers are read into
 */
public synchronized void next(Pointer<Integer> output) {
    // Kick off generation of the next batch, then perform a blocking read
    // that waits on the generation event before copying into the destination.
    final CLEvent generated = doNext();
    this.output.read(queue, output, true, generated);
}
/** * Returns a direct NIO buffer containing the next {@link ParallelRandom#getParallelSize() } random integers.<br> * This buffer is read only and will only be valid until any of the "next" method is called again. * @return output buffer of capacity ; see {@link ParallelRandom#getParallelSize() } */ public synchronized Pointer<Integer> next() { CLEvent evt = doNext(); //queue.finish(); evt = null; //return outputBuffer; //return (mappedOutputBuffer = output.map(queue, MapFlags.Read, evt)).asReadOnlyBuffer(); return output.read(queue, evt); }
/**
 * Returns the next random float, built by masking the next random integer
 * and scaling it down by the class's float divisor.
 */
public float nextFloat() {
    // Mask first, then scale into float range via the divisor field.
    return (float) ((floatMask & nextInt()) / floatDivid);
}