/**
 * Starts this provider: brings up the underlying core store provider, optionally
 * registers a datastore RPC service on the fabric (so remote nodes can reach this
 * store), builds all stores discovered via classpath scan, and finally runs crash
 * recovery. Recovery must happen only after the stores exist.
 *
 * @throws Exception if the core store provider fails to start
 * @throws DatastoreException if the RPC service cannot be registered
 */
@Override
public void start() throws Exception {
  logger.info("Starting LocalKVStoreProvider");
  coreStoreProvider.start();
  if (fabricService != null) {
    // Fabric is available: expose this node's store over RPC.
    final DefaultDataStoreRpcHandler rpcHandler = new LocalDataStoreRpcHandler(hostName, coreStoreProvider);
    final NodeEndpoint thisNode = NodeEndpoint.newBuilder()
        .setAddress(hostName)
        .setFabricPort(fabricService.get().getPort())
        .build();
    try {
      // DatastoreRpcService registers itself with fabric
      //noinspection ResultOfObjectAllocationIgnored
      new DatastoreRpcService(DirectProvider.wrap(thisNode), fabricService.get(), allocator, rpcHandler);
    } catch (RpcException e) {
      throw new DatastoreException("Failed to start rpc service", e);
    }
  }
  // Build every store found on the classpath scan, delegating creation to this provider.
  stores = StoreLoader.buildStores(scan, new StoreBuildingFactory() {
    @Override
    public <K, V> StoreBuilder<K, V> newStore() {
      return LocalKVStoreProvider.this.newStore();
    }
  });
  // recover after the stores are built
  coreStoreProvider.recoverIfPreviouslyCrashed();
  logger.info("LocalKVStoreProvider is up");
}
/**
 * Builds one minor fragment endpoint per fragment, each bound to a distinct
 * synthetic address ("a_0", "a_1", ...) on fabric port 1.
 */
public List<MinorFragmentEndpoint> getEndpoints() {
  final List<MinorFragmentEndpoint> endpoints = new ArrayList<MinorFragmentEndpoint>();
  for (int id = 0; id < NUM_FRAGMENTS; id++) {
    final NodeEndpoint node = NodeEndpoint.newBuilder()
        .setAddress(String.format("a_%d", id))
        .setFabricPort(1)
        .build();
    endpoints.add(new MinorFragmentEndpoint(id, node));
  }
  return endpoints;
}
/**
 * Verifies the round-robin sender does not leak buffers: the mocked tunnel
 * releases every buffer of every batch it receives, so any unreleased
 * allocation surfaces when the operator and generator close.
 */
@Test
public void checkMemoryLeak() throws Exception {
  RoundRobinSender sender = new RoundRobinSender(1, null,
      Arrays.asList(
          new MinorFragmentEndpoint(1, NodeEndpoint.newBuilder().setAddress("a").setFabricPort(1).build()),
          new MinorFragmentEndpoint(2, NodeEndpoint.newBuilder().setAddress("b").setFabricPort(2).build())),
      getSchema());

  // Tunnel mock that frees all buffers of each outgoing batch.
  final AccountingExecTunnel tunnel = mock(AccountingExecTunnel.class);
  doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      final FragmentWritableBatch outgoing = (FragmentWritableBatch) invocation.getArguments()[0];
      for (ByteBuf buffer : outgoing.getBuffers()) {
        buffer.release();
      }
      return null;
    }
  }).when(tunnel).sendRecordBatch(any(FragmentWritableBatch.class));

  final TunnelProvider provider = mock(TunnelProvider.class);
  when(provider.getExecTunnel(any(NodeEndpoint.class))).thenReturn(tunnel);

  try (RoundRobinOperator op = newOperator(RoundRobinOperator.class, sender, DEFAULT_BATCH, provider);
       TpchGenerator generator = TpchGenerator.singleGenerator(TpchTable.NATION, 0.1, getTestAllocator())) {
    op.setup(generator.getOutput());
    generator.next(DEFAULT_BATCH);
    op.consumeData(generator.next(DEFAULT_BATCH));
    op.noMoreToConsume();
  }
}
/**
 * Verifies the broadcast sender does not leak buffers: the mocked tunnel
 * releases every buffer of every batch it receives, so any unreleased
 * allocation surfaces when the operator and generator close.
 */
@Test
public void checkMemoryLeak() throws Exception {
  BroadcastSender sender = new BroadcastSender(1, null,
      Arrays.asList(
          new MinorFragmentEndpoint(1, NodeEndpoint.newBuilder().setAddress("a").setFabricPort(1).build()),
          new MinorFragmentEndpoint(2, NodeEndpoint.newBuilder().setAddress("b").setFabricPort(2).build())),
      getSchema());

  // Tunnel mock that frees all buffers of each outgoing batch.
  final AccountingExecTunnel tunnel = mock(AccountingExecTunnel.class);
  doAnswer(new Answer<Void>() {
    @Override
    public Void answer(InvocationOnMock invocation) throws Throwable {
      final FragmentWritableBatch outgoing = (FragmentWritableBatch) invocation.getArguments()[0];
      for (ByteBuf buffer : outgoing.getBuffers()) {
        buffer.release();
      }
      return null;
    }
  }).when(tunnel).sendRecordBatch(any(FragmentWritableBatch.class));

  final TunnelProvider provider = mock(TunnelProvider.class);
  when(provider.getExecTunnel(any(NodeEndpoint.class))).thenReturn(tunnel);

  try (BroadcastOperator op = newOperator(BroadcastOperator.class, sender, DEFAULT_BATCH, provider);
       TpchGenerator generator = TpchGenerator.singleGenerator(TpchTable.NATION, 0.1, getTestAllocator())) {
    op.setup(generator.getOutput());
    generator.next(DEFAULT_BATCH);
    op.consumeData(generator.next(DEFAULT_BATCH));
    op.noMoreToConsume();
  }
}
setFabricPort(other.getFabricPort());
/**
 * Brings up a full local/remote KV-store pair for tests: two fabric services on
 * fixed ports, a local KV store provider backed by a temp folder, and a remote
 * KV store provider pointed at the local fabric endpoint. Each service is started
 * before anything that depends on it.
 *
 * @throws Exception if any service fails to start
 */
@Override
void initProvider() throws Exception {
  allocator = new RootAllocator(20 * 1024 * 1024);
  pool = new CloseableThreadPool("test-remoteocckvstore");
  // Local side: fabric + KV store provider writing to the temp folder.
  localFabricService = new FabricServiceImpl(HOSTNAME, 45678, true, THREAD_COUNT, allocator, RESERVATION, MAX_ALLOCATION, TIMEOUT, pool);
  localFabricService.start();
  remoteFabricService = new FabricServiceImpl(HOSTNAME, 45679, true, THREAD_COUNT, allocator, RESERVATION, MAX_ALLOCATION, TIMEOUT, pool);
  remoteFabricService.start();
  localKVStoreProvider = new LocalKVStoreProvider(DremioTest.CLASSPATH_SCAN_RESULT,
      DirectProvider.wrap(localFabricService), allocator, HOSTNAME,
      tmpFolder.getRoot().toString(), true, true, true, false);
  localKVStoreProvider.start();
  // Remote side: its endpoint provider points at the *local* fabric port, so the
  // remote provider proxies to the local store over RPC.
  remoteKVStoreProvider = new RemoteKVStoreProvider(
      DremioTest.CLASSPATH_SCAN_RESULT,
      DirectProvider.wrap(NodeEndpoint.newBuilder()
          .setAddress(HOSTNAME)
          .setFabricPort(localFabricService.getPort())
          .build()),
      DirectProvider.wrap(remoteFabricService), allocator, HOSTNAME);
  remoteKVStoreProvider.start();
}
/**
 * Creates the KV store provider under test: starts local and remote fabric
 * services on fixed ports, a local (in-memory, path {@code null}) KV store
 * provider, and a remote provider that reaches the local one through the local
 * fabric endpoint. Returns the remote provider, which is what the tests exercise.
 *
 * @return the started remote KV store provider
 * @throws Exception if any service fails to start
 */
@Override
KVStoreProvider createKKStoreProvider() throws Exception {
  allocator = new RootAllocator(20 * 1024 * 1024);
  pool = new CloseableThreadPool("test-remoteocckvstore");
  localFabricService = new FabricServiceImpl(HOSTNAME, 45678, true, THREAD_COUNT, allocator, RESERVATION, MAX_ALLOCATION, TIMEOUT, pool);
  localFabricService.start();
  remoteFabricService = new FabricServiceImpl(HOSTNAME, 45679, true, THREAD_COUNT, allocator, RESERVATION, MAX_ALLOCATION, TIMEOUT, pool);
  remoteFabricService.start();
  localKVStoreProvider = new LocalKVStoreProvider(DremioTest.CLASSPATH_SCAN_RESULT,
      DirectProvider.<FabricService>wrap(localFabricService), allocator, HOSTNAME,
      null, true, true, true, false);
  localKVStoreProvider.start();
  // The remote provider's endpoint points at the local fabric port so it proxies
  // to the local store over RPC.
  remoteKVStoreProvider = new RemoteKVStoreProvider(
      DremioTest.CLASSPATH_SCAN_RESULT,
      DirectProvider.wrap(NodeEndpoint.newBuilder()
          .setAddress(HOSTNAME)
          .setFabricPort(localFabricService.getPort())
          .build()),
      DirectProvider.<FabricService>wrap(remoteFabricService), allocator, HOSTNAME);
  remoteKVStoreProvider.start();
  return remoteKVStoreProvider;
}
/**
 * Checks that a dead node does not trigger a successful query notification
 * when that node was managing the last major fragment (see DX-10956).
 */
@Test
public void testNodeDead() {
  InOrder inOrder = Mockito.inOrder(completionListener);
  AttemptObservers observers = AttemptObservers.of(observer);
  QueryManager queryManager = new QueryManager(queryId, context, null, completionListener,
      new Pointer<>(), observers, true, true, catalog);

  // Single fragment assigned to the endpoint that will be reported dead.
  final NodeEndpoint endpoint = NodeEndpoint.newBuilder().setAddress("host1").setFabricPort(12345).build();
  PlanFragment fragment = PlanFragment.newBuilder()
      .setAssignment(endpoint)
      .setHandle(FragmentHandle.newBuilder().setMajorFragmentId(0).setQueryId(queryId).build())
      .build();

  ExecutionPlan executionPlan = new ExecutionPlan(new Screen(null), 0, Collections.singletonList(fragment));
  observers.planCompleted(executionPlan);

  // Notify node is dead
  queryManager.getNodeStatusListener().nodesUnregistered(ImmutableSet.of(endpoint));

  // Ideally, we should not even call succeeded...
  inOrder.verify(completionListener).failed(any(Exception.class));
  inOrder.verify(completionListener).succeeded();
}
/**
 * Creates the KV store provider under test for indexed-store tests: starts local
 * and remote fabric services on fixed ports, a local (in-memory, path
 * {@code null}) KV store provider, and a remote provider reaching the local one
 * through the local fabric endpoint. Returns the remote provider.
 *
 * @return the started remote KV store provider
 * @throws Exception if any service fails to start
 */
@Override
KVStoreProvider createKKStoreProvider() throws Exception {
  allocator = new RootAllocator(20 * 1024 * 1024);
  pool = new CloseableThreadPool("test-remoteindexedkvstore");
  localFabricService = new FabricServiceImpl(HOSTNAME, 45678, true, THREAD_COUNT, allocator, RESERVATION, MAX_ALLOCATION, TIMEOUT, pool);
  localFabricService.start();
  final Provider<FabricService> fab = () -> localFabricService;
  remoteFabricService = new FabricServiceImpl(HOSTNAME, 45679, true, THREAD_COUNT, allocator, RESERVATION, MAX_ALLOCATION, TIMEOUT, pool);
  remoteFabricService.start();
  final Provider<FabricService> rfab = () -> remoteFabricService;
  localKVStoreProvider = new LocalKVStoreProvider(DremioTest.CLASSPATH_SCAN_RESULT,
      fab, allocator, HOSTNAME, null, true, true, true, false);
  localKVStoreProvider.start();
  // The remote provider's endpoint points at the local fabric port so it proxies
  // to the local store over RPC.
  remoteKVStoreProvider = new RemoteKVStoreProvider(
      DremioTest.CLASSPATH_SCAN_RESULT,
      DirectProvider.wrap(NodeEndpoint.newBuilder()
          .setAddress(HOSTNAME)
          .setFabricPort(localFabricService.getPort())
          .build()),
      rfab, allocator, HOSTNAME);
  remoteKVStoreProvider.start();
  return remoteKVStoreProvider;
}
/**
 * Parallelizes the physical plan read from {@code fragmentFile} across
 * {@code bitCount} fabricated nodes and asserts the resulting fragment count.
 */
private void print(String fragmentFile, int bitCount, int expectedFragmentCount) throws Exception {
  System.out.println(String.format(
      "=================Building plan fragments for [%s]. Allowing %d total Nodes.==================",
      fragmentFile, bitCount));

  PhysicalPlanReader planReader = PhysicalPlanReaderTestFactory.defaultPhysicalPlanReader(DEFAULT_SABOT_CONFIG, CLASSPATH_SCAN_RESULT);
  Fragment fragmentRoot = getRootFragment(planReader, fragmentFile);
  SimpleParallelizer parallelizer = new SimpleParallelizer(1000 * 1000, 5, 10, 1.2, AbstractAttemptObserver.NOOP, true, 1.5d);

  // Fabricate bitCount endpoints on consecutive ports; the first plays the
  // role of the local (foreman) node.
  List<NodeEndpoint> endpoints = Lists.newArrayList();
  NodeEndpoint localBit = null;
  for (int index = 0; index < bitCount; index++) {
    NodeEndpoint candidate = NodeEndpoint.newBuilder().setAddress("localhost").setFabricPort(1234 + index).build();
    if (localBit == null) {
      localBit = candidate;
    }
    endpoints.add(candidate);
  }

  final QueryContextInformation queryContextInfo = Utilities.createQueryContextInfo("dummySchemaName");
  List<PlanFragment> qwu = parallelizer.getFragments(
      new OptionList(), localBit, QueryId.getDefaultInstance(), endpoints, planReader, fragmentRoot,
      UserSession.Builder.newBuilder()
          .withCredentials(UserBitShared.UserCredentials.newBuilder().setUserName("foo").build())
          .build(),
      queryContextInfo, null);

  assertEquals(expectedFragmentCount,
      qwu.size() + 1 /* root fragment is not part of the getFragments() list */);
}
/**
 * Verifies that resource scheduling details (queue id and queue name) passed to
 * the observer end up in the query profile produced by the query manager.
 */
@Test
public void testResourceSchedulingInProfile() throws Exception {
  AttemptObservers observers = AttemptObservers.of(observer);
  final NodeEndpoint endpoint = NodeEndpoint.newBuilder().setAddress("host1").setFabricPort(12345).build();
  when(context.getCurrentEndpoint()).thenReturn(endpoint);
  QueryManager queryManager = new QueryManager(queryId, context, null, completionListener,
      new Pointer<>(), observers, true, true, catalog);

  // Feed a scheduling decision through the observer chain.
  ResourceSchedulingDecisionInfo result = new ResourceSchedulingDecisionInfo();
  result.setQueueId("abcd");
  result.setQueueName("queue.abcd");
  observers.resourcesScheduled(result);

  UserBitShared.QueryProfile queryProfile = queryManager.getQueryProfile("my description",
      UserBitShared.QueryResult.QueryState.RUNNING, null, "some reason");
  assertNotNull(queryProfile.getResourceSchedulingProfile());
  assertEquals("abcd", queryProfile.getResourceSchedulingProfile().getQueueId());
  assertEquals("queue.abcd", queryProfile.getResourceSchedulingProfile().getQueueName());
}
}
/**
 * Builds a {@link CoordinationProtos.NodeEndpoint} with the given address and fabric port.
 * Note: the redundant {@code final} modifier was removed — {@code private static}
 * methods cannot be overridden, so {@code final} has no effect.
 *
 * @param address the node address
 * @param port the fabric port
 * @return the constructed endpoint
 */
private static CoordinationProtos.NodeEndpoint newNodeEndpoint(String address, int port) {
  return CoordinationProtos.NodeEndpoint.newBuilder().setAddress(address).setFabricPort(port).build();
}
/**
 * Stands up a self-contained PDFS service: child allocator, thread pool, a fabric
 * service on port 9970, an endpoint advertising this node's role, and finally the
 * PDFS service plus a file system created from it.
 *
 * @param allocator parent allocator; a child scoped to {@code name} is created
 * @param nodeProvider supplies the set of known node endpoints
 * @param mode whether this node serves data (executor role) or is client-only
 * @param name used for the child allocator and the thread pool
 * @throws Exception if the fabric or PDFS service fails to start
 */
public ServiceHolder(BufferAllocator allocator, Provider<Iterable<NodeEndpoint>> nodeProvider,
    PDFSMode mode, String name) throws Exception {
  this.allocator = allocator.newChildAllocator(name, 0, Long.MAX_VALUE);
  pool = new CloseableThreadPool(name);
  fabric = new FabricServiceImpl(HOSTNAME, 9970, true, THREAD_COUNT, this.allocator, RESERVATION, MAX_ALLOCATION, TIMEOUT, pool);
  fabric.start();

  // Advertise the executor role only when this holder runs in DATA mode.
  endpoint = NodeEndpoint.newBuilder()
      .setAddress(fabric.getAddress()).setFabricPort(fabric.getPort())
      .setRoles(Roles.newBuilder().setJavaExecutor(mode == PDFSMode.DATA))
      .build();

  service = new PDFSService(DirectProvider.wrap((FabricService) fabric), DirectProvider.wrap(endpoint),
      nodeProvider, DremioTest.DEFAULT_SABOT_CONFIG, this.allocator, mode);
  service.start();

  fileSystem = service.createFileSystem();
}
/**
 * Builds a {@link NodeEndpoint} with the given address and fabric port.
 * Note: the redundant {@code final} modifier was removed — {@code private static}
 * methods cannot be overridden, so {@code final} has no effect.
 *
 * @param address the node address
 * @param port the fabric port
 * @return the constructed endpoint
 */
private static NodeEndpoint newNodeEndpoint(String address, int port) {
  return NodeEndpoint.newBuilder().setAddress(address).setFabricPort(port).build();
}
/**
 * Builds a {@link NodeEndpoint} with the given address and fabric port.
 * Note: the redundant {@code final} modifier was removed — {@code private static}
 * methods cannot be overridden, so {@code final} has no effect.
 *
 * @param address the node address
 * @param port the fabric port
 * @return the constructed endpoint
 */
private static NodeEndpoint newNodeEndpoint(String address, int port) {
  return NodeEndpoint.newBuilder().setAddress(address).setFabricPort(port).build();
}
}
/**
 * Returns an endpoint for {@code host} on the fixed test fabric port 1234.
 */
private static NodeEndpoint getEndpoint(String host) {
  final NodeEndpoint built = NodeEndpoint.newBuilder()
      .setAddress(host)
      .setFabricPort(1234)
      .build();
  return built;
}
/**
 * Wires up the PDFS protocol on the shared fabric service and creates a
 * {@link RemoteNodeFileSystem} that talks to it via a {@code sabot://} URI, so
 * each test starts with a ready-to-use remote file system.
 *
 * @throws IOException if the file system cannot be initialized
 */
@Before
public void setUpPDFSService() throws IOException {
  NodeEndpoint endpoint = NodeEndpoint.newBuilder()
      .setAddress(fabric.getAddress()).setFabricPort(fabric.getPort())
      .build();
  // allowLocalAccess = true — NOTE(review): last boolean appears to enable local
  // access; confirm against PDFSProtocol.newInstance.
  PDFSProtocol pdfsProtocol = PDFSProtocol.newInstance(endpoint, DremioTest.DEFAULT_SABOT_CONFIG, allocator, true);
  FabricRunnerFactory factory = fabric.registerProtocol(pdfsProtocol);
  sabotFS = new RemoteNodeFileSystem(factory.getCommandRunner(fabric.getAddress(), fabric.getPort()), allocator);
  sabotFS.initialize(URI.create(format("sabot://%s:%d", fabric.getAddress(), fabric.getPort())),
      new Configuration(false));
}
/**
 * Create a {@link com.dremio.exec.dfs.proto.beans.NodeEndpoint}
 * @param address the address
 * @param port the fabric port
 * @return the constructed endpoint
 */
private static NodeEndpoint newNodeEndpoint(String address, int port) {
  return NodeEndpoint.newBuilder().setAddress(address).setFabricPort(port).build();
}
/**
 * Create a {@link com.dremio.exec.proto.beans.NodeEndpoint} that advertises the
 * java-executor role.
 * @param address the address
 * @param port the fabric port
 * @return the constructed endpoint
 */
private static NodeEndpoint newNodeEndpoint(String address, int port) {
  return NodeEndpoint.newBuilder().setAddress(address).setFabricPort(port).setRoles(Roles.newBuilder().setJavaExecutor(true)).build();
}
/**
 * Create a {@link com.dremio.exec.dfs.proto.beans.NodeEndpoint}
 * @param address the address
 * @param port the fabric port
 * @return the constructed endpoint
 */
private static NodeEndpoint newNodeEndpoint(String address, int port) {
  return NodeEndpoint.newBuilder().setAddress(address).setFabricPort(port).build();
}