final String port = connectInfo.length==2?connectInfo[1]:config.getString(ExecConstants.INITIAL_USER_PORT); endpoint = NodeEndpoint.newBuilder() .setAddress(connectInfo[0]) .setUserPort(Integer.parseInt(port)) .build(); } else { if (clusterCoordinator == null) {
@Test public void testQueueingAllocations() throws Exception { final CoordinationProtos.NodeEndpoint nodeEndpoint = CoordinationProtos.NodeEndpoint.newBuilder() .setAddress("host1") .setFabricPort(1234) .setUserPort(2345) .setAvailableCores(3) .setMaxDirectMemory(8 * 1024) .setRoles(ClusterCoordinator.Role.toEndpointRoles(Sets.newHashSet(ClusterCoordinator.Role.EXECUTOR))) .build();
.setAddress(bogusAddress) .build(); copyAllocations2.add(resourceAllocator.createAllocation(bogusEndpoint, allocation.getMemory(), allocation.getMajorFragment())); } else {
.setAddress("host1") .setFabricPort(1234) .setUserPort(2345) .setRoles(ClusterCoordinator.Role.toEndpointRoles(Sets.newHashSet(ClusterCoordinator.Role.COORDINATOR))) .build(); .setAddress("host2") .setFabricPort(1235) .setUserPort(2346) .setRoles(ClusterCoordinator.Role.toEndpointRoles(Sets.newHashSet(ClusterCoordinator.Role.COORDINATOR))) .build(); .setAddress("host3") .setFabricPort(1236) .setUserPort(2347) .setRoles(ClusterCoordinator.Role.toEndpointRoles(Sets.newHashSet(ClusterCoordinator.Role.COORDINATOR))) .build();
List<CoordExecRPC.IncomingMinorFragment> list = new ArrayList<>(2); MinorFragmentEndpoint ep1 = mock(MinorFragmentEndpoint.class); when(ep1.getEndpoint()).thenReturn(CoordinationProtos.NodeEndpoint.newBuilder().setAddress("localhost").setFabricPort(12345).build()); MinorFragmentEndpoint ep2 = mock(MinorFragmentEndpoint.class); when(ep2.getEndpoint()).thenReturn(CoordinationProtos.NodeEndpoint.newBuilder().setAddress("localhost").setFabricPort(12345).build()); list.add(CoordExecRPC.IncomingMinorFragment.newBuilder().setEndpoint(ep1.getEndpoint()).setMinorFragment(ep1.getId()).build()); list.add(CoordExecRPC.IncomingMinorFragment.newBuilder().setEndpoint(ep2.getEndpoint()).setMinorFragment(ep2.getId()).build());
.setAddress("host1") .setFabricPort(1234) .setUserPort(2345) .setRoles(ClusterCoordinator.Role.toEndpointRoles(Sets.newHashSet(ClusterCoordinator.Role.COORDINATOR))) .build(); .setAddress("host2") .setFabricPort(1235) .setUserPort(2346) .setRoles(ClusterCoordinator.Role.toEndpointRoles(Sets.newHashSet(ClusterCoordinator.Role.COORDINATOR))) .build();
datasetConfig.setRecordSchema(schema.toByteString()); TableauMessageBodyGenerator generator = new TableauMessageBodyGenerator(configuration, NodeEndpoint.newBuilder().setAddress("foo").setUserPort(12345).build()); MultivaluedMap<String, Object> httpHeaders = new MultivaluedHashMap<>(); ByteArrayOutputStream baos = new ByteArrayOutputStream();
.setAddress(rpcBindAddress) .setUserPort(userport) .setFabricPort(fabric.getPort()) .setStartTime(System.currentTimeMillis()) .setMaxDirectMemory(VM.getMaxDirectMemory()) .setAvailableCores(VM.availableProcessors()) .setRoles(ClusterCoordinator.Role.toEndpointRoles(roles)); identityBuilder.setProvisionId(containerId); final NodeEndpoint identity = identityBuilder.build(); return new SabotContext( bootstrapContext.getDremioConfig(),
.setAddress("host1") .setFabricPort(1234) .setUserPort(2345) .setAvailableCores(3) .setMaxDirectMemory(8 * 1024) .setRoles(ClusterCoordinator.Role.toEndpointRoles(Sets.newHashSet(ClusterCoordinator.Role.EXECUTOR))) .build();
@Test public void testQueueingSemaphoreException() throws Exception { final CoordinationProtos.NodeEndpoint nodeEndpoint = CoordinationProtos.NodeEndpoint.newBuilder() .setAddress("host1") .setFabricPort(1234) .setUserPort(2345) .setAvailableCores(3) .setMaxDirectMemory(8 * 1024) .setRoles(ClusterCoordinator.Role.toEndpointRoles(Sets.newHashSet(ClusterCoordinator.Role.EXECUTOR))) .build();
.setAddress("host1") .setFabricPort(1234) .setUserPort(2345) .setAvailableCores(3) .setMaxDirectMemory(8 * 1024) .setRoles(ClusterCoordinator.Role.toEndpointRoles(Sets.newHashSet(ClusterCoordinator.Role.EXECUTOR))) .build(); .setAddress("host2") .setFabricPort(1234) .setUserPort(2345) .setAvailableCores(5) .setMaxDirectMemory(16 * 1024) .setRoles(ClusterCoordinator.Role.toEndpointRoles(Sets.newHashSet(ClusterCoordinator.Role.EXECUTOR))) .build(); .setAddress("host3") .setFabricPort(1234) .setUserPort(2345) .setAvailableCores(5) .setMaxDirectMemory(16 * 1024) .setRoles(ClusterCoordinator.Role.toEndpointRoles(Sets.newHashSet(ClusterCoordinator.Role.EXECUTOR))) .build();
return; case 1: builder.setAddress(input.readString()); break; case 2: builder.setUserPort(input.readInt32()); break; case 3: builder.setFabricPort(input.readInt32()); break; case 5: builder.setRoles(input.mergeObject(com.dremio.exec.proto.CoordinationProtos.Roles.newBuilder(), com.dremio.exec.proto.SchemaCoordinationProtos.Roles.MERGE)); builder.setStartTime(input.readInt64()); break; case 102: builder.setProvisionId(input.readString()); break; case 103: builder.setMaxDirectMemory(input.readInt64()); break; case 104: builder.setAvailableCores(input.readInt32()); break; default:
DatasetConfig datasetConfig = new DatasetConfig(); datasetConfig.setFullPathList(path.toPathList()); TableauMessageBodyGenerator generator = new TableauMessageBodyGenerator(configuration, NodeEndpoint.newBuilder().setAddress("foo").setUserPort(12345).build()); MultivaluedMap<String, Object> httpHeaders = new MultivaluedHashMap<>(); ByteArrayOutputStream baos = new ByteArrayOutputStream();
/**
 * Pushes a generated TPC-H batch through a {@link BroadcastOperator} whose outgoing
 * tunnel immediately releases every buffer it receives; the test harness allocator
 * check fails afterwards if the operator leaked any buffer.
 */
@Test
public void checkMemoryLeak() throws Exception {
  final List<MinorFragmentEndpoint> destinations = Arrays.asList(
      new MinorFragmentEndpoint(1, NodeEndpoint.newBuilder().setAddress("a").setFabricPort(1).build()),
      new MinorFragmentEndpoint(2, NodeEndpoint.newBuilder().setAddress("b").setFabricPort(2).build()));
  BroadcastSender sender = new BroadcastSender(1, null, destinations, getSchema());

  // Tunnel stub that releases each buffer handed to it, mirroring what the real
  // transport does once a batch has been sent on the wire.
  final AccountingExecTunnel tunnel = mock(AccountingExecTunnel.class);
  doAnswer(invocation -> {
    final FragmentWritableBatch batch = (FragmentWritableBatch) invocation.getArguments()[0];
    for (ByteBuf buf : batch.getBuffers()) {
      buf.release();
    }
    return null;
  }).when(tunnel).sendRecordBatch(any(FragmentWritableBatch.class));

  final TunnelProvider provider = mock(TunnelProvider.class);
  when(provider.getExecTunnel(any(NodeEndpoint.class))).thenReturn(tunnel);

  try (BroadcastOperator op = newOperator(BroadcastOperator.class, sender, DEFAULT_BATCH, provider);
       TpchGenerator g = TpchGenerator.singleGenerator(TpchTable.NATION, 0.1, getTestAllocator())) {
    op.setup(g.getOutput());
    g.next(DEFAULT_BATCH);
    op.consumeData(g.next(DEFAULT_BATCH));
    op.noMoreToConsume();
  }
}
bitField0_ |= 0x00000001; address_ = other.address_; onChanged(); setUserPort(other.getUserPort()); setFabricPort(other.getFabricPort()); mergeRoles(other.getRoles()); setStartTime(other.getStartTime()); onChanged(); setMaxDirectMemory(other.getMaxDirectMemory()); setAvailableCores(other.getAvailableCores()); this.mergeUnknownFields(other.getUnknownFields()); return this;
/**
 * Pushes a generated TPC-H batch through a {@link RoundRobinOperator} whose outgoing
 * tunnel immediately releases every buffer it receives; the test harness allocator
 * check fails afterwards if the operator leaked any buffer.
 */
@Test
public void checkMemoryLeak() throws Exception {
  final List<MinorFragmentEndpoint> destinations = Arrays.asList(
      new MinorFragmentEndpoint(1, NodeEndpoint.newBuilder().setAddress("a").setFabricPort(1).build()),
      new MinorFragmentEndpoint(2, NodeEndpoint.newBuilder().setAddress("b").setFabricPort(2).build()));
  RoundRobinSender sender = new RoundRobinSender(1, null, destinations, getSchema());

  // Tunnel stub that releases each buffer handed to it, mirroring what the real
  // transport does once a batch has been sent on the wire.
  final AccountingExecTunnel tunnel = mock(AccountingExecTunnel.class);
  doAnswer(invocation -> {
    final FragmentWritableBatch batch = (FragmentWritableBatch) invocation.getArguments()[0];
    for (ByteBuf buf : batch.getBuffers()) {
      buf.release();
    }
    return null;
  }).when(tunnel).sendRecordBatch(any(FragmentWritableBatch.class));

  final TunnelProvider provider = mock(TunnelProvider.class);
  when(provider.getExecTunnel(any(NodeEndpoint.class))).thenReturn(tunnel);

  try (RoundRobinOperator op = newOperator(RoundRobinOperator.class, sender, DEFAULT_BATCH, provider);
       TpchGenerator g = TpchGenerator.singleGenerator(TpchTable.NATION, 0.1, getTestAllocator())) {
    op.setup(g.getOutput());
    g.next(DEFAULT_BATCH);
    op.consumeData(g.next(DEFAULT_BATCH));
    op.noMoreToConsume();
  }
}
@Override public void start() throws Exception { logger.info("Starting LocalKVStoreProvider"); coreStoreProvider.start(); if (fabricService != null) { final DefaultDataStoreRpcHandler rpcHandler = new LocalDataStoreRpcHandler(hostName, coreStoreProvider); final NodeEndpoint thisNode = NodeEndpoint.newBuilder() .setAddress(hostName) .setFabricPort(fabricService.get().getPort()) .build(); try { // DatastoreRpcService registers itself with fabric //noinspection ResultOfObjectAllocationIgnored new DatastoreRpcService(DirectProvider.wrap(thisNode), fabricService.get(), allocator, rpcHandler); } catch (RpcException e) { throw new DatastoreException("Failed to start rpc service", e); } } stores = StoreLoader.buildStores(scan, new StoreBuildingFactory() { @Override public <K, V> StoreBuilder<K, V> newStore() { return LocalKVStoreProvider.this.newStore(); } }); // recover after the stores are built coreStoreProvider.recoverIfPreviouslyCrashed(); logger.info("LocalKVStoreProvider is up"); }
/**
 * Test fixture setup: brings up two fabric services on fixed ports, starts a local
 * KV store provider backed by a temp folder, then starts a remote provider that
 * addresses the local one through its node endpoint.
 */
@Override
void initProvider() throws Exception {
  allocator = new RootAllocator(20 * 1024 * 1024);
  pool = new CloseableThreadPool("test-remoteocckvstore");

  // One fabric endpoint backing the local (master) store, one for the remote proxy.
  // NOTE(review): fixed ports 45678/45679 — assumes they are free on the test host.
  localFabricService =
      new FabricServiceImpl(HOSTNAME, 45678, true, THREAD_COUNT, allocator, RESERVATION, MAX_ALLOCATION, TIMEOUT, pool);
  localFabricService.start();
  remoteFabricService =
      new FabricServiceImpl(HOSTNAME, 45679, true, THREAD_COUNT, allocator, RESERVATION, MAX_ALLOCATION, TIMEOUT, pool);
  remoteFabricService.start();

  localKVStoreProvider = new LocalKVStoreProvider(DremioTest.CLASSPATH_SCAN_RESULT,
      DirectProvider.wrap(localFabricService), allocator, HOSTNAME,
      tmpFolder.getRoot().toString(), true, true, true, false);
  localKVStoreProvider.start();

  // The remote provider reaches the local one via this endpoint.
  final NodeEndpoint localEndpoint = NodeEndpoint.newBuilder()
      .setAddress(HOSTNAME)
      .setFabricPort(localFabricService.getPort())
      .build();
  remoteKVStoreProvider = new RemoteKVStoreProvider(
      DremioTest.CLASSPATH_SCAN_RESULT,
      DirectProvider.wrap(localEndpoint),
      DirectProvider.wrap(remoteFabricService),
      allocator,
      HOSTNAME);
  remoteKVStoreProvider.start();
}
/**
 * Test fixture setup: starts local and remote fabric services on fixed ports, boots
 * an in-memory local KV store provider, then returns a started remote provider that
 * talks to the local one through its node endpoint.
 */
@Override
KVStoreProvider createKKStoreProvider() throws Exception {
  allocator = new RootAllocator(20 * 1024 * 1024);
  pool = new CloseableThreadPool("test-remoteocckvstore");

  // NOTE(review): fixed ports 45678/45679 — assumes they are free on the test host.
  localFabricService =
      new FabricServiceImpl(HOSTNAME, 45678, true, THREAD_COUNT, allocator, RESERVATION, MAX_ALLOCATION, TIMEOUT, pool);
  localFabricService.start();
  remoteFabricService =
      new FabricServiceImpl(HOSTNAME, 45679, true, THREAD_COUNT, allocator, RESERVATION, MAX_ALLOCATION, TIMEOUT, pool);
  remoteFabricService.start();

  // null path => in-memory local store.
  localKVStoreProvider = new LocalKVStoreProvider(DremioTest.CLASSPATH_SCAN_RESULT,
      DirectProvider.<FabricService>wrap(localFabricService), allocator, HOSTNAME,
      null, true, true, true, false);
  localKVStoreProvider.start();

  // The remote provider reaches the local one via this endpoint.
  final NodeEndpoint localEndpoint = NodeEndpoint.newBuilder()
      .setAddress(HOSTNAME)
      .setFabricPort(localFabricService.getPort())
      .build();
  remoteKVStoreProvider = new RemoteKVStoreProvider(
      DremioTest.CLASSPATH_SCAN_RESULT,
      DirectProvider.wrap(localEndpoint),
      DirectProvider.<FabricService>wrap(remoteFabricService),
      allocator,
      HOSTNAME);
  remoteKVStoreProvider.start();
  return remoteKVStoreProvider;
}
/**
 * Test fixture setup: starts local and remote fabric services on fixed ports, boots
 * an in-memory local KV store provider, then returns a started remote provider that
 * talks to the local one through its node endpoint.
 */
@Override
KVStoreProvider createKKStoreProvider() throws Exception {
  allocator = new RootAllocator(20 * 1024 * 1024);
  pool = new CloseableThreadPool("test-remoteindexedkvstore");

  // NOTE(review): fixed ports 45678/45679 — assumes they are free on the test host.
  localFabricService =
      new FabricServiceImpl(HOSTNAME, 45678, true, THREAD_COUNT, allocator, RESERVATION, MAX_ALLOCATION, TIMEOUT, pool);
  localFabricService.start();
  final Provider<FabricService> localFabricProvider = () -> localFabricService;

  remoteFabricService =
      new FabricServiceImpl(HOSTNAME, 45679, true, THREAD_COUNT, allocator, RESERVATION, MAX_ALLOCATION, TIMEOUT, pool);
  remoteFabricService.start();
  final Provider<FabricService> remoteFabricProvider = () -> remoteFabricService;

  // null path => in-memory local store.
  localKVStoreProvider = new LocalKVStoreProvider(DremioTest.CLASSPATH_SCAN_RESULT,
      localFabricProvider, allocator, HOSTNAME, null, true, true, true, false);
  localKVStoreProvider.start();

  // The remote provider reaches the local one via this endpoint.
  final NodeEndpoint localEndpoint = NodeEndpoint.newBuilder()
      .setAddress(HOSTNAME)
      .setFabricPort(localFabricService.getPort())
      .build();
  remoteKVStoreProvider = new RemoteKVStoreProvider(
      DremioTest.CLASSPATH_SCAN_RESULT,
      DirectProvider.wrap(localEndpoint),
      remoteFabricProvider,
      allocator,
      HOSTNAME);
  remoteKVStoreProvider.start();
  return remoteKVStoreProvider;
}