        propertyStoreSize / 2, propertyStoreSize / 2, 0 /*node labels left as 0 for now*/ );
importer.doImport( Inputs.input( nodes, relationships, IdMappers.actual(),
        Collectors.badCollector( badOutput, 0 ), estimates ) );
try
{
    importer.doImport( input );
    success = true;
}
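// A minimal sketch of the control flow presumably surrounding the fragment above;
// the `success` flag suggests a finally/reporting step, which is assumed here and
// not taken from the source:
boolean success = false;
try
{
    importer.doImport( input );
    success = true;
}
finally
{
    if ( !success )
    {
        // hypothetical: report or clean up when the import did not complete
        System.err.println( "Import did not complete successfully" );
    }
}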
private void createRandomData( int count ) throws Exception
{
    Config config = Config.defaults();
    RecordFormats recordFormats = RecordFormatSelector.selectForConfig( config, NullLogProvider.getInstance() );
    try ( RandomDataInput input = new RandomDataInput( count );
            JobScheduler jobScheduler = new ThreadPoolJobScheduler() )
    {
        // Import `count` randomly generated entities through the parallel batch importer
        BatchImporter importer = new ParallelBatchImporter( directory.databaseLayout(), fileSystemRule.get(),
                null /*page cache*/, DEFAULT, NullLogService.getInstance(), ExecutionMonitors.invisible(), EMPTY,
                config, recordFormats, NO_MONITOR, jobScheduler );
        importer.doImport( input );
    }
}
inserter.doImport( Inputs.input(
        nodes( nodeRandomSeed, NODE_COUNT, config.batchSize(), inputIdGenerator, groupDistribution ),
        relationships( relationshipRandomSeed, RELATIONSHIP_COUNT, config.batchSize(),
ImportTool.printOverview( dir, Collections.emptyList(), Collections.emptyList(), importConfig, System.out );
consumer.doImport( input );
@Test
public void shouldImportDataComingFromCsvFiles() throws Exception
{
    // GIVEN
    Config dbConfig = Config.builder().withSetting( db_timezone, LogTimeZone.SYSTEM.name() ).build();
    try ( JobScheduler scheduler = new ThreadPoolJobScheduler() )
    {
        BatchImporter importer = new ParallelBatchImporter( directory.databaseLayout(), fileSystemRule.get(),
                null, smallBatchSizeConfig(), NullLogService.getInstance(), invisible(),
                AdditionalInitialIds.EMPTY, dbConfig, RecordFormatSelector.defaultFormat(), NO_MONITOR, scheduler );
        List<InputEntity> nodeData = randomNodeData();
        List<InputEntity> relationshipData = randomRelationshipData( nodeData );

        // WHEN
        importer.doImport( csv( nodeDataAsFile( nodeData ), relationshipDataAsFile( relationshipData ),
                IdType.STRING, lowBufferSize( COMMAS ), silentBadCollector( 0 ) ) );

        // THEN
        verifyImportedData( nodeData, relationshipData );
    }
}
/**
 * There was this problem where some steps, in particular parallel CSV input parsing,
 * would hang the import entirely when they panicked.
 */
@Test
public void shouldExitAndThrowExceptionOnPanic() throws Exception
{
    try ( JobScheduler jobScheduler = new ThreadPoolJobScheduler() )
    {
        BatchImporter importer = new ParallelBatchImporter( directory.databaseLayout(), fs, null,
                Configuration.DEFAULT, NullLogService.getInstance(), ExecutionMonitors.invisible(),
                AdditionalInitialIds.EMPTY, Config.defaults(), StandardV3_0.RECORD_FORMATS, NO_MONITOR,
                jobScheduler );
        Iterable<DataFactory> nodeData =
                datas( data( NO_DECORATOR, fileAsCharReadable( nodeCsvFileWithBrokenEntries() ) ) );
        Input brokenCsvInput = new CsvInput( nodeData, defaultFormatNodeFileHeader(),
                datas(), defaultFormatRelationshipFileHeader(), IdType.ACTUAL,
                csvConfigurationWithLowBufferSize(),
                new BadCollector( NullOutputStream.NULL_OUTPUT_STREAM, 0, 0 ) );
        importer.doImport( brokenCsvInput );
        fail( "Should have failed properly" );
    }
    catch ( InputException e )
    {
        // THEN
        assertTrue( e.getCause() instanceof DataAfterQuoteException );
        // and we managed to shut down properly
    }
}
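// For contrast with the zero-tolerance BadCollector above: the Collectors.badCollector
// factory seen at other call sites in this file takes a tolerance argument. A sketch
// with a nonzero tolerance, which (as an assumption about the API) lets doImport log
// bad entries to badOutput and keep going up to that limit instead of failing fast:
Collector tolerantCollector = Collectors.badCollector( badOutput, 10_000 /*assumed limit*/ );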
inserter.doImport( Inputs.input(
        nodes( nodeRandomSeed, NODE_COUNT, inputIdGenerator, groups ),
        relationships( relationshipRandomSeed, RELATIONSHIP_COUNT, inputIdGenerator, groups ),
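// A condensed sketch of the pattern shared by all the call sites above: build a
// ParallelBatchImporter, assemble an Input, and hand it to doImport. Variable names
// such as storeDirectory, fs, nodes, relationships, badOutput and estimates are
// illustrative; the constructor and factory shapes are taken from the calls above.
try ( JobScheduler scheduler = new ThreadPoolJobScheduler() )
{
    BatchImporter importer = new ParallelBatchImporter( storeDirectory.databaseLayout(), fs,
            null /*page cache*/, Configuration.DEFAULT, NullLogService.getInstance(),
            ExecutionMonitors.invisible(), AdditionalInitialIds.EMPTY, Config.defaults(),
            RecordFormatSelector.defaultFormat(), NO_MONITOR, scheduler );
    Input input = Inputs.input( nodes, relationships, IdMappers.actual(),
            Collectors.badCollector( badOutput, 0 ), estimates );
    importer.doImport( input );
}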