/**
 * Gets the set of resource information for classes that belong to the Spark Streaming core.
 * It excludes any Spark streaming extensions, such as Kafka or Flume. They need to be excluded
 * since they are not part of the Spark distribution and should be loaded from the user program
 * ClassLoader. This filtering is needed for unit-testing because in unit-tests those extension
 * classes are loadable from the system classloader, causing the same classes to be loaded
 * through different classloaders.
 *
 * @return the cached (possibly empty, never {@code null}) set of Spark Streaming core resources
 */
private Set<ClassPath.ResourceInfo> getSparkStreamingResources() {
  // NOTE(review): the original used double-checked locking, but the cache field does not appear
  // to be volatile, which makes DCL unsafe under the Java Memory Model (a reader could observe
  // a partially published value). Doing all work under the lock is correct regardless of the
  // field declaration, and the cost is negligible for a one-time cached classpath scan.
  synchronized (this) {
    if (sparkStreamingResources == null) {
      try {
        sparkStreamingResources =
          ClassPathResources.getClassPathResources(getClass().getClassLoader(), StreamingContext.class);
      } catch (IOException e) {
        // Best effort: cache an empty set so we do not rescan (and re-fail) on every call.
        LOG.warn("Failed to find resources for Spark StreamingContext.", e);
        sparkStreamingResources = Collections.emptySet();
      }
    }
    return sparkStreamingResources;
  }
}
};
/**
 * Gets the set of resource information for classes that belong to the Spark Streaming core.
 * It excludes any Spark streaming extensions, such as Kafka or Flume. They need to be excluded
 * since they are not part of the Spark distribution and should be loaded from the user program
 * ClassLoader. This filtering is needed for unit-testing because in unit-tests those extension
 * classes are loadable from the system classloader, causing the same classes to be loaded
 * through different classloaders.
 *
 * @return the cached (possibly empty, never {@code null}) set of Spark Streaming core resources
 */
private Set<ClassPath.ResourceInfo> getSparkStreamingResources() {
  // NOTE(review): the original used double-checked locking, but the cache field does not appear
  // to be volatile, which makes DCL unsafe under the Java Memory Model (a reader could observe
  // a partially published value). Doing all work under the lock is correct regardless of the
  // field declaration, and the cost is negligible for a one-time cached classpath scan.
  synchronized (this) {
    if (sparkStreamingResources == null) {
      try {
        sparkStreamingResources =
          ClassPathResources.getClassPathResources(getClass().getClassLoader(), StreamingContext.class);
      } catch (IOException e) {
        // Best effort: cache an empty set so we do not rescan (and re-fail) on every call.
        LOG.warn("Failed to find resources for Spark StreamingContext.", e);
        sparkStreamingResources = Collections.emptySet();
      }
    }
    return sparkStreamingResources;
  }
}
};
/**
 * Gets the set of resource information for classes that belong to the Spark Streaming core.
 * It excludes any Spark streaming extensions, such as Kafka or Flume. They need to be excluded
 * since they are not part of the Spark distribution and should be loaded from the user program
 * ClassLoader. This filtering is needed for unit-testing because in unit-tests those extension
 * classes are loadable from the system classloader, causing the same classes to be loaded
 * through different classloaders.
 *
 * @return the cached (possibly empty, never {@code null}) set of Spark Streaming core resources
 */
private Set<ClassPath.ResourceInfo> getSparkStreamingResources() {
  // NOTE(review): the original used double-checked locking, but the cache field does not appear
  // to be volatile, which makes DCL unsafe under the Java Memory Model (a reader could observe
  // a partially published value). Doing all work under the lock is correct regardless of the
  // field declaration, and the cost is negligible for a one-time cached classpath scan.
  synchronized (this) {
    if (sparkStreamingResources == null) {
      try {
        sparkStreamingResources =
          ClassPathResources.getClassPathResources(getClass().getClassLoader(), StreamingContext.class);
      } catch (IOException e) {
        // Best effort: cache an empty set so we do not rescan (and re-fail) on every call.
        LOG.warn("Failed to find resources for Spark StreamingContext.", e);
        sparkStreamingResources = Collections.emptySet();
      }
    }
    return sparkStreamingResources;
  }
}
};
/** * Returns a Set of resources name that are visible through the cdap-api module as well as Hadoop classes. * This includes all classes+resources in cdap-api plus all classes+resources that cdap-api * depends on (for example, sl4j, gson, etc). */ private static Set<String> createBaseResources() throws IOException { // Everything should be traceable in the same ClassLoader of this class, which is the CDAP system ClassLoader ClassLoader classLoader = ProgramResources.class.getClassLoader(); // Gather resources information for cdap-api classes // Add everything in cdap-api as visible resources // Trace dependencies for cdap-api classes Set<String> result = ClassPathResources.getResourcesWithDependencies(classLoader, Application.class); // Gather resources for javax.ws.rs classes. They are not traceable from the api classes. Iterables.addAll(result, Iterables.transform(ClassPathResources.getClassPathResources(classLoader, Path.class), ClassPathResources.RESOURCE_INFO_TO_RESOURCE_NAME)); // Gather Hadoop classes and resources getResources(ClassPath.from(classLoader, JAR_ONLY_URI), HADOOP_PACKAGES, EXCLUDE_PACKAGES, ClassPathResources.RESOURCE_INFO_TO_RESOURCE_NAME, result); return Collections.unmodifiableSet(result); }
/** * Returns a Set of resources name that are visible through the cdap-api module as well as Hadoop classes. * This includes all classes+resources in cdap-api plus all classes+resources that cdap-api * depends on (for example, sl4j, gson, etc). */ private static Set<String> createBaseResources() throws IOException { // Everything should be traceable in the same ClassLoader of this class, which is the CDAP system ClassLoader ClassLoader classLoader = ProgramResources.class.getClassLoader(); // Gather resources information for cdap-api classes // Add everything in cdap-api as visible resources // Trace dependencies for cdap-api classes Set<String> result = ClassPathResources.getResourcesWithDependencies(classLoader, Application.class); // Gather resources for javax.ws.rs classes. They are not traceable from the api classes. Iterables.addAll(result, Iterables.transform(ClassPathResources.getClassPathResources(classLoader, Path.class), ClassPathResources.RESOURCE_INFO_TO_RESOURCE_NAME)); // Gather Hadoop classes and resources getResources(ClassPath.from(classLoader, JAR_ONLY_URI), HADOOP_PACKAGES, EXCLUDE_PACKAGES, ClassPathResources.RESOURCE_INFO_TO_RESOURCE_NAME, result); return Collections.unmodifiableSet(result); }