// Register each distributed-cache artifact for upload: record its name and file name
// in the submit request body, and queue the local file as a binary upload.
for (Map.Entry<String, DistributedCache.DistributedCacheEntry> artifacts : jobGraph.getUserArtifacts().entrySet()) {
	artifactFileNames.add(new JobSubmitRequestBody.DistributedCacheFile(artifacts.getKey(), new Path(artifacts.getValue().filePath).getName()));
	filesToUpload.add(new FileUpload(Paths.get(artifacts.getValue().filePath), RestConstants.CONTENT_TYPE_BINARY));
}
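The loop above fills two collections that the snippet never declares; a minimal sketch of the declarations it presupposes, with the names taken from the loop body and the element types from Flink's REST submission classes:

// Assumed declarations for the collections the loop populates; not part of the original snippet.
Collection<JobSubmitRequestBody.DistributedCacheFile> artifactFileNames = new ArrayList<>();
Collection<FileUpload> filesToUpload = new ArrayList<>();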
@Test
public void testArtifactCompression() throws IOException {
	Path plainFile1 = tmp.newFile("plainFile1").toPath();
	Path plainFile2 = tmp.newFile("plainFile2").toPath();

	// give each directory some content so compression has something to package
	Path directory1 = tmp.newFolder("directory1").toPath();
	Files.createFile(directory1.resolve("containedFile1"));

	Path directory2 = tmp.newFolder("directory2").toPath();
	Files.createFile(directory2.resolve("containedFile2"));

	JobGraph jb = new JobGraph();

	final String executableFileName = "executableFile";
	final String nonExecutableFileName = "nonExecutableFile";
	final String executableDirName = "executableDir";
	final String nonExecutableDirName = "nonExecutableDir";

	Collection<Tuple2<String, DistributedCache.DistributedCacheEntry>> originalArtifacts = Arrays.asList(
		Tuple2.of(executableFileName, new DistributedCache.DistributedCacheEntry(plainFile1.toString(), true)),
		Tuple2.of(nonExecutableFileName, new DistributedCache.DistributedCacheEntry(plainFile2.toString(), false)),
		Tuple2.of(executableDirName, new DistributedCache.DistributedCacheEntry(directory1.toString(), true)),
		Tuple2.of(nonExecutableDirName, new DistributedCache.DistributedCacheEntry(directory2.toString(), false))
	);

	JobGraphGenerator.addUserArtifactEntries(originalArtifacts, jb);

	Map<String, DistributedCache.DistributedCacheEntry> submittedArtifacts = jb.getUserArtifacts();

	// plain files keep their flags and are not zipped; directories must have been zipped
	DistributedCache.DistributedCacheEntry executableFileEntry = submittedArtifacts.get(executableFileName);
	assertState(executableFileEntry, true, false);

	DistributedCache.DistributedCacheEntry nonExecutableFileEntry = submittedArtifacts.get(nonExecutableFileName);
	assertState(nonExecutableFileEntry, false, false);

	DistributedCache.DistributedCacheEntry executableDirEntry = submittedArtifacts.get(executableDirName);
	assertState(executableDirEntry, true, true);

	DistributedCache.DistributedCacheEntry nonExecutableDirEntry = submittedArtifacts.get(nonExecutableDirName);
	assertState(nonExecutableDirEntry, false, true);
}
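The test above calls an assertState helper that the snippet omits. A minimal sketch of what such a helper could look like, assuming the public filePath/isExecutable/isZipped fields of DistributedCache.DistributedCacheEntry and JUnit's static assert* imports:

// Hypothetical helper, not part of the original snippet: verifies that an artifact
// entry was registered and carries the expected executable/zipped flags.
private static void assertState(DistributedCache.DistributedCacheEntry entry, boolean isExecutable, boolean isZipped) {
	assertNotNull(entry);
	assertNotNull(entry.filePath);
	assertEquals(isExecutable, entry.isExecutable);
	assertEquals(isZipped, entry.isZipped);
}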
/**
 * Extracts all files required for the execution from the given {@link JobGraph} and uploads them using the
 * {@link BlobClient} from the given {@link SupplierWithException}.
 *
 * @param jobGraph jobgraph requiring files
 * @param clientSupplier supplier of blob client to upload files with
 * @throws FlinkException if the upload fails
 */
public static void extractAndUploadJobGraphFiles(JobGraph jobGraph, SupplierWithException<BlobClient, IOException> clientSupplier) throws FlinkException {
	List<Path> userJars = jobGraph.getUserJars();
	Collection<Tuple2<String, Path>> userArtifacts = jobGraph.getUserArtifacts().entrySet().stream()
		.map(entry -> Tuple2.of(entry.getKey(), new Path(entry.getValue().filePath)))
		.collect(Collectors.toList());

	uploadJobGraphFiles(jobGraph, userJars, userArtifacts, clientSupplier);
}
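A hedged usage sketch, not taken from the original source: the blob-server address and Configuration below are placeholders, and the enclosing utility class is assumed to be ClientUtils.

// Illustrative call site (assumptions: blob server on localhost:6124, default Configuration,
// enclosing class ClientUtils). The supplier opens a fresh BlobClient for the upload.
InetSocketAddress blobServerAddress = new InetSocketAddress("localhost", 6124);
Configuration clientConfig = new Configuration();
ClientUtils.extractAndUploadJobGraphFiles(
	jobGraph,
	() -> new BlobClient(blobServerAddress, clientConfig));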
if (executionMode == ExecutionMode.DETACHED && jobGraph.getUserArtifacts() != null) {
	final InetSocketAddress address = new InetSocketAddress(blobServer.getPort());
	List<Tuple2<String, Path>> userArtifacts = new ArrayList<>();
	for (Map.Entry<String, DistributedCache.DistributedCacheEntry> entry : jobGraph.getUserArtifacts().entrySet()) {
		// only local files need to be shipped through the blob server; artifacts that
		// already reside on a distributed file system are accessible as-is
		if (!new Path(entry.getValue().filePath).getFileSystem().isDistributedFS()) {
			userArtifacts.add(new Tuple2<>(entry.getKey(), new Path(entry.getValue().filePath)));
		}
	}
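The fragment is cut before the collected userArtifacts are used. One plausible continuation, sketched under the assumption that the uploadJobGraphFiles overload shown earlier is reachable and that a configuration object is in scope:

	// Assumed continuation, not part of the original fragment: ship the local artifacts
	// through a BlobClient pointed at the embedded blob server's address.
	ClientUtils.uploadJobGraphFiles(
		jobGraph,
		Collections.emptyList(),
		userArtifacts,
		() -> new BlobClient(address, configuration));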
for (Map.Entry<String, DistributedCache.DistributedCacheEntry> artifacts : jobGraph.getUserArtifacts().entrySet()) {
	try {
		Path file = new Path(artifacts.getValue().filePath);