org.apache.flink.runtime.blob.PermanentBlobKey Java Examples
The following examples show how to use
org.apache.flink.runtime.blob.PermanentBlobKey.
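Before the individual examples, here is a minimal round-trip sketch of how a PermanentBlobKey is typically obtained and used: bytes are uploaded to a BlobServer as a permanent (job-scoped) blob, the call returns the key, and the key together with the JobID is later enough to fetch the file back. This sketch is not taken from the Flink sources; it only combines calls that appear in the examples below (putPermanent, getFile, BlobServerOptions.OFFLOAD_MINSIZE) and assumes the two-argument BlobServer constructor used in those examples, which may differ in other Flink versions.

import java.io.File;

import org.apache.flink.api.common.JobID;
import org.apache.flink.configuration.BlobServerOptions;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.runtime.blob.BlobServer;
import org.apache.flink.runtime.blob.PermanentBlobKey;
import org.apache.flink.runtime.blob.VoidBlobStore;

public class PermanentBlobRoundTrip {

    public static void main(String[] args) throws Exception {
        Configuration config = new Configuration();
        // Offload even small payloads so the round trip always goes through the blob store.
        config.setInteger(BlobServerOptions.OFFLOAD_MINSIZE, 0);

        // Assumption: the two-argument constructor matches the test code shown below;
        // other Flink versions may require additional arguments (e.g. a storage directory).
        BlobServer blobServer = new BlobServer(config, new VoidBlobStore());
        blobServer.start();
        try {
            JobID jobId = new JobID();

            // Uploading a permanent (job-scoped) blob returns the PermanentBlobKey
            // that identifies it in later lookups.
            PermanentBlobKey key = blobServer.putPermanent(jobId, new byte[]{1, 2, 3});

            // The key plus the JobID is all that is needed to fetch the blob back.
            File localCopy = blobServer.getFile(jobId, key);
            System.out.println("Blob stored at: " + localCopy.getAbsolutePath());
        } finally {
            blobServer.close();
        }
    }
}

The examples that follow show how the same key type is used throughout the Flink codebase and its tests.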
Example #1
Source File: TaskDeploymentDescriptorTest.java (from flink, Apache License 2.0)
@Test
public void testOffLoadedAndNonOffLoadedPayload() {
    final TaskDeploymentDescriptor taskDeploymentDescriptor = createTaskDeploymentDescriptor(
        new TaskDeploymentDescriptor.NonOffloaded<>(serializedJobInformation),
        new TaskDeploymentDescriptor.Offloaded<>(new PermanentBlobKey()));

    SerializedValue<JobInformation> actualSerializedJobInformation =
        taskDeploymentDescriptor.getSerializedJobInformation();
    assertThat(actualSerializedJobInformation, is(serializedJobInformation));

    try {
        taskDeploymentDescriptor.getSerializedTaskInformation();
        fail("Expected to fail since the task information should be offloaded.");
    } catch (IllegalStateException expected) {
        // expected
    }
}
Example #2
Source File: ClientUtilsTest.java (from flink, Apache License 2.0)
@Test
public void uploadAndSetUserJars() throws Exception {
    java.nio.file.Path tmpDir = temporaryFolder.newFolder().toPath();

    JobGraph jobGraph = new JobGraph();

    Collection<Path> jars = Arrays.asList(
        new Path(Files.createFile(tmpDir.resolve("jar1.jar")).toString()),
        new Path(Files.createFile(tmpDir.resolve("jar2.jar")).toString()));

    jars.forEach(jobGraph::addJar);

    assertEquals(jars.size(), jobGraph.getUserJars().size());
    assertEquals(0, jobGraph.getUserJarBlobKeys().size());

    ClientUtils.extractAndUploadJobGraphFiles(jobGraph, () ->
        new BlobClient(new InetSocketAddress("localhost", blobServer.getPort()), new Configuration()));

    assertEquals(jars.size(), jobGraph.getUserJars().size());
    assertEquals(jars.size(), jobGraph.getUserJarBlobKeys().size());
    assertEquals(jars.size(), jobGraph.getUserJarBlobKeys().stream().distinct().count());

    for (PermanentBlobKey blobKey : jobGraph.getUserJarBlobKeys()) {
        blobServer.getFile(jobGraph.getJobID(), blobKey);
    }
}
Example #3
Source File: ExecutionJobVertex.java (from Flink-CEPplus, Apache License 2.0)
public Either<SerializedValue<TaskInformation>, PermanentBlobKey> getTaskInformationOrBlobKey() throws IOException {
    // only one thread should offload the task information, so let's also let only one thread
    // serialize the task information!
    synchronized (stateMonitor) {
        if (taskInformationOrBlobKey == null) {
            final BlobWriter blobWriter = graph.getBlobWriter();

            final TaskInformation taskInformation = new TaskInformation(
                jobVertex.getID(),
                jobVertex.getName(),
                parallelism,
                maxParallelism,
                jobVertex.getInvokableClassName(),
                jobVertex.getConfiguration());

            taskInformationOrBlobKey = BlobWriter.serializeAndTryOffload(
                taskInformation,
                getJobId(),
                blobWriter);
        }

        return taskInformationOrBlobKey;
    }
}
Example #4
Source File: ExecutionGraphDeploymentWithBlobServerTest.java (from flink, Apache License 2.0)
@Before
public void setupBlobServer() throws IOException {
    Configuration config = new Configuration();
    // always offload the serialized job and task information
    config.setInteger(BlobServerOptions.OFFLOAD_MINSIZE, 0);
    blobServer = Mockito.spy(new BlobServer(config, new VoidBlobStore()));
    blobWriter = blobServer;
    blobCache = blobServer;

    seenHashes.clear();

    // verify that we do not upload the same content more than once
    doAnswer(
        invocation -> {
            PermanentBlobKey key = (PermanentBlobKey) invocation.callRealMethod();

            assertTrue(seenHashes.add(key.getHash()));

            return key;
        }
    ).when(blobServer).putPermanent(any(JobID.class), Matchers.<byte[]>any());

    blobServer.start();
}
Example #5
Source File: ExecutionJobVertex.java (from flink, Apache License 2.0)
public Either<SerializedValue<TaskInformation>, PermanentBlobKey> getTaskInformationOrBlobKey() throws IOException {
    // only one thread should offload the task information, so let's also let only one thread
    // serialize the task information!
    synchronized (stateMonitor) {
        if (taskInformationOrBlobKey == null) {
            final BlobWriter blobWriter = graph.getBlobWriter();

            final TaskInformation taskInformation = new TaskInformation(
                jobVertex.getID(),
                jobVertex.getName(),
                parallelism,
                maxParallelism,
                jobVertex.getInvokableClassName(),
                jobVertex.getConfiguration());

            taskInformationOrBlobKey = BlobWriter.serializeAndTryOffload(
                taskInformation,
                getJobId(),
                blobWriter);
        }

        return taskInformationOrBlobKey;
    }
}
Example #6
Source File: ClientUtilsTest.java (from Flink-CEPplus, Apache License 2.0)
@Test
public void uploadAndSetUserJars() throws Exception {
    java.nio.file.Path tmpDir = temporaryFolder.newFolder().toPath();

    JobGraph jobGraph = new JobGraph();

    Collection<Path> jars = Arrays.asList(
        new Path(Files.createFile(tmpDir.resolve("jar1.jar")).toString()),
        new Path(Files.createFile(tmpDir.resolve("jar2.jar")).toString()));

    jars.forEach(jobGraph::addJar);

    assertEquals(jars.size(), jobGraph.getUserJars().size());
    assertEquals(0, jobGraph.getUserJarBlobKeys().size());

    ClientUtils.extractAndUploadJobGraphFiles(jobGraph, () ->
        new BlobClient(new InetSocketAddress("localhost", blobServer.getPort()), new Configuration()));

    assertEquals(jars.size(), jobGraph.getUserJars().size());
    assertEquals(jars.size(), jobGraph.getUserJarBlobKeys().size());
    assertEquals(jars.size(), jobGraph.getUserJarBlobKeys().stream().distinct().count());

    for (PermanentBlobKey blobKey : jobGraph.getUserJarBlobKeys()) {
        blobServer.getFile(jobGraph.getJobID(), blobKey);
    }
}
Example #7
Source File: ExecutionGraphDeploymentWithBlobServerTest.java (from Flink-CEPplus, Apache License 2.0)
@Before
public void setupBlobServer() throws IOException {
    Configuration config = new Configuration();
    // always offload the serialized job and task information
    config.setInteger(BlobServerOptions.OFFLOAD_MINSIZE, 0);
    blobServer = Mockito.spy(new BlobServer(config, new VoidBlobStore()));
    blobWriter = blobServer;
    blobCache = blobServer;

    seenHashes.clear();

    // verify that we do not upload the same content more than once
    doAnswer(
        invocation -> {
            PermanentBlobKey key = (PermanentBlobKey) invocation.callRealMethod();

            assertTrue(seenHashes.add(key.getHash()));

            return key;
        }
    ).when(blobServer).putPermanent(any(JobID.class), Matchers.<byte[]>any());

    blobServer.start();
}
Example #8
Source File: ExecutionGraphDeploymentWithBlobServerTest.java (from flink, Apache License 2.0)
@Override
protected void checkJobOffloaded(ExecutionGraph eg) throws Exception {
    Either<SerializedValue<JobInformation>, PermanentBlobKey> jobInformationOrBlobKey = eg.getJobInformationOrBlobKey();

    assertTrue(jobInformationOrBlobKey.isRight());

    // must not throw:
    blobServer.getFile(eg.getJobID(), jobInformationOrBlobKey.right());
}
Example #9
Source File: BlobLibraryCacheManager.java (from flink, Apache License 2.0)
private ResolvedClassLoader(
        URLClassLoader classLoader,
        Collection<PermanentBlobKey> requiredLibraries,
        Collection<URL> requiredClassPaths) {
    this.classLoader = classLoader;

    // NOTE: do not store the class paths, i.e. URLs, into a set for performance reasons
    //       see http://findbugs.sourceforge.net/bugDescriptions.html#DMI_COLLECTION_OF_URLS
    //       -> alternatively, compare their string representation
    this.classPaths = new HashSet<>(requiredClassPaths.size());
    for (URL url : requiredClassPaths) {
        classPaths.add(url.toString());
    }
    this.libraries = new HashSet<>(requiredLibraries);
}
Example #10
Source File: BlobLibraryCacheManager.java (from flink, Apache License 2.0)
private void verifyClassLoader(Collection<PermanentBlobKey> requiredLibraries, Collection<URL> requiredClassPaths) {
    // Make sure the previous registration referred to the same libraries and class paths.
    // NOTE: the original collections may contain duplicates and may not already be Set
    //       collections with fast checks whether an item is contained in it.

    // lazy construction of a new set for faster comparisons
    if (libraries.size() != requiredLibraries.size() ||
            !new HashSet<>(requiredLibraries).containsAll(libraries)) {

        throw new IllegalStateException(
            "The library registration references a different set of library BLOBs than" +
                " previous registrations for this job:\nold:" + libraries +
                "\nnew:" + requiredLibraries);
    }

    // lazy construction of a new set with String representations of the URLs
    if (classPaths.size() != requiredClassPaths.size() ||
            !requiredClassPaths.stream().map(URL::toString).collect(Collectors.toSet())
                .containsAll(classPaths)) {

        throw new IllegalStateException(
            "The library registration references a different set of library BLOBs than" +
                " previous registrations for this job:\nold:" + classPaths +
                "\nnew:" + requiredClassPaths);
    }
}
Example #11
Source File: JobGraph.java (from flink, Apache License 2.0)
/**
 * Adds the BLOB referenced by the key to the JobGraph's dependencies.
 *
 * @param key
 *        path of the JAR file required to run the job on a task manager
 */
public void addUserJarBlobKey(PermanentBlobKey key) {
    if (key == null) {
        throw new IllegalArgumentException();
    }

    if (!userJarBlobKeys.contains(key)) {
        userJarBlobKeys.add(key);
    }
}
Example #12
Source File: ExecutionGraphDeploymentWithBlobServerTest.java (from flink, Apache License 2.0)
@Override
protected void checkTaskOffloaded(ExecutionGraph eg, JobVertexID jobVertexId) throws Exception {
    Either<SerializedValue<TaskInformation>, PermanentBlobKey> taskInformationOrBlobKey =
        eg.getJobVertex(jobVertexId).getTaskInformationOrBlobKey();

    assertTrue(taskInformationOrBlobKey.isRight());

    // must not throw:
    blobServer.getFile(eg.getJobID(), taskInformationOrBlobKey.right());
}
Example #13
Source File: JobGraph.java (from flink, Apache License 2.0)
public void setUserArtifactBlobKey(String entryName, PermanentBlobKey blobKey) throws IOException {
    byte[] serializedBlobKey;
    serializedBlobKey = InstantiationUtil.serializeObject(blobKey);

    userArtifacts.computeIfPresent(entryName, (key, originalEntry) -> new DistributedCache.DistributedCacheEntry(
        originalEntry.filePath,
        originalEntry.isExecutable,
        serializedBlobKey,
        originalEntry.isZipped
    ));
}
Example #14
Source File: TaskDeploymentDescriptorFactory.java (from flink, Apache License 2.0)
private static MaybeOffloaded<JobInformation> getSerializedJobInformation(ExecutionGraph executionGraph) {
    Either<SerializedValue<JobInformation>, PermanentBlobKey> jobInformationOrBlobKey =
        executionGraph.getJobInformationOrBlobKey();
    if (jobInformationOrBlobKey.isLeft()) {
        return new TaskDeploymentDescriptor.NonOffloaded<>(jobInformationOrBlobKey.left());
    } else {
        return new TaskDeploymentDescriptor.Offloaded<>(jobInformationOrBlobKey.right());
    }
}
Example #15
Source File: TaskDeploymentDescriptorFactory.java (from flink, Apache License 2.0)
private static MaybeOffloaded<TaskInformation> getSerializedTaskInformation(
        Either<SerializedValue<TaskInformation>, PermanentBlobKey> taskInfo) {
    return taskInfo.isLeft() ?
        new TaskDeploymentDescriptor.NonOffloaded<>(taskInfo.left()) :
        new TaskDeploymentDescriptor.Offloaded<>(taskInfo.right());
}
Example #16
Source File: FileCacheReadsFromBlobTest.java (from flink, Apache License 2.0)
@Override
public File getFile(JobID jobId, PermanentBlobKey key) throws IOException {
    if (key.equals(permanentBlobKey)) {
        File f = temporaryFolder.newFile("cacheFile");
        FileUtils.writeFileUtf8(f, testFileContent);
        return f;
    } else {
        throw new IllegalArgumentException("This service contains only entry for " + permanentBlobKey);
    }
}
Example #17
Source File: FileCacheDirectoriesTest.java (from flink, Apache License 2.0)
@Override
public File getFile(JobID jobId, PermanentBlobKey key) throws IOException {
    if (key.equals(permanentBlobKey)) {
        final java.nio.file.Path directory = temporaryFolder.newFolder("zipArchive").toPath();
        final java.nio.file.Path containedFile = directory.resolve("cacheFile");
        Files.copy(new ByteArrayInputStream(testFileContent.getBytes(StandardCharsets.UTF_8)), containedFile);
        Path zipPath = FileUtils.compressDirectory(new Path(directory.toString()), new Path(directory + ".zip"));
        return new File(zipPath.getPath());
    } else {
        throw new IllegalArgumentException("This service contains only entry for " + permanentBlobKey);
    }
}
Example #18
Source File: JobInformation.java (from flink, Apache License 2.0)
public JobInformation(
        JobID jobId,
        String jobName,
        SerializedValue<ExecutionConfig> serializedExecutionConfig,
        Configuration jobConfiguration,
        Collection<PermanentBlobKey> requiredJarFileBlobKeys,
        Collection<URL> requiredClasspathURLs) {
    this.jobId = Preconditions.checkNotNull(jobId);
    this.jobName = Preconditions.checkNotNull(jobName);
    this.serializedExecutionConfig = Preconditions.checkNotNull(serializedExecutionConfig);
    this.jobConfiguration = Preconditions.checkNotNull(jobConfiguration);
    this.requiredJarFileBlobKeys = Preconditions.checkNotNull(requiredJarFileBlobKeys);
    this.requiredClasspathURLs = Preconditions.checkNotNull(requiredClasspathURLs);
}
Example #19
Source File: BlobLibraryCacheManager.java (from flink, Apache License 2.0)
@Override
public ClassLoader getOrResolveClassLoader(Collection<PermanentBlobKey> requiredJarFiles, Collection<URL> requiredClasspaths) throws IOException {
    verifyIsNotClosed();
    return libraryCacheEntry.getOrResolveClassLoader(
        requiredJarFiles,
        requiredClasspaths);
}
Example #20
Source File: BlobLibraryCacheManager.java (from flink, Apache License 2.0)
public void register(
        ExecutionAttemptID task,
        Collection<PermanentBlobKey> requiredLibraries,
        Collection<URL> requiredClasspaths) {

    // Make sure the previous registration referred to the same libraries and class paths.
    // NOTE: the original collections may contain duplicates and may not already be Set
    //       collections with fast checks whether an item is contained in it.

    // lazy construction of a new set for faster comparisons
    if (libraries.size() != requiredLibraries.size() ||
            !new HashSet<>(requiredLibraries).containsAll(libraries)) {

        throw new IllegalStateException(
            "The library registration references a different set of library BLOBs than" +
                " previous registrations for this job:\nold:" + libraries.toString() +
                "\nnew:" + requiredLibraries.toString());
    }

    // lazy construction of a new set with String representations of the URLs
    if (classPaths.size() != requiredClasspaths.size() ||
            !requiredClasspaths.stream().map(URL::toString).collect(Collectors.toSet())
                .containsAll(classPaths)) {

        throw new IllegalStateException(
            "The library registration references a different set of library BLOBs than" +
                " previous registrations for this job:\nold:" + classPaths.toString() +
                "\nnew:" + requiredClasspaths.toString());
    }

    this.referenceHolders.add(task);
}
Example #21
Source File: BlobLibraryCacheManager.java (from flink, Apache License 2.0)
/**
 * Creates a cache entry for a flink class loader with the given <tt>libraryURLs</tt>.
 *
 * @param requiredLibraries
 *        BLOB keys required by the class loader (stored for ensuring consistency among different
 *        job/task registrations)
 * @param requiredClasspaths
 *        class paths required by the class loader (stored for ensuring consistency among
 *        different job/task registrations)
 * @param libraryURLs
 *        complete list of URLs to use for the class loader (includes references to the
 *        <tt>requiredLibraries</tt> and <tt>requiredClasspaths</tt>)
 * @param initialReference
 *        reference holder ID
 * @param classLoaderResolveOrder Whether to resolve classes first in the child ClassLoader
 *        or parent ClassLoader
 * @param alwaysParentFirstPatterns A list of patterns for classes that should always be
 *        resolved from the parent ClassLoader (if possible).
 */
LibraryCacheEntry(
        Collection<PermanentBlobKey> requiredLibraries,
        Collection<URL> requiredClasspaths,
        URL[] libraryURLs,
        ExecutionAttemptID initialReference,
        FlinkUserCodeClassLoaders.ResolveOrder classLoaderResolveOrder,
        String[] alwaysParentFirstPatterns) {

    this.classLoader = FlinkUserCodeClassLoaders.create(
        classLoaderResolveOrder,
        libraryURLs,
        FlinkUserCodeClassLoaders.class.getClassLoader(),
        alwaysParentFirstPatterns);

    // NOTE: do not store the class paths, i.e. URLs, into a set for performance reasons
    //       see http://findbugs.sourceforge.net/bugDescriptions.html#DMI_COLLECTION_OF_URLS
    //       -> alternatively, compare their string representation
    this.classPaths = new HashSet<>(requiredClasspaths.size());
    for (URL url : requiredClasspaths) {
        classPaths.add(url.toString());
    }

    this.libraries = new HashSet<>(requiredLibraries);

    this.referenceHolders = new HashSet<>();
    this.referenceHolders.add(initialReference);
}
Example #22
Source File: ClassloadingProps.java (from flink, Apache License 2.0)
/**
 * Constructor of ClassloadingProps.
 *
 * @param blobManagerPort The port of the blobManager
 * @param requiredJarFiles The blob keys of the required jar files
 * @param requiredClasspaths The urls of the required classpaths
 */
public ClassloadingProps(
        final int blobManagerPort,
        final Collection<PermanentBlobKey> requiredJarFiles,
        final Collection<URL> requiredClasspaths) {
    this.blobManagerPort = blobManagerPort;
    this.requiredJarFiles = requiredJarFiles;
    this.requiredClasspaths = requiredClasspaths;
}
Example #23
Source File: ZooKeeperDefaultDispatcherRunnerTest.java (from flink, Apache License 2.0)
private JobGraph createJobGraphWithBlobs() throws IOException {
    final JobVertex vertex = new JobVertex("test vertex");
    vertex.setInvokableClass(NoOpInvokable.class);
    vertex.setParallelism(1);

    final JobGraph jobGraph = new JobGraph("Test job graph", vertex);
    final PermanentBlobKey permanentBlobKey = blobServer.putPermanent(jobGraph.getJobID(), new byte[256]);
    jobGraph.addUserJarBlobKey(permanentBlobKey);

    return jobGraph;
}