Java Code Examples for build.bazel.remote.execution.v2.Digest#getSizeBytes()
The following examples show how to use build.bazel.remote.execution.v2.Digest#getSizeBytes(). All of the examples below are drawn from the bazel-buildfarm project (Apache License 2.0), and each is labeled with its source file.
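Digest is the protobuf message defined by the Remote Execution API: its hash field holds the hex digest of a blob and its size_bytes field holds the blob's length, which getSizeBytes() returns. As a minimal sketch (assuming only the generated Digest class and Guava's Hashing utility, not any bazel-buildfarm helper), a Digest can be built and read back like this:

// Minimal sketch: build a Digest for some content and read its size back.
ByteString content = ByteString.copyFromUtf8("hello, world");
Digest digest =
    Digest.newBuilder()
        .setHash(Hashing.sha256().hashBytes(content.toByteArray()).toString())
        .setSizeBytes(content.size())
        .build();
long size = digest.getSizeBytes(); // 12 for the content above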
Example 1
Source File: Utils.java From bazel-buildfarm with Apache License 2.0
public static ListenableFuture<Digest> putBlobFuture(
    Instance instance,
    Digest digest,
    ByteString data,
    long writeDeadlineAfter,
    TimeUnit writeDeadlineAfterUnits,
    RequestMetadata requestMetadata)
    throws ExcessiveWriteSizeException {
  if (digest.getSizeBytes() != data.size()) {
    return immediateFailedFuture(
        invalidDigestSize(digest.getSizeBytes(), data.size()).asRuntimeException());
  }
  Write write = instance.getBlobWrite(digest, UUID.randomUUID(), requestMetadata);
  // indicate that we know this write is novel
  write.reset();
  SettableFuture<Digest> future = SettableFuture.create();
  write.addListener(() -> future.set(digest), directExecutor());
  try (OutputStream out = write.getOutput(writeDeadlineAfter, writeDeadlineAfterUnits, () -> {})) {
    data.writeTo(out);
  } catch (Exception e) {
    future.setException(e);
  }
  return future;
}
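The size check at the top ties getSizeBytes() to the actual payload: if the Digest was not computed from the data being written, the future fails immediately. A hypothetical caller might look like the sketch below, where instance and requestMetadata are assumed to come from the surrounding buildfarm context:

// Hypothetical usage sketch; `instance` and `requestMetadata` are supplied elsewhere.
ByteString data = ByteString.copyFromUtf8("blob contents");
Digest digest =
    Digest.newBuilder()
        .setHash(Hashing.sha256().hashBytes(data.toByteArray()).toString())
        .setSizeBytes(data.size()) // must equal data.size() or putBlobFuture fails fast
        .build();
ListenableFuture<Digest> written =
    putBlobFuture(instance, digest, data, 60, TimeUnit.SECONDS, requestMetadata);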
Example 2
Source File: WriteStreamObserver.java From bazel-buildfarm with Apache License 2.0
private Write getWrite(String resourceName)
    throws ExcessiveWriteSizeException, InstanceNotFoundException, InvalidResourceNameException {
  switch (detectResourceOperation(resourceName)) {
    case UploadBlob:
      Digest uploadBlobDigest = parseUploadBlobDigest(resourceName);
      expectedCommittedSize = uploadBlobDigest.getSizeBytes();
      return ByteStreamService.getUploadBlobWrite(
          instances.getFromUploadBlob(resourceName),
          uploadBlobDigest,
          parseUploadBlobUUID(resourceName));
    case OperationStream:
      return ByteStreamService.getOperationStreamWrite(
          instances.getFromOperationStream(resourceName), resourceName);
    case Blob:
    default:
      throw INVALID_ARGUMENT
          .withDescription("unknown resource operation for " + resourceName)
          .asRuntimeException();
  }
}
Example 3
Source File: BlobWriteObserver.java From bazel-buildfarm with Apache License 2.0
BlobWriteObserver(String resourceName, SimpleBlobStore simpleBlobStore)
    throws InvalidResourceNameException {
  Digest digest = parseUploadBlobDigest(resourceName);
  this.resourceName = resourceName;
  this.size = digest.getSizeBytes();
  buffer = new RingBufferInputStream((int) Math.min(size, BLOB_BUFFER_SIZE));
  putThread =
      new Thread(
          () -> {
            try {
              simpleBlobStore.put(digest.getHash(), size, buffer);
            } catch (IOException e) {
              buffer.shutdown();
            } catch (InterruptedException e) {
              Thread.currentThread().interrupt();
            }
          });
  putThread.start();
}
Example 4
Source File: MemoryWriteOutputStream.java From bazel-buildfarm with Apache License 2.0
MemoryWriteOutputStream(
    ContentAddressableStorage storage, Digest digest, ListenableFuture<ByteString> writtenFuture) {
  this.storage = storage;
  this.digest = digest;
  this.writtenFuture = writtenFuture;
  if (digest.getSizeBytes() > Integer.MAX_VALUE) {
    throw new IllegalArgumentException(
        String.format(
            "content size %d exceeds maximum of %d", digest.getSizeBytes(), Integer.MAX_VALUE));
  }
  out = ByteString.newOutput((int) digest.getSizeBytes());
  hashOut = DigestUtil.forDigest(digest).newHashingOutputStream(out);
  addListener(
      () -> {
        future.set(null);
        try {
          hashOut.close();
        } catch (IOException e) {
          // ignore
        }
      },
      directExecutor());
}
Example 5
Source File: MemoryCAS.java From bazel-buildfarm with Apache License 2.0
@Override
public Iterable<Digest> findMissingBlobs(Iterable<Digest> digests) throws InterruptedException {
  ImmutableList.Builder<Digest> builder = ImmutableList.builder();
  synchronized (this) {
    // incur access use of the digest
    for (Digest digest : digests) {
      if (digest.getSizeBytes() != 0 && !contains(digest)) {
        builder.add(digest);
      }
    }
  }
  ImmutableList<Digest> missing = builder.build();
  if (delegate != null && !missing.isEmpty()) {
    return delegate.findMissingBlobs(missing);
  }
  return missing;
}
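The getSizeBytes() != 0 guard reflects a convention that recurs in most of the examples below: a digest whose size is zero denotes the empty blob, which these implementations treat as trivially present, so it is never reported missing, uploaded, or fetched. Expressed as a hypothetical helper (not part of the bazel-buildfarm API):

// Sketch only: names the convention used throughout these examples.
static boolean isEmptyBlob(Digest digest) {
  return digest.getSizeBytes() == 0;
}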
Example 6
Source File: Writes.java From bazel-buildfarm with Apache License 2.0
public Write get(Digest digest, UUID uuid, RequestMetadata requestMetadata)
    throws ExcessiveWriteSizeException {
  if (digest.getSizeBytes() == 0) {
    return new CompleteWrite(0);
  }
  BlobWriteKey key =
      BlobWriteKey.newBuilder().setDigest(digest).setIdentifier(uuid.toString()).build();
  try {
    return new InvalidatingWrite(
        blobWriteInstances.get(key).getBlobWrite(digest, uuid, requestMetadata),
        () -> blobWriteInstances.invalidate(key));
  } catch (ExecutionException e) {
    Throwable cause = e.getCause();
    throwIfInstanceOf(cause, RuntimeException.class);
    throw new UncheckedExecutionException(cause);
  }
}
Example 7
Source File: Extract.java From bazel-buildfarm with Apache License 2.0
static ByteString getBlobIntoFile(
    String type, String instanceName, Digest digest, ByteStreamStub bsStub, Path root)
    throws IOException, InterruptedException {
  Path file = root.resolve(digest.getHash());
  if (Files.exists(file) && Files.size(file) == digest.getSizeBytes()) {
    try (InputStream in = Files.newInputStream(file)) {
      return ByteString.readFrom(in);
    }
  }
  System.out.println("Getting " + type + " " + digest.getHash() + "/" + digest.getSizeBytes());
  ByteString content = getBlob(instanceName, digest, bsStub);
  try (OutputStream out = Files.newOutputStream(file)) {
    content.writeTo(out);
  }
  return content;
}
Example 8
Source File: GrpcCAS.java From bazel-buildfarm with Apache License 2.0
public static Write newWrite(
    Channel channel,
    String instanceName,
    Digest digest,
    UUID uuid,
    RequestMetadata requestMetadata) {
  HashCode hash = HashCode.fromString(digest.getHash());
  String resourceName =
      ByteStreamUploader.uploadResourceName(instanceName, uuid, hash, digest.getSizeBytes());
  Supplier<ByteStreamBlockingStub> bsBlockingStub =
      Suppliers.memoize(
          () ->
              ByteStreamGrpc.newBlockingStub(channel)
                  .withInterceptors(attachMetadataInterceptor(requestMetadata)));
  Supplier<ByteStreamStub> bsStub =
      Suppliers.memoize(
          () ->
              ByteStreamGrpc.newStub(channel)
                  .withInterceptors(attachMetadataInterceptor(requestMetadata)));
  return new StubWriteOutputStream(
      bsBlockingStub, bsStub, resourceName, digest.getSizeBytes(), /* autoflush=*/ false);
}
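Here getSizeBytes() is used twice: once inside the ByteStream upload resource name and once as the expected committed size of the write. Under the Remote Execution API's ByteStream conventions, an upload resource name ends with the blob size, roughly as sketched below; the authoritative string is whatever ByteStreamUploader.uploadResourceName produces, so treat this format as an assumption for illustration only.

// Illustrative only: approximate shape of an upload resource name.
String resourceName =
    String.format(
        "%s/uploads/%s/blobs/%s/%d",
        instanceName, uuid, digest.getHash(), digest.getSizeBytes());
// e.g. "default/uploads/3fa8.../blobs/8b1a.../1024"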
Example 9
Source File: Extract.java From bazel-buildfarm with Apache License 2.0
static Runnable blobGetter(
    Path root,
    String instanceName,
    Digest digest,
    ByteStreamStub bsStub,
    AtomicLong outstandingOperations,
    ListeningScheduledExecutorService retryService) {
  if (digest.getSizeBytes() == 0) {
    return () -> outstandingOperations.getAndDecrement();
  }
  return new Runnable() {
    @Override
    public void run() {
      Path file = root.resolve(digest.getHash());
      try {
        if (!Files.exists(file) || Files.size(file) != digest.getSizeBytes()) {
          System.out.println("Getting blob " + digest.getHash() + "/" + digest.getSizeBytes());
          try (OutputStream out = Files.newOutputStream(file)) {
            try (InputStream in = newInput(instanceName, digest, bsStub, retryService)) {
              ByteStreams.copy(in, out);
            }
          }
        }
      } catch (IOException e) {
        e.printStackTrace();
      }
      outstandingOperations.getAndDecrement();
    }
  };
}
Example 10
Source File: ByteStreamService.java From bazel-buildfarm with Apache License 2.0
static Write getBlobWrite(Instance instance, Digest digest) {
  return new Write() {
    @Override
    public long getCommittedSize() {
      return isComplete() ? digest.getSizeBytes() : 0;
    }

    @Override
    public boolean isComplete() {
      return instance.containsBlob(digest, TracingMetadataUtils.fromCurrentContext());
    }

    @Override
    public FeedbackOutputStream getOutput(
        long deadlineAfter, TimeUnit deadlineAfterUnits, Runnable onReadyHandler)
        throws IOException {
      throw new IOException("cannot get output of blob write");
    }

    @Override
    public void reset() {
      throw new RuntimeException("cannot reset a blob write");
    }

    @Override
    public void addListener(Runnable onCompleted, Executor executor) {
      throw new RuntimeException("cannot add listener to blob write");
    }
  };
}
Example 11
Source File: AbstractServerInstance.java From bazel-buildfarm with Apache License 2.0
ByteString getBlob(Digest blobDigest, long offset, long count)
    throws IndexOutOfBoundsException, InterruptedException {
  if (blobDigest.getSizeBytes() == 0) {
    if (offset == 0 && count >= 0) {
      return ByteString.EMPTY;
    } else {
      throw new IndexOutOfBoundsException();
    }
  }
  Blob blob = contentAddressableStorage.get(blobDigest);
  if (blob == null) {
    return null;
  }
  if (offset < 0
      || (blob.isEmpty() && offset > 0)
      || (!blob.isEmpty() && offset >= blob.size())
      || count < 0) {
    throw new IndexOutOfBoundsException();
  }
  long endIndex = offset + count;
  return blob.getData()
      .substring((int) offset, (int) (endIndex > blob.size() ? blob.size() : endIndex));
}
Example 12
Source File: ShardInstance.java From bazel-buildfarm with Apache License 2.0
ListenableFuture<Directory> expectDirectory(
    String reason,
    Digest directoryBlobDigest,
    Executor executor,
    RequestMetadata requestMetadata) {
  if (directoryBlobDigest.getSizeBytes() == 0) {
    return immediateFuture(Directory.getDefaultInstance());
  }
  Supplier<ListenableFuture<Directory>> fetcher =
      () ->
          notFoundNull(expect(directoryBlobDigest, Directory.parser(), executor, requestMetadata));
  // is there a better interface to use for the cache with these nice futures?
  return catching(
      directoryCache.get(
          directoryBlobDigest,
          new Callable<ListenableFuture<? extends Directory>>() {
            @Override
            public ListenableFuture<Directory> call() {
              logger.log(
                  Level.INFO,
                  format(
                      "transformQueuedOperation(%s): fetching directory %s",
                      reason, DigestUtil.toString(directoryBlobDigest)));
              return fetcher.get();
            }
          }),
      InvalidCacheLoadException.class,
      (e) -> {
        return null;
      },
      directExecutor());
}
Example 13
Source File: CFCExecFileSystem.java From bazel-buildfarm with Apache License 2.0
private ListenableFuture<Void> put(
    Path path, FileNode fileNode, ImmutableList.Builder<String> inputFiles) {
  Path filePath = path.resolve(fileNode.getName());
  Digest digest = fileNode.getDigest();
  if (digest.getSizeBytes() == 0) {
    return listeningDecorator(fetchService)
        .submit(
            () -> {
              Files.createFile(filePath); // ignore executable
              return null;
            });
  }
  String key = fileCache.getKey(digest, fileNode.getIsExecutable());
  return transformAsync(
      fileCache.put(digest, fileNode.getIsExecutable(), fetchService),
      (fileCachePath) -> {
        checkNotNull(key);
        // we saw null entries in the built immutable list without synchronization
        synchronized (inputFiles) {
          inputFiles.add(key);
        }
        if (fileNode.getDigest().getSizeBytes() != 0) {
          try {
            Files.createLink(filePath, fileCachePath);
          } catch (IOException e) {
            return immediateFailedFuture(e);
          }
        }
        return immediateFuture(null);
      },
      fetchService);
}
Example 14
Source File: EmptyInputStreamFactory.java From bazel-buildfarm with Apache License 2.0
@Override
public InputStream newInput(Digest blobDigest, long offset)
    throws IOException, InterruptedException {
  if (blobDigest.getSizeBytes() == 0) {
    return ByteString.EMPTY.newInput();
  }
  return delegate.newInput(blobDigest, offset);
}
Example 15
Source File: CASFileCache.java From bazel-buildfarm with Apache License 2.0
@Override
public InputStream newInput(Digest digest, long offset) throws IOException {
  try {
    return newLocalInput(digest, offset);
  } catch (NoSuchFileException e) {
    if (delegate == null) {
      throw e;
    }
  }
  if (digest.getSizeBytes() > maxEntrySizeInBytes) {
    return delegate.newInput(digest, offset);
  }
  Write write = getWrite(digest, UUID.randomUUID(), RequestMetadata.getDefaultInstance());
  return newReadThroughInput(digest, offset, write);
}
Example 16
Source File: Writes.java From bazel-buildfarm with Apache License 2.0
private synchronized Write getNonEmpty(Digest digest, UUID uuid) {
  Blob blob = storage.get(digest);
  if (blob != null) {
    return new CompleteWrite(digest.getSizeBytes());
  }
  return get(BlobWriteKey.newBuilder().setDigest(digest).setIdentifier(uuid.toString()).build());
}
Example 17
Source File: CASFileCache.java From bazel-buildfarm with Apache License 2.0
private void fetchDirectory(
    Path path,
    Digest digest,
    Map<Digest, Directory> directoriesIndex,
    ImmutableList.Builder<String> inputsBuilder,
    ImmutableList.Builder<ListenableFuture<Path>> putFutures,
    ExecutorService service)
    throws IOException, InterruptedException {
  if (Files.exists(path)) {
    if (Files.isDirectory(path)) {
      logger.log(Level.FINE, "removing existing directory " + path + " for fetch");
      Directories.remove(path);
    } else {
      Files.delete(path);
    }
  }
  Directory directory;
  if (digest.getSizeBytes() == 0) {
    directory = Directory.getDefaultInstance();
  } else {
    directory = directoriesIndex.get(digest);
  }
  if (directory == null) {
    throw new IOException(
        format("directory not found for %s(%s)", path, DigestUtil.toString(digest)));
  }
  Files.createDirectory(path);
  putDirectoryFiles(directory.getFilesList(), path, inputsBuilder, putFutures, service);
  for (DirectoryNode directoryNode : directory.getDirectoriesList()) {
    fetchDirectory(
        path.resolve(directoryNode.getName()),
        directoryNode.getDigest(),
        directoriesIndex,
        inputsBuilder,
        putFutures,
        service);
  }
}
Example 18
Source File: CASFileCache.java From bazel-buildfarm with Apache License 2.0
@Override
public Write getWrite(Digest digest, UUID uuid, RequestMetadata requestMetadata) {
  if (digest.getSizeBytes() == 0) {
    return new CompleteWrite(0);
  }
  try {
    return writes.get(
        BlobWriteKey.newBuilder().setDigest(digest).setIdentifier(uuid.toString()).build());
  } catch (ExecutionException e) {
    logger.log(
        Level.SEVERE, "error getting write for " + DigestUtil.toString(digest) + ":" + uuid, e);
    throw new IllegalStateException("write create must not fail", e.getCause());
  }
}
Example 19
Source File: TreeIterator.java From bazel-buildfarm with Apache License 2.0
private @Nullable Directory getDirectory(Digest digest) {
  if (digest.getSizeBytes() == 0) {
    return Directory.getDefaultInstance();
  }
  return directoryFetcher.fetch(digest);
}
Example 20
Source File: Writes.java From bazel-buildfarm with Apache License 2.0
Write get(Digest digest, UUID uuid) {
  if (digest.getSizeBytes() == 0) {
    return new CompleteWrite(0);
  }
  return getNonEmpty(digest, uuid);
}