org.apache.htrace.Trace Java Examples
The following examples show how to use org.apache.htrace.Trace. They are taken from real-world open-source projects; the source file, originating project, and license are listed above each example.
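All of the examples below follow the same basic pattern: open a TraceScope via Trace.startSpan, optionally attach annotations to the resulting Span, and close the scope when the unit of work ends. As a minimal, self-contained sketch of that pattern (assuming htrace-core on the classpath; span-receiver configuration is omitted, and the span name "myOperation" is a placeholder):

import org.apache.htrace.Sampler;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;

public class TraceBasics {
  public static void main(String[] args) {
    // Sampler.ALWAYS forces a real span to be created even when no
    // enclosing trace is active.
    TraceScope scope = Trace.startSpan("myOperation", Sampler.ALWAYS);
    try {
      Span span = scope.getSpan();
      if (span != null) {
        // Timeline annotations mark interesting moments within the span.
        span.addTimelineAnnotation("work started");
      }
      // ... traced work goes here ...
    } finally {
      // Closing the scope ends the span and hands it to any registered
      // span receivers.
      scope.close();
    }
  }
}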
Example #1
Source File: TracingExample.java From accumulo-examples with Apache License 2.0
private void readEntries(Opts opts) throws TableNotFoundException {

  Scanner scanner = client.createScanner(opts.getTableName(), opts.auths);

  // Trace the read operation.
  TraceScope readScope = Trace.startSpan("Client Read", Sampler.ALWAYS);
  System.out.println("TraceID: " + Long.toHexString(readScope.getSpan().getTraceId()));

  int numberOfEntriesRead = 0;
  for (Entry<Key,Value> entry : scanner) {
    System.out.println(entry.getKey().toString() + " -> " + entry.getValue().toString());
    ++numberOfEntriesRead;
  }
  // You can add additional metadata (key, values) to Spans which will be able to be
  // viewed in the Monitor
  readScope.getSpan().addKVAnnotation("Number of Entries Read".getBytes(UTF_8),
      String.valueOf(numberOfEntriesRead).getBytes(UTF_8));

  readScope.close();
}
Example #2
Source File: TracingExample.java From accumulo-examples with Apache License 2.0
private void createEntries(Opts opts) throws TableNotFoundException, AccumuloException {

  // Trace the write operation. Note, unless you flush the BatchWriter, you will not capture
  // the write operation as it occurs asynchronously. You can optionally create additional
  // Spans within a given Trace, as seen below around the flush.
  TraceScope scope = Trace.startSpan("Client Write", Sampler.ALWAYS);

  System.out.println("TraceID: " + Long.toHexString(scope.getSpan().getTraceId()));
  try (BatchWriter batchWriter = client.createBatchWriter(opts.getTableName())) {
    Mutation m = new Mutation("row");
    m.put("cf", "cq", "value");

    batchWriter.addMutation(m);
    // You can add timeline annotations to Spans which will be able to be viewed in the Monitor
    scope.getSpan().addTimelineAnnotation("Initiating Flush");
    batchWriter.flush();
  }
  scope.close();
}
Example #3
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * Decrypts an EDEK by consulting the KeyProvider.
 */
private KeyVersion decryptEncryptedDataEncryptionKey(FileEncryptionInfo feInfo)
    throws IOException {
  TraceScope scope = Trace.startSpan("decryptEDEK", traceSampler);
  try {
    KeyProvider provider = getKeyProvider();
    if (provider == null) {
      throw new IOException("No KeyProvider is configured, cannot access" +
          " an encrypted file");
    }
    EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption(
        feInfo.getKeyName(), feInfo.getEzKeyVersionName(), feInfo.getIV(),
        feInfo.getEncryptedDataEncryptionKey());
    try {
      KeyProviderCryptoExtension cryptoProvider = KeyProviderCryptoExtension
          .createKeyProviderCryptoExtension(provider);
      return cryptoProvider.decryptEncryptedKey(ekv);
    } catch (GeneralSecurityException e) {
      throw new IOException(e);
    }
  } finally {
    scope.close();
  }
}
Example #4
Source File: DFSInputStream.java From big-c with Apache License 2.0
private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode,
    final LocatedBlock block, final long start, final long end,
    final ByteBuffer bb,
    final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
    final int hedgedReadId) {
  final Span parentSpan = Trace.currentSpan();
  return new Callable<ByteBuffer>() {
    @Override
    public ByteBuffer call() throws Exception {
      byte[] buf = bb.array();
      int offset = bb.position();
      TraceScope scope =
          Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
      try {
        actualGetFromOneDataNode(datanode, block, start, end, buf, offset,
            corruptedBlockMap);
        return bb;
      } finally {
        scope.close();
      }
    }
  };
}
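HTrace keeps the current span in a thread-local, so a span started inside the Callable on an executor thread would have no parent; the example therefore captures Trace.currentSpan() on the submitting thread and passes it to the Trace.startSpan(description, parent) overload on the worker. A stripped-down sketch of the same hand-off (the executor and the empty task body are hypothetical; the Trace, Span, and TraceScope calls are the ones used above):

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;

public class CrossThreadTracing {
  static Callable<Void> tracedTask() {
    // Capture the caller's span on the submitting thread ...
    final Span parent = Trace.currentSpan();
    return new Callable<Void>() {
      @Override
      public Void call() throws Exception {
        // ... and re-attach it as the parent on the worker thread.
        TraceScope scope = Trace.startSpan("worker task", parent);
        try {
          // real work would go here
          return null;
        } finally {
          scope.close();
        }
      }
    };
  }

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    pool.submit(tracedTask()).get();
    pool.shutdown();
  }
}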
Example #5
Source File: DFSClient.java From hadoop with Apache License 2.0
public void removeDefaultAcl(String src) throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("removeDefaultAcl", traceSampler);
  try {
    namenode.removeDefaultAcl(src);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        AclException.class,
        FileNotFoundException.class,
        NSQuotaExceededException.class,
        SafeModeException.class,
        SnapshotAccessControlException.class,
        UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
Example #6
Source File: DFSClient.java From big-c with Apache License 2.0
public void removeAclEntries(String src, List<AclEntry> aclSpec)
    throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("removeAclEntries", traceSampler);
  try {
    namenode.removeAclEntries(src, aclSpec);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        AclException.class,
        FileNotFoundException.class,
        NSQuotaExceededException.class,
        SafeModeException.class,
        SnapshotAccessControlException.class,
        UnresolvedPathException.class);
  } finally {
    scope.close();
  }
}
Example #7
Source File: BlockReaderLocalLegacy.java From big-c with Apache License 2.0
/**
 * Reads bytes into a buffer until EOF or the buffer's limit is reached
 */
private int fillBuffer(FileInputStream stream, ByteBuffer buf)
    throws IOException {
  TraceScope scope = Trace.startSpan(
      "BlockReaderLocalLegacy#fillBuffer(" + blockId + ")", Sampler.NEVER);
  try {
    int bytesRead = stream.getChannel().read(buf);
    if (bytesRead < 0) {
      // EOF
      return bytesRead;
    }
    while (buf.remaining() > 0) {
      int n = stream.getChannel().read(buf);
      if (n < 0) {
        // EOF
        return bytesRead;
      }
      bytesRead += n;
    }
    return bytesRead;
  } finally {
    scope.close();
  }
}
Example #8
Source File: BlockStorageLocationUtil.java From hadoop with Apache License 2.0
@Override
public HdfsBlocksMetadata call() throws Exception {
  HdfsBlocksMetadata metadata = null;
  // Create the RPC proxy and make the RPC
  ClientDatanodeProtocol cdp = null;
  TraceScope scope = Trace.startSpan("getHdfsBlocksMetadata", parentSpan);
  try {
    cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
        timeout, connectToDnViaHostname);
    metadata = cdp.getHdfsBlocksMetadata(poolId, blockIds, dnTokens);
  } catch (IOException e) {
    // Bubble this up to the caller, handle with the Future
    throw e;
  } finally {
    scope.close();
    if (cdp != null) {
      RPC.stopProxy(cdp);
    }
  }
  return metadata;
}
Example #9
Source File: IndexRegionObserver.java From phoenix with Apache License 2.0
private void doIndexWritesWithExceptions(BatchMutateContext context, boolean post)
    throws IOException {
  // Guard against a null context before dereferencing it (the original
  // read context.postIndexUpdates ahead of the null check).
  if (context == null) {
    return;
  }
  ListMultimap<HTableInterfaceReference, Mutation> indexUpdates =
      post ? context.postIndexUpdates : context.preIndexUpdates;
  // short circuit, if we don't need to do any work
  if (indexUpdates == null || indexUpdates.isEmpty()) {
    return;
  }

  // get the current span, or just use a null-span to avoid a bunch of if statements
  try (TraceScope scope = Trace.startSpan(
      "Completing " + (post ? "post" : "pre") + " index writes")) {
    Span current = scope.getSpan();
    if (current == null) {
      current = NullSpan.INSTANCE;
    }
    current.addTimelineAnnotation("Actually doing " + (post ? "post" : "pre")
        + " index update for first time");
    if (post) {
      postWriter.write(indexUpdates, false, context.clientVersion);
    } else {
      preWriter.write(indexUpdates, false, context.clientVersion);
    }
  }
}
Example #10
Source File: WritableRpcEngine.java From big-c with Apache License 2.0
@Override
public Object invoke(Object proxy, Method method, Object[] args)
    throws Throwable {
  long startTime = 0;
  if (LOG.isDebugEnabled()) {
    startTime = Time.now();
  }
  TraceScope traceScope = null;
  if (Trace.isTracing()) {
    traceScope = Trace.startSpan(RpcClientUtil.methodToTraceString(method));
  }
  ObjectWritable value;
  try {
    value = (ObjectWritable)
        client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args),
            remoteId, fallbackToSimpleAuth);
  } finally {
    if (traceScope != null) {
      traceScope.close();
    }
  }
  if (LOG.isDebugEnabled()) {
    long callTime = Time.now() - startTime;
    LOG.debug("Call: " + method.getName() + " " + callTime);
  }
  return value.get();
}
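The invoke() method guards span creation with Trace.isTracing(), so untraced calls skip span allocation entirely, at the cost of a null-checked close in the finally block. If that guard recurs across many call sites, it could be factored into a small wrapper; a hypothetical sketch (the MaybeTraced class and its method name are invented for illustration, not part of Hadoop):

import java.util.concurrent.Callable;

import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;

public final class MaybeTraced {
  private MaybeTraced() {}

  // Run body under a span only when a trace is already active, and close
  // the scope even if the body throws.
  public static <T> T call(String description, Callable<T> body) throws Exception {
    TraceScope scope = Trace.isTracing() ? Trace.startSpan(description) : null;
    try {
      return body.call();
    } finally {
      if (scope != null) {
        scope.close();
      }
    }
  }
}

A caller would then write MaybeTraced.call("myRpc", () -> doRpc()) instead of repeating the guard inline.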
Example #11
Source File: BlockReaderLocalLegacy.java From hadoop with Apache License 2.0
/**
 * Reads bytes into a buffer until EOF or the buffer's limit is reached
 */
private int fillBuffer(FileInputStream stream, ByteBuffer buf)
    throws IOException {
  TraceScope scope = Trace.startSpan(
      "BlockReaderLocalLegacy#fillBuffer(" + blockId + ")", Sampler.NEVER);
  try {
    int bytesRead = stream.getChannel().read(buf);
    if (bytesRead < 0) {
      // EOF
      return bytesRead;
    }
    while (buf.remaining() > 0) {
      int n = stream.getChannel().read(buf);
      if (n < 0) {
        // EOF
        return bytesRead;
      }
      bytesRead += n;
    }
    return bytesRead;
  } finally {
    scope.close();
  }
}
Example #12
Source File: WritableRpcEngine.java From hadoop with Apache License 2.0
@Override
public Object invoke(Object proxy, Method method, Object[] args)
    throws Throwable {
  long startTime = 0;
  if (LOG.isDebugEnabled()) {
    startTime = Time.now();
  }
  TraceScope traceScope = null;
  if (Trace.isTracing()) {
    traceScope = Trace.startSpan(RpcClientUtil.methodToTraceString(method));
  }
  ObjectWritable value;
  try {
    value = (ObjectWritable)
        client.call(RPC.RpcKind.RPC_WRITABLE, new Invocation(method, args),
            remoteId, fallbackToSimpleAuth);
  } finally {
    if (traceScope != null) {
      traceScope.close();
    }
  }
  if (LOG.isDebugEnabled()) {
    long callTime = Time.now() - startTime;
    LOG.debug("Call: " + method.getName() + " " + callTime);
  }
  return value.get();
}
Example #13
Source File: DFSClient.java From big-c with Apache License 2.0
/**
 * Decrypts an EDEK by consulting the KeyProvider.
 */
private KeyVersion decryptEncryptedDataEncryptionKey(FileEncryptionInfo feInfo)
    throws IOException {
  TraceScope scope = Trace.startSpan("decryptEDEK", traceSampler);
  try {
    KeyProvider provider = getKeyProvider();
    if (provider == null) {
      throw new IOException("No KeyProvider is configured, cannot access" +
          " an encrypted file");
    }
    EncryptedKeyVersion ekv = EncryptedKeyVersion.createForDecryption(
        feInfo.getKeyName(), feInfo.getEzKeyVersionName(), feInfo.getIV(),
        feInfo.getEncryptedDataEncryptionKey());
    try {
      KeyProviderCryptoExtension cryptoProvider = KeyProviderCryptoExtension
          .createKeyProviderCryptoExtension(provider);
      return cryptoProvider.decryptEncryptedKey(ekv);
    } catch (GeneralSecurityException e) {
      throw new IOException(e);
    }
  } finally {
    scope.close();
  }
}
Example #14
Source File: DFSInputStream.java From hadoop with Apache License 2.0
private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode,
    final LocatedBlock block, final long start, final long end,
    final ByteBuffer bb,
    final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
    final int hedgedReadId) {
  final Span parentSpan = Trace.currentSpan();
  return new Callable<ByteBuffer>() {
    @Override
    public ByteBuffer call() throws Exception {
      byte[] buf = bb.array();
      int offset = bb.position();
      TraceScope scope =
          Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
      try {
        actualGetFromOneDataNode(datanode, block, start, end, buf, offset,
            corruptedBlockMap);
        return bb;
      } finally {
        scope.close();
      }
    }
  };
}
Example #15
Source File: ProtoUtil.java From hadoop with Apache License 2.0
public static RpcRequestHeaderProto makeRpcRequestHeader(RPC.RpcKind rpcKind,
    RpcRequestHeaderProto.OperationProto operation, int callId,
    int retryCount, byte[] uuid) {
  RpcRequestHeaderProto.Builder result = RpcRequestHeaderProto.newBuilder();
  result.setRpcKind(convert(rpcKind)).setRpcOp(operation).setCallId(callId)
      .setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid));

  // Add tracing info if we are currently tracing.
  if (Trace.isTracing()) {
    Span s = Trace.currentSpan();
    result.setTraceInfo(RPCTraceInfoProto.newBuilder()
        .setParentId(s.getSpanId())
        .setTraceId(s.getTraceId()).build());
  }

  return result.build();
}
Example #16
Source File: DFSClient.java From big-c with Apache License 2.0
public DatanodeStorageReport[] getDatanodeStorageReport(
    DatanodeReportType type) throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("datanodeStorageReport", traceSampler);
  try {
    return namenode.getDatanodeStorageReport(type);
  } finally {
    scope.close();
  }
}
Example #17
Source File: Sender.java From big-c with Apache License 2.0
@Override
public void releaseShortCircuitFds(SlotId slotId) throws IOException {
  ReleaseShortCircuitAccessRequestProto.Builder builder =
      ReleaseShortCircuitAccessRequestProto.newBuilder()
          .setSlotId(PBHelper.convert(slotId));
  if (Trace.isTracing()) {
    Span s = Trace.currentSpan();
    builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
        .setTraceId(s.getTraceId()).setParentId(s.getSpanId()));
  }
  ReleaseShortCircuitAccessRequestProto proto = builder.build();
  send(out, Op.RELEASE_SHORT_CIRCUIT_FDS, proto);
}
Example #18
Source File: CachePoolIterator.java From big-c with Apache License 2.0
@Override
public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
    throws IOException {
  TraceScope scope = Trace.startSpan("listCachePools", traceSampler);
  try {
    return namenode.listCachePools(prevKey);
  } finally {
    scope.close();
  }
}
Example #19
Source File: DataTransferProtoUtil.java From big-c with Apache License 2.0
static BaseHeaderProto buildBaseHeader(ExtendedBlock blk,
    Token<BlockTokenIdentifier> blockToken) {
  BaseHeaderProto.Builder builder = BaseHeaderProto.newBuilder()
      .setBlock(PBHelper.convert(blk))
      .setToken(PBHelper.convert(blockToken));
  if (Trace.isTracing()) {
    Span s = Trace.currentSpan();
    builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
        .setTraceId(s.getTraceId())
        .setParentId(s.getSpanId()));
  }
  return builder.build();
}
Example #20
Source File: DataTransferProtoUtil.java From big-c with Apache License 2.0
public static TraceScope continueTraceSpan(DataTransferTraceInfoProto proto,
    String description) {
  TraceScope scope = null;
  TraceInfo info = fromProto(proto);
  if (info != null) {
    scope = Trace.startSpan(description, info);
  }
  return scope;
}
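Examples #15, #17, and #19 are the sending side of cross-process tracing (copying the current span's trace ID and span ID into a protobuf header), and this example is the receiving side (rebuilding a TraceInfo and resuming the trace). A hypothetical end-to-end sketch of that round trip, using only the HTrace calls that appear in those examples and assuming TraceInfo's two-argument (traceId, parentId) constructor (the Propagation class and its method names are invented):

import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceInfo;
import org.apache.htrace.TraceScope;

public class Propagation {

  // Client side: extract the two ids that go into the request header.
  static long[] idsForWire() {
    if (!Trace.isTracing()) {
      return null; // no active trace, so send no trace info
    }
    Span s = Trace.currentSpan();
    return new long[] { s.getTraceId(), s.getSpanId() };
  }

  // Server side: resume the trace as a child of the remote span.
  static TraceScope resume(String description, long traceId, long parentId) {
    return Trace.startSpan(description, new TraceInfo(traceId, parentId));
  }
}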
Example #21
Source File: DFSClient.java From big-c with Apache License 2.0
/**
 * Get the difference between two snapshots, or between a snapshot and the
 * current tree of a directory.
 * @see ClientProtocol#getSnapshotDiffReport(String, String, String)
 */
public SnapshotDiffReport getSnapshotDiffReport(String snapshotDir,
    String fromSnapshot, String toSnapshot) throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("getSnapshotDiffReport", traceSampler);
  try {
    return namenode.getSnapshotDiffReport(snapshotDir,
        fromSnapshot, toSnapshot);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException();
  } finally {
    scope.close();
  }
}
Example #22
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * Dumps DFS data structures into specified file.
 *
 * @see ClientProtocol#metaSave(String)
 */
public void metaSave(String pathname) throws IOException {
  TraceScope scope = Trace.startSpan("metaSave", traceSampler);
  try {
    namenode.metaSave(pathname);
  } finally {
    scope.close();
  }
}
Example #23
Source File: DFSClient.java From big-c with Apache License 2.0
public DatanodeInfo[] datanodeReport(DatanodeReportType type)
    throws IOException {
  checkOpen();
  TraceScope scope = Trace.startSpan("datanodeReport", traceSampler);
  try {
    return namenode.getDatanodeReport(type);
  } finally {
    scope.close();
  }
}
Example #24
Source File: DFSClient.java From hadoop with Apache License 2.0
TraceScope getPathTraceScope(String description, String path) {
  TraceScope scope = Trace.startSpan(description, traceSampler);
  Span span = scope.getSpan();
  if (span != null) {
    if (path != null) {
      span.addKVAnnotation(PATH,
          path.getBytes(Charset.forName("UTF-8")));
    }
  }
  return scope;
}
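This helper gives every path-based client call a span with the file path attached as a key-value annotation, so traces can be filtered by file. A hypothetical caller inside the same class might look like this (the exists() method and its getFileInfo call are illustrative, not part of the example):

public boolean exists(String src) throws IOException {
  // The returned scope already carries the path annotation.
  TraceScope scope = getPathTraceScope("exists", src);
  try {
    return namenode.getFileInfo(src) != null;
  } finally {
    scope.close();
  }
}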
Example #25
Source File: DFSClient.java From big-c with Apache License 2.0
/**
 * Same as {@link #mkdirs(String, FsPermission, boolean)} except
 * that the permission has already been masked against umask.
 */
public boolean primitiveMkdir(String src, FsPermission absPermission,
    boolean createParent) throws IOException {
  checkOpen();
  if (absPermission == null) {
    absPermission = FsPermission.getDefault().applyUMask(dfsClientConf.uMask);
  }

  if (LOG.isDebugEnabled()) {
    LOG.debug(src + ": masked=" + absPermission);
  }
  TraceScope scope = Trace.startSpan("mkdir", traceSampler);
  try {
    return namenode.mkdirs(src, absPermission, createParent);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
        InvalidPathException.class,
        FileAlreadyExistsException.class,
        FileNotFoundException.class,
        ParentNotDirectoryException.class,
        SafeModeException.class,
        NSQuotaExceededException.class,
        DSQuotaExceededException.class,
        UnresolvedPathException.class,
        SnapshotAccessControlException.class);
  } finally {
    scope.close();
  }
}
Example #26
Source File: PhoenixTransactionalIndexer.java From phoenix with Apache License 2.0
@Override
public void postBatchMutateIndispensably(ObserverContext<RegionCoprocessorEnvironment> c,
    MiniBatchOperationInProgress<Mutation> miniBatchOp, final boolean success)
    throws IOException {
  BatchMutateContext context = getBatchMutateContext(c);
  if (context == null || context.indexUpdates == null) {
    return;
  }
  // get the current span, or just use a null-span to avoid a bunch of if statements
  try (TraceScope scope = Trace.startSpan("Starting to write index updates")) {
    Span current = scope.getSpan();
    if (current == null) {
      current = NullSpan.INSTANCE;
    }

    if (success) { // if miniBatchOp was successfully written, write index updates
      if (!context.indexUpdates.isEmpty()) {
        this.writer.write(context.indexUpdates, false, context.clientVersion);
      }
      current.addTimelineAnnotation("Wrote index updates");
    }
  } catch (Throwable t) {
    String msg = "Failed to write index updates:" + context.indexUpdates;
    LOGGER.error(msg, t);
    ServerUtil.throwIOException(msg, t);
  } finally {
    removeBatchMutateContext(c);
  }
}
Example #27
Source File: DFSClient.java From hadoop with Apache License 2.0
RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action)
    throws IOException {
  TraceScope scope = Trace.startSpan("rollingUpgrade", traceSampler);
  try {
    return namenode.rollingUpgrade(action);
  } finally {
    scope.close();
  }
}
Example #28
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * @see ClientProtocol#finalizeUpgrade()
 */
public void finalizeUpgrade() throws IOException {
  TraceScope scope = Trace.startSpan("finalizeUpgrade", traceSampler);
  try {
    namenode.finalizeUpgrade();
  } finally {
    scope.close();
  }
}
Example #29
Source File: DFSOutputStream.java From hadoop with Apache License 2.0
private void queueCurrentPacket() {
  synchronized (dataQueue) {
    if (currentPacket == null) {
      return;
    }
    currentPacket.addTraceParent(Trace.currentSpan());
    dataQueue.addLast(currentPacket);
    lastQueuedSeqno = currentPacket.getSeqno();
    if (DFSClient.LOG.isDebugEnabled()) {
      DFSClient.LOG.debug("Queued packet " + currentPacket.getSeqno());
    }
    currentPacket = null;
    dataQueue.notifyAll();
  }
}
Example #30
Source File: DFSClient.java From hadoop with Apache License 2.0
/**
 * Requests the namenode to tell all datanodes to use a new, non-persistent
 * bandwidth value for dfs.balance.bandwidthPerSec.
 * See {@link ClientProtocol#setBalancerBandwidth(long)}
 * for more details.
 *
 * @see ClientProtocol#setBalancerBandwidth(long)
 */
public void setBalancerBandwidth(long bandwidth) throws IOException {
  TraceScope scope = Trace.startSpan("setBalancerBandwidth", traceSampler);
  try {
    namenode.setBalancerBandwidth(bandwidth);
  } finally {
    scope.close();
  }
}