org.apache.htrace.core.Tracer Java Examples
The following examples show how to use
org.apache.htrace.core.Tracer.
The source file, originating project, and license are noted above each example.
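Before the project examples, here is a minimal sketch of the typical Tracer lifecycle as it appears in the examples below: build a Tracer from an HTraceConfiguration, open and close TraceScopes around traced work, then close the Tracer. The configuration key "sampler.classes" and the AlwaysSampler class are taken from the examples; the class name TracerSketch, the tracer name "ExampleTracer", and the doWork() helper are illustrative placeholders, not part of any project shown here.

// Minimal sketch (assumptions noted above), not taken from a specific project.
import org.apache.htrace.core.AlwaysSampler;
import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class TracerSketch {
  public static void main(String[] args) {
    // Configure a sampler so every span is recorded; span receivers can be configured the same way.
    HTraceConfiguration conf = HTraceConfiguration.fromKeyValuePairs(
        "sampler.classes", AlwaysSampler.class.getName());
    Tracer tracer = new Tracer.Builder("ExampleTracer").conf(conf).build();
    // newScope() starts a span; closing the scope ends it (TraceScope is Closeable).
    try (TraceScope scope = tracer.newScope("exampleOperation")) {
      doWork();
    }
    tracer.close(); // flush and release any configured span receivers
  }

  private static void doWork() {
    // traced work goes here
  }
}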
Example #1
Source File: TestZipkinSpanReceiver.java From incubator-retired-htrace with Apache License 2.0 | 6 votes |
@Test
public void testSimpleTraces() throws IOException, InterruptedException {
  FakeZipkinTransport transport = new FakeZipkinTransport();
  Tracer tracer = newTracer(transport);
  Span rootSpan = new MilliSpan.Builder().
      description("root").
      spanId(new SpanId(100, 100)).
      tracerId("test").
      begin(System.currentTimeMillis()).
      build();
  TraceScope rootScope = tracer.newScope("root");
  TraceScope innerOne = tracer.newScope("innerOne");
  TraceScope innerTwo = tracer.newScope("innerTwo");
  innerTwo.close();
  Assert.assertTrue(transport.nextMessageAsSpan().getName().contains("innerTwo"));
  innerOne.close();
  Assert.assertTrue(transport.nextMessageAsSpan().getName().contains("innerOne"));
  rootSpan.addKVAnnotation("foo", "bar");
  rootSpan.addTimelineAnnotation("timeline");
  rootScope.close();
  Assert.assertTrue(transport.nextMessageAsSpan().getName().contains("root"));
  tracer.close();
}
Example #2
Source File: TestFlumeSpanReceiver.java From incubator-retired-htrace with Apache License 2.0 | 6 votes |
@Test(timeout=120000)
public void testSimpleTraces() throws IOException, InterruptedException {
  Tracer tracer = newTracer();
  Span rootSpan = new MilliSpan.Builder().
      description("root").
      spanId(new SpanId(100, 100)).
      tracerId("test").
      begin(System.currentTimeMillis()).
      build();
  TraceScope rootScope = tracer.newScope("root");
  TraceScope innerOne = tracer.newScope("innerOne");
  TraceScope innerTwo = tracer.newScope("innerTwo");
  innerTwo.close();
  Assert.assertTrue(flumeServer.nextEventBodyAsString().contains("innerTwo"));
  innerOne.close();
  Assert.assertTrue(flumeServer.nextEventBodyAsString().contains("innerOne"));
  rootSpan.addKVAnnotation("foo", "bar");
  rootSpan.addTimelineAnnotation("timeline");
  rootScope.close();
  Assert.assertTrue(flumeServer.nextEventBodyAsString().contains("root"));
  tracer.close();
}
Example #3
Source File: HBaseSpanReceiver.java From incubator-retired-htrace with Apache License 2.0 | 6 votes |
/**
 * Run a basic test. Adds spans to an existing htrace table in an existing hbase setup.
 * Requires a running hbase to send the traces to, with an already created trace
 * table (the default table name is 'htrace' with column families 's' and 'i').
 *
 * @param args Default arguments passed to the main method
 * @throws InterruptedException Thread.sleep() can cause interruption in the current thread.
 */
public static void main(String[] args) throws Exception {
  Tracer tracer = new Tracer.Builder().
      conf(new HBaseHTraceConfiguration(HBaseConfiguration.create())).
      build();
  tracer.addSampler(Sampler.ALWAYS);
  TraceScope parent = tracer.newScope("HBaseSpanReceiver.main.parent");
  Thread.sleep(10);
  long traceid = parent.getSpan().getSpanId().getHigh();
  TraceScope child1 = tracer.newScope("HBaseSpanReceiver.main.child.1");
  Thread.sleep(10);
  child1.close();
  TraceScope child2 = tracer.newScope("HBaseSpanReceiver.main.child.2");
  Thread.sleep(10);
  TraceScope gchild = tracer.newScope("HBaseSpanReceiver.main.grandchild");
  gchild.addTimelineAnnotation("annotation 1.");
  Thread.sleep(10);
  gchild.addTimelineAnnotation("annotation 2.");
  gchild.close();
  Thread.sleep(10);
  child2.close();
  Thread.sleep(10);
  parent.close();
  tracer.close();
  System.out.println("trace id: " + traceid);
}
Example #4
Source File: TestFlumeSpanReceiver.java From incubator-retired-htrace with Apache License 2.0 | 5 votes |
@Test(timeout=120000)
public void testResilience() throws IOException {
  Tracer tracer = newTracer();
  TraceCreator traceCreator = new TraceCreator(tracer);
  flumeServer.alwaysFail();
  traceCreator.createThreadedTrace();
}
Example #5
Source File: HadoopShim.java From pentaho-hadoop-shims with Apache License 2.0 | 5 votes |
@Override
public Class[] getHbaseDependencyClasses() {
  return new Class[] {
      HConstants.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.class,
      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.class, Put.class,
      RpcServer.class, CompatibilityFactory.class, JobUtil.class, TableMapper.class,
      FastLongHistogram.class, Snapshot.class, ZooKeeper.class, Channel.class, Message.class,
      UnsafeByteOperations.class, Lists.class, Tracer.class, MetricRegistry.class,
      ArrayUtils.class, ObjectMapper.class, Versioned.class, JsonView.class, ZKWatcher.class
  };
}
Example #6
Source File: HadoopShim.java From pentaho-hadoop-shims with Apache License 2.0 | 5 votes |
@Override
public Class[] getHbaseDependencyClasses() {
  return new Class[] {
      HConstants.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.class,
      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.class, Put.class,
      RpcServer.class, CompatibilityFactory.class, JobUtil.class, TableMapper.class,
      FastLongHistogram.class, Snapshot.class, ZooKeeper.class, Channel.class, Message.class,
      UnsafeByteOperations.class, Lists.class, Tracer.class, MetricRegistry.class,
      ArrayUtils.class, ObjectMapper.class, Versioned.class, JsonView.class, ZKWatcher.class
  };
}
Example #7
Source File: HadoopShim.java From pentaho-hadoop-shims with Apache License 2.0 | 5 votes |
@Override
public Class[] getHbaseDependencyClasses() {
  return new Class[] {
      HConstants.class, org.apache.hadoop.hbase.protobuf.generated.ClientProtos.class,
      org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.class, Put.class,
      RpcServer.class, CompatibilityFactory.class, JobUtil.class, TableMapper.class,
      FastLongHistogram.class, Snapshot.class, ZooKeeper.class, Channel.class, Message.class,
      UnsafeByteOperations.class, Lists.class, Tracer.class, MetricRegistry.class,
      ArrayUtils.class, ObjectMapper.class, Versioned.class, JsonView.class, ZKWatcher.class,
      CacheLoader.class
  };
}
Example #8
Source File: TraceUtil.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Wrapper method to add a timeline annotation to the current span with the given message.
 */
public static void addTimelineAnnotation(String msg) {
  Span span = Tracer.getCurrentSpan();
  if (span != null) {
    span.addTimelineAnnotation(msg);
  }
}
Example #9
Source File: TraceUtil.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Wrapper method to add a key-value pair to the TraceInfo of the current span.
 */
public static void addKVAnnotation(String key, String value) {
  Span span = Tracer.getCurrentSpan();
  if (span != null) {
    span.addKVAnnotation(key, value);
  }
}
Example #10
Source File: TraceUtil.java From hbase with Apache License 2.0 | 5 votes |
public static void initTracer(Configuration c) {
  if (c != null) {
    conf = new HBaseHTraceConfiguration(c);
  }
  if (tracer == null && conf != null) {
    tracer = new Tracer.Builder("Tracer").conf(conf).build();
  }
}
Example #11
Source File: Call.java From hbase with Apache License 2.0 | 5 votes |
protected Call(int id, final Descriptors.MethodDescriptor md, Message param,
    final CellScanner cells, final Message responseDefaultType, int timeout, int priority,
    RpcCallback<Call> callback, MetricsConnection.CallStats callStats) {
  this.param = param;
  this.md = md;
  this.cells = cells;
  this.callStats = callStats;
  this.callStats.setStartTime(EnvironmentEdgeManager.currentTime());
  this.responseDefaultType = responseDefaultType;
  this.id = id;
  this.timeout = timeout;
  this.priority = priority;
  this.callback = callback;
  this.span = Tracer.getCurrentSpan();
}
Example #12
Source File: EventHandler.java From hbase with Apache License 2.0 | 5 votes |
/**
 * Default base class constructor.
 */
public EventHandler(Server server, EventType eventType) {
  this.parent = Tracer.getCurrentSpan();
  this.server = server;
  this.eventType = eventType;
  seqid = seqids.incrementAndGet();
  if (server != null) {
    this.waitingTimeForEvents = server.getConfiguration().
        getInt("hbase.master.event.waiting.time", 1000);
  }
}
Example #13
Source File: TestFlumeSpanReceiver.java From incubator-retired-htrace with Apache License 2.0 | 5 votes |
@Test(timeout=120000)
public void testConcurrency() throws IOException {
  Tracer tracer = newTracer();
  TraceCreator traceCreator = new TraceCreator(tracer);
  flumeServer.alwaysOk();
  traceCreator.createThreadedTrace();
}
Example #14
Source File: TestFlumeSpanReceiver.java From incubator-retired-htrace with Apache License 2.0 | 5 votes |
private Tracer newTracer() {
  return new Tracer.Builder().
      name("FlumeTracer").
      tracerPool(new TracerPool("newTracer")).
      conf(HTraceConfiguration.fromKeyValuePairs(
          FlumeSpanReceiver.FLUME_PORT_KEY, Integer.toString(flumeServer.getPort()),
          "span.receiver.classes", FlumeSpanReceiver.class.getName(),
          "sampler.classes", AlwaysSampler.class.getName()
      )).build();
}
Example #15
Source File: TestZipkinSpanReceiver.java From incubator-retired-htrace with Apache License 2.0 | 5 votes |
@Test
public void testConcurrency() throws IOException {
  Tracer tracer = newTracer(new FakeZipkinTransport() {
    @Override
    public void send(List<byte[]> spans) throws IOException { /* do nothing */ }
  });
  TraceCreator traceCreator = new TraceCreator(tracer);
  traceCreator.createThreadedTrace();
}
Example #16
Source File: TestZipkinSpanReceiver.java From incubator-retired-htrace with Apache License 2.0 | 5 votes |
private Tracer newTracer(final Transport transport) {
  TracerPool pool = new TracerPool("newTracer");
  pool.addReceiver(new ZipkinSpanReceiver(HTraceConfiguration.EMPTY) {
    @Override
    protected Transport createTransport(HTraceConfiguration conf) {
      return transport;
    }
  });
  return new Tracer.Builder("ZipkinTracer").
      tracerPool(pool).
      conf(HTraceConfiguration.fromKeyValuePairs(
          "sampler.classes", AlwaysSampler.class.getName()
      )).
      build();
}
Example #17
Source File: ITZipkinReceiver.java From incubator-retired-htrace with Apache License 2.0 | 4 votes |
@Test
public void testKafkaTransport() throws Exception {
  String topic = "zipkin";

  // Kafka setup
  EmbeddedZookeeper zkServer = new EmbeddedZookeeper(TestZKUtils.zookeeperConnect());
  ZkClient zkClient = new ZkClient(zkServer.connectString(), 30000, 30000, ZKStringSerializer$.MODULE$);
  Properties props = TestUtils.createBrokerConfig(0, TestUtils.choosePort(), false);
  KafkaConfig config = new KafkaConfig(props);
  KafkaServer kafkaServer = TestUtils.createServer(config, new MockTime());
  Buffer<KafkaServer> servers = JavaConversions.asScalaBuffer(Collections.singletonList(kafkaServer));
  TestUtils.createTopic(zkClient, topic, 1, 1, servers, new Properties());
  zkClient.close();
  TestUtils.waitUntilMetadataIsPropagated(servers, topic, 0, 5000);

  // HTrace
  HTraceConfiguration hTraceConfiguration = HTraceConfiguration.fromKeyValuePairs(
      "sampler.classes", "AlwaysSampler",
      "span.receiver.classes", ZipkinSpanReceiver.class.getName(),
      "zipkin.kafka.metadata.broker.list", config.advertisedHostName() + ":" + config.advertisedPort(),
      "zipkin.kafka.topic", topic,
      ZipkinSpanReceiver.TRANSPORT_CLASS_KEY, KafkaTransport.class.getName()
  );

  final Tracer tracer = new Tracer.Builder("test-tracer")
      .tracerPool(new TracerPool("test-tracer-pool"))
      .conf(hTraceConfiguration)
      .build();

  String scopeName = "test-kafka-transport-scope";
  TraceScope traceScope = tracer.newScope(scopeName);
  traceScope.close();
  tracer.close();

  // Kafka consumer
  Properties consumerProps = new Properties();
  consumerProps.put("zookeeper.connect", props.getProperty("zookeeper.connect"));
  consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "testing.group");
  consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "smallest");
  ConsumerConnector connector =
      kafka.consumer.Consumer.createJavaConsumerConnector(new kafka.consumer.ConsumerConfig(consumerProps));
  Map<String, Integer> topicCountMap = new HashMap<>();
  topicCountMap.put(topic, 1);
  Map<String, List<KafkaStream<byte[], byte[]>>> streams = connector.createMessageStreams(topicCountMap);
  ConsumerIterator<byte[], byte[]> it = streams.get(topic).get(0).iterator();

  // Test
  Assert.assertTrue("We should have one message in Kafka", it.hasNext());
  Span span = new Span();
  new TDeserializer(new TBinaryProtocol.Factory()).deserialize(span, it.next().message());
  Assert.assertEquals("The span name should match our scope description", span.getName(), scopeName);

  kafkaServer.shutdown();
}