Java Code Examples for com.google.common.net.HostAndPort#getPort()
The following examples show how to use
com.google.common.net.HostAndPort#getPort().
They are drawn from open source projects; the source file, originating project, and license are listed above each example.
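Before the project examples, here is a minimal, self-contained sketch of the API itself; the host names and ports are illustrative and not taken from any of the projects below. Note that several of the examples call getHostText(); newer Guava releases deprecate that method in favor of getHost(), but both return the host portion of the endpoint.

import com.google.common.net.HostAndPort;

public class HostAndPortDemo {
    public static void main(String[] args) {
        // Parse a "host:port" string.
        HostAndPort endpoint = HostAndPort.fromString("example.com:8080");
        System.out.println(endpoint.getHost()); // example.com
        System.out.println(endpoint.getPort()); // 8080

        // getPort() throws IllegalStateException when no port was given,
        // so either check hasPort() first or fall back with withDefaultPort().
        HostAndPort bare = HostAndPort.fromString("example.com");
        int port = bare.hasPort() ? bare.getPort() : bare.withDefaultPort(443).getPort();
        System.out.println(port); // 443
    }
}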
Example 1
Source File: StreamableRDDTest_Failures.java From spliceengine with GNU Affero General Public License v3.0 | 6 votes |
@Test
public void testBasicStream() throws Exception {
    StreamListener<ExecRow> sl = new StreamListener<>();
    HostAndPort hostAndPort = server.getHostAndPort();
    server.register(sl);

    JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe()
            .parallelizePairs(tenRows, 2)
            .mapToPair(new FailsFunction(3));
    StreamableRDD srdd = new StreamableRDD(rdd.values(), sl.getUuid(),
            hostAndPort.getHostText(), hostAndPort.getPort());
    srdd.submit();

    Iterator<ExecRow> it = sl.getIterator();
    int count = 0;
    while (it.hasNext()) {
        ExecRow execRow = it.next();
        LOG.trace(execRow);
        count++;
        assertNotNull(execRow);
        assertTrue(execRow.getColumn(1).getInt() < 10);
    }
    assertEquals(10, count);
}
Example 2
Source File: CassandraNodeImpl.java From brooklyn-library with Apache License 2.0 | 6 votes |
@Override
public Long call() {
    Integer privatePort = entity.getThriftPort();
    if (privatePort == null) return -1L;

    HostAndPort hp = BrooklynAccessUtils.getBrooklynAccessibleAddress(entity, privatePort);
    try {
        long start = System.currentTimeMillis();
        Socket s = new Socket(hp.getHostText(), hp.getPort());
        s.close();
        long latency = System.currentTimeMillis() - start;
        return latency;
    } catch (Exception e) {
        Exceptions.propagateIfFatal(e);
        if (log.isDebugEnabled()) log.debug("Cassandra thrift port poll failure: " + e);
        return -1L;
    }
}
Example 3
Source File: Bootstrap.java From floodlight_with_topoguard with Apache License 2.0 | 6 votes |
public boolean bootstrap(HostAndPort seed, Node localNode) throws SyncException {
    this.localNode = localNode;
    succeeded = false;
    SocketAddress sa = new InetSocketAddress(seed.getHostText(), seed.getPort());
    ChannelFuture future = bootstrap.connect(sa);
    future.awaitUninterruptibly();
    if (!future.isSuccess()) {
        logger.debug("Could not connect to " + seed, future.getCause());
        return false;
    }
    Channel channel = future.getChannel();
    logger.debug("[{}] Connected to {}",
            localNode != null ? localNode.getNodeId() : null, seed);
    try {
        channel.getCloseFuture().await();
    } catch (InterruptedException e) {
        logger.debug("Interrupted while waiting for bootstrap");
        return succeeded;
    }
    return succeeded;
}
Example 4
Source File: TestApacheThriftMethodInvoker.java From drift with Apache License 2.0 | 6 votes |
private static int logThrift(HostAndPort address, List<LogEntry> messages) {
    try {
        TSocket socket = new TSocket(address.getHost(), address.getPort());
        socket.open();
        try {
            TBinaryProtocol tp = new TBinaryProtocol(new TFramedTransport(socket));
            assertEquals(new scribe.Client(tp).Log(messages), ResultCode.OK);
        } finally {
            socket.close();
        }
    } catch (TException e) {
        throw new RuntimeException(e);
    }
    return 1;
}
Example 5
Source File: StreamableRDDTest_Failures.java From spliceengine with GNU Affero General Public License v3.0 | 6 votes |
@Test
public void testFailureBoundary() throws Exception {
    StreamListener<ExecRow> sl = new StreamListener<>();
    HostAndPort hostAndPort = server.getHostAndPort();
    server.register(sl);

    JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe()
            .parallelizePairs(tenRows, 20)
            .mapToPair(new FailsFunction(4));
    StreamableRDD srdd = new StreamableRDD(rdd.values(), sl.getUuid(),
            hostAndPort.getHostText(), hostAndPort.getPort());
    srdd.submit();

    Iterator<ExecRow> it = sl.getIterator();
    int count = 0;
    while (it.hasNext()) {
        ExecRow execRow = it.next();
        LOG.trace(execRow);
        count++;
        assertNotNull(execRow);
        assertTrue(execRow.getColumn(1).getInt() < 10);
    }
    assertEquals(10, count);
}
Example 6
Source File: ProgressMonitor.java From secor with Apache License 2.0 | 6 votes |
public ProgressMonitor(SecorConfig config) throws Exception {
    mConfig = config;
    mZookeeperConnector = new ZookeeperConnector(mConfig);
    try {
        Class timestampClass = Class.forName(mConfig.getKafkaClientClass());
        this.mKafkaClient = (KafkaClient) timestampClass.newInstance();
        this.mKafkaClient.init(config);
    } catch (ClassNotFoundException | InstantiationException | IllegalAccessException e) {
        throw new RuntimeException(e);
    }
    mMessageParser = (MessageParser) ReflectionUtil.createMessageParser(
            mConfig.getMessageParserClass(), mConfig);

    mPrefix = mConfig.getMonitoringPrefix();
    if (Strings.isNullOrEmpty(mPrefix)) {
        mPrefix = "secor";
    }

    if (mConfig.getStatsDHostPort() != null && !mConfig.getStatsDHostPort().isEmpty()) {
        HostAndPort hostPort = HostAndPort.fromString(mConfig.getStatsDHostPort());
        mStatsDClient = new NonBlockingStatsDClient(null, hostPort.getHost(), hostPort.getPort(),
                mConfig.getStatsDDogstatsdConstantTags());
    }
}
Example 7
Source File: OperatorUtil.java From doctorkafka with Apache License 2.0 | 6 votes |
public static void startOstrichService(String serviceName, String tsdbHostPort, int ostrichPort) {
    final int TSDB_METRICS_PUSH_INTERVAL_IN_MILLISECONDS = 10 * 1000;
    OstrichAdminService ostrichService = new OstrichAdminService(ostrichPort);
    ostrichService.startAdminHttpService();
    if (tsdbHostPort != null) {
        LOG.info("Starting the OpenTsdb metrics pusher");
        try {
            HostAndPort pushHostPort = HostAndPort.fromString(tsdbHostPort);
            MetricsPusher metricsPusher = new MetricsPusher(
                    pushHostPort.getHost(),
                    pushHostPort.getPort(),
                    new OpenTsdbMetricConverter(serviceName, HostName),
                    TSDB_METRICS_PUSH_INTERVAL_IN_MILLISECONDS);
            metricsPusher.start();
            LOG.info("OpenTsdb metrics pusher started!");
        } catch (Throwable t) {
            // pusher fail is OK
            LOG.error("Exception when starting stats pusher: ", t);
        }
    }
}
Example 8
Source File: StreamableRDDTest_Failures.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
@Test
public void testFailureAfterLimit() throws StandardException {
    StreamListener<ExecRow> sl = new StreamListener<>(40000, 300);
    HostAndPort hostAndPort = server.getHostAndPort();
    server.register(sl);

    List<Tuple2<ExecRow, ExecRow>> manyRows = new ArrayList<>();
    for (int i = 0; i < 100000; ++i) {
        manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
    }
    JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe()
            .parallelizePairs(manyRows, 13)
            .mapToPair(new FailsFunction(40301));
    final StreamableRDD srdd = new StreamableRDD(rdd.values(), sl.getUuid(),
            hostAndPort.getHostText(), hostAndPort.getPort());
    new Thread() {
        @Override
        public void run() {
            try {
                srdd.submit();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }.start();

    Iterator<ExecRow> it = sl.getIterator();
    int count = 0;
    int first = 300;
    while (it.hasNext()) {
        ExecRow execRow = it.next();
        assertNotNull(execRow);
        assertEquals(count + first, execRow.getColumn(1).getInt());
        count++;
    }
    assertEquals(40000, count);
}
Example 9
Source File: StreamableRDDTest_Failures.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
@Test
public void testFailureAfterOffset() throws StandardException {
    StreamListener<ExecRow> sl = new StreamListener<>(40000, 300);
    HostAndPort hostAndPort = server.getHostAndPort();
    server.register(sl);

    List<Tuple2<ExecRow, ExecRow>> manyRows = new ArrayList<>();
    for (int i = 0; i < 100000; ++i) {
        manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
    }
    JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe()
            .parallelizePairs(manyRows, 13)
            .mapToPair(new FailsFunction(14000));
    final StreamableRDD srdd = new StreamableRDD(rdd.values(), sl.getUuid(),
            hostAndPort.getHostText(), hostAndPort.getPort());
    new Thread() {
        @Override
        public void run() {
            try {
                srdd.submit();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }.start();

    Iterator<ExecRow> it = sl.getIterator();
    int count = 0;
    int first = 300;
    while (it.hasNext()) {
        ExecRow execRow = it.next();
        assertNotNull(execRow);
        assertEquals(count + first, execRow.getColumn(1).getInt());
        count++;
    }
    assertEquals(40000, count);
}
Example 10
Source File: PortMapping.java From brooklyn-server with Apache License 2.0 | 5 votes |
public PortMapping(String publicIpId, HostAndPort publicEndpoint, Location target, int privatePort) {
    this.publicIpId = checkNotNull(publicIpId, "publicIpId");
    this.publicEndpoint = checkNotNull(publicEndpoint, "publicEndpoint");
    this.publicPort = publicEndpoint.getPort();
    this.target = target;
    this.privatePort = privatePort;
}
Example 11
Source File: MongoClientWrapper.java From mongowp with Apache License 2.0 | 5 votes |
private void testAddress(HostAndPort address, MongoClientOptions options)
        throws UnreachableMongoServerException {
    SocketAddress sa = new InetSocketAddress(address.getHostText(), address.getPort());
    try (Socket s = options.getSocketFactory().createSocket()) {
        s.connect(sa, 3000);
    } catch (IOException ex) {
        throw new UnreachableMongoServerException(address, ex);
    }
}
Example 12
Source File: MongoDBClientSupport.java From brooklyn-library with Apache License 2.0 | 5 votes |
/**
 * Creates a {@link MongoDBClientSupport} instance in standalone mode.
 */
public static MongoDBClientSupport forServer(AbstractMongoDBServer standalone) throws UnknownHostException {
    HostAndPort hostAndPort = BrooklynAccessUtils.getBrooklynAccessibleAddress(standalone,
            standalone.getAttribute(MongoDBServer.PORT));
    ServerAddress address = new ServerAddress(hostAndPort.getHostText(), hostAndPort.getPort());
    if (MongoDBAuthenticationUtils.usesAuthentication(standalone)) {
        return new MongoDBClientSupport(address,
                standalone.sensors().get(MongoDBAuthenticationMixins.ROOT_USERNAME),
                standalone.sensors().get(MongoDBAuthenticationMixins.ROOT_PASSWORD),
                standalone.sensors().get(MongoDBAuthenticationMixins.AUTHENTICATION_DATABASE));
    } else {
        return new MongoDBClientSupport(address);
    }
}
Example 13
Source File: StreamableRDDTest_Failures.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
@Test
public void testFailureDuringRecoveryWarmup() throws StandardException, FileNotFoundException, UnsupportedEncodingException {
    int size = 100000;
    int batches = 2;
    int batchSize = 512;
    StreamListener<ExecRow> sl = new StreamListener<>(-1, 0, batches, batchSize);
    HostAndPort hostAndPort = server.getHostAndPort();
    server.register(sl);

    List<Tuple2<ExecRow, ExecRow>> manyRows = new ArrayList<>();
    for (int i = 0; i < size; ++i) {
        manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
    }
    JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe()
            .parallelizePairs(manyRows, 2)
            .sortByKey()
            .mapToPair(new FailsTwiceFunction(10000, 100));
    final StreamableRDD srdd = new StreamableRDD(rdd.values(), null, sl.getUuid(),
            hostAndPort.getHostText(), hostAndPort.getPort(), batches, batchSize);
    new Thread() {
        @Override
        public void run() {
            try {
                srdd.submit();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }.start();

    Iterator<ExecRow> it = sl.getIterator();
    int count = 0;
    while (it.hasNext()) {
        ExecRow execRow = it.next();
        assertNotNull(execRow);
        count++;
    }
    assertEquals(size, count);
}
Example 14
Source File: StreamableRDDTest.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
@Test
public void testOffset() throws StandardException {
    StreamListener<ExecRow> sl = new StreamListener<>(-1, 60000);
    HostAndPort hostAndPort = server.getHostAndPort();
    server.register(sl);

    List<Tuple2<ExecRow, ExecRow>> manyRows = new ArrayList<>();
    for (int i = 0; i < 100000; ++i) {
        manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
    }
    JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe().parallelizePairs(manyRows, 13);
    final StreamableRDD srdd = new StreamableRDD(rdd.values(), sl.getUuid(),
            hostAndPort.getHostText(), hostAndPort.getPort());
    new Thread() {
        @Override
        public void run() {
            try {
                srdd.submit();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }.start();

    Iterator<ExecRow> it = sl.getIterator();
    int count = 0;
    int first = 60000;
    while (it.hasNext()) {
        ExecRow execRow = it.next();
        assertNotNull(execRow);
        assertEquals(count + first, execRow.getColumn(1).getInt());
        count++;
    }
    assertEquals(100000 - 60000, count);
}
Example 15
Source File: StreamableRDDTest.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
@Test
public void testSmallLimit() throws StandardException {
    int limit = 2000;
    int offset = 0;
    int total = 4000;
    int batches = 2;
    int batchSize = 512;
    StreamListener<ExecRow> sl = new StreamListener<>(limit, offset, batches, batchSize);
    HostAndPort hostAndPort = server.getHostAndPort();
    server.register(sl);

    List<Tuple2<ExecRow, ExecRow>> manyRows = new ArrayList<>();
    for (int i = 0; i < total; ++i) {
        manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
    }
    JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe().parallelizePairs(manyRows, 1);
    final StreamableRDD srdd = new StreamableRDD(rdd.values(), null, sl.getUuid(),
            hostAndPort.getHostText(), hostAndPort.getPort(), batches, batchSize);
    new Thread() {
        @Override
        public void run() {
            try {
                srdd.submit();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }.start();

    Iterator<ExecRow> it = sl.getIterator();
    int count = 0;
    int first = offset;
    while (it.hasNext()) {
        ExecRow execRow = it.next();
        assertNotNull(execRow);
        assertEquals(count + first, execRow.getColumn(1).getInt());
        count++;
    }
    assertEquals(limit, count);
}
Example 16
Source File: StreamableRDDTest.java From spliceengine with GNU Affero General Public License v3.0 | 5 votes |
@Test
public void testBlockingLarge() throws StandardException {
    StreamListener<ExecRow> sl = new StreamListener<>();
    HostAndPort hostAndPort = server.getHostAndPort();
    server.register(sl);

    List<Tuple2<ExecRow, ExecRow>> manyRows = new ArrayList<>();
    for (int i = 0; i < 100000; ++i) {
        manyRows.add(new Tuple2<ExecRow, ExecRow>(getExecRow(i, 1), getExecRow(i, 2)));
    }
    JavaPairRDD<ExecRow, ExecRow> rdd = SpliceSpark.getContextUnsafe().parallelizePairs(manyRows, 12);
    final StreamableRDD srdd = new StreamableRDD(rdd.values(), sl.getUuid(),
            hostAndPort.getHostText(), hostAndPort.getPort());
    new Thread() {
        @Override
        public void run() {
            try {
                srdd.submit();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }.start();

    Iterator<ExecRow> it = sl.getIterator();
    int count = 0;
    while (it.hasNext()) {
        ExecRow execRow = it.next();
        count++;
        assertNotNull(execRow);
    }
    assertEquals(100000, count);
}
Example 17
Source File: Replica.java From TakinRPC with Apache License 2.0 | 5 votes |
@Nonnull
public static Replica fromString(@Nonnull String info) {
    try {
        checkNotNull(info);
        HostAndPort hostAndPort = HostAndPort.fromString(info);
        InetAddress addr = InetAddress.getByName(hostAndPort.getHostText());
        InetSocketAddress saddr = new InetSocketAddress(addr, hostAndPort.getPort());
        return new Replica(saddr);
    } catch (UnknownHostException e) {
        throw Throwables.propagate(e);
    }
}
Example 18
Source File: PortForwardManagerImpl.java From brooklyn-server with Apache License 2.0 | 5 votes |
protected void associateImpl(String publicIpId, HostAndPort publicEndpoint, Location l, int privatePort) {
    synchronized (mutex) {
        String publicIp = publicEndpoint.getHostText();
        int publicPort = publicEndpoint.getPort();
        recordPublicIpHostname(publicIpId, publicIp);
        PortMapping mapping = new PortMapping(publicIpId, publicEndpoint, l, privatePort);
        PortMapping oldMapping = getPortMappingWithPublicSide(publicIpId, publicPort);
        log.debug(this + " associating public " + publicEndpoint + " on " + publicIpId
                + " with private port " + privatePort + " at " + l + " (" + mapping + ")"
                + (oldMapping == null ? "" : " (overwriting " + oldMapping + " )"));
        mappings.put(makeKey(publicIpId, publicPort), mapping);
    }
    onChanged();
}
Example 19
Source File: CassandraThriftFacade.java From emodb with Apache License 2.0 | 4 votes |
/** Note: <code>seeds</code> may be a single host or comma-delimited list. */
public static CassandraThriftFacade forSeedsAndPort(String seeds, int defaultPort) {
    final String seed = seeds.contains(",") ? seeds.substring(0, seeds.indexOf(',')) : seeds;
    HostAndPort host = HostAndPort.fromString(seed).withDefaultPort(defaultPort);
    return new CassandraThriftFacade(new TFramedTransport(new TSocket(host.getHostText(), host.getPort())));
}
Example 20
Source File: RedisCommandInvoker.java From hanboDB with Apache License 2.0 | 4 votes |
public void startAsyncReplication(String slaverHost) {
    HostAndPort hostAndPort = HostAndPort.fromString(slaverHost);
    Jedis jedis = new Jedis(hostAndPort.getHostText(), hostAndPort.getPort(), 60 * 1000, 60 * 1000);
    jedis.ping();
    slaverHostList.add(jedis);
}