org.apache.cassandra.config.Config Java Examples
The following examples show how to use
org.apache.cassandra.config.Config.
The original project and source file are noted above each example.
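A pattern that recurs throughout the examples below is toggling Config's static client-mode flag before running Cassandra's offline tooling (SSTable writers, bulk loaders). The following is a minimal sketch of that pattern, assuming the Cassandra classes shown in the examples are on the classpath; the commented-out tool invocation is a hypothetical placeholder, not project code.

import org.apache.cassandra.config.Config;

public class ClientModeSketch
{
    public static void main(String[] args)
    {
        // Put the static Config flag into client mode so offline tools
        // skip server-only setup such as loading system tables.
        Config.setClientMode(true);
        try
        {
            // ... run an offline tool here (hypothetical placeholder) ...
        }
        finally
        {
            // Restore the default, as the @AfterClass tear-downs in
            // Examples #15, #17 and #18 below do.
            Config.setClientMode(false);
        }
    }
}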
Example #1
Source File: JVMStabilityInspector.java From stratio-cassandra with Apache License 2.0 | 6 votes |
/**
 * Certain Throwables and Exceptions represent "Die" conditions for the server.
 * @param t The Throwable to check for server-stop conditions
 */
public static void inspectThrowable(Throwable t)
{
    boolean isUnstable = false;

    if (t instanceof OutOfMemoryError)
        isUnstable = true;

    if (DatabaseDescriptor.getDiskFailurePolicy() == Config.DiskFailurePolicy.die)
        if (t instanceof FSError || t instanceof CorruptSSTableException)
            isUnstable = true;

    // Check for file handle exhaustion
    if (t instanceof FileNotFoundException || t instanceof SocketException)
        if (t.getMessage().contains("Too many open files"))
            isUnstable = true;

    if (isUnstable)
        killer.killCurrentJVM(t);
}
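For context, here is a hedged sketch of how calling code typically hands unexpected Throwables to this inspector. The wrapper class and the doWrite() helper are illustrative assumptions, not code from the project.

import java.io.IOException;
import org.apache.cassandra.utils.JVMStabilityInspector;

public class InspectOnFailureSketch
{
    public void writeWithInspection() throws IOException
    {
        try
        {
            doWrite(); // hypothetical I/O that may fail
        }
        catch (Throwable t)
        {
            // Let the inspector decide whether this Throwable is fatal
            // (OutOfMemoryError, or disk errors under the "die" disk failure policy).
            JVMStabilityInspector.inspectThrowable(t);
            throw t;
        }
    }

    private void doWrite() throws IOException
    {
        // hypothetical placeholder
    }
}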
Example #2
Source File: CommitLog.java From stratio-cassandra with Apache License 2.0 | 6 votes |
private CommitLog()
{
    DatabaseDescriptor.createAllDirectories();

    allocator = new CommitLogSegmentManager();

    executor = DatabaseDescriptor.getCommitLogSync() == Config.CommitLogSync.batch
             ? new BatchCommitLogService(this)
             : new PeriodicCommitLogService(this);

    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    try
    {
        mbs.registerMBean(this, new ObjectName("org.apache.cassandra.db:type=Commitlog"));
    }
    catch (Exception e)
    {
        throw new RuntimeException(e);
    }

    // register metrics
    metrics = new CommitLogMetrics(executor, allocator);
}
Example #3
Source File: OutboundTcpConnectionPool.java From stratio-cassandra with Apache License 2.0 | 6 votes |
public static Socket newSocket(InetAddress endpoint) throws IOException
{
    // zero means 'bind on any available port.'
    if (isEncryptedChannel(endpoint))
    {
        if (Config.getOutboundBindAny())
            return SSLFactory.getSocket(DatabaseDescriptor.getServerEncryptionOptions(), endpoint, DatabaseDescriptor.getSSLStoragePort());
        else
            return SSLFactory.getSocket(DatabaseDescriptor.getServerEncryptionOptions(), endpoint, DatabaseDescriptor.getSSLStoragePort(), FBUtilities.getLocalAddress(), 0);
    }
    else
    {
        Socket socket = SocketChannel.open(new InetSocketAddress(endpoint, DatabaseDescriptor.getStoragePort())).socket();
        if (Config.getOutboundBindAny() && !socket.isBound())
            socket.bind(new InetSocketAddress(FBUtilities.getLocalAddress(), 0));
        return socket;
    }
}
Example #4
Source File: CommitLogTest.java From stratio-cassandra with Apache License 2.0 | 6 votes |
@Test
public void testCommitFailurePolicy_die()
{
    KillerForTests killerForTests = new KillerForTests();
    JVMStabilityInspector.Killer originalKiller = JVMStabilityInspector.replaceKiller(killerForTests);

    Config.CommitFailurePolicy oldPolicy = DatabaseDescriptor.getCommitFailurePolicy();
    try
    {
        DatabaseDescriptor.setCommitFailurePolicy(Config.CommitFailurePolicy.die);
        CommitLog.handleCommitError("Testing die policy", new Throwable());
        Assert.assertTrue(killerForTests.wasKilled());
    }
    finally
    {
        DatabaseDescriptor.setCommitFailurePolicy(oldPolicy);
        JVMStabilityInspector.replaceKiller(originalKiller);
    }
}
Example #5
Source File: CommitLogTest.java From stratio-cassandra with Apache License 2.0 | 6 votes |
@Test
public void testCommitFailurePolicy_stop() throws ConfigurationException
{
    // Need storage service active so stop policy can shutdown gossip
    StorageService.instance.initServer();
    Assert.assertTrue(Gossiper.instance.isEnabled());

    Config.CommitFailurePolicy oldPolicy = DatabaseDescriptor.getCommitFailurePolicy();
    try
    {
        DatabaseDescriptor.setCommitFailurePolicy(Config.CommitFailurePolicy.stop);
        CommitLog.handleCommitError("Test stop error", new Throwable());
        Assert.assertFalse(Gossiper.instance.isEnabled());
    }
    finally
    {
        DatabaseDescriptor.setCommitFailurePolicy(oldPolicy);
    }
}
Example #6
Source File: OffsetAwareConfigurationLoader.java From stratio-cassandra with Apache License 2.0 | 6 votes |
@Override
public Config loadConfig() throws ConfigurationException
{
    Config config = super.loadConfig();

    config.rpc_port += offset;
    config.native_transport_port += offset;
    config.storage_port += offset;

    config.commitlog_directory += File.pathSeparator + offset;
    config.saved_caches_directory += File.pathSeparator + offset;

    for (int i = 0; i < config.data_file_directories.length; i++)
        config.data_file_directories[i] += File.pathSeparator + offset;

    return config;
}
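A custom loader like this only takes effect if DatabaseDescriptor is told to use it. The sketch below shows one way that wiring might look; it assumes the cassandra.config.loader system property is what this code base consults when choosing a ConfigurationLoader, so treat the property name and the bootstrap class as illustrative rather than authoritative.

import org.apache.cassandra.config.Config;
import org.apache.cassandra.config.DatabaseDescriptor;

public class OffsetLoaderBootstrapSketch
{
    public static void main(String[] args) throws Exception
    {
        // Assumption: DatabaseDescriptor picks its ConfigurationLoader from the
        // "cassandra.config.loader" system property.
        System.setProperty("cassandra.config.loader",
                           OffsetAwareConfigurationLoader.class.getName());

        Config config = DatabaseDescriptor.loadConfig();
        // Ports and directories now carry the configured offset.
        System.out.println("rpc_port with offset: " + config.rpc_port);
    }
}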
Example #7
Source File: JVMStabilityInspectorTest.java From stratio-cassandra with Apache License 2.0 | 5 votes |
@Test
public void testKill() throws Exception
{
    KillerForTests killerForTests = new KillerForTests();
    JVMStabilityInspector.Killer originalKiller = JVMStabilityInspector.replaceKiller(killerForTests);

    Config.DiskFailurePolicy oldPolicy = DatabaseDescriptor.getDiskFailurePolicy();
    Config.CommitFailurePolicy oldCommitPolicy = DatabaseDescriptor.getCommitFailurePolicy();
    try
    {
        killerForTests.reset();
        JVMStabilityInspector.inspectThrowable(new IOException());
        assertFalse(killerForTests.wasKilled());

        killerForTests.reset();
        JVMStabilityInspector.inspectThrowable(new OutOfMemoryError());
        assertTrue(killerForTests.wasKilled());

        DatabaseDescriptor.setDiskFailurePolicy(Config.DiskFailurePolicy.die);
        killerForTests.reset();
        JVMStabilityInspector.inspectThrowable(new FSReadError(new IOException(), "blah"));
        assertTrue(killerForTests.wasKilled());

        DatabaseDescriptor.setCommitFailurePolicy(Config.CommitFailurePolicy.die);
        killerForTests.reset();
        JVMStabilityInspector.inspectCommitLogThrowable(new Throwable());
        assertTrue(killerForTests.wasKilled());
    }
    finally
    {
        JVMStabilityInspector.replaceKiller(originalKiller);
        DatabaseDescriptor.setDiskFailurePolicy(oldPolicy);
        DatabaseDescriptor.setCommitFailurePolicy(oldCommitPolicy);
    }
}
Example #8
Source File: CassandraClusterInfo.java From hdfs2cass with Apache License 2.0 | 5 votes |
public void validateThriftAccessible(final Optional<Integer> rpcPort)
{
    Config.setClientMode(true);

    int port = rpcPort.or(ConfigHelper.getOutputRpcPort(new Configuration()));

    ExternalSSTableLoaderClient client = new ExternalSSTableLoaderClient(this.host, port, null, null);
    client.init(this.keyspace);

    if (client.getCFMetaData(this.keyspace, this.columnFamily) == null)
    {
        throw new CrunchRuntimeException("Column family not accessible: " + this.keyspace + "." + this.columnFamily);
    }
}
Example #9
Source File: CrunchBulkRecordWriter.java From hdfs2cass with Apache License 2.0 | 5 votes |
public CrunchBulkRecordWriter(TaskAttemptContext context)
{
    Config.setClientMode(true);
    Config.setOutboundBindAny(true);

    this.conf = HadoopCompat.getConfiguration(context);
    this.context = context;

    int megabitsPerSec = Integer.parseInt(conf.get(STREAM_THROTTLE_MBITS, "0"));
    LOG.info("Setting stream throttling to " + megabitsPerSec);
    DatabaseDescriptor.setStreamThroughputOutboundMegabitsPerSec(megabitsPerSec);
    DatabaseDescriptor.setInterDCStreamThroughputOutboundMegabitsPerSec(megabitsPerSec);

    heartbeat = new ProgressHeartbeat(context, 120);
}
Example #10
Source File: AbstractBulkRecordWriter.java From stratio-cassandra with Apache License 2.0 | 5 votes |
protected AbstractBulkRecordWriter(Configuration conf)
{
    Config.setClientMode(true);
    Config.setOutboundBindAny(true);

    this.conf = conf;
    DatabaseDescriptor.setStreamThroughputOutboundMegabitsPerSec(Integer.parseInt(conf.get(STREAM_THROTTLE_MBITS, "0")));
    maxFailures = Integer.parseInt(conf.get(MAX_FAILED_HOSTS, "0"));
    bufferSize = Integer.parseInt(conf.get(BUFFER_SIZE_IN_MB, "64"));
}
Example #11
Source File: SegmentedFile.java From stratio-cassandra with Apache License 2.0 | 5 votes |
/**
 * @return A SegmentedFile.Builder.
 */
public static Builder getBuilder(Config.DiskAccessMode mode)
{
    return mode == Config.DiskAccessMode.mmap
           ? new MmappedSegmentedFile.Builder()
           : new BufferedPoolingSegmentedFile.Builder();
}
Example #12
Source File: SSTableReader.java From stratio-cassandra with Apache License 2.0 | 5 votes |
void ensureReadMeter()
{
    if (readMeter != null)
        return;

    // Don't track read rates for tables in the system keyspace and don't bother trying to load or persist
    // the read meter when in client mode.
    if (Keyspace.SYSTEM_KS.equals(desc.ksname) || Config.isClientMode())
    {
        readMeter = null;
        readMeterSyncFuture = null;
        return;
    }

    readMeter = SystemKeyspace.getSSTableReadMeter(desc.ksname, desc.cfname, desc.generation);

    // sync the average read rate to system.sstable_activity every five minutes, starting one minute from now
    readMeterSyncFuture = syncExecutor.scheduleAtFixedRate(new Runnable()
    {
        public void run()
        {
            if (!isCompacted.get())
            {
                meterSyncThrottle.acquire();
                SystemKeyspace.persistSSTableReadMeter(desc.ksname, desc.cfname, desc.generation, readMeter);
            }
        }
    }, 1, 5, TimeUnit.MINUTES);
}
Example #13
Source File: JVMStabilityInspector.java From stratio-cassandra with Apache License 2.0 | 5 votes |
public static void inspectCommitLogThrowable(Throwable t)
{
    if (DatabaseDescriptor.getCommitFailurePolicy() == Config.CommitFailurePolicy.die)
        killer.killCurrentJVM(t);
    else
        inspectThrowable(t);
}
Example #14
Source File: SimpleSeedProvider.java From stratio-cassandra with Apache License 2.0 | 5 votes |
public List<InetAddress> getSeeds()
{
    Config conf;
    try
    {
        conf = DatabaseDescriptor.loadConfig();
    }
    catch (Exception e)
    {
        throw new AssertionError(e);
    }

    String[] hosts = conf.seed_provider.parameters.get("seeds").split(",", -1);
    List<InetAddress> seeds = new ArrayList<InetAddress>(hosts.length);
    for (String host : hosts)
    {
        try
        {
            seeds.add(InetAddress.getByName(host.trim()));
        }
        catch (UnknownHostException ex)
        {
            // not fatal... DD will bark if there end up being zero seeds.
            logger.warn("Seed provider couldn't lookup host {}", host);
        }
    }
    return Collections.unmodifiableList(seeds);
}
Example #15
Source File: CQLSSTableWriterLongTest.java From stratio-cassandra with Apache License 2.0 | 4 votes |
@AfterClass
public static void tearDown()
{
    Config.setClientMode(false);
}
Example #16
Source File: OutboundTcpConnection.java From stratio-cassandra with Apache License 2.0 | 4 votes |
private boolean shouldCompressConnection()
{
    // assumes version >= 1.2
    return DatabaseDescriptor.internodeCompression() == Config.InternodeCompression.all
           || (DatabaseDescriptor.internodeCompression() == Config.InternodeCompression.dc && !isLocalDC(poolReference.endPoint()));
}
Example #17
Source File: CQLSSTableWriterTest.java From stratio-cassandra with Apache License 2.0 | 4 votes |
@AfterClass
public static void tearDown()
{
    Config.setClientMode(false);
}
Example #18
Source File: CQLSSTableWriterClientTest.java From stratio-cassandra with Apache License 2.0 | 4 votes |
@AfterClass
public static void cleanup() throws Exception
{
    Config.setClientMode(false);
}
Example #19
Source File: SegmentedFile.java From hadoop-sstable with Apache License 2.0 | 4 votes |
/**
 * @return A SegmentedFile.Builder.
 */
public static Builder getBuilder(Config.DiskAccessMode mode, FileSystem fs)
{
    return mode == Config.DiskAccessMode.mmap
           ? new MmappedSegmentedFile.Builder(fs)
           : new BufferedPoolingSegmentedFile.Builder(fs);
}
Example #20
Source File: CountTombstones.java From cassandra-opstools with Apache License 2.0 | 4 votes |
/**
 * Counts the number of tombstones, per row, in a given SSTable
 *
 * Assumes RandomPartitioner, standard columns and UTF8 encoded row keys
 *
 * Does not require a cassandra.yaml file or system tables.
 *
 * @param args command lines arguments
 *
 * @throws java.io.IOException on failure to open/read/write files or output streams
 */
public static void main(String[] args) throws IOException, ParseException
{
    String usage = String.format("Usage: %s [-l] <sstable> [<sstable> ...]%n", CountTombstones.class.getName());

    final Options options = new Options();
    options.addOption("l", "legend", false, "Include column name explanation");
    options.addOption("p", "partitioner", true, "The partitioner used by database");

    CommandLineParser parser = new BasicParser();
    CommandLine cmd = parser.parse(options, args);
    if (cmd.getArgs().length < 1)
    {
        System.err.println("You must supply at least one sstable");
        System.err.println(usage);
        System.exit(1);
    }

    // Fake DatabaseDescriptor settings so we don't have to load cassandra.yaml etc
    Config.setClientMode(true);
    String partitionerName = String.format("org.apache.cassandra.dht.%s",
                                           cmd.hasOption("p") ? cmd.getOptionValue("p") : "RandomPartitioner");
    try
    {
        Class<?> clazz = Class.forName(partitionerName);
        IPartitioner partitioner = (IPartitioner) clazz.newInstance();
        DatabaseDescriptor.setPartitioner(partitioner);
    }
    catch (Exception e)
    {
        throw new RuntimeException("Can't instantiate partitioner " + partitionerName);
    }

    PrintStream out = System.out;

    for (String arg : cmd.getArgs())
    {
        String ssTableFileName = new File(arg).getAbsolutePath();
        Descriptor descriptor = Descriptor.fromFilename(ssTableFileName);
        run(descriptor, cmd, out);
    }

    System.exit(0);
}