Java Code Examples for org.apache.spark.SparkConf#getBoolean()
The following examples show how to use org.apache.spark.SparkConf#getBoolean().
The original project and source file are noted above each example.
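SparkConf#getBoolean(String key, boolean defaultValue) parses the value stored under key as a boolean and returns the supplied default when the key is absent. A minimal sketch of both cases; the spark.example.* keys below are made up for illustration:

import org.apache.spark.SparkConf;

public class GetBooleanDemo {
    public static void main(String[] args) {
        // Build a conf without loading external defaults (the keys are hypothetical).
        SparkConf conf = new SparkConf(false)
                .set("spark.example.featureEnabled", "true");

        // Key present: the stored string is parsed as a boolean.
        boolean enabled = conf.getBoolean("spark.example.featureEnabled", false); // true

        // Key absent: the supplied default is returned.
        boolean fallback = conf.getBoolean("spark.example.missingFlag", true); // true

        System.out.println(enabled + " " + fallback);
    }
}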
Example 1
Source File: SparkExecutionContext.java, from systemds (Apache License 2.0)
public SparkClusterConfig() {
    SparkConf sconf = createSystemDSSparkConf();
    _confOnly = true;

    //parse version and config
    String sparkVersion = getSparkVersionString();
    _legacyVersion = (UtilFunctions.compareVersion(sparkVersion, "1.6.0") < 0
        || sconf.getBoolean("spark.memory.useLegacyMode", false) );

    //obtain basic spark configurations
    if( _legacyVersion )
        analyzeSparkConfiguationLegacy(sconf);
    else
        analyzeSparkConfiguation(sconf);

    //log debug of created spark cluster config
    if( LOG.isDebugEnabled() )
        LOG.debug( this.toString() );
}
Example 2
Source File: Word2VecVariables.java, from deeplearning4j (Apache License 2.0)
@SuppressWarnings("unchecked")
public static <T> T assignVar(String variableName, SparkConf conf, Class clazz) throws Exception {
    Object ret;
    // Dispatch on the requested wrapper class and call the matching typed getter.
    if (clazz.equals(Integer.class)) {
        ret = conf.getInt(variableName, (Integer) getDefault(variableName));
    } else if (clazz.equals(Double.class)) {
        ret = conf.getDouble(variableName, (Double) getDefault(variableName));
    } else if (clazz.equals(Boolean.class)) {
        ret = conf.getBoolean(variableName, (Boolean) getDefault(variableName));
    } else if (clazz.equals(String.class)) {
        ret = conf.get(variableName, (String) getDefault(variableName));
    } else if (clazz.equals(Long.class)) {
        ret = conf.getLong(variableName, (Long) getDefault(variableName));
    } else {
        throw new Exception("Variable Type not supported. Only boolean, int, double and String supported.");
    }
    return (T) ret;
}
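The helper above dispatches on the requested wrapper class so one method can serve all of SparkConf's typed getters. A self-contained sketch of the same idea, with an explicit default argument in place of deeplearning4j's default-value table; the demo key is made up:

import org.apache.spark.SparkConf;

public class TypedConfDemo {

    // Simplified variant of assignVar: dispatch on clazz, fall back to defaultValue.
    @SuppressWarnings("unchecked")
    static <T> T getAs(SparkConf conf, String key, Class<T> clazz, T defaultValue) {
        Object ret;
        if (clazz.equals(Boolean.class)) {
            ret = conf.getBoolean(key, (Boolean) defaultValue);
        } else if (clazz.equals(Integer.class)) {
            ret = conf.getInt(key, (Integer) defaultValue);
        } else if (clazz.equals(Double.class)) {
            ret = conf.getDouble(key, (Double) defaultValue);
        } else {
            ret = conf.get(key, (String) defaultValue);
        }
        return (T) ret;
    }

    public static void main(String[] args) {
        SparkConf conf = new SparkConf(false).set("demo.useAdaGrad", "true");
        Boolean useAdaGrad = getAs(conf, "demo.useAdaGrad", Boolean.class, Boolean.FALSE); // true
        System.out.println(useAdaGrad);
    }
}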
Example 3
Source File: Word2VecPerformerVoid.java, from deeplearning4j (Apache License 2.0)
public void setup(SparkConf conf) {
    // Read hyperparameters from the Spark configuration, with hard-coded fallbacks.
    useAdaGrad = conf.getBoolean(ADAGRAD, false);
    negative = conf.getDouble(NEGATIVE, 5);
    numWords = conf.getInt(NUM_WORDS, 1);
    window = conf.getInt(WINDOW, 5);
    alpha = conf.getDouble(ALPHA, 0.025f);
    minAlpha = conf.getDouble(MIN_ALPHA, 1e-2f);
    totalWords = conf.getInt(NUM_WORDS, 1);
    iterations = conf.getInt(ITERATIONS, 5);
    vectorLength = conf.getInt(VECTOR_LENGTH, 100);

    initExpTable();

    // Deserialize the negative-sampling table if one was shipped in the configuration.
    if (negative > 0 && conf.contains(TABLE)) {
        ByteArrayInputStream bis = new ByteArrayInputStream(conf.get(TABLE).getBytes());
        DataInputStream dis = new DataInputStream(bis);
        table = Nd4j.read(dis);
    }
}
Example 4
Source File: Word2VecPerformer.java, from deeplearning4j (Apache License 2.0)
public void setup(SparkConf conf) {
    // Same pattern as above, with the keys qualified through Word2VecVariables.
    useAdaGrad = conf.getBoolean(Word2VecVariables.ADAGRAD, false);
    negative = conf.getDouble(Word2VecVariables.NEGATIVE, 5);
    numWords = conf.getInt(Word2VecVariables.NUM_WORDS, 1);
    window = conf.getInt(Word2VecVariables.WINDOW, 5);
    alpha = conf.getDouble(Word2VecVariables.ALPHA, 0.025f);
    minAlpha = conf.getDouble(Word2VecVariables.MIN_ALPHA, 1e-2f);
    totalWords = conf.getInt(Word2VecVariables.NUM_WORDS, 1);
    vectorLength = conf.getInt(Word2VecVariables.VECTOR_LENGTH, 100);

    initExpTable();

    // Deserialize the negative-sampling table if one was shipped in the configuration.
    if (negative > 0 && conf.contains(Word2VecVariables.TABLE)) {
        ByteArrayInputStream bis = new ByteArrayInputStream(conf.get(Word2VecVariables.TABLE).getBytes());
        DataInputStream dis = new DataInputStream(bis);
        table = Nd4j.read(dis);
    }
}
Example 5
Source File: HBaseIndex.java, from hudi (Apache License 2.0)
private void setPutBatchSize(JavaRDD<WriteStatus> writeStatusRDD,
    HBaseIndexQPSResourceAllocator hBaseIndexQPSResourceAllocator, final JavaSparkContext jsc) {
  if (config.getHbaseIndexPutBatchSizeAutoCompute()) {
    SparkConf conf = jsc.getConf();
    int maxExecutors = conf.getInt(DEFAULT_SPARK_EXECUTOR_INSTANCES_CONFIG_NAME, 1);
    if (conf.getBoolean(DEFAULT_SPARK_DYNAMIC_ALLOCATION_ENABLED_CONFIG_NAME, false)) {
      maxExecutors =
          Math.max(maxExecutors, conf.getInt(DEFAULT_SPARK_DYNAMIC_ALLOCATION_MAX_EXECUTORS_CONFIG_NAME, 1));
    }
    /*
     * Each writeStatus represents status information from a write done in one of the IOHandles. If a writeStatus has
     * any insert, it implies that the corresponding task contacts HBase for doing puts, since we only do puts for
     * inserts from HBaseIndex.
     */
    final Tuple2<Long, Integer> numPutsParallelismTuple = getHBasePutAccessParallelism(writeStatusRDD);
    final long numPuts = numPutsParallelismTuple._1;
    final int hbasePutsParallelism = numPutsParallelismTuple._2;
    this.numRegionServersForTable = getNumRegionServersAliveForTable();
    final float desiredQPSFraction =
        hBaseIndexQPSResourceAllocator.calculateQPSFractionForPutsTime(numPuts, this.numRegionServersForTable);
    LOG.info("Desired QPSFraction :" + desiredQPSFraction);
    LOG.info("Number HBase puts :" + numPuts);
    LOG.info("Hbase Puts Parallelism :" + hbasePutsParallelism);
    final float availableQpsFraction =
        hBaseIndexQPSResourceAllocator.acquireQPSResources(desiredQPSFraction, numPuts);
    LOG.info("Allocated QPS Fraction :" + availableQpsFraction);
    multiPutBatchSize = putBatchSizeCalculator.getBatchSize(numRegionServersForTable, maxQpsPerRegionServer,
        hbasePutsParallelism, maxExecutors, SLEEP_TIME_MILLISECONDS, availableQpsFraction);
    LOG.info("multiPutBatchSize :" + multiPutBatchSize);
  }
}
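The executor-count logic at the top of this method reads standard Spark settings. A minimal sketch of just that part, assuming the Hudi constants resolve to the usual spark.executor.instances and spark.dynamicAllocation.* keys:

import org.apache.spark.SparkConf;

public class MaxExecutorsDemo {

    static int maxExecutors(SparkConf conf) {
        int maxExecutors = conf.getInt("spark.executor.instances", 1);
        // With dynamic allocation enabled, the effective ceiling is the max-executors cap.
        if (conf.getBoolean("spark.dynamicAllocation.enabled", false)) {
            maxExecutors = Math.max(maxExecutors,
                    conf.getInt("spark.dynamicAllocation.maxExecutors", 1));
        }
        return maxExecutors;
    }

    public static void main(String[] args) {
        SparkConf conf = new SparkConf(false)
                .set("spark.dynamicAllocation.enabled", "true")
                .set("spark.dynamicAllocation.maxExecutors", "8");
        System.out.println(maxExecutors(conf)); // 8
    }
}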
Example 6
Source File: GryoSerializer.java, from tinkerpop (Apache License 2.0)
public GryoSerializer(final SparkConf sparkConfiguration) {
    final long bufferSizeKb = sparkConfiguration.getSizeAsKb("spark.kryoserializer.buffer", "64k");
    final long maxBufferSizeMb = sparkConfiguration.getSizeAsMb("spark.kryoserializer.buffer.max", "64m");
    this.referenceTracking = sparkConfiguration.getBoolean("spark.kryo.referenceTracking", true);
    this.registrationRequired = sparkConfiguration.getBoolean(Constants.SPARK_KRYO_REGISTRATION_REQUIRED, false);
    if (bufferSizeKb >= ByteUnit.GiB.toKiB(2L)) {
        throw new IllegalArgumentException("spark.kryoserializer.buffer must be less than 2048 mb, got: " + bufferSizeKb + " mb.");
    } else {
        this.bufferSize = (int) ByteUnit.KiB.toBytes(bufferSizeKb);
        if (maxBufferSizeMb >= ByteUnit.GiB.toMiB(2L)) {
            throw new IllegalArgumentException("spark.kryoserializer.buffer.max must be less than 2048 mb, got: " + maxBufferSizeMb + " mb.");
        } else {
            this.maxBufferSize = (int) ByteUnit.MiB.toBytes(maxBufferSizeMb);
            //this.userRegistrator = sparkConfiguration.getOption("spark.kryo.registrator");
        }
    }
    // create a GryoPool and store it in static HadoopPools
    final List<Object> ioRegistries = new ArrayList<>();
    ioRegistries.addAll(makeApacheConfiguration(sparkConfiguration).getList(IoRegistry.IO_REGISTRY, Collections.emptyList()));
    ioRegistries.add(SparkIoRegistry.class.getCanonicalName().replace("." + SparkIoRegistry.class.getSimpleName(), "$" + SparkIoRegistry.class.getSimpleName()));
    HadoopPools.initialize(GryoPool.build().
            version(GryoVersion.valueOf(sparkConfiguration.get(GryoPool.CONFIG_IO_GRYO_VERSION, GryoPool.CONFIG_IO_GRYO_POOL_VERSION_DEFAULT.name()))).
            poolSize(sparkConfiguration.getInt(GryoPool.CONFIG_IO_GRYO_POOL_SIZE, GryoPool.CONFIG_IO_GRYO_POOL_SIZE_DEFAULT)).
            ioRegistries(ioRegistries).
            initializeMapper(builder -> builder.referenceTracking(this.referenceTracking).
                    registrationRequired(this.registrationRequired)).
            create());
}
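For reference, a minimal standalone sketch of the same Kryo-related lookups, using the standard Spark keys; the 2 GiB guard mirrors the check above, with the limit written out in KiB instead of going through ByteUnit:

import org.apache.spark.SparkConf;

public class KryoConfDemo {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf(false)
                .set("spark.kryoserializer.buffer", "128k")
                .set("spark.kryo.referenceTracking", "false");

        // Size-typed getters parse suffixed values like "128k" or "64m".
        long bufferSizeKb = conf.getSizeAsKb("spark.kryoserializer.buffer", "64k"); // 128
        boolean referenceTracking = conf.getBoolean("spark.kryo.referenceTracking", true); // false

        // The buffer size is later cast to an int byte count, hence the 2 GiB ceiling (in KiB).
        if (bufferSizeKb >= 2L * 1024 * 1024) {
            throw new IllegalArgumentException("spark.kryoserializer.buffer must be less than 2048 mb");
        }
        System.out.println(bufferSizeKb + " " + referenceTracking);
    }
}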