Java Code Examples for org.apache.hadoop.conf.Configuration#setInt()
The following examples show how to use org.apache.hadoop.conf.Configuration#setInt().
You can go to the original project or source file by following the links above each example.
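Configuration#setInt(String, int) stores the value as the string form of the integer, and Configuration#getInt(String, int) parses it back, returning the supplied default when the key is unset. As a quick orientation before the project examples, here is a minimal sketch of that round trip (the property name my.example.threads is invented for illustration):

import org.apache.hadoop.conf.Configuration;

public class SetIntRoundTrip {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setInt("my.example.threads", 4);                 // stored internally as the string "4"
    int threads = conf.getInt("my.example.threads", 1);   // parsed back; 1 is returned only if the key is unset
    System.out.println("threads = " + threads);           // prints: threads = 4
  }
}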
Example 1
Source File: Test20772.java From marklogic-contentpump with Apache License 2.0
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  if (args.length < 2) {
    System.err.println("Usage: Test20772 outputpath");
    System.exit(2);
  }

  Job job = Job.getInstance(conf);
  job.setJarByClass(Test20772.class);

  // Map related configuration
  job.setInputFormatClass(NodeInputFormat.class);
  job.setMapperClass(MyMapper.class);
  job.setMapOutputKeyClass(NodePath.class);
  job.setMapOutputValueClass(MarkLogicNode.class);
  job.setReducerClass(MyReducer.class);
  FileOutputFormat.setOutputPath(job, new Path(args[1]));

  conf.setInt("mapred.reduce.tasks", 0);

  conf = job.getConfiguration();
  conf.addResource(args[0]);

  System.exit(job.waitForCompletion(true) ? 0 : 1);
}
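Note that Job.getInstance(conf) makes its own copy of the Configuration, so the conf.setInt("mapred.reduce.tasks", 0) call that follows it in this example changes only the local conf object, not the copy the job holds (which is why the example re-fetches job.getConfiguration() before calling addResource). A minimal sketch of two ways to make such a setting visible to the job, reusing the legacy key from the example:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class ReduceTasksSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // set the value before Job.getInstance() copies the configuration ...
    conf.setInt("mapred.reduce.tasks", 0);
    Job job = Job.getInstance(conf);
    // ... or set it afterwards on the copy the job actually uses
    job.getConfiguration().setInt("mapred.reduce.tasks", 0);
    // the mapreduce API also provides a typed setter for this setting
    job.setNumReduceTasks(0);
  }
}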
Example 2
Source File: HalvadeConf.java From halvade with GNU General Public License v3.0
public static void setKnownSitesOnHDFS(Configuration conf, String[] val)
    throws IOException, URISyntaxException {
  conf.setInt(numberOfSites, val.length);
  FileSystem fs;
  for (int i = 0; i < val.length; i++) {
    // if the path is a directory, add all files in it
    fs = FileSystem.get(new URI(val[i]), conf);
    if (fs.isFile(new Path(val[i]))) {
      conf.set(sitesOnHDFSName + i, val[i]);
    } else {
      FileStatus[] files = fs.listStatus(new Path(val[i]));
      for (FileStatus file : files) {
        if (!file.isDir()) {
          conf.set(sitesOnHDFSName + i, file.getPath().toString());
        }
      }
    }
  }
}
Example 3
Source File: TestDFSUpgradeFromImage.java From hadoop-gpu with Apache License 2.0
public void testUpgradeFromImage() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new Configuration();
    if (System.getProperty("test.build.data") == null) { // to allow test to be run outside of Ant
      System.setProperty("test.build.data", "build/test/data");
    }
    conf.setInt("dfs.datanode.scan.period.hours", -1); // block scanning off
    cluster = new MiniDFSCluster(0, conf, numDataNodes, false, true,
        StartupOption.UPGRADE, null);
    cluster.waitActive();
    DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
        cluster.getNameNodePort()), conf);
    // Safemode will be off only after upgrade is complete. Wait for it.
    while (dfsClient.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET)) {
      LOG.info("Waiting for SafeMode to be OFF.");
      try {
        Thread.sleep(1000);
      } catch (InterruptedException ignored) {}
    }
    verifyFileSystem(dfsClient);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 4
Source File: TestProtoBufRpc.java From hadoop with Apache License 2.0
@Before
public void setUp() throws IOException {
  // Setup server for both protocols
  conf = new Configuration();
  conf.setInt(CommonConfigurationKeys.IPC_MAXIMUM_DATA_LENGTH, 1024);
  // Set RPC engine to protobuf RPC engine
  RPC.setProtocolEngine(conf, TestRpcService.class, ProtobufRpcEngine.class);

  // Create server side implementation
  PBServerImpl serverImpl = new PBServerImpl();
  BlockingService service = TestProtobufRpcProto
      .newReflectiveBlockingService(serverImpl);

  // Get RPC server for server side implementation
  server = new RPC.Builder(conf).setProtocol(TestRpcService.class)
      .setInstance(service).setBindAddress(ADDRESS).setPort(PORT).build();
  addr = NetUtils.getConnectAddress(server);

  // now the second protocol
  PBServer2Impl server2Impl = new PBServer2Impl();
  BlockingService service2 = TestProtobufRpc2Proto
      .newReflectiveBlockingService(server2Impl);
  server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, TestRpcService2.class,
      service2);
  server.start();
}
Example 5
Source File: TestBootstrapStandbyWithBKJM.java From big-c with Apache License 2.0
@Before
public void setUp() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 5);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, BKJMUtil
      .createJournalURI("/bootstrapStandby").toString());
  BKJMUtil.addJournalManagerDefinition(conf);
  conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
  conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
      SlowCodec.class.getCanonicalName());
  CompressionCodecFactory.setCodecClasses(conf,
      ImmutableList.<Class> of(SlowCodec.class));

  MiniDFSNNTopology topology = new MiniDFSNNTopology()
      .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
          .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
          .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));

  cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology)
      .numDataNodes(1).manageNameDfsSharedDirs(false).build();
  cluster.waitActive();
}
Example 6
Source File: TestWebHDFS.java From big-c with Apache License 2.0
/**
 * Test for catching the "no datanode" IOException when creating a file
 * while the datanode is not running for some reason.
 */
@Test(timeout=300000)
public void testCreateWithNoDN() throws Exception {
  MiniDFSCluster cluster = null;
  final Configuration conf = WebHdfsTestUtil.createConf();
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
    cluster.waitActive();
    FileSystem fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
        WebHdfsFileSystem.SCHEME);
    fs.create(new Path("/testnodatanode"));
    Assert.fail("No exception was thrown");
  } catch (IOException ex) {
    GenericTestUtils.assertExceptionContains("Failed to find datanode", ex);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
Example 7
Source File: ConnectionUtils.java From hbase with Apache License 2.0
/**
 * Changes the configuration to set the number of retries needed when using Connection
 * internally, e.g. for updating catalog tables, etc.
 * Call this method before we create any Connections.
 * @param c The Configuration instance to set the retries into.
 * @param log Used to log what we set in here.
 */
public static void setServerSideHConnectionRetriesConfig(final Configuration c,
    final String sn, final Logger log) {
  // TODO: Fix this. Not all connections from server side should have 10 times the retries.
  int hcRetries = c.getInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER,
      HConstants.DEFAULT_HBASE_CLIENT_RETRIES_NUMBER);
  // Go big. Multiply by 10. If we can't get to meta after this many retries
  // then something seriously wrong.
  int serversideMultiplier = c.getInt(HConstants.HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER,
      HConstants.DEFAULT_HBASE_CLIENT_SERVERSIDE_RETRIES_MULTIPLIER);
  int retries = hcRetries * serversideMultiplier;
  c.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, retries);
  log.info(sn + " server-side Connection retries=" + retries);
}
Example 8
Source File: MiniYARNCluster.java From hadoop with Apache License 2.0
protected synchronized void serviceInit(Configuration conf) throws Exception {
  Configuration config = new YarnConfiguration(conf);

  // create nm-local-dirs and configure them for the nodemanager
  String localDirsString = prepareDirs("local", numLocalDirs);
  config.set(YarnConfiguration.NM_LOCAL_DIRS, localDirsString);

  // create nm-log-dirs and configure them for the nodemanager
  String logDirsString = prepareDirs("log", numLogDirs);
  config.set(YarnConfiguration.NM_LOG_DIRS, logDirsString);

  config.setInt(YarnConfiguration.NM_PMEM_MB, config.getInt(
      YarnConfiguration.YARN_MINICLUSTER_NM_PMEM_MB,
      YarnConfiguration.DEFAULT_YARN_MINICLUSTER_NM_PMEM_MB));

  config.set(YarnConfiguration.NM_ADDRESS,
      MiniYARNCluster.getHostname() + ":0");
  config.set(YarnConfiguration.NM_LOCALIZER_ADDRESS,
      MiniYARNCluster.getHostname() + ":0");
  WebAppUtils.setNMWebAppHostNameAndPort(config,
      MiniYARNCluster.getHostname(), 0);

  // Disable resource checks by default
  if (!config.getBoolean(
      YarnConfiguration.YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING,
      YarnConfiguration.DEFAULT_YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING)) {
    config.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
    config.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
  }

  LOG.info("Starting NM: " + index);
  nodeManagers[index].init(config);
  super.serviceInit(config);
}
Example 9
Source File: TestCompactionFileNotFound.java From hbase with Apache License 2.0
@BeforeClass
public static void setupBeforeClass() throws Exception {
  Configuration conf = util.getConfiguration();
  conf.setInt("hbase.hfile.compaction.discharger.interval", Integer.MAX_VALUE);
  util.startMiniCluster(3);
}
Example 10
Source File: TestIPC.java From big-c with Apache License 2.0
@Test(timeout=60000)
public void testConnectionRetriesOnSocketTimeoutExceptions() throws IOException {
  Configuration conf = new Configuration();
  // set max retries to 0
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, 0);
  assertRetriesOnSocketTimeouts(conf, 1);

  // set max retries to 3
  conf.setInt(
      CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY, 3);
  assertRetriesOnSocketTimeouts(conf, 4);
}
Example 11
Source File: TestHFlush.java From big-c with Apache License 2.0
@Test
public void hSyncEndBlock_02() throws IOException {
  Configuration conf = new HdfsConfiguration();
  int customPerChecksumSize = 512;
  int customBlockSize = customPerChecksumSize * 3;
  // Modify default filesystem settings
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, customPerChecksumSize);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, customBlockSize);

  doTheJob(conf, fName, customBlockSize, (short) 2, true,
      EnumSet.of(SyncFlag.END_BLOCK));
}
Example 12
Source File: HbaseTestUtil.java From kafka-connect-hbase with Apache License 2.0
/**
 * Returns a new HBaseTestingUtility instance.
 */
private static HBaseTestingUtility createTestingUtility() {
  final Configuration hbaseConf = HBaseConfiguration.create();
  hbaseConf.setInt("replication.stats.thread.period.seconds", 5);
  hbaseConf.setLong("replication.sleep.before.failover", 2000);
  hbaseConf.setInt("replication.source.maxretriesmultiplier", 10);
  return new HBaseTestingUtility(hbaseConf);
}
Example 13
Source File: RegionMover.java From hbase with Apache License 2.0
/**
 * Creates a new configuration and sets region mover specific overrides
 */
private static Configuration createConf() {
  Configuration conf = HBaseConfiguration.create();
  conf.setInt("hbase.client.prefetch.limit", 1);
  conf.setInt("hbase.client.pause", 500);
  conf.setInt("hbase.client.retries.number", 100);
  return conf;
}
Example 14
Source File: TestLineRecordReader.java From big-c with Apache License 2.0
@Test
public void testStripBOM() throws IOException {
  // the test data contains a BOM at the start of the file
  // confirm the BOM is skipped by LineRecordReader
  String UTF8_BOM = "\uFEFF";
  URL testFileUrl = getClass().getClassLoader().getResource("testBOM.txt");
  assertNotNull("Cannot find testBOM.txt", testFileUrl);
  File testFile = new File(testFileUrl.getFile());
  Path testFilePath = new Path(testFile.getAbsolutePath());
  long testFileSize = testFile.length();
  Configuration conf = new Configuration();
  conf.setInt(org.apache.hadoop.mapreduce.lib.input.
      LineRecordReader.MAX_LINE_LENGTH, Integer.MAX_VALUE);

  // read the data and check whether BOM is skipped
  FileSplit split = new FileSplit(testFilePath, 0, testFileSize, (String[]) null);
  LineRecordReader reader = new LineRecordReader(conf, split);
  LongWritable key = new LongWritable();
  Text value = new Text();
  int numRecords = 0;
  boolean firstLine = true;
  boolean skipBOM = true;
  while (reader.next(key, value)) {
    if (firstLine) {
      firstLine = false;
      if (value.toString().startsWith(UTF8_BOM)) {
        skipBOM = false;
      }
    }
    ++numRecords;
  }
  reader.close();

  assertTrue("BOM is not skipped", skipBOM);
}
Example 15
Source File: TestStandbyCheckpoints.java From hadoop with Apache License 2.0
@SuppressWarnings("rawtypes") @Before public void setupCluster() throws Exception { Configuration conf = setupCommonConfig(); // Dial down the retention of extra edits and checkpoints. This is to // help catch regressions of HDFS-4238 (SBN should not purge shared edits) conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY, 1); conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0); int retryCount = 0; while (true) { try { int basePort = 10060 + random.nextInt(100) * 2; MiniDFSNNTopology topology = new MiniDFSNNTopology() .addNameservice(new MiniDFSNNTopology.NSConf("ns1") .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort)) .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1))); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(topology) .numDataNodes(1) .build(); cluster.waitActive(); nn0 = cluster.getNameNode(0); nn1 = cluster.getNameNode(1); fs = HATestUtil.configureFailoverFs(cluster, conf); cluster.transitionToActive(0); ++retryCount; break; } catch (BindException e) { LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry " + retryCount + " times"); } } }
Example 16
Source File: HalvadeConf.java From halvade with GNU General Public License v3.0
public static void setVcores(Configuration conf, int val) {
  conf.setInt(vCores, val);
}
Example 17
Source File: MapFile.java From hadoop with Apache License 2.0
/** Sets the index interval and stores it in conf
 * @see #getIndexInterval()
 */
public static void setIndexInterval(Configuration conf, int interval) {
  conf.setInt(INDEX_INTERVAL, interval);
}
Example 18
Source File: TestCheckpoint.java From big-c with Apache License 2.0
/**
 * Test case where the name node is reformatted while the secondary namenode
 * is running. The secondary should shut itself down if it talks to a NN
 * with the wrong namespace.
 */
@Test
public void testReformatNNBetweenCheckpoints() throws IOException {
  MiniDFSCluster cluster = null;
  SecondaryNameNode secondary = null;

  Configuration conf = new HdfsConfiguration();
  conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 1);

  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
        .format(true).build();
    int origPort = cluster.getNameNodePort();
    int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();
    Configuration snnConf = new Configuration(conf);
    File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(),
        "namesecondary");
    snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
        checkpointDir.getAbsolutePath());
    secondary = startSecondaryNameNode(snnConf);

    // secondary checkpoints once
    secondary.doCheckpoint();

    // we reformat primary NN
    cluster.shutdown();
    cluster = null;

    // Brief sleep to make sure that the 2NN's IPC connection to the NN
    // is dropped.
    try {
      Thread.sleep(100);
    } catch (InterruptedException ie) {
    }

    // Start a new NN with the same host/port.
    cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(0)
        .nameNodePort(origPort)
        .nameNodeHttpPort(origHttpPort)
        .format(true).build();

    try {
      secondary.doCheckpoint();
      fail("Should have failed checkpoint against a different namespace");
    } catch (IOException ioe) {
      LOG.info("Got expected failure", ioe);
      assertTrue(ioe.toString().contains("Inconsistent checkpoint"));
    }
  } finally {
    cleanup(secondary);
    secondary = null;
    cleanup(cluster);
    cluster = null;
  }
}
Example 19
Source File: TestFileAppend.java From RDFS with Apache License 2.0
/** This creates a slow writer and checks
 * whether pipeline heartbeats work fine
 */
public void testPipelineHeartbeat() throws Exception {
  final int DATANODE_NUM = 2;
  final int fileLen = 6;
  Configuration conf = new Configuration();
  final int timeout = 2000;
  conf.setInt("dfs.socket.timeout", timeout);

  final Path p = new Path("/pipelineHeartbeat/foo");
  System.out.println("p=" + p);

  MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();

  initBuffer(fileLen);

  try {
    // create a new file.
    FSDataOutputStream stm = createFile(fs, p, DATANODE_NUM);

    stm.write(fileContents, 0, 1);
    Thread.sleep(timeout);
    stm.sync();
    System.out.println("Wrote 1 byte and hflush " + p);

    // write another byte
    Thread.sleep(timeout);
    stm.write(fileContents, 1, 1);
    stm.sync();

    stm.write(fileContents, 2, 1);
    Thread.sleep(timeout);
    stm.sync();

    stm.write(fileContents, 3, 1);
    Thread.sleep(timeout);
    stm.write(fileContents, 4, 1);
    stm.sync();

    stm.write(fileContents, 5, 1);
    Thread.sleep(timeout);
    stm.close();

    // verify that entire file is good
    checkFullFile(fs, p);
  } finally {
    fs.close();
    cluster.shutdown();
  }
}
Example 20
Source File: BulkInputFormat.java From datawave with Apache License 2.0
/**
 * Sets the log level for this configuration object.
 *
 * @param conf
 *          the Hadoop configuration object
 * @param level
 *          the logging level
 */
public static void setLogLevel(Configuration conf, Level level) {
  ArgumentChecker.notNull(level);
  log.setLevel(level);
  conf.setInt(LOGLEVEL, level.toInt());
}
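This last example stores a non-numeric setting (a log4j Level) by encoding it as an int with level.toInt(). The matching read side is not shown in the source; a hedged sketch of how it would presumably look, using Configuration#getInt and log4j's Level#toLevel (the key name below is hypothetical, standing in for the LOGLEVEL constant used above):

import org.apache.hadoop.conf.Configuration;
import org.apache.log4j.Level;

public class LogLevelReadSketch {
  // hypothetical key name; the example above uses a LOGLEVEL constant whose value is not shown
  private static final String LOGLEVEL = "bulk.input.format.log.level";

  public static Level getLogLevel(Configuration conf) {
    // read the integer back, defaulting to INFO if the key was never set,
    // then convert it to a log4j Level
    int stored = conf.getInt(LOGLEVEL, Level.INFO.toInt());
    return Level.toLevel(stored, Level.INFO);
  }
}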