Java Code Examples for org.apache.hadoop.fs.FileSystem#setWorkingDirectory()
The following examples show how to use org.apache.hadoop.fs.FileSystem#setWorkingDirectory().
Each example is taken from the original project and source file named in the header above its code.
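Before the project-specific examples, here is a minimal, self-contained sketch of the basic contract: setWorkingDirectory() changes the directory against which relative Paths are resolved. This sketch is not taken from any of the projects below; the local file system, the /tmp/example directory, and the data/input.txt path are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WorkingDirectoryExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Local file system used purely for illustration; any FileSystem implementation works.
    FileSystem fs = FileSystem.getLocal(conf);
    // Hypothetical directory; relative Paths are resolved against it from here on.
    fs.setWorkingDirectory(new Path("/tmp/example"));
    // Resolves the relative path against the working directory
    // (e.g. file:/tmp/example/data/input.txt; the file name is an assumption).
    Path qualified = fs.makeQualified(new Path("data/input.txt"));
    System.out.println("Resolved: " + qualified);
    System.out.println("Working directory: " + fs.getWorkingDirectory());
    fs.close();
  }
}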
Example 1
Source File: ShardedTableMapFileTest.java From datawave with Apache License 2.0
@Test(expected = IOException.class)
public void testGetAllShardedTableMapFilesWithoutPath() throws Exception {
  Configuration conf = new Configuration();
  File tempWorkDir = Files.createTempDir();
  conf.set(FileSystem.FS_DEFAULT_NAME_KEY, tempWorkDir.toURI().toString());
  FileSystem fs = FileSystem.get(tempWorkDir.toURI(), conf);
  fs.setWorkingDirectory(new Path(tempWorkDir.toString()));
  Path workDir = fs.makeQualified(new Path("work"));
  conf.set(ShardedTableMapFile.SPLIT_WORK_DIR, workDir.toString());
  conf.set(ShardedDataTypeHandler.SHARDED_TNAMES,
      "shard_ingest_unit_test_table_1,shard_ingest_unit_test_table_2,shard_ingest_unit_test_table_3");
  String[] tableNames = new String[] {TABLE_NAME};
  conf.set(ShardedTableMapFile.TABLE_NAMES, StringUtils.join(",", tableNames));
  ShardedTableMapFile.setupFile(conf);
  ShardedTableMapFile.getShardIdToLocations(conf, TABLE_NAME);
}
Example 2
Source File: BaseTestHttpFSWith.java From hadoop with Apache License 2.0
private void testWorkingdirectory() throws Exception {
  FileSystem fs = FileSystem.get(getProxiedFSConf());
  Path workingDir = fs.getWorkingDirectory();
  fs.close();

  fs = getHttpFSFileSystem();
  if (isLocalFS()) {
    fs.setWorkingDirectory(workingDir);
  }
  Path httpFSWorkingDir = fs.getWorkingDirectory();
  fs.close();
  Assert.assertEquals(httpFSWorkingDir.toUri().getPath(),
                      workingDir.toUri().getPath());

  fs = getHttpFSFileSystem();
  fs.setWorkingDirectory(new Path("/tmp"));
  workingDir = fs.getWorkingDirectory();
  fs.close();
  Assert.assertEquals(workingDir.toUri().getPath(),
                      new Path("/tmp").toUri().getPath());
}
Example 3
Source File: BaseTestHttpFSWith.java From big-c with Apache License 2.0
private void testWorkingdirectory() throws Exception {
  FileSystem fs = FileSystem.get(getProxiedFSConf());
  Path workingDir = fs.getWorkingDirectory();
  fs.close();

  fs = getHttpFSFileSystem();
  if (isLocalFS()) {
    fs.setWorkingDirectory(workingDir);
  }
  Path httpFSWorkingDir = fs.getWorkingDirectory();
  fs.close();
  Assert.assertEquals(httpFSWorkingDir.toUri().getPath(),
                      workingDir.toUri().getPath());

  fs = getHttpFSFileSystem();
  fs.setWorkingDirectory(new Path("/tmp"));
  workingDir = fs.getWorkingDirectory();
  fs.close();
  Assert.assertEquals(workingDir.toUri().getPath(),
                      new Path("/tmp").toUri().getPath());
}
Example 4
Source File: TestLocalDFS.java From hadoop-gpu with Apache License 2.0
/**
 * Tests get/set working directory in DFS.
 */
public void testWorkingDirectory() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path orig_path = fileSys.getWorkingDirectory();
    assertTrue(orig_path.isAbsolute());
    Path file1 = new Path("somewhat/random.txt");
    writeFile(fileSys, file1);
    assertTrue(fileSys.exists(new Path(orig_path, file1.toString())));
    fileSys.delete(file1, true);
    Path subdir1 = new Path("/somewhere");
    fileSys.setWorkingDirectory(subdir1);
    writeFile(fileSys, file1);
    cleanupFile(fileSys, new Path(subdir1, file1.toString()));
    Path subdir2 = new Path("else");
    fileSys.setWorkingDirectory(subdir2);
    writeFile(fileSys, file1);
    readFile(fileSys, file1);
    cleanupFile(fileSys, new Path(new Path(subdir1, subdir2.toString()),
                                  file1.toString()));

    // test home directory
    Path home = new Path("/user/" + getUserName(fileSys))
        .makeQualified(fileSys);
    Path fsHome = fileSys.getHomeDirectory();
    assertEquals(home, fsHome);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
Example 5
Source File: ShardedTableMapFileTest.java From datawave with Apache License 2.0
private FileSystem setWorkingDirectory(Configuration conf) throws IOException {
  FileSystem fs = FileSystem.getLocal(conf);
  File tempWorkDir = Files.createTempDir();
  fs.setWorkingDirectory(new Path(tempWorkDir.toString()));
  conf.set(ShardedTableMapFile.SPLIT_WORK_DIR, tempWorkDir.toString());
  return fs;
}
Example 6
Source File: TestLocalDFS.java From RDFS with Apache License 2.0
/**
 * Tests get/set working directory in DFS.
 */
public void testWorkingDirectory() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path orig_path = fileSys.getWorkingDirectory();
    assertTrue(orig_path.isAbsolute());
    Path file1 = new Path("somewhat/random.txt");
    writeFile(fileSys, file1);
    assertTrue(fileSys.exists(new Path(orig_path, file1.toString())));
    fileSys.delete(file1, true);
    Path subdir1 = new Path("/somewhere");
    fileSys.setWorkingDirectory(subdir1);
    writeFile(fileSys, file1);
    cleanupFile(fileSys, new Path(subdir1, file1.toString()));
    Path subdir2 = new Path("else");
    fileSys.setWorkingDirectory(subdir2);
    writeFile(fileSys, file1);
    readFile(fileSys, file1);
    cleanupFile(fileSys, new Path(new Path(subdir1, subdir2.toString()),
                                  file1.toString()));

    // test home directory
    Path home = new Path("/user/" + getUserName(fileSys))
        .makeQualified(fileSys);
    Path fsHome = fileSys.getHomeDirectory();
    assertEquals(home, fsHome);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
Example 7
Source File: HadoopBasicFileSystemFactoryDelegate.java From ignite with Apache License 2.0
/**
 * Internal file system creation routine, invoked in correct class loader context.
 *
 * @param usrName User name.
 * @return File system.
 * @throws IOException If failed.
 * @throws InterruptedException if the current thread is interrupted.
 */
protected FileSystem create(String usrName) throws IOException, InterruptedException {
  FileSystem fs = FileSystem.get(fullUri, cfg, usrName);

  if (workDir != null)
    fs.setWorkingDirectory(workDir);

  return fs;
}
Example 8
Source File: ViewFileSystemTestSetup.java From big-c with Apache License 2.0
/**
 *
 * @param fsTarget - the target fs of the view fs.
 * @return return the ViewFS File context to be used for tests
 * @throws Exception
 */
static public FileSystem setupForViewFileSystem(Configuration conf,
    FileSystemTestHelper fileSystemTestHelper, FileSystem fsTarget) throws Exception {
  /**
   * create the test root on local_fs - the mount table will point here
   */
  Path targetOfTests = fileSystemTestHelper.getTestRootPath(fsTarget);
  // In case previous test was killed before cleanup
  fsTarget.delete(targetOfTests, true);
  fsTarget.mkdirs(targetOfTests);

  // Set up viewfs link for test dir as described above
  String testDir = fileSystemTestHelper.getTestRootPath(fsTarget).toUri()
      .getPath();
  linkUpFirstComponents(conf, testDir, fsTarget, "test dir");

  // Set up viewfs link for home dir as described above
  setUpHomeDir(conf, fsTarget);

  // the test path may be relative to working dir - we need to make that work:
  // Set up viewfs link for wd as described above
  String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
  linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");

  FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
  fsView.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
  Log.info("Working dir is: " + fsView.getWorkingDirectory());
  return fsView;
}
Example 9
Source File: TestLocalDFS.java From big-c with Apache License 2.0
/**
 * Tests get/set working directory in DFS.
 */
@Test
public void testWorkingDirectory() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path orig_path = fileSys.getWorkingDirectory();
    assertTrue(orig_path.isAbsolute());
    Path file1 = new Path("somewhat/random.txt");
    writeFile(fileSys, file1);
    assertTrue(fileSys.exists(new Path(orig_path, file1.toString())));
    fileSys.delete(file1, true);
    Path subdir1 = new Path("/somewhere");
    fileSys.setWorkingDirectory(subdir1);
    writeFile(fileSys, file1);
    cleanupFile(fileSys, new Path(subdir1, file1.toString()));
    Path subdir2 = new Path("else");
    fileSys.setWorkingDirectory(subdir2);
    writeFile(fileSys, file1);
    readFile(fileSys, file1);
    cleanupFile(fileSys, new Path(new Path(subdir1, subdir2.toString()),
                                  file1.toString()));

    // test home directory
    Path home = fileSys.makeQualified(
        new Path(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
            + "/" + getUserName(fileSys)));
    Path fsHome = fileSys.getHomeDirectory();
    assertEquals(home, fsHome);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
Example 10
Source File: ViewFileSystemTestSetup.java From hadoop with Apache License 2.0
/**
 *
 * @param fsTarget - the target fs of the view fs.
 * @return return the ViewFS File context to be used for tests
 * @throws Exception
 */
static public FileSystem setupForViewFileSystem(Configuration conf,
    FileSystemTestHelper fileSystemTestHelper, FileSystem fsTarget) throws Exception {
  /**
   * create the test root on local_fs - the mount table will point here
   */
  Path targetOfTests = fileSystemTestHelper.getTestRootPath(fsTarget);
  // In case previous test was killed before cleanup
  fsTarget.delete(targetOfTests, true);
  fsTarget.mkdirs(targetOfTests);

  // Set up viewfs link for test dir as described above
  String testDir = fileSystemTestHelper.getTestRootPath(fsTarget).toUri()
      .getPath();
  linkUpFirstComponents(conf, testDir, fsTarget, "test dir");

  // Set up viewfs link for home dir as described above
  setUpHomeDir(conf, fsTarget);

  // the test path may be relative to working dir - we need to make that work:
  // Set up viewfs link for wd as described above
  String wdDir = fsTarget.getWorkingDirectory().toUri().getPath();
  linkUpFirstComponents(conf, wdDir, fsTarget, "working dir");

  FileSystem fsView = FileSystem.get(FsConstants.VIEWFS_URI, conf);
  fsView.setWorkingDirectory(new Path(wdDir)); // in case testdir relative to wd.
  Log.info("Working dir is: " + fsView.getWorkingDirectory());
  return fsView;
}
Example 11
Source File: TestLocalDFS.java From hadoop with Apache License 2.0
/**
 * Tests get/set working directory in DFS.
 */
@Test
public void testWorkingDirectory() throws IOException {
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fileSys = cluster.getFileSystem();
  try {
    Path orig_path = fileSys.getWorkingDirectory();
    assertTrue(orig_path.isAbsolute());
    Path file1 = new Path("somewhat/random.txt");
    writeFile(fileSys, file1);
    assertTrue(fileSys.exists(new Path(orig_path, file1.toString())));
    fileSys.delete(file1, true);
    Path subdir1 = new Path("/somewhere");
    fileSys.setWorkingDirectory(subdir1);
    writeFile(fileSys, file1);
    cleanupFile(fileSys, new Path(subdir1, file1.toString()));
    Path subdir2 = new Path("else");
    fileSys.setWorkingDirectory(subdir2);
    writeFile(fileSys, file1);
    readFile(fileSys, file1);
    cleanupFile(fileSys, new Path(new Path(subdir1, subdir2.toString()),
                                  file1.toString()));

    // test home directory
    Path home = fileSys.makeQualified(
        new Path(DFSConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT
            + "/" + getUserName(fileSys)));
    Path fsHome = fileSys.getHomeDirectory();
    assertEquals(home, fsHome);
  } finally {
    fileSys.close();
    cluster.shutdown();
  }
}
Example 12
Source File: AbstractPolicy.java From kafka-connect-fs with Apache License 2.0
private void configFs(Map<String, Object> customConfigs) throws IOException {
  for (String uri : this.conf.getFsUris()) {
    Configuration fsConfig = new Configuration();
    customConfigs.entrySet().stream()
        .filter(entry -> entry.getKey().startsWith(FsSourceTaskConfig.POLICY_PREFIX_FS))
        .forEach(entry -> fsConfig.set(
            entry.getKey().replace(FsSourceTaskConfig.POLICY_PREFIX_FS, ""),
            (String) entry.getValue()));

    Path workingDir = new Path(convert(uri));
    FileSystem fs = FileSystem.newInstance(workingDir.toUri(), fsConfig);
    fs.setWorkingDirectory(workingDir);
    this.fileSystems.add(fs);
  }
}
Example 13
Source File: TestJobCounters.java From big-c with Apache License 2.0
/**
 * Tests {@link TaskCounter}'s {@link TaskCounter.COMMITTED_HEAP_BYTES}.
 * The test consists of running a low-memory job which consumes less heap
 * memory and then running a high-memory job which consumes more heap memory,
 * and then ensuring that COMMITTED_HEAP_BYTES of low-memory job is smaller
 * than that of the high-memory job.
 * @throws IOException
 */
@Test
@SuppressWarnings("deprecation")
public void testHeapUsageCounter() throws Exception {
  JobConf conf = new JobConf();
  // create a local filesystem handle
  FileSystem fileSystem = FileSystem.getLocal(conf);

  // define test root directories
  Path rootDir = new Path(System.getProperty("test.build.data", "/tmp"));
  Path testRootDir = new Path(rootDir, "testHeapUsageCounter");
  // cleanup the test root directory
  fileSystem.delete(testRootDir, true);
  // set the current working directory
  fileSystem.setWorkingDirectory(testRootDir);

  fileSystem.deleteOnExit(testRootDir);

  // create a mini cluster using the local file system
  MiniMRCluster mrCluster =
      new MiniMRCluster(1, fileSystem.getUri().toString(), 1);

  try {
    conf = mrCluster.createJobConf();
    JobClient jobClient = new JobClient(conf);

    // define job input
    Path inDir = new Path(testRootDir, "in");
    // create input data
    createWordsFile(inDir, conf);

    // configure and run a low memory job which will run without loading the
    // jvm's heap
    RunningJob lowMemJob =
        runHeapUsageTestJob(conf, testRootDir, "-Xms32m -Xmx1G",
                            0, 0, fileSystem, jobClient, inDir);
    JobID lowMemJobID = lowMemJob.getID();
    long lowMemJobMapHeapUsage = getTaskCounterUsage(jobClient, lowMemJobID,
                                                     1, 0, TaskType.MAP);
    System.out.println("Job1 (low memory job) map task heap usage: "
                       + lowMemJobMapHeapUsage);
    long lowMemJobReduceHeapUsage =
        getTaskCounterUsage(jobClient, lowMemJobID, 1, 0, TaskType.REDUCE);
    System.out.println("Job1 (low memory job) reduce task heap usage: "
                       + lowMemJobReduceHeapUsage);

    // configure and run a high memory job which will load the jvm's heap
    RunningJob highMemJob =
        runHeapUsageTestJob(conf, testRootDir, "-Xms32m -Xmx1G",
                            lowMemJobMapHeapUsage + 256*1024*1024,
                            lowMemJobReduceHeapUsage + 256*1024*1024,
                            fileSystem, jobClient, inDir);
    JobID highMemJobID = highMemJob.getID();
    long highMemJobMapHeapUsage = getTaskCounterUsage(jobClient, highMemJobID,
                                                      1, 0, TaskType.MAP);
    System.out.println("Job2 (high memory job) map task heap usage: "
                       + highMemJobMapHeapUsage);
    long highMemJobReduceHeapUsage =
        getTaskCounterUsage(jobClient, highMemJobID, 1, 0, TaskType.REDUCE);
    System.out.println("Job2 (high memory job) reduce task heap usage: "
                       + highMemJobReduceHeapUsage);

    assertTrue("Incorrect map heap usage reported by the map task",
               lowMemJobMapHeapUsage < highMemJobMapHeapUsage);
    assertTrue("Incorrect reduce heap usage reported by the reduce task",
               lowMemJobReduceHeapUsage < highMemJobReduceHeapUsage);
  } finally {
    // shutdown the mr cluster
    mrCluster.shutdown();
    try {
      fileSystem.delete(testRootDir, true);
    } catch (IOException ioe) {
    }
  }
}
Example 14
Source File: IsolationRunner.java From RDFS with Apache License 2.0
/**
 * Run a single task
 * @param args the first argument is the task directory
 */
public static void main(String[] args)
    throws ClassNotFoundException, IOException, InterruptedException {
  if (args.length != 1) {
    System.out.println("Usage: IsolationRunner <path>/job.xml");
    System.exit(1);
  }
  File jobFilename = new File(args[0]);
  if (!jobFilename.exists() || !jobFilename.isFile()) {
    System.out.println(jobFilename + " is not a valid job file.");
    System.exit(1);
  }
  JobConf conf = new JobConf(new Path(jobFilename.toString()));
  TaskAttemptID taskId = TaskAttemptID.forName(conf.get("mapred.task.id"));
  boolean isMap = conf.getBoolean("mapred.task.is.map", true);
  int partition = conf.getInt("mapred.task.partition", 0);

  // setup the local and user working directories
  FileSystem local = FileSystem.getLocal(conf);
  LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
  File workDirName = new File(lDirAlloc.getLocalPathToRead(
      TaskTracker.getLocalTaskDir(
          taskId.getJobID().toString(),
          taskId.toString())
      + Path.SEPARATOR + "work", conf).toString());
  local.setWorkingDirectory(new Path(workDirName.toString()));
  FileSystem.get(conf).setWorkingDirectory(conf.getWorkingDirectory());

  // set up a classloader with the right classpath
  ClassLoader classLoader = makeClassLoader(conf, workDirName);
  Thread.currentThread().setContextClassLoader(classLoader);
  conf.setClassLoader(classLoader);

  Task task;
  if (isMap) {
    Path localSplit = new Path(new Path(jobFilename.toString()).getParent(),
                               "split.dta");
    DataInputStream splitFile = FileSystem.getLocal(conf).open(localSplit);
    String splitClass = Text.readString(splitFile);
    BytesWritable split = new BytesWritable();
    split.readFields(splitFile);
    splitFile.close();
    task = new MapTask(jobFilename.toString(), taskId, partition,
                       splitClass, split, 1, conf.getUser());
  } else {
    int numMaps = conf.getNumMapTasks();
    fillInMissingMapOutputs(local, taskId, numMaps, conf);
    task = new ReduceTask(jobFilename.toString(), taskId, partition,
                          numMaps, 1, conf.getUser());
  }
  task.setConf(conf);
  task.run(conf, new FakeUmbilical());
}
Example 15
Source File: TestJobCounters.java From hadoop with Apache License 2.0
/**
 * Tests {@link TaskCounter}'s {@link TaskCounter.COMMITTED_HEAP_BYTES}.
 * The test consists of running a low-memory job which consumes less heap
 * memory and then running a high-memory job which consumes more heap memory,
 * and then ensuring that COMMITTED_HEAP_BYTES of low-memory job is smaller
 * than that of the high-memory job.
 * @throws IOException
 */
@Test
@SuppressWarnings("deprecation")
public void testHeapUsageCounter() throws Exception {
  JobConf conf = new JobConf();
  // create a local filesystem handle
  FileSystem fileSystem = FileSystem.getLocal(conf);

  // define test root directories
  Path rootDir = new Path(System.getProperty("test.build.data", "/tmp"));
  Path testRootDir = new Path(rootDir, "testHeapUsageCounter");
  // cleanup the test root directory
  fileSystem.delete(testRootDir, true);
  // set the current working directory
  fileSystem.setWorkingDirectory(testRootDir);

  fileSystem.deleteOnExit(testRootDir);

  // create a mini cluster using the local file system
  MiniMRCluster mrCluster =
      new MiniMRCluster(1, fileSystem.getUri().toString(), 1);

  try {
    conf = mrCluster.createJobConf();
    JobClient jobClient = new JobClient(conf);

    // define job input
    Path inDir = new Path(testRootDir, "in");
    // create input data
    createWordsFile(inDir, conf);

    // configure and run a low memory job which will run without loading the
    // jvm's heap
    RunningJob lowMemJob =
        runHeapUsageTestJob(conf, testRootDir, "-Xms32m -Xmx1G",
                            0, 0, fileSystem, jobClient, inDir);
    JobID lowMemJobID = lowMemJob.getID();
    long lowMemJobMapHeapUsage = getTaskCounterUsage(jobClient, lowMemJobID,
                                                     1, 0, TaskType.MAP);
    System.out.println("Job1 (low memory job) map task heap usage: "
                       + lowMemJobMapHeapUsage);
    long lowMemJobReduceHeapUsage =
        getTaskCounterUsage(jobClient, lowMemJobID, 1, 0, TaskType.REDUCE);
    System.out.println("Job1 (low memory job) reduce task heap usage: "
                       + lowMemJobReduceHeapUsage);

    // configure and run a high memory job which will load the jvm's heap
    RunningJob highMemJob =
        runHeapUsageTestJob(conf, testRootDir, "-Xms32m -Xmx1G",
                            lowMemJobMapHeapUsage + 256*1024*1024,
                            lowMemJobReduceHeapUsage + 256*1024*1024,
                            fileSystem, jobClient, inDir);
    JobID highMemJobID = highMemJob.getID();
    long highMemJobMapHeapUsage = getTaskCounterUsage(jobClient, highMemJobID,
                                                      1, 0, TaskType.MAP);
    System.out.println("Job2 (high memory job) map task heap usage: "
                       + highMemJobMapHeapUsage);
    long highMemJobReduceHeapUsage =
        getTaskCounterUsage(jobClient, highMemJobID, 1, 0, TaskType.REDUCE);
    System.out.println("Job2 (high memory job) reduce task heap usage: "
                       + highMemJobReduceHeapUsage);

    assertTrue("Incorrect map heap usage reported by the map task",
               lowMemJobMapHeapUsage < highMemJobMapHeapUsage);
    assertTrue("Incorrect reduce heap usage reported by the reduce task",
               lowMemJobReduceHeapUsage < highMemJobReduceHeapUsage);
  } finally {
    // shutdown the mr cluster
    mrCluster.shutdown();
    try {
      fileSystem.delete(testRootDir, true);
    } catch (IOException ioe) {
    }
  }
}
Example 16
Source File: EntityOutputFormatIT.java From accumulo-recipes with Apache License 2.0
public void runJob(Job job, EntityStore entityStore)
    throws IOException, AccumuloSecurityException, ClassNotFoundException, InterruptedException,
    TableExistsException, AccumuloException, TableNotFoundException {
  File dir = temporaryFolder.newFolder("input");

  FileOutputStream fileOutputStream = new FileOutputStream(new File(dir, "uuids.txt"));
  PrintWriter printWriter = new PrintWriter(fileOutputStream);
  int countTotalResults = 1000;
  try {
    for (int i = 0; i < countTotalResults; i++) {
      printWriter.println(i + "");
    }
  } finally {
    printWriter.flush();
    fileOutputStream.close();
  }

  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.getLocal(conf);
  fs.setWorkingDirectory(new Path(dir.getAbsolutePath()));

  Path inputPath = fs.makeQualified(new Path(dir.getAbsolutePath())); // local path

  EntityOutputFomat.setZooKeeperInstance(job, accumuloMiniClusterDriver.getClientConfiguration());
  EntityOutputFomat.setConnectorInfo(job, PRINCIPAL,
      new PasswordToken(accumuloMiniClusterDriver.getRootPassword()));
  job.setJarByClass(getClass());
  job.setMapperClass(TestMapper.class);
  job.setInputFormatClass(TextInputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(EntityWritable.class);
  job.setOutputFormatClass(EntityOutputFomat.class);
  FileInputFormat.setInputPaths(job, inputPath);

  job.submit();
  job.waitForCompletion(true);

  Node query = QueryBuilder.create().and().eq(KEY_1, VAL_1).end().build();

  Iterable<Entity> itr = entityStore.query(Collections.singleton(TYPE), query, null, new Auths("A"));

  List<Entity> queryResults = Lists.newArrayList(itr);
  assertEquals(countTotalResults, queryResults.size());
}
Example 17
Source File: EventOutputFormatIT.java From accumulo-recipes with Apache License 2.0
public void runJob(Job job, EventStore eventStore)
    throws IOException, AccumuloSecurityException, ClassNotFoundException, InterruptedException,
    TableExistsException, AccumuloException, TableNotFoundException {
  File dir = temporaryFolder.newFolder("input");

  FileOutputStream fileOutputStream = new FileOutputStream(new File(dir, "uuids.txt"));
  PrintWriter printWriter = new PrintWriter(fileOutputStream);
  int countTotalResults = 100;
  try {
    for (int i = 0; i < countTotalResults; i++) {
      printWriter.println("" + i);
    }
  } finally {
    printWriter.flush();
    fileOutputStream.close();
  }

  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.getLocal(conf);
  fs.setWorkingDirectory(new Path(dir.getAbsolutePath()));

  Path inputPath = fs.makeQualified(new Path(dir.getAbsolutePath())); // local path

  EventOutputFormat.setZooKeeperInstance(job, accumuloMiniClusterDriver.getClientConfiguration());
  EventOutputFormat.setConnectorInfo(job, PRINCIPAL,
      new PasswordToken(accumuloMiniClusterDriver.getRootPassword()));
  job.setJarByClass(getClass());
  job.setMapperClass(TestMapper.class);
  job.setInputFormatClass(TextInputFormat.class);
  job.setMapOutputKeyClass(Text.class);
  job.setMapOutputValueClass(EventWritable.class);
  job.setOutputFormatClass(EventOutputFormat.class);
  FileInputFormat.setInputPaths(job, inputPath);

  job.submit();
  job.waitForCompletion(true);

  Iterable<Event> itr = eventStore.query(new Date(currentTimeMillis() - 25000), new Date(),
      Collections.singleton(TYPE),
      QueryBuilder.create().and().eq(KEY_1, VAL_1).end().build(), null, DEFAULT_AUTHS);

  List<Event> queryResults = Lists.newArrayList(itr);
  assertEquals(countTotalResults, queryResults.size());
}
Example 18
Source File: IsolationRunner.java From hadoop-gpu with Apache License 2.0
/**
 * Run a single task
 * @param args the first argument is the task directory
 */
public static void main(String[] args)
    throws ClassNotFoundException, IOException, InterruptedException {
  if (args.length != 1) {
    System.out.println("Usage: IsolationRunner <path>/job.xml");
    System.exit(1);
  }
  File jobFilename = new File(args[0]);
  if (!jobFilename.exists() || !jobFilename.isFile()) {
    System.out.println(jobFilename + " is not a valid job file.");
    System.exit(1);
  }
  JobConf conf = new JobConf(new Path(jobFilename.toString()));
  TaskAttemptID taskId = TaskAttemptID.forName(conf.get("mapred.task.id"));
  boolean isMap = conf.getBoolean("mapred.task.is.map", true);
  int partition = conf.getInt("mapred.task.partition", 0);

  // setup the local and user working directories
  FileSystem local = FileSystem.getLocal(conf);
  LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
  File workDirName = new File(lDirAlloc.getLocalPathToRead(
      TaskTracker.getLocalTaskDir(
          taskId.getJobID().toString(),
          taskId.toString())
      + Path.SEPARATOR + "work", conf).toString());
  local.setWorkingDirectory(new Path(workDirName.toString()));
  FileSystem.get(conf).setWorkingDirectory(conf.getWorkingDirectory());

  // set up a classloader with the right classpath
  ClassLoader classLoader = makeClassLoader(conf, workDirName);
  Thread.currentThread().setContextClassLoader(classLoader);
  conf.setClassLoader(classLoader);

  Task task;
  if (isMap) {
    Path localSplit = new Path(new Path(jobFilename.toString()).getParent(),
                               "split.dta");
    DataInputStream splitFile = FileSystem.getLocal(conf).open(localSplit);
    String splitClass = Text.readString(splitFile);
    BytesWritable split = new BytesWritable();
    split.readFields(splitFile);
    splitFile.close();
    task = new MapTask(jobFilename.toString(), taskId, partition,
                       splitClass, split);
  } else {
    int numMaps = conf.getNumMapTasks();
    fillInMissingMapOutputs(local, taskId, numMaps, conf);
    task = new ReduceTask(jobFilename.toString(), taskId, partition, numMaps);
  }
  task.setConf(conf);
  task.run(conf, new FakeUmbilical());
}