org.apache.hadoop.fs.FSDataOutputStream Java Examples
The following examples show how to use org.apache.hadoop.fs.FSDataOutputStream. Each example lists its source file, originating project, and license.
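As a quick orientation before the examples, here is a minimal, self-contained sketch of the typical FSDataOutputStream lifecycle: obtain a FileSystem, create the stream, write bytes, optionally call hflush() so concurrent readers can see the data, and close. The path and payload are placeholders, not values taken from any example below.

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FSDataOutputStreamBasics {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path path = new Path("/tmp/fsdataoutputstream-demo.txt"); // placeholder path

    // create(path, true) overwrites the file if it already exists.
    try (FSDataOutputStream out = fs.create(path, true)) {
      out.write("hello".getBytes(StandardCharsets.UTF_8));
      // hflush() pushes buffered data to the datanodes so that
      // readers can observe it before the stream is closed.
      out.hflush();
      // getPos() reports the current write offset in the file.
      System.out.println("bytes written: " + out.getPos());
    }
  }
}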
Example #1
Source File: TestOutOfBandAzureBlobOperationsLive.java From big-c with Apache License 2.0
@Test
public void outOfBandFolder_siblingCreate() throws Exception {
  // NOTE: manual use of CloudBlockBlob targets working directory explicitly.
  // WASB driver methods prepend working directory implicitly.
  String workingDir = "user/"
      + UserGroupInformation.getCurrentUser().getShortUserName() + "/";
  CloudBlockBlob blob = testAccount.getBlobReference(workingDir
      + "testFolder3/a/input/file");
  BlobOutputStream s = blob.openOutputStream();
  s.close();

  assertTrue(fs.exists(new Path("testFolder3/a/input/file")));

  Path targetFile = new Path("testFolder3/a/input/file2");
  FSDataOutputStream s2 = fs.create(targetFile);
  s2.close();
}
Example #2
Source File: TestFiHftp.java From hadoop with Apache License 2.0
private static byte[] createFile(FileSystem fs, Path name, long length,
    short replication, long blocksize) throws IOException {
  final FSDataOutputStream out = fs.create(name, false, 4096,
      replication, blocksize);
  try {
    for (long n = length; n > 0; ) {
      ran.nextBytes(buffer);
      final int w = n < buffer.length ? (int) n : buffer.length;
      out.write(buffer, 0, w);
      md5.update(buffer, 0, w);
      n -= w;
    }
  } finally {
    IOUtils.closeStream(out);
  }
  return md5.digest();
}
Example #3
Source File: Hdfs.java From pxf with Apache License 2.0
private void writeTableToStream(FSDataOutputStream stream, Table dataTable,
    String delimiter, Charset encoding) throws Exception {
  BufferedWriter bufferedWriter = new BufferedWriter(
      new OutputStreamWriter(stream, encoding));
  List<List<String>> data = dataTable.getData();

  for (int i = 0, flushThreshold = 0; i < data.size(); i++, flushThreshold++) {
    List<String> row = data.get(i);
    StringBuilder sBuilder = new StringBuilder();
    for (int j = 0; j < row.size(); j++) {
      sBuilder.append(row.get(j));
      if (j != row.size() - 1) {
        sBuilder.append(delimiter);
      }
    }
    if (i != data.size() - 1) {
      sBuilder.append("\n");
    }
    bufferedWriter.append(sBuilder.toString());
    if (flushThreshold > ROW_BUFFER) {
      bufferedWriter.flush();
    }
  }
  bufferedWriter.close();
}
Example #4
Source File: TestBlocksScheduledCounter.java From RDFS with Apache License 2.0
public void testBlocksScheduledCounter() throws IOException {
  MiniDFSCluster cluster = new MiniDFSCluster(new Configuration(), 1,
      true, null);
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();

  // open a file and write a few bytes:
  FSDataOutputStream out = fs.create(new Path("/testBlockScheduledCounter"));
  for (int i = 0; i < 1024; i++) {
    out.write(i);
  }
  // flush to make sure a block is allocated.
  ((DFSOutputStream) (out.getWrappedStream())).sync();

  ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
  cluster.getNameNode().namesystem.DFSNodesStatus(dnList, dnList);
  DatanodeDescriptor dn = dnList.get(0);

  assertEquals(1, dn.getBlocksScheduled());

  // close the file and the counter should go to zero.
  out.close();
  assertEquals(0, dn.getBlocksScheduled());
}
Example #5
Source File: TestOverwriteFileUnderConstruction.java From hbase with Apache License 2.0
@Test
public void testOverwrite() throws IOException {
  Path file = new Path("/" + name.getMethodName());
  FSDataOutputStream out1 = FS.create(file);
  FSDataOutputStream out2 = FS.create(file, true);
  out1.write(2);
  out2.write(1);
  try {
    out1.close();
    // a successful close is also OK for us so no assertion here, we just need
    // to confirm that the data in the file are correct.
  } catch (FileNotFoundException fnfe) {
    // hadoop3 throws one of these.
  } catch (RemoteException e) {
    // expected
    assertThat(e.unwrapRemoteException(), instanceOf(LeaseExpiredException.class));
  }
  out2.close();
  try (FSDataInputStream in = FS.open(file)) {
    assertEquals(1, in.read());
    assertEquals(-1, in.read());
  }
}
Example #6
Source File: CreateHTableJob.java From kylin with Apache License 2.0
private void exportHBaseConfiguration(String hbaseTableName) throws IOException {
  Configuration hbaseConf = HBaseConnection.getCurrentHBaseConfiguration();
  HadoopUtil.healSickConfig(hbaseConf);
  Job job = Job.getInstance(hbaseConf, hbaseTableName);
  HTable table = new HTable(hbaseConf, hbaseTableName);
  HFileOutputFormat3.configureIncrementalLoadMap(job, table);

  logger.info("Saving HBase configuration to {}", hbaseConfPath);
  FileSystem fs = HadoopUtil.getWorkingFileSystem();
  FSDataOutputStream out = null;
  try {
    out = fs.create(new Path(hbaseConfPath));
    job.getConfiguration().writeXml(out);
  } finally {
    IOUtils.closeQuietly(out);
  }
}
Example #7
Source File: TestStickyBit.java From big-c with Apache License 2.0
/**
 * Ensure that even if a file is in a directory with the sticky bit on,
 * another user can write to that file (assuming correct permissions).
 */
private void confirmCanAppend(Configuration conf, Path p) throws Exception {
  // Write a file to the new tmp directory as a regular user
  Path file = new Path(p, "foo");
  writeFile(hdfsAsUser1, file);
  hdfsAsUser1.setPermission(file, new FsPermission((short) 0777));

  // Log onto cluster as another user and attempt to append to file
  Path file2 = new Path(p, "foo");
  FSDataOutputStream h = null;
  try {
    h = hdfsAsUser2.append(file2);
    h.write("Some more data".getBytes());
    h.close();
    h = null;
  } finally {
    IOUtils.cleanup(null, h);
  }
}
Example #8
Source File: TestFileConcurrentReader.java From RDFS with Apache License 2.0
/**
 * test case: if the BlockSender decides there is only one packet to send,
 * the previous computation of the pktSize based on transferToAllowed
 * would result in too small a buffer to do the buffer-copy needed
 * for partial chunks.
 */
public void testUnfinishedBlockPacketBufferOverrun() throws IOException {
  // check that / exists
  Path path = new Path("/");
  System.out.println("Path : \"" + path.toString() + "\"");
  System.out.println(fileSystem.getFileStatus(path).isDir());
  assertTrue("/ should be a directory",
      fileSystem.getFileStatus(path).isDir());

  // create a new file in the root, write data, do not close
  Path file1 = new Path("/unfinished-block");
  final FSDataOutputStream stm =
      TestFileCreation.createFile(fileSystem, file1, 1);

  // write partial block and sync
  final int bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
  final int partialBlockSize = bytesPerChecksum - 1;
  writeFileAndSync(stm, partialBlockSize);

  // Make sure a client can read it before it is closed
  checkCanRead(fileSystem, file1, partialBlockSize);

  stm.close();
}
Example #9
Source File: TestMergeManager.java From tez with Apache License 2.0
private SrcFileInfo createFile(Configuration conf, FileSystem fs, Path path,
    int numPartitions, int numKeysPerPartition, int startKey) throws IOException {
  FSDataOutputStream outStream = fs.create(path);
  int currentKey = startKey;
  SrcFileInfo srcFileInfo = new SrcFileInfo();
  srcFileInfo.indexedRecords = new TezIndexRecord[numPartitions];
  srcFileInfo.path = path;
  for (int i = 0; i < numPartitions; i++) {
    long pos = outStream.getPos();
    IFile.Writer writer = new IFile.Writer(conf, outStream,
        IntWritable.class, IntWritable.class, null, null, null);
    for (int j = 0; j < numKeysPerPartition; j++) {
      writer.append(new IntWritable(currentKey), new IntWritable(currentKey));
      currentKey++;
    }
    writer.close();
    srcFileInfo.indexedRecords[i] = new TezIndexRecord(pos,
        writer.getRawLength(), writer.getCompressedLength());
  }
  outStream.close();
  return srcFileInfo;
}
Example #10
Source File: HoodieTestUtils.java From hudi with Apache License 2.0
public static void createCleanFiles(HoodieTableMetaClient metaClient, String basePath,
    String instantTime, Configuration configuration) throws IOException {
  createPendingCleanFiles(metaClient, instantTime);
  Path commitFile = new Path(basePath + "/" + HoodieTableMetaClient.METAFOLDER_NAME + "/"
      + HoodieTimeline.makeCleanerFileName(instantTime));
  FileSystem fs = FSUtils.getFs(basePath, configuration);
  try (FSDataOutputStream os = fs.create(commitFile, true)) {
    HoodieCleanStat cleanStats = new HoodieCleanStat(
        HoodieCleaningPolicy.KEEP_LATEST_FILE_VERSIONS,
        DEFAULT_PARTITION_PATHS[rand.nextInt(DEFAULT_PARTITION_PATHS.length)],
        new ArrayList<>(), new ArrayList<>(), new ArrayList<>(), instantTime);
    // Create the clean metadata
    HoodieCleanMetadata cleanMetadata = CleanerUtils.convertCleanMetadata(
        instantTime, Option.of(0L), Collections.singletonList(cleanStats));
    // Write empty clean metadata
    os.write(TimelineMetadataUtils.serializeCleanMetadata(cleanMetadata).get());
  }
}
Example #11
Source File: Util.java From spork with Apache License 2.0
static public void createInputFile(FileSystem fs, String fileName,
    String[] inputData) throws IOException {
  if (Util.WINDOWS) {
    fileName = fileName.replace('\\', '/');
  }
  if (fs.exists(new Path(fileName))) {
    throw new IOException("File " + fileName + " already exists on the FileSystem");
  }
  FSDataOutputStream stream = fs.create(new Path(fileName));
  PrintWriter pw = new PrintWriter(new OutputStreamWriter(stream, "UTF-8"));
  for (int i = 0; i < inputData.length; i++) {
    pw.print(inputData[i]);
    pw.print("\n");
  }
  pw.close();
}
Example #12
Source File: TestSwiftFileSystemRename.java From sahara-extra with Apache License 2.0
@Test(timeout = SWIFT_TEST_TIMEOUT)
public void testRenameFile() throws Exception {
  assumeRenameSupported();

  final Path old = new Path("/test/alice/file");
  final Path newPath = new Path("/test/bob/file");
  fs.mkdirs(newPath.getParent());
  final FSDataOutputStream fsDataOutputStream = fs.create(old);
  final byte[] message = "Some data".getBytes();
  fsDataOutputStream.write(message);
  fsDataOutputStream.close();
  assertTrue(fs.exists(old));
  rename(old, newPath, true, false, true);

  final FSDataInputStream bobStream = fs.open(newPath);
  final byte[] bytes = new byte[512];
  final int read = bobStream.read(bytes);
  bobStream.close();
  final byte[] buffer = new byte[read];
  System.arraycopy(bytes, 0, buffer, 0, read);
  assertEquals(new String(message), new String(buffer));
}
Example #13
Source File: IgniteHadoopFileSystemAbstractSelfTest.java From ignite with Apache License 2.0
/** @throws Exception If failed. */
@Test
public void testSetWorkingDirectory() throws Exception {
  Path dir = new Path("/tmp/nested/dir");
  Path file = new Path("file");

  fs.mkdirs(dir);
  fs.setWorkingDirectory(dir);

  FSDataOutputStream os = fs.create(file);
  os.close();

  String filePath = fs.getFileStatus(new Path(dir, file)).getPath().toString();
  assertTrue(filePath.contains("/tmp/nested/dir/file"));
}
Example #14
Source File: StringToDataSetExportFunction.java From deeplearning4j with Apache License 2.0
private void processBatchIfRequired(List<List<Writable>> list, boolean finalRecord)
    throws Exception {
  if (list.isEmpty())
    return;
  if (list.size() < batchSize && !finalRecord)
    return;

  RecordReader rr = new CollectionRecordReader(list);
  RecordReaderDataSetIterator iter = new RecordReaderDataSetIterator(rr, null,
      batchSize, labelIndex, labelIndex, numPossibleLabels, -1, regression);

  DataSet ds = iter.next();

  String filename = "dataset_" + uid + "_" + (outputCount++) + ".bin";
  URI uri = new URI(outputDir.getPath() + "/" + filename);
  Configuration c = conf == null ? DefaultHadoopConfig.get()
      : conf.getValue().getConfiguration();

  FileSystem file = FileSystem.get(uri, c);
  try (FSDataOutputStream out = file.create(new Path(uri))) {
    ds.save(out);
  }

  list.clear();
}
Example #15
Source File: TestBlockUnderConstruction.java From big-c with Apache License 2.0
void writeFile(Path file, FSDataOutputStream stm, int size) throws IOException {
  long blocksBefore = stm.getPos() / BLOCK_SIZE;

  TestFileCreation.writeFile(stm, BLOCK_SIZE);
  // need to make sure the full block is completely flushed to the DataNodes
  // (see FSOutputSummer#flush)
  stm.flush();
  int blocksAfter = 0;
  // wait until the block is allocated by DataStreamer
  BlockLocation[] locatedBlocks;
  while (blocksAfter <= blocksBefore) {
    locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
        file.toString(), 0L, BLOCK_SIZE * NUM_BLOCKS);
    blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
  }
}
Example #16
Source File: TestEvents.java From big-c with Apache License 2.0
private byte[] getEvents() throws Exception {
  ByteArrayOutputStream output = new ByteArrayOutputStream();
  FSDataOutputStream fsOutput = new FSDataOutputStream(output,
      new FileSystem.Statistics("scheme"));
  EventWriter writer = new EventWriter(fsOutput);
  writer.write(getJobPriorityChangedEvent());
  writer.write(getJobStatusChangedEvent());
  writer.write(getTaskUpdatedEvent());
  writer.write(getReduceAttemptKilledEvent());
  writer.write(getJobKilledEvent());
  writer.write(getSetupAttemptStartedEvent());
  writer.write(getTaskAttemptFinishedEvent());
  writer.write(getSetupAttemptFieledEvent());
  writer.write(getSetupAttemptKilledEvent());
  writer.write(getCleanupAttemptStartedEvent());
  writer.write(getCleanupAttemptFinishedEvent());
  writer.write(getCleanupAttemptFiledEvent());
  writer.write(getCleanupAttemptKilledEvent());
  writer.flush();
  writer.close();

  return output.toByteArray();
}
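Example #16 (and Example #17 after it) constructs FSDataOutputStream directly around another OutputStream instead of asking a FileSystem for one, which is handy in unit tests that want to capture the written bytes in memory. A minimal sketch of that pattern follows; the scheme name "mem" and the payload are illustrative, not taken from the example.

import java.io.ByteArrayOutputStream;
import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;

public class InMemoryFSDataOutputStream {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buffer = new ByteArrayOutputStream();
    // The Statistics argument tracks bytes written per scheme; "mem" is illustrative.
    FSDataOutputStream out = new FSDataOutputStream(buffer,
        new FileSystem.Statistics("mem"));
    out.write(new byte[] {1, 2, 3});
    out.close();
    // Everything written through the Hadoop stream now sits in the byte buffer.
    System.out.println("captured " + buffer.size() + " bytes");
  }
}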
Example #17
Source File: PrestoS3FileSystem.java From presto with Apache License 2.0
@Override
public FSDataOutputStream create(Path path, FsPermission permission, boolean overwrite,
    int bufferSize, short replication, long blockSize, Progressable progress)
    throws IOException {
  // Ignore the overwrite flag, since Presto always writes to unique file names.
  // Checking for file existence can break read-after-write consistency.

  if (!stagingDirectory.exists()) {
    createDirectories(stagingDirectory.toPath());
  }
  if (!stagingDirectory.isDirectory()) {
    throw new IOException("Configured staging path is not a directory: " + stagingDirectory);
  }
  File tempFile = createTempFile(stagingDirectory.toPath(), "presto-s3-", ".tmp").toFile();

  String key = keyFromPath(qualifiedPath(path));
  return new FSDataOutputStream(
      new PrestoS3OutputStream(s3, getBucketName(uri), key, tempFile, sseEnabled, sseType,
          sseKmsKeyId, multiPartUploadMinFileSize, multiPartUploadMinPartSize, s3AclType,
          requesterPaysEnabled, s3StorageClass),
      statistics);
}
Example #18
Source File: HadoopIgfs20FileSystemAbstractSelfTest.java From ignite with Apache License 2.0
/** @throws Exception If failed. */
@Test
public void testSetPermissionCheckNonRecursiveness() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path file = new Path(fsHome, "/tmp/my");

  FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(FsPermission.getDefault()));
  os.close();

  Path tmpDir = new Path(fsHome, "/tmp");
  FsPermission perm = new FsPermission((short) 123);
  fs.setPermission(tmpDir, perm);

  assertEquals(perm, fs.getFileStatus(tmpDir).getPermission());
  assertEquals(FsPermission.getDefault(), fs.getFileStatus(file).getPermission());
}
Example #19
Source File: TestTFileSplit.java From hadoop-gpu with Apache License 2.0
void createFile(int count, String compress) throws IOException {
  conf = new Configuration();
  path = new Path(ROOT, outputFile + "." + compress);
  fs = path.getFileSystem(conf);
  FSDataOutputStream out = fs.create(path);
  Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

  int nx;
  for (nx = 0; nx < count; nx++) {
    byte[] key = composeSortedKey(KEY, count, nx).getBytes();
    byte[] value = (VALUE + nx).getBytes();
    writer.append(key, value);
  }
  writer.close();
  out.close();
}
Example #20
Source File: PersistedHDFSManagerTest.java From Knowage-Server with GNU Affero General Public License v3.0
@Test
public void testPersistDataStore() {
  IDataStore dataStore = Mockito.mock(IDataStore.class);
  IMetaData metaData = Mockito.mock(IMetaData.class);
  IRecord record = Mockito.mock(IRecord.class);
  IField fieldInt = Mockito.mock(IField.class);
  IField fieldStr = Mockito.mock(IField.class);

  Mockito.when(dataStore.getMetaData()).thenReturn(metaData);
  Mockito.when(dataStore.getRecordAt(Mockito.anyInt())).thenReturn(record);
  Mockito.when(dataStore.getRecordsCount()).thenReturn(10L);
  Mockito.when(metaData.getFieldCount()).thenReturn(2);
  Mockito.when(metaData.getFieldName(1)).thenReturn("column_Int");
  Mockito.when(metaData.getFieldName(2)).thenReturn("column_Str");
  Mockito.when(metaData.getFieldType(1)).thenReturn(Integer.class);
  Mockito.when(metaData.getFieldType(2)).thenReturn(String.class);
  Mockito.when(record.getFieldAt(1)).thenReturn(fieldInt);
  Mockito.when(record.getFieldAt(2)).thenReturn(fieldStr);
  Mockito.when(fieldInt.getValue()).thenReturn(new Integer(1));
  Mockito.when(fieldStr.getValue()).thenReturn(new String("test"));

  FSDataOutputStream fsOS = (FSDataOutputStream) hdfsManager.persistDataStore(
      dataStore, "test_table", "signature_xyz");
  assertNotNull(fsOS);
  assertEquals(fsOS.size(), 232);
}
Example #21
Source File: HDFSStorage.java From incubator-heron with Apache License 2.0
@Override
public void storeCheckpoint(CheckpointInfo info, Checkpoint checkpoint)
    throws StatefulStorageException {
  Path path = new Path(getCheckpointPath(info.getCheckpointId(),
      info.getComponent(), info.getInstanceId()));

  // We need to ensure the existence of directories structure,
  // since it is not guaranteed that FileSystem.create(..) always creates parents' dirs.
  String checkpointDir = getCheckpointDir(info.getCheckpointId(), info.getComponent());
  createDir(checkpointDir);

  FSDataOutputStream out = null;
  try {
    out = fileSystem.create(path);
    checkpoint.getCheckpoint().writeTo(out);
  } catch (IOException e) {
    throw new StatefulStorageException("Failed to persist", e);
  } finally {
    SysUtils.closeIgnoringExceptions(out);
  }
}
Example #22
Source File: TestDatanodeDeath.java From big-c with Apache License 2.0
@Override
public void run() {
  System.out.println("Workload starting ");
  for (int i = 0; i < numberOfFiles; i++) {
    Path filename = new Path(id + "." + i);
    try {
      System.out.println("Workload processing file " + filename);
      FSDataOutputStream stm = createFile(fs, filename, replication);
      DFSOutputStream dfstream = (DFSOutputStream) (stm.getWrappedStream());
      dfstream.setArtificialSlowdown(1000);
      writeFile(stm, myseed);
      stm.close();
      checkFile(fs, filename, replication, numBlocks, fileSize, myseed);
    } catch (Throwable e) {
      System.out.println("Workload exception " + e);
      assertTrue(e.toString(), false);
    }

    // increment the stamp to indicate that another file is done.
    synchronized (this) {
      stamp++;
    }
  }
}
Example #23
Source File: HadoopIgfs20FileSystemAbstractSelfTest.java From ignite with Apache License 2.0
/** @throws Exception If failed. */
@Test
public void testSetOwnerCheckNonRecursiveness() throws Exception {
  Path fsHome = new Path(primaryFsUri);
  Path file = new Path(fsHome, "/tmp/my");

  FSDataOutputStream os = fs.create(file, EnumSet.noneOf(CreateFlag.class),
      Options.CreateOpts.perms(FsPermission.getDefault()));
  os.close();

  Path tmpDir = new Path(fsHome, "/tmp");
  fs.setOwner(file, "fUser", "fGroup");
  fs.setOwner(tmpDir, "dUser", "dGroup");

  assertEquals("dUser", fs.getFileStatus(tmpDir).getOwner());
  assertEquals("dGroup", fs.getFileStatus(tmpDir).getGroup());
  assertEquals("fUser", fs.getFileStatus(file).getOwner());
  assertEquals("fGroup", fs.getFileStatus(file).getGroup());
}
Example #24
Source File: TestVLong.java From RDFS with Apache License 2.0
private long writeAndVerify(int shift) throws IOException {
  FSDataOutputStream out = fs.create(path);
  for (int i = Short.MIN_VALUE; i <= Short.MAX_VALUE; ++i) {
    Utils.writeVLong(out, ((long) i) << shift);
  }
  out.close();

  FSDataInputStream in = fs.open(path);
  for (int i = Short.MIN_VALUE; i <= Short.MAX_VALUE; ++i) {
    long n = Utils.readVLong(in);
    Assert.assertEquals(n, ((long) i) << shift);
  }
  in.close();

  long ret = fs.getFileStatus(path).getLen();
  fs.delete(path, false);
  return ret;
}
Example #25
Source File: RegexBulkLoadToolIT.java From phoenix with Apache License 2.0
@Test
public void testBasicImport() throws Exception {
  Statement stmt = conn.createStatement();
  stmt.execute("CREATE TABLE S.TABLE1 (ID INTEGER NOT NULL PRIMARY KEY, "
      + "NAME VARCHAR, T DATE) SPLIT ON (1,2)");

  FileSystem fs = FileSystem.get(getUtility().getConfiguration());
  FSDataOutputStream outputStream = fs.create(new Path("/tmp/input1.csv"));
  PrintWriter printWriter = new PrintWriter(outputStream);
  printWriter.println("1,Name 1,1970/01/01");
  printWriter.println("2,Name 2,1970/01/02");
  printWriter.close();

  RegexBulkLoadTool regexBulkLoadTool = new RegexBulkLoadTool();
  regexBulkLoadTool.setConf(getUtility().getConfiguration());
  regexBulkLoadTool.getConf().set(DATE_FORMAT_ATTRIB, "yyyy/MM/dd");
  int exitCode = regexBulkLoadTool.run(new String[] {
      "--input", "/tmp/input1.csv",
      "--table", "table1",
      "--schema", "s",
      "--regex", "([^,]*),([^,]*),([^,]*)",
      "--zookeeper", zkQuorum});
  assertEquals(0, exitCode);

  ResultSet rs = stmt.executeQuery("SELECT id, name, t FROM s.table1 ORDER BY id");
  assertTrue(rs.next());
  assertEquals(1, rs.getInt(1));
  assertEquals("Name 1", rs.getString(2));
  assertEquals(DateUtil.parseDate("1970-01-01"), rs.getDate(3));
  assertTrue(rs.next());
  assertEquals(2, rs.getInt(1));
  assertEquals("Name 2", rs.getString(2));
  assertEquals(DateUtil.parseDate("1970-01-02"), rs.getDate(3));
  assertFalse(rs.next());

  rs.close();
  stmt.close();
}
Example #26
Source File: AbstractFileOutputOperator.java From attic-apex-malhar with Apache License 2.0
/**
 * Opens the stream for the specified file path in either append mode or create mode.
 *
 * @param filepath this is the path of either the actual file or the corresponding temporary file.
 * @param append true for opening the file in append mode; false otherwise.
 * @return output stream.
 * @throws IOException
 */
protected FSDataOutputStream openStream(Path filepath, boolean append) throws IOException {
  FSDataOutputStream fsOutput;
  if (append) {
    fsOutput = openStreamInAppendMode(filepath);
  } else {
    fsOutput = fs.create(filepath, (short) replication);
    fs.setPermission(filepath, FsPermission.createImmutable(filePermission));
  }
  return fsOutput;
}
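The openStream helper above switches between append and create modes; openStreamInAppendMode is specific to the operator class, but the same decision can be sketched with only the stock FileSystem API. The path handling and replication factor below are placeholders, not values from the example.

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class AppendOrCreate {
  // Append to the file when requested and it exists; otherwise create it fresh.
  static FSDataOutputStream openStream(FileSystem fs, Path path, boolean append)
      throws IOException {
    if (append && fs.exists(path)) {
      return fs.append(path); // requires a filesystem that supports append (e.g. HDFS)
    }
    return fs.create(path, (short) 1); // placeholder replication factor
  }
}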
Example #27
Source File: TestCopyMapper.java From hadoop with Apache License 2.0
/**
 * Append specified length of bytes to a given file
 */
private static void appendFile(Path p, int length) throws IOException {
  byte[] toAppend = new byte[length];
  Random random = new Random();
  random.nextBytes(toAppend);
  FSDataOutputStream out = cluster.getFileSystem().append(p);
  try {
    out.write(toAppend);
  } finally {
    IOUtils.closeStream(out);
  }
}
Example #28
Source File: IFile.java From hadoop with Apache License 2.0
public Writer(Configuration conf, FSDataOutputStream out,
    Class<K> keyClass, Class<V> valueClass,
    CompressionCodec codec, Counters.Counter writesCounter,
    boolean ownOutputStream) throws IOException {
  this.writtenRecordsCounter = writesCounter;
  this.checksumOut = new IFileOutputStream(out);
  this.rawOut = out;
  this.start = this.rawOut.getPos();
  if (codec != null) {
    this.compressor = CodecPool.getCompressor(codec);
    if (this.compressor != null) {
      this.compressor.reset();
      this.compressedOut = codec.createOutputStream(checksumOut, compressor);
      this.out = new FSDataOutputStream(this.compressedOut, null);
      this.compressOutput = true;
    } else {
      LOG.warn("Could not obtain compressor from CodecPool");
      this.out = new FSDataOutputStream(checksumOut, null);
    }
  } else {
    this.out = new FSDataOutputStream(checksumOut, null);
  }
  this.keyClass = keyClass;
  this.valueClass = valueClass;

  if (keyClass != null) {
    SerializationFactory serializationFactory = new SerializationFactory(conf);
    this.keySerializer = serializationFactory.getSerializer(keyClass);
    this.keySerializer.open(buffer);
    this.valueSerializer = serializationFactory.getSerializer(valueClass);
    this.valueSerializer.open(buffer);
  }
  this.ownOutputStream = ownOutputStream;
}
Example #29
Source File: TestLargeBlock.java From RDFS with Apache License 2.0
/**
 * Writes pattern to file
 */
static void writeFile(FSDataOutputStream stm, final long fileSize) throws IOException {
  final int writeSize = pattern.length * 8 * 1024 * 1024; // write in chunks of 64 MB
  final int writeCount = (int) ((fileSize / ((long) writeSize))
      + ((fileSize % ((long) writeSize) == 0L) ? 0L : 1L));

  if (writeSize > Integer.MAX_VALUE) {
    throw new IOException("A single write is too large " + writeSize);
  }

  long bytesToWrite = fileSize;
  byte[] b = new byte[writeSize];

  // initialize buffer
  for (int j = 0; j < writeSize; j++) {
    b[j] = pattern[j % pattern.length];
  }

  int i = 0;
  while (bytesToWrite > 0) {
    // how many bytes we are writing in this iteration
    int thiswrite = (int) Math.min(writeSize, bytesToWrite);
    stm.write(b, 0, thiswrite);
    // System.out.println("Wrote[" + i + "/" + writeCount + "] " + thiswrite + " bytes.");
    bytesToWrite -= thiswrite;
    i++;
  }
}
Example #30
Source File: HdfsDirFile.java From spliceengine with GNU Affero General Public License v3.0
@Override
public boolean createNewFile() throws IOException {
  FSDataOutputStream os = null;
  try {
    FileSystem fs = getFileSystem();
    os = fs.create(new Path(path), false);
    return true;
  } catch (IOException e) {
    LOG.error(String.format("An exception occurred while creating the path '%s'.", path), e);
    return false;
  } finally {
    if (os != null) {
      os.close();
    }
  }
}