Java Code Examples for org.apache.lucene.util.IOUtils#fsync()
The following examples show how to use
org.apache.lucene.util.IOUtils#fsync().
You can vote up the ones you like or vote down the ones you don't like,
and go to the original project or source file by following the links above each example. You may check out the related API usage on the sidebar.
Example 1
Source File: FsBlobContainer.java From Elasticsearch with Apache License 2.0 | 5 votes |
/**
 * Writes the given stream to a blob file under this container's path, then
 * fsyncs the file and its parent directory so the blob is durable on return.
 */
@Override
public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
    final Path blobPath = path.resolve(blobName);
    try (OutputStream out = Files.newOutputStream(blobPath)) {
        final byte[] buffer = new byte[blobStore.bufferSizeInBytes()];
        Streams.copy(inputStream, out, buffer);
    }
    IOUtils.fsync(blobPath, false); // flush the file's contents to disk
    IOUtils.fsync(path, true);      // flush the directory entry (metadata)
}
Example 2
Source File: FsBlobContainer.java From Elasticsearch with Apache License 2.0 | 5 votes |
/**
 * Writes the given bytes to a blob file under this container's path, then
 * fsyncs the file and its parent directory so the blob is durable on return.
 */
@Override
public void writeBlob(String blobName, BytesReference data) throws IOException {
    final Path blobPath = path.resolve(blobName);
    try (OutputStream out = Files.newOutputStream(blobPath)) {
        data.writeTo(out);
    }
    IOUtils.fsync(blobPath, false); // flush the file's contents to disk
    IOUtils.fsync(path, true);      // flush the directory entry (metadata)
}
Example 3
Source File: MultiDataPathUpgrader.java From Elasticsearch with Apache License 2.0 | 5 votes |
/**
 * Moves all files of the shard sub-folder {@code folderName} from every data path
 * except the target path into {@code targetDir}, then fsyncs the moved files and
 * the target directory. Each file is first copied to a temp file in the target
 * directory and then atomically renamed, so a crash mid-move never leaves a
 * half-written file (at worst a duplicate exists).
 *
 * @param shard      shard being upgraded (used for logging only)
 * @param targetPath the shard path being consolidated onto; its data path is skipped
 * @param targetDir  destination directory for the moved files
 * @param folderName sub-folder under each data path to migrate (e.g. index/translog)
 * @param paths      all candidate data paths to migrate from
 * @throws IOException if any copy, move, delete, or fsync fails
 */
private void upgradeFiles(ShardId shard, ShardPath targetPath, final Path targetDir, String folderName, Path[] paths) throws IOException {
    List<Path> movedFiles = new ArrayList<>();
    for (Path path : paths) {
        // never move files out of the path we are consolidating onto
        if (path.equals(targetPath.getDataPath()) == false) {
            final Path sourceDir = path.resolve(folderName);
            if (Files.exists(sourceDir)) {
                logger.info("{} upgrading [{}] from [{}] to [{}]", shard, folderName, sourceDir, targetDir);
                try (DirectoryStream<Path> stream = Files.newDirectoryStream(sourceDir)) {
                    Files.createDirectories(targetDir);
                    for (Path file : stream) {
                        if (IndexWriter.WRITE_LOCK_NAME.equals(file.getFileName().toString()) || Files.isDirectory(file)) {
                            continue; // skip write.lock
                        }
                        logger.info("{} move file [{}] size: [{}]", shard, file.getFileName(), Files.size(file));
                        final Path targetFile = targetDir.resolve(file.getFileName());
                        /* We are pessimistic and do a copy first to the other path and then and atomic
                           move to rename it such that in the worst case the file exists twice but is
                           never lost or half written. */
                        final Path targetTempFile = Files.createTempFile(targetDir, "upgrade_", "_" + file.getFileName().toString());
                        Files.copy(file, targetTempFile, StandardCopyOption.COPY_ATTRIBUTES, StandardCopyOption.REPLACE_EXISTING);
                        Files.move(targetTempFile, targetFile, StandardCopyOption.ATOMIC_MOVE); // we are on the same FS - this must work otherwise all bets are off
                        Files.delete(file);
                        movedFiles.add(targetFile);
                    }
                }
            }
        }
    }
    if (movedFiles.isEmpty() == false) {
        // fsync later it might be on disk already
        logger.info("{} fsync files", shard);
        for (Path moved : movedFiles) {
            logger.info("{} syncing [{}]", shard, moved.getFileName());
            IOUtils.fsync(moved, false);
        }
        // sync the directory last so the new directory entries are durable too
        logger.info("{} syncing directory [{}]", shard, targetDir);
        IOUtils.fsync(targetDir, true);
    }
}
Example 4
Source File: FSDirectory.java From lucene-solr with Apache License 2.0 | 5 votes |
@Override public void syncMetaData() throws IOException { // TODO: to improve listCommits(), IndexFileDeleter could call this after deleting segments_Ns ensureOpen(); IOUtils.fsync(directory, true); maybeDeletePendingFiles(); }
Example 5
Source File: DistribPackageStore.java From lucene-solr with Apache License 2.0 | 5 votes |
/**
 * Internal API.
 *
 * Persists a package file and its metadata under solrHome. The metadata JSON is
 * validated and written (and fsynced) before the data file, so a data file never
 * exists without readable metadata.
 *
 * @param solrHome Solr home directory the real paths are resolved against
 * @param path     logical file-store path of the artifact
 * @param data     raw file contents
 * @param meta     JSON metadata describing the file
 * @throws IOException   if directory creation, writing, or fsync fails
 * @throws SolrException if the metadata is missing or empty
 */
public static void _persistToFile(Path solrHome, String path, ByteBuffer data, ByteBuffer meta) throws IOException {
    Path realpath = _getRealPath(path, solrHome);
    File file = realpath.toFile();
    File parent = file.getParentFile();
    // Bug fix: mkdirs() returns false on failure instead of throwing; the original
    // ignored the result, deferring the error to an opaque FileNotFoundException.
    if (!parent.exists() && !parent.mkdirs() && !parent.exists()) {
        throw new IOException("Could not create directory: " + parent);
    }
    @SuppressWarnings({"rawtypes"})
    Map m = (Map) Utils.fromJSON(meta.array(), meta.arrayOffset(), meta.limit());
    if (m == null || m.isEmpty()) {
        throw new SolrException(SERVER_ERROR, "invalid metadata , discarding : " + path);
    }
    // Write metadata first so the data file is never visible without it.
    File metadataFile = _getRealPath(_getMetapath(path), solrHome).toFile();
    try (FileOutputStream fos = new FileOutputStream(metadataFile)) {
        fos.write(meta.array(), 0, meta.limit());
    }
    IOUtils.fsync(metadataFile.toPath(), false);
    try (FileOutputStream fos = new FileOutputStream(file)) {
        fos.write(data.array(), 0, data.limit());
    }
    IOUtils.fsync(file.toPath(), false);
}
Example 6
Source File: Translog.java From Elasticsearch with Apache License 2.0 | 4 votes |
/**
 * Recovers all translog files found on disk, from the last committed generation
 * up to and including the generation named in {@code checkpoint}.
 *
 * Also ensures a commit checkpoint file exists for the uncommitted generation:
 * if missing, the live checkpoint is copied in via a temp file + fsync + atomic
 * rename so a crash (or disk-full) mid-copy leaves the translog consistent.
 *
 * @return readers for every recovered translog generation, oldest first
 * @throws IOException if a translog file is missing, or reading/copying fails
 * @throws IllegalStateException if generations are not consecutive, or an
 *         existing commit checkpoint disagrees with the live checkpoint
 */
private final ArrayList<ImmutableTranslogReader> recoverFromFiles(TranslogGeneration translogGeneration, Checkpoint checkpoint) throws IOException {
    boolean success = false;
    ArrayList<ImmutableTranslogReader> foundTranslogs = new ArrayList<>();
    // a temp file to copy checkpoint to - note it must be on the same FS otherwise atomic move won't work
    final Path tempFile = Files.createTempFile(location, TRANSLOG_FILE_PREFIX, TRANSLOG_FILE_SUFFIX);
    boolean tempFileRenamed = false;
    try (ReleasableLock lock = writeLock.acquire()) {
        logger.debug("open uncommitted translog checkpoint {}", checkpoint);
        final String checkpointTranslogFile = getFilename(checkpoint.generation);
        // open every committed generation below the checkpoint generation; they must be consecutive
        for (long i = translogGeneration.translogFileGeneration; i < checkpoint.generation; i++) {
            Path committedTranslogFile = location.resolve(getFilename(i));
            if (Files.exists(committedTranslogFile) == false) {
                throw new IllegalStateException("translog file doesn't exist with generation: " + i + " lastCommitted: " + lastCommittedTranslogFileGeneration + " checkpoint: " + checkpoint.generation + " - translog ids must be consecutive");
            }
            final ImmutableTranslogReader reader = openReader(committedTranslogFile, Checkpoint.read(location.resolve(getCommitCheckpointFileName(i))));
            foundTranslogs.add(reader);
            logger.debug("recovered local translog from checkpoint {}", checkpoint);
        }
        // finally open the uncommitted generation described by the live checkpoint itself
        foundTranslogs.add(openReader(location.resolve(checkpointTranslogFile), checkpoint));
        Path commitCheckpoint = location.resolve(getCommitCheckpointFileName(checkpoint.generation));
        if (Files.exists(commitCheckpoint)) {
            // a commit checkpoint already exists; it must match the live checkpoint exactly
            Checkpoint checkpointFromDisk = Checkpoint.read(commitCheckpoint);
            if (checkpoint.equals(checkpointFromDisk) == false) {
                throw new IllegalStateException("Checkpoint file " + commitCheckpoint.getFileName() + " already exists but has corrupted content expected: " + checkpoint + " but got: " + checkpointFromDisk);
            }
        } else {
            // we first copy this into the temp-file and then fsync it, followed by an atomic move
            // into the target file - that way if we hit a disk-full here we are still in a consistent state.
            Files.copy(location.resolve(CHECKPOINT_FILE_NAME), tempFile, StandardCopyOption.REPLACE_EXISTING);
            IOUtils.fsync(tempFile, false);
            Files.move(tempFile, commitCheckpoint, StandardCopyOption.ATOMIC_MOVE);
            tempFileRenamed = true;
            // we only fsync the directory; the tempFile was already fsynced
            IOUtils.fsync(commitCheckpoint.getParent(), true);
        }
        success = true;
    } finally {
        if (success == false) {
            // close anything we opened before the failure without masking the original exception
            IOUtils.closeWhileHandlingException(foundTranslogs);
        }
        if (tempFileRenamed == false) {
            // best effort: temp file was never renamed into place, so remove it
            try {
                Files.delete(tempFile);
            } catch (IOException ex) {
                logger.warn("failed to delete temp file {}", ex, tempFile);
            }
        }
    }
    return foundTranslogs;
}
Example 7
Source File: FSDirectory.java From lucene-solr with Apache License 2.0 | 4 votes |
/**
 * Flushes the named file in this directory to stable storage.
 *
 * @param name file name relative to this directory
 * @throws IOException if the fsync fails
 */
protected void fsync(String name) throws IOException {
    final Path fileToSync = directory.resolve(name);
    IOUtils.fsync(fileToSync, false); // false: syncing a regular file, not a directory
}