org.apache.lucene.store.NoLockFactory Java Examples
The following examples show how to use org.apache.lucene.store.NoLockFactory.
They are drawn from several open-source projects; the originating project, source file, and license are noted above each example.
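NoLockFactory is the LockFactory whose locks do nothing: obtainLock always succeeds and never blocks, so it is only safe when something else guarantees that a single IndexWriter touches the index at a time (read-only copies, backups, in-memory scratch indexes, or tests, as in the examples below). As a quick orientation before the project-specific examples, here is a minimal sketch, not taken from any project on this page, of the common pattern of passing NoLockFactory.INSTANCE when opening a Directory; the index path and field are illustrative.

import java.io.IOException;
import java.nio.file.Path;
import java.nio.file.Paths;

import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StringField;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.store.NoLockFactory;

public class NoLockFactoryDemo {
  public static void main(String[] args) throws IOException {
    // Illustrative path; the caller must guarantee no other IndexWriter uses it,
    // because NoLockFactory disables Lucene's write-lock protection.
    Path indexPath = Paths.get("/tmp/no-lock-index");
    try (Directory dir = FSDirectory.open(indexPath, NoLockFactory.INSTANCE);
         IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {
      Document doc = new Document();
      doc.add(new StringField("id", "1", Field.Store.YES));
      writer.addDocument(doc);
      writer.commit();
    }
  }
}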
Example #1
Source File: TestUtil.java From lucene-solr with Apache License 2.0 | 6 votes |
/** If failFast is true, then throw the first exception when index corruption is hit, instead of moving on to other fields/segments to
 *  look for any other corruption. */
public static CheckIndex.Status checkIndex(Directory dir, boolean doSlowChecks, boolean failFast, ByteArrayOutputStream output) throws IOException {
  if (output == null) {
    output = new ByteArrayOutputStream(1024);
  }
  // TODO: actually use the dir's locking, unless test uses a special method?
  // some tests e.g. exception tests become much more complicated if they have to close the writer
  try (CheckIndex checker = new CheckIndex(dir, NoLockFactory.INSTANCE.obtainLock(dir, "bogus"))) {
    checker.setDoSlowChecks(doSlowChecks);
    checker.setFailFast(failFast);
    checker.setInfoStream(new PrintStream(output, false, IOUtils.UTF_8), false);
    CheckIndex.Status indexStatus = checker.checkIndex(null);
    if (indexStatus == null || indexStatus.clean == false) {
      System.out.println("CheckIndex failed");
      System.out.println(output.toString(IOUtils.UTF_8));
      throw new RuntimeException("CheckIndex failed");
    } else {
      if (LuceneTestCase.INFOSTREAM) {
        System.out.println(output.toString(IOUtils.UTF_8));
      }
      return indexStatus;
    }
  }
}
Example #2
Source File: MixedDirectory.java From RDFS with Apache License 2.0 | 6 votes |
public MixedDirectory(FileSystem readFs, Path readPath, FileSystem writeFs,
    Path writePath, Configuration conf) throws IOException {
  try {
    readDir = new FileSystemDirectory(readFs, readPath, false, conf);
    // check writeFS is a local FS?
    writeDir = FSDirectory.getDirectory(writePath.toString());
  } catch (IOException e) {
    try {
      close();
    } catch (IOException e1) {
      // ignore this one, throw the original one
    }
    throw e;
  }

  lockFactory = new NoLockFactory();
}
Example #3
Source File: StandardDirectoryFactory.java From lucene-solr with Apache License 2.0 | 6 votes |
@Override
protected LockFactory createLockFactory(String rawLockType) throws IOException {
  if (null == rawLockType) {
    rawLockType = DirectoryFactory.LOCK_TYPE_NATIVE;
    log.warn("No lockType configured, assuming '{}'.", rawLockType);
  }
  final String lockType = rawLockType.toLowerCase(Locale.ROOT).trim();
  switch (lockType) {
    case DirectoryFactory.LOCK_TYPE_SIMPLE:
      return SimpleFSLockFactory.INSTANCE;
    case DirectoryFactory.LOCK_TYPE_NATIVE:
      return NativeFSLockFactory.INSTANCE;
    case DirectoryFactory.LOCK_TYPE_SINGLE:
      return new SingleInstanceLockFactory();
    case DirectoryFactory.LOCK_TYPE_NONE:
      return NoLockFactory.INSTANCE;
    default:
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
          "Unrecognized lockType: " + rawLockType);
  }
}
Example #4
Source File: HdfsDirectoryFactory.java From lucene-solr with Apache License 2.0 | 6 votes |
@Override
protected LockFactory createLockFactory(String rawLockType) throws IOException {
  if (null == rawLockType) {
    rawLockType = DirectoryFactory.LOCK_TYPE_HDFS;
    log.warn("No lockType configured, assuming '{}'.", rawLockType);
  }
  final String lockType = rawLockType.toLowerCase(Locale.ROOT).trim();
  switch (lockType) {
    case DirectoryFactory.LOCK_TYPE_HDFS:
      return HdfsLockFactory.INSTANCE;
    case DirectoryFactory.LOCK_TYPE_SINGLE:
      return new SingleInstanceLockFactory();
    case DirectoryFactory.LOCK_TYPE_NONE:
      return NoLockFactory.INSTANCE;
    default:
      throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
          "Unrecognized lockType: " + rawLockType);
  }
}
Example #5
Source File: MixedDirectory.java From hadoop-gpu with Apache License 2.0 | 6 votes |
public MixedDirectory(FileSystem readFs, Path readPath, FileSystem writeFs,
    Path writePath, Configuration conf) throws IOException {
  try {
    readDir = new FileSystemDirectory(readFs, readPath, false, conf);
    // check writeFS is a local FS?
    writeDir = FSDirectory.getDirectory(writePath.toString());
  } catch (IOException e) {
    try {
      close();
    } catch (IOException e1) {
      // ignore this one, throw the original one
    }
    throw e;
  }

  lockFactory = new NoLockFactory();
}
Example #6
Source File: TestCrash.java From lucene-solr with Apache License 2.0 | 6 votes |
public void testCrashAfterCloseNoWait() throws IOException {
  Random random = random();
  MockDirectoryWrapper dir = newMockDirectory(random, NoLockFactory.INSTANCE);
  IndexWriter writer = initIndex(random, dir, false, false);

  try {
    writer.commit();
  } finally {
    writer.close();
  }

  dir.crash();

  /*
  String[] l = dir.list();
  Arrays.sort(l);
  for(int i=0;i<l.length;i++)
    System.out.println("file " + i + " = " + l[i] + " " + dir.fileLength(l[i]) + " bytes");
  */
  IndexReader reader = DirectoryReader.open(dir);
  assertEquals(157, reader.numDocs());
  reader.close();
  dir.close();
}
Example #7
Source File: TestIndexWriter.java From lucene-solr with Apache License 2.0 | 5 votes |
public void testEmptyFSDirWithNoLock() throws Exception {
  // Tests that if FSDir is opened w/ a NoLockFactory (or SingleInstanceLF),
  // then IndexWriter ctor succeeds. Previously (LUCENE-2386) it failed
  // when listAll() was called in IndexFileDeleter.
  Directory dir = newFSDirectory(createTempDir("emptyFSDirNoLock"), NoLockFactory.INSTANCE);
  new IndexWriter(dir, newIndexWriterConfig(new MockAnalyzer(random()))).close();
  dir.close();
}
Example #8
Source File: ProductIndex.java From arcusplatform with Apache License 2.0 | 5 votes |
public ProductIndex(ProductCatalog prodcat) throws IOException {
  this.prodcat = prodcat;
  dir = new RAMDirectory(NoLockFactory.INSTANCE);
  Analyzer analyzer = new SimpleAnalyzer();
  IndexWriterConfig iwc = new IndexWriterConfig(analyzer);
  iwc.setOpenMode(OpenMode.CREATE);
  IndexWriter iw = new IndexWriter(dir, iwc);
  indexProducts(iw, prodcat);
  iw.close();
}
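Note that RAMDirectory, used in Example #8, was deprecated in later Lucene releases and removed in 9.0. The following sketch, which is not part of the original arcusplatform project, shows a roughly equivalent in-memory setup on newer Lucene versions using ByteBuffersDirectory, which likewise accepts a LockFactory so locking can be skipped for this single-writer case.

import java.io.IOException;

import org.apache.lucene.analysis.core.SimpleAnalyzer;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.NoLockFactory;

// Hypothetical in-memory index mirroring Example #8 on Lucene 8.x+.
public final class InMemoryProductIndexSketch {
  public static Directory buildEmptyIndex() throws IOException {
    Directory dir = new ByteBuffersDirectory(NoLockFactory.INSTANCE);
    IndexWriterConfig iwc = new IndexWriterConfig(new SimpleAnalyzer());
    iwc.setOpenMode(OpenMode.CREATE);
    try (IndexWriter iw = new IndexWriter(dir, iwc)) {
      // index documents here, as indexProducts(iw, prodcat) does in the original
    }
    return dir;
  }
}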
Example #9
Source File: HdfsBackupRepository.java From lucene-solr with Apache License 2.0 | 5 votes |
@Override
public void copyFileFrom(Directory sourceDir, String fileName, URI dest) throws IOException {
  try (HdfsDirectory dir = new HdfsDirectory(new Path(dest), NoLockFactory.INSTANCE,
      hdfsConfig, copyBufferSize)) {
    dir.copyFrom(sourceDir, fileName, fileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
  }
}
Example #10
Source File: HdfsBackupRepository.java From lucene-solr with Apache License 2.0 | 5 votes |
@Override
public void copyFileTo(URI sourceRepo, String fileName, Directory dest) throws IOException {
  try (HdfsDirectory dir = new HdfsDirectory(new Path(sourceRepo), NoLockFactory.INSTANCE,
      hdfsConfig, copyBufferSize)) {
    dest.copyFrom(dir, fileName, fileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
  }
}
Example #11
Source File: GenericBlurRecordWriter.java From incubator-retired-blur with Apache License 2.0 | 5 votes |
private void copyAndOptimizeInFlightDir() throws IOException {
  CopyRateDirectory copyRateDirectory = new CopyRateDirectory(_finalDir, _copyRateCounter);
  copyRateDirectory.setLockFactory(NoLockFactory.getNoLockFactory());
  DirectoryReader reader = DirectoryReader.open(_localDir);
  IndexWriter writer = new IndexWriter(copyRateDirectory, _conf.clone());
  writer.addIndexes(reader);
  writer.setCommitData(getInternalMarker());
  writer.close();
  rm(_localPath);
}
Example #12
Source File: HdfsDirectory.java From incubator-retired-blur with Apache License 2.0 | 5 votes |
public HdfsDirectory(Configuration configuration, Path path, SequentialReadControl sequentialReadControl,
    boolean resourceTracking) throws IOException {
  _resourceTracking = resourceTracking;
  if (sequentialReadControl == null) {
    _sequentialReadControl = new SequentialReadControl(new BlurConfiguration());
  } else {
    _sequentialReadControl = sequentialReadControl;
  }
  _fileSystem = path.getFileSystem(configuration);
  _path = _fileSystem.makeQualified(path);
  if (_path.toUri().getScheme().equals(HDFS_SCHEMA)) {
    _asyncClosing = true;
  } else {
    _asyncClosing = false;
  }
  _fileSystem.mkdirs(path);
  setLockFactory(NoLockFactory.getNoLockFactory());
  synchronized (_metricsGroupMap) {
    URI uri = _fileSystem.getUri();
    MetricsGroup metricsGroup = _metricsGroupMap.get(uri);
    if (metricsGroup == null) {
      String scope = uri.toString();
      metricsGroup = createNewMetricsGroup(scope);
      _metricsGroupMap.put(uri, metricsGroup);
    }
    _metricsGroup = metricsGroup;
  }
  if (_useCache) {
    _fileStatusCache = new FStatusCache(_fileSystem, _path);
    if (!_fileStatusCache.loadCacheFromManifest()) {
      FileStatus[] listStatus = _fileSystem.listStatus(_path);
      addToCache(listStatus);
    }
  } else {
    _fileStatusCache = null;
  }
}
Example #13
Source File: FastHdfsKeyValueDirectory.java From incubator-retired-blur with Apache License 2.0 | 5 votes |
public FastHdfsKeyValueDirectory(boolean readOnly, Timer hdfsKeyValueTimer, Configuration configuration,
    Path path, long maxAmountAllowedPerFile, long maxOpenForWriting) throws IOException {
  _path = path;
  _readOnly = readOnly;
  _store = new HdfsKeyValueStore(readOnly, hdfsKeyValueTimer, configuration, path, maxAmountAllowedPerFile,
      maxOpenForWriting);
  MemoryLeakDetector.record(_store, "HdfsKeyValueStore", path.toString());
  BytesRef value = new BytesRef();
  if (_store.get(FILES, value)) {
    String filesString = value.utf8ToString();
    // System.out.println("Open Files String [" + filesString + "]");
    if (!filesString.isEmpty()) {
      String[] files = filesString.split("\\" + SEP);
      for (String file : files) {
        if (file.isEmpty()) {
          throw new IOException("Empty file names should not occur [" + filesString + "]");
        }
        BytesRef key = new BytesRef(file + LENGTH);
        if (_store.get(key, value)) {
          _files.put(file, Long.parseLong(value.utf8ToString()));
        } else {
          LOG.warn(MISSING_METADATA_MESSAGE, file);
        }
      }
    }
  }
  setLockFactory(NoLockFactory.getNoLockFactory());
  if (!_readOnly) {
    writeFileNamesAndSync();
    gc();
  }
}
Example #14
Source File: MixedDirectory.java From hadoop-gpu with Apache License 2.0 | 4 votes |
MixedDirectory(Directory readDir, Directory writeDir) throws IOException {
  this.readDir = readDir;
  this.writeDir = writeDir;

  lockFactory = new NoLockFactory();
}
Example #15
Source File: MixedDirectory.java From RDFS with Apache License 2.0 | 4 votes |
MixedDirectory(Directory readDir, Directory writeDir) throws IOException {
  this.readDir = readDir;
  this.writeDir = writeDir;

  lockFactory = new NoLockFactory();
}
Example #16
Source File: IndexStoreTests.java From crate with Apache License 2.0 | 4 votes |
private void doTestStoreDirectory(Index index, Path tempDir, String typeSettingValue,
                                  IndexModule.Type type) throws IOException {
  Settings.Builder settingsBuilder = Settings.builder()
      .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT);
  if (typeSettingValue != null) {
    settingsBuilder.put(IndexModule.INDEX_STORE_TYPE_SETTING.getKey(), typeSettingValue);
  }
  Settings settings = settingsBuilder.build();
  IndexSettings indexSettings = IndexSettingsModule.newIndexSettings("foo", settings);
  FsDirectoryService service = new FsDirectoryService(
      indexSettings, new ShardPath(false, tempDir, tempDir, new ShardId(index, 0)));
  try (Directory directory = service.newFSDirectory(tempDir, NoLockFactory.INSTANCE)) {
    switch (type) {
      case HYBRIDFS:
        assertHybridDirectory(directory);
        break;
      case NIOFS:
        assertTrue(type + " " + directory.toString(), directory instanceof NIOFSDirectory);
        break;
      case MMAPFS:
        assertTrue(type + " " + directory.toString(), directory instanceof MMapDirectory);
        break;
      case SIMPLEFS:
        assertTrue(type + " " + directory.toString(), directory instanceof SimpleFSDirectory);
        break;
      case FS:
        if (Constants.JRE_IS_64BIT && MMapDirectory.UNMAP_SUPPORTED) {
          assertHybridDirectory(directory);
        } else if (Constants.WINDOWS) {
          assertTrue(directory.toString(), directory instanceof SimpleFSDirectory);
        } else {
          assertTrue(directory.toString(), directory instanceof NIOFSDirectory);
        }
        break;
      default:
        fail();
    }
  }
}
Example #17
Source File: HdfsDirectoryFactoryTest.java From lucene-solr with Apache License 2.0 | 4 votes |
@Test
public void testLocalityReporter() throws Exception {
  Random r = random();
  try (HdfsDirectoryFactory factory = new HdfsDirectoryFactory()) {
    SolrMetricManager metricManager = new SolrMetricManager();
    String registry = TestUtil.randomSimpleString(r, 2, 10);
    String scope = TestUtil.randomSimpleString(r, 2, 10);
    Map<String, String> props = new HashMap<>();
    props.put(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr");
    props.put(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "false");
    props.put(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_ENABLE, "false");
    props.put(HdfsDirectoryFactory.LOCALITYMETRICS_ENABLED, "true");
    factory.init(new NamedList<>(props));
    factory.initializeMetrics(new SolrMetricsContext(metricManager, registry, "foo"), scope);

    // get the metrics map for the locality bean
    MetricsMap metrics = (MetricsMap) ((SolrMetricManager.GaugeWrapper) metricManager.registry(registry)
        .getMetrics().get("OTHER." + scope + ".hdfsLocality")).getGauge();
    // We haven't done anything, so there should be no data
    Map<String, Object> statistics = metrics.getValue();
    assertEquals("Saw bytes that were not written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL),
        0L, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
    assertEquals(
        "Counted bytes as local when none written: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO),
        0, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_RATIO));

    // create a directory and a file
    String path = HdfsTestUtil.getURI(dfsCluster) + "/solr3/";
    try (Directory dir = factory.create(path, NoLockFactory.INSTANCE, DirContext.DEFAULT)) {
      try (IndexOutput writer = dir.createOutput("output", null)) {
        writer.writeLong(42L);
      }

      final long long_bytes = Long.SIZE / Byte.SIZE;

      // no locality because hostname not set
      factory.setHost("bogus");
      statistics = metrics.getValue();
      assertEquals("Wrong number of total bytes counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL),
          long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_TOTAL));
      assertEquals("Wrong number of total blocks counted: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL),
          1, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_TOTAL));
      assertEquals(
          "Counted block as local when bad hostname set: " + statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL),
          0, statistics.get(HdfsLocalityReporter.LOCALITY_BLOCKS_LOCAL));

      // set hostname and check again
      factory.setHost("127.0.0.1");
      statistics = metrics.getValue();
      assertEquals(
          "Did not count block as local after setting hostname: " + statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL),
          long_bytes, statistics.get(HdfsLocalityReporter.LOCALITY_BYTES_LOCAL));
    }
  }
}
Example #18
Source File: HdfsDirectoryFactoryTest.java From lucene-solr with Apache License 2.0 | 4 votes |
@Test
@SuppressWarnings({"try"})
public void testInitArgsOrSysPropConfig() throws Exception {
  try (HdfsDirectoryFactory hdfsFactory = new HdfsDirectoryFactory()) {
    // test sys prop config
    System.setProperty(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr1");
    hdfsFactory.init(new NamedList<>());
    String dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
    assertTrue(dataHome.endsWith("/solr1/mock/data"));
    System.clearProperty(HdfsDirectoryFactory.HDFS_HOME);

    // test init args config
    NamedList<Object> nl = new NamedList<>();
    nl.add(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr2");
    hdfsFactory.init(nl);
    dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
    assertTrue(dataHome.endsWith("/solr2/mock/data"));

    // test sys prop and init args config - init args wins
    System.setProperty(HdfsDirectoryFactory.HDFS_HOME, HdfsTestUtil.getURI(dfsCluster) + "/solr1");
    hdfsFactory.init(nl);
    dataHome = hdfsFactory.getDataHome(new MockCoreDescriptor());
    assertTrue(dataHome.endsWith("/solr2/mock/data"));
    System.clearProperty(HdfsDirectoryFactory.HDFS_HOME);

    // set conf dir by sys prop
    Path confDir = createTempDir();
    System.setProperty(HdfsDirectoryFactory.CONFIG_DIRECTORY, confDir.toString());
    try (Directory dir = hdfsFactory
        .create(HdfsTestUtil.getURI(dfsCluster) + "/solr", NoLockFactory.INSTANCE, DirContext.DEFAULT)) {
      assertEquals(confDir.toString(), hdfsFactory.getConfDir());
    }

    // check bool and int getConf impls
    nl = new NamedList<>();
    nl.add(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 4);
    System.setProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, "3");
    nl.add(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, true);
    System.setProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "false");
    hdfsFactory.init(nl);

    assertEquals(4, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
    assertTrue(hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));

    nl = new NamedList<>();
    hdfsFactory.init(nl);
    System.setProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, "true");

    assertEquals(3, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
    assertTrue(hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));

    System.clearProperty(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB);
    System.clearProperty(HdfsDirectoryFactory.BLOCKCACHE_ENABLED);

    assertEquals(0, hdfsFactory.getConfig(HdfsDirectoryFactory.NRTCACHINGDIRECTORY_MAXMERGESIZEMB, 0));
    assertFalse(hdfsFactory.getConfig(HdfsDirectoryFactory.BLOCKCACHE_ENABLED, false));
  }
}
Example #19
Source File: LocalFileSystemRepository.java From lucene-solr with Apache License 2.0 | 4 votes |
@Override
public void copyFileTo(URI sourceDir, String fileName, Directory dest) throws IOException {
  try (FSDirectory dir = new NIOFSDirectory(Paths.get(sourceDir), NoLockFactory.INSTANCE)) {
    dest.copyFrom(dir, fileName, fileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
  }
}
Example #20
Source File: LocalFileSystemRepository.java From lucene-solr with Apache License 2.0 | 4 votes |
@Override
public void copyFileFrom(Directory sourceDir, String fileName, URI dest) throws IOException {
  try (FSDirectory dir = new NIOFSDirectory(Paths.get(dest), NoLockFactory.INSTANCE)) {
    dir.copyFrom(sourceDir, fileName, fileName, DirectoryFactory.IOCONTEXT_NO_CACHE);
  }
}
Example #21
Source File: LocalFileSystemRepository.java From lucene-solr with Apache License 2.0 | 4 votes |
@Override
public String[] listAll(URI dirPath) throws IOException {
  try (FSDirectory dir = new NIOFSDirectory(Paths.get(dirPath), NoLockFactory.INSTANCE)) {
    return dir.listAll();
  }
}
Example #22
Source File: LocalFileSystemRepository.java From lucene-solr with Apache License 2.0 | 4 votes |
@Override
public IndexInput openInput(URI dirPath, String fileName, IOContext ctx) throws IOException {
  try (FSDirectory dir = new NIOFSDirectory(Paths.get(dirPath), NoLockFactory.INSTANCE)) {
    return dir.openInput(fileName, ctx);
  }
}
Example #23
Source File: MockDirectoryFactory.java From lucene-solr with Apache License 2.0 | 4 votes |
@Override
protected LockFactory createLockFactory(String rawLockType) throws IOException {
  return NoLockFactory.INSTANCE; // dummy, actually unused
}
Example #24
Source File: TestIndexWriterMaxDocs.java From lucene-solr with Apache License 2.0 | 4 votes |
/**
 * LUCENE-6299: Test if addindexes(CodecReader[]) prevents exceeding max docs.
 */
public void testAddTooManyIndexesCodecReader() throws Exception {
  // we cheat and add the same one over again... IW wants a write lock on each
  Directory dir = newDirectory(random(), NoLockFactory.INSTANCE);
  Document doc = new Document();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
  for (int i = 0; i < 100000; i++) {
    w.addDocument(doc);
  }
  w.forceMerge(1);
  w.commit();
  w.close();

  // wrap this with disk full, so test fails faster and doesn't fill up real disks.
  MockDirectoryWrapper dir2 = newMockDirectory();
  w = new IndexWriter(dir2, new IndexWriterConfig(null));
  w.commit(); // don't confuse checkindex
  dir2.setMaxSizeInBytes(dir2.sizeInBytes() + 65536); // 64KB
  IndexReader r = DirectoryReader.open(dir);
  CodecReader segReader = (CodecReader) r.leaves().get(0).reader();

  CodecReader readers[] = new CodecReader[1 + (IndexWriter.MAX_DOCS / 100000)];
  for (int i = 0; i < readers.length; i++) {
    readers[i] = segReader;
  }

  try {
    w.addIndexes(readers);
    fail("didn't get expected exception");
  } catch (IllegalArgumentException expected) {
    // pass
  } catch (IOException fakeDiskFull) {
    final Exception e;
    if (fakeDiskFull.getMessage() != null && fakeDiskFull.getMessage().startsWith("fake disk full")) {
      e = new RuntimeException("test failed: IW checks aren't working and we are executing addIndexes");
      e.addSuppressed(fakeDiskFull);
    } else {
      e = fakeDiskFull;
    }
    throw e;
  }

  r.close();
  w.close();
  dir.close();
  dir2.close();
}
Example #25
Source File: TestIndexWriterMaxDocs.java From lucene-solr with Apache License 2.0 | 4 votes |
/**
 * LUCENE-6299: Test if addindexes(Dir[]) prevents exceeding max docs.
 */
// TODO: can we use the setter to lower the amount of docs to be written here?
@Nightly
public void testAddTooManyIndexesDir() throws Exception {
  // we cheat and add the same one over again... IW wants a write lock on each
  Directory dir = newDirectory(random(), NoLockFactory.INSTANCE);
  Document doc = new Document();
  IndexWriter w = new IndexWriter(dir, new IndexWriterConfig(null));
  for (int i = 0; i < 100000; i++) {
    w.addDocument(doc);
  }
  w.forceMerge(1);
  w.commit();
  w.close();

  // wrap this with disk full, so test fails faster and doesn't fill up real disks.
  MockDirectoryWrapper dir2 = newMockDirectory();
  w = new IndexWriter(dir2, new IndexWriterConfig(null));
  w.commit(); // don't confuse checkindex
  dir2.setMaxSizeInBytes(dir2.sizeInBytes() + 65536); // 64KB

  Directory dirs[] = new Directory[1 + (IndexWriter.MAX_DOCS / 100000)];
  for (int i = 0; i < dirs.length; i++) {
    // bypass iw check for duplicate dirs
    dirs[i] = new FilterDirectory(dir) {};
  }

  try {
    w.addIndexes(dirs);
    fail("didn't get expected exception");
  } catch (IllegalArgumentException expected) {
    // pass
  } catch (IOException fakeDiskFull) {
    final Exception e;
    if (fakeDiskFull.getMessage() != null && fakeDiskFull.getMessage().startsWith("fake disk full")) {
      e = new RuntimeException("test failed: IW checks aren't working and we are executing addIndexes");
      e.addSuppressed(fakeDiskFull);
    } else {
      e = fakeDiskFull;
    }
    throw e;
  }

  w.close();
  dir.close();
  dir2.close();
}
Example #26
Source File: TestCrash.java From lucene-solr with Apache License 2.0 | 4 votes |
private IndexWriter initIndex(Random random, boolean initialCommit) throws IOException {
  return initIndex(random, newMockDirectory(random, NoLockFactory.INSTANCE), initialCommit, true);
}