Java Code Examples for org.apache.commons.lang.mutable.MutableBoolean#setValue()
The following examples show how to use org.apache.commons.lang.mutable.MutableBoolean#setValue(). They are taken from open source projects; the originating project, source file, and license are noted above each example.
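Before the project examples, here is a minimal, self-contained sketch of the pattern most of these snippets share: a MutableBoolean passed into a method as an out-parameter, which the callee resets and flips with setValue() so the caller can inspect the flag afterwards. The helper and variable names (sumNonNegative, sawNegative) are hypothetical and only for illustration.

import org.apache.commons.lang.mutable.MutableBoolean;

public class MutableBooleanSketch {

    // Hypothetical helper: sums the non-negative values and reports through
    // the out-parameter whether any negative value was encountered.
    static int sumNonNegative(int[] values, MutableBoolean sawNegative) {
        sawNegative.setValue(false); // reset the flag before use, as the readBlobFromRDD examples below do
        int sum = 0;
        for (int v : values) {
            if (v < 0) {
                sawNegative.setValue(true); // flip the flag for the caller to inspect
            } else {
                sum += v;
            }
        }
        return sum;
    }

    public static void main(String[] args) {
        MutableBoolean sawNegative = new MutableBoolean(); // defaults to false
        int sum = sumNonNegative(new int[] { 3, -1, 4 }, sawNegative);
        System.out.println(sum + " " + sawNegative.booleanValue()); // prints: 7 true
    }
}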
Example 1
Source File: CronServiceTest.java From aion-germany with GNU General Public License v3.0
@Test
public void testCancelledRunableGC() throws Exception {
    final MutableBoolean collected = new MutableBoolean();
    Runnable r = new Runnable() {

        @Override
        public void run() {
            cronService.cancel(this);
        }

        @Override
        public void finalize() throws Throwable {
            collected.setValue(true);
            super.finalize();
        }
    };

    cronService.schedule(r, "0/2 * * * * ?");
    r = null;
    sleep(5);
    for (int i = 0; i < 100; i++) {
        System.gc();
    }
    assertEquals(collected.booleanValue(), true);
}
Example 2
Source File: FrameObject.java From systemds with Apache License 2.0
@Override
protected FrameBlock readBlobFromRDD(RDDObject rdd, MutableBoolean status)
    throws IOException
{
    //note: the read of a frame block from an RDD might trigger
    //lazy evaluation of pending transformations.
    RDDObject lrdd = rdd;

    //prepare return status (by default only collect)
    status.setValue(false);

    MetaDataFormat iimd = (MetaDataFormat) _metaData;
    DataCharacteristics dc = iimd.getDataCharacteristics();
    int rlen = (int)dc.getRows();
    int clen = (int)dc.getCols();

    //handle missing schema if necessary
    ValueType[] lschema = (_schema!=null) ? _schema :
        UtilFunctions.nCopies(clen>=1 ? (int)clen : 1, ValueType.STRING);

    FrameBlock fb = null;
    try {
        //prevent unnecessary collect through rdd checkpoint
        if( rdd.allowsShortCircuitCollect() ) {
            lrdd = (RDDObject)rdd.getLineageChilds().get(0);
        }

        //collect frame block from binary block RDD
        fb = SparkExecutionContext.toFrameBlock(lrdd, lschema, rlen, clen);
    }
    catch(DMLRuntimeException ex) {
        throw new IOException(ex);
    }

    //sanity check correct output
    if( fb == null )
        throw new IOException("Unable to load frame from rdd.");

    return fb;
}
Example 3
Source File: TensorObject.java From systemds with Apache License 2.0
@Override
@SuppressWarnings("unchecked")
protected TensorBlock readBlobFromRDD(RDDObject rdd, MutableBoolean status) {
    status.setValue(false);
    TensorCharacteristics tc = (TensorCharacteristics) _metaData.getDataCharacteristics();
    // TODO correct blocksize;
    // TODO read from RDD
    return SparkExecutionContext.toTensorBlock(
        (JavaPairRDD<TensorIndexes, TensorBlock>) rdd.getRDD(), tc);
}
Example 4
Source File: ItalianTokenizer.java From tint with GNU General Public License v3.0
private void addToken(TokenGroup group, int start, int end, String charString,
        MutableBoolean isNewLine, Token lastToken) {
    Token token = new Token(start, end, charString);
    if (isNewLine.booleanValue()) {
        group.addNewLine(start);
        isNewLine.setValue(false);
    }
    token.setPreceedBySpace(start - lastToken.getEnd() > 0);

    int spaces = 0;
    if (lastToken != null && lastToken.getEnd() != 0) {
        int endLast = lastToken.getEnd();
        spaces = lastToken.getSpaceOffset();
        if (start == endLast) {
            spaces++;
        } else {
            spaces -= Math.max(0, start - endLast - 1);
        }
    }
    token.setSpaceOffset(spaces);

    // Normalization
    String n;
    if (charString.length() == 1) {
        int c = charString.charAt(0);
        n = normalizedChars.get(c);
    } else {
        n = normalizedStrings.get(charString);
    }
    if (n != null) {
        token.setNormForm(n);
    }

    lastToken.updateByToken(token);
    group.addToken(token);
}
Example 5
Source File: HttpProtocol.java From storm-crawler with Apache License 2.0
private static final byte[] toByteArray(final HttpEntity entity, int maxContent,
        MutableBoolean trimmed) throws IOException {
    if (entity == null)
        return new byte[] {};
    final InputStream instream = entity.getContent();
    if (instream == null) {
        return null;
    }
    Args.check(entity.getContentLength() <= Integer.MAX_VALUE,
            "HTTP entity too large to be buffered in memory");
    int reportedLength = (int) entity.getContentLength();
    // set default size for buffer: 100 KB
    int bufferInitSize = 102400;
    if (reportedLength != -1) {
        bufferInitSize = reportedLength;
    }
    // avoid init of too large a buffer when we will trim anyway
    if (maxContent != -1 && bufferInitSize > maxContent) {
        bufferInitSize = maxContent;
    }
    final ByteArrayBuffer buffer = new ByteArrayBuffer(bufferInitSize);
    final byte[] tmp = new byte[4096];
    int lengthRead;
    while ((lengthRead = instream.read(tmp)) != -1) {
        // check whether we need to trim
        if (maxContent != -1 && buffer.length() + lengthRead > maxContent) {
            buffer.append(tmp, 0, maxContent - buffer.length());
            trimmed.setValue(true);
            break;
        }
        buffer.append(tmp, 0, lengthRead);
    }
    return buffer.toByteArray();
}
Example 6
Source File: MatrixObject.java From systemds with Apache License 2.0
@Override
protected MatrixBlock readBlobFromRDD(RDDObject rdd, MutableBoolean writeStatus)
    throws IOException
{
    //note: the read of a matrix block from an RDD might trigger
    //lazy evaluation of pending transformations.
    RDDObject lrdd = rdd;

    //prepare return status (by default only collect)
    writeStatus.setValue(false);

    MetaDataFormat iimd = (MetaDataFormat) _metaData;
    DataCharacteristics mc = iimd.getDataCharacteristics();
    InputInfo ii = iimd.getInputInfo();
    MatrixBlock mb = null;
    try {
        //prevent unnecessary collect through rdd checkpoint
        if( rdd.allowsShortCircuitCollect() ) {
            lrdd = (RDDObject)rdd.getLineageChilds().get(0);
        }

        //obtain matrix block from RDD
        int rlen = (int)mc.getRows();
        int clen = (int)mc.getCols();
        int blen = mc.getBlocksize();
        long nnz = mc.getNonZerosBound();

        //guarded rdd collect
        if( ii == InputInfo.BinaryBlockInputInfo && //guarded collect not for binary cell
            !OptimizerUtils.checkSparkCollectMemoryBudget(mc, getPinnedSize()+getBroadcastSize(), true) ) {
            //write RDD to hdfs and read to prevent invalid collect mem consumption
            //note: lazy, partition-at-a-time collect (toLocalIterator) was significantly slower
            if( !HDFSTool.existsFileOnHDFS(_hdfsFileName) ) { //prevent overwrite existing file
                long newnnz = SparkExecutionContext.writeRDDtoHDFS(lrdd, _hdfsFileName, iimd.getOutputInfo());
                _metaData.getDataCharacteristics().setNonZeros(newnnz);
                rdd.setPending(false); //mark rdd as non-pending (for export)
                rdd.setHDFSFile(true); //mark rdd as hdfs file (for restore)
                writeStatus.setValue(true); //mark for no cache-write on read
                //note: the flag hdfsFile is actually not entirely correct because we still hold an rdd
                //reference to the input not to an rdd of the hdfs file but the resulting behavior is correct
            }
            mb = readBlobFromHDFS(_hdfsFileName);
        }
        else if( ii == InputInfo.BinaryCellInputInfo ) {
            //collect matrix block from binary block RDD
            mb = SparkExecutionContext.toMatrixBlock(lrdd, rlen, clen, nnz);
        }
        else {
            //collect matrix block from binary cell RDD
            mb = SparkExecutionContext.toMatrixBlock(lrdd, rlen, clen, blen, nnz);
        }
    }
    catch(DMLRuntimeException ex) {
        throw new IOException(ex);
    }

    //sanity check correct output
    if( mb == null )
        throw new IOException("Unable to load matrix from rdd.");

    return mb;
}
Example 7
Source File: DfsClientShmManager.java From hadoop with Apache License 2.0
/**
 * Allocate a new shared memory slot connected to this datanode.
 *
 * Must be called with the EndpointShmManager lock held.
 *
 * @param peer          The peer to use to talk to the DataNode.
 * @param usedPeer      (out param) Will be set to true if we used the peer.
 *                      When a peer is used
 *
 * @param clientName    The client name.
 * @param blockId       The block ID to use.
 * @return              null if the DataNode does not support shared memory
 *                      segments, or experienced an error creating the
 *                      shm. The shared memory segment itself on success.
 * @throws IOException  If there was an error communicating over the socket.
 */
Slot allocSlot(DomainPeer peer, MutableBoolean usedPeer,
    String clientName, ExtendedBlockId blockId) throws IOException {
  while (true) {
    if (closed) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(this + ": the DfsClientShmManager has been closed.");
      }
      return null;
    }
    if (disabled) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(this + ": shared memory segment access is disabled.");
      }
      return null;
    }
    // Try to use an existing slot.
    Slot slot = allocSlotFromExistingShm(blockId);
    if (slot != null) {
      return slot;
    }
    // There are no free slots.  If someone is loading more slots, wait
    // for that to finish.
    if (loading) {
      if (LOG.isTraceEnabled()) {
        LOG.trace(this + ": waiting for loading to finish...");
      }
      finishedLoading.awaitUninterruptibly();
    } else {
      // Otherwise, load the slot ourselves.
      loading = true;
      lock.unlock();
      DfsClientShm shm;
      try {
        shm = requestNewShm(clientName, peer);
        if (shm == null) continue;
        // See #{DfsClientShmManager#domainSocketWatcher} for details
        // about why we do this before retaking the manager lock.
        domainSocketWatcher.add(peer.getDomainSocket(), shm);
        // The DomainPeer is now our responsibility, and should not be
        // closed by the caller.
        usedPeer.setValue(true);
      } finally {
        lock.lock();
        loading = false;
        finishedLoading.signalAll();
      }
      if (shm.isDisconnected()) {
        // If the peer closed immediately after the shared memory segment
        // was created, the DomainSocketWatcher callback might already have
        // fired and marked the shm as disconnected.  In this case, we
        // obviously don't want to add the SharedMemorySegment to our list
        // of valid not-full segments.
        if (LOG.isDebugEnabled()) {
          LOG.debug(this + ": the UNIX domain socket associated with " +
              "this short-circuit memory closed before we could make " +
              "use of the shm.");
        }
      } else {
        notFull.put(shm.getShmId(), shm);
      }
    }
  }
}
Example 8
Source File: JoinProcessor.java From sql-to-mongo-db-query-converter with Apache License 2.0
public static List<Document> toPipelineSteps(QueryConverter queryConverter, FromHolder tholder,
        List<Join> ljoins, Expression whereExpression)
        throws ParseException, net.sf.jsqlparser.parser.ParseException {
    List<Document> ldoc = new LinkedList<Document>();
    MutableBoolean haveOrExpression = new MutableBoolean();
    for(Join j : ljoins) {
        if(j.isInner() || j.isLeft()) {

            if(j.getRightItem() instanceof Table || j.getRightItem() instanceof SubSelect) {
                ExpressionHolder whereExpHolder;
                String joinTableAlias = j.getRightItem().getAlias().getName();
                String joinTableName = tholder.getSQLHolder(j.getRightItem()).getBaseTableName();

                whereExpHolder = new ExpressionHolder(null);

                if(whereExpression != null) {
                    haveOrExpression.setValue(false);
                    whereExpression.accept(new WhereVisitorMatchAndLookupPipelineMatchBuilder(joinTableAlias, whereExpHolder, haveOrExpression));
                    if(!haveOrExpression.booleanValue() && whereExpHolder.getExpression() != null) {
                        whereExpHolder.getExpression().accept(new ExpVisitorEraseAliasTableBaseBuilder(joinTableAlias));
                    }
                    else {
                        whereExpHolder.setExpression(null);
                    }
                }

                List<Document> subqueryDocs = new LinkedList<>();

                if(j.getRightItem() instanceof SubSelect) {
                    subqueryDocs = queryConverter.fromSQLCommandInfoHolderToAggregateSteps((SQLCommandInfoHolder)tholder.getSQLHolder(j.getRightItem()));
                }

                ldoc.add(generateLookupStep(tholder, joinTableName, joinTableAlias, j.getOnExpression(), whereExpHolder.getExpression(), subqueryDocs));
                ldoc.add(generateUnwindStep(tholder, joinTableAlias, j.isLeft()));
            }
            else {
                throw new ParseException("From join not supported");
            }
        }
        else {
            throw new ParseException("Only inner join and left supported");
        }
    }
    if(haveOrExpression.booleanValue()) {
        //if there is some "or" we use this step for support this logic and no other match steps
        ldoc.add(generateMatchAfterJoin(tholder, whereExpression));
    }
    return ldoc;
}
Example 9
Source File: MatrixObject.java From systemds with Apache License 2.0
@Override
protected MatrixBlock readBlobFromRDD(RDDObject rdd, MutableBoolean writeStatus)
    throws IOException
{
    //note: the read of a matrix block from an RDD might trigger
    //lazy evaluation of pending transformations.
    RDDObject lrdd = rdd;

    //prepare return status (by default only collect)
    writeStatus.setValue(false);

    MetaDataFormat iimd = (MetaDataFormat) _metaData;
    DataCharacteristics mc = iimd.getDataCharacteristics();
    FileFormat fmt = iimd.getFileFormat();
    MatrixBlock mb = null;
    try {
        //prevent unnecessary collect through rdd checkpoint
        if( rdd.allowsShortCircuitCollect() ) {
            lrdd = (RDDObject)rdd.getLineageChilds().get(0);
        }

        //obtain matrix block from RDD
        int rlen = (int)mc.getRows();
        int clen = (int)mc.getCols();
        int blen = mc.getBlocksize();
        long nnz = mc.getNonZerosBound();

        //guarded rdd collect
        if( fmt == FileFormat.BINARY && //guarded collect not for binary cell
            !OptimizerUtils.checkSparkCollectMemoryBudget(mc, getPinnedSize()+getBroadcastSize(), true) ) {
            //write RDD to hdfs and read to prevent invalid collect mem consumption
            //note: lazy, partition-at-a-time collect (toLocalIterator) was significantly slower
            if( !HDFSTool.existsFileOnHDFS(_hdfsFileName) ) { //prevent overwrite existing file
                long newnnz = SparkExecutionContext.writeMatrixRDDtoHDFS(lrdd, _hdfsFileName, iimd.getFileFormat());
                _metaData.getDataCharacteristics().setNonZeros(newnnz);
                rdd.setPending(false); //mark rdd as non-pending (for export)
                rdd.setHDFSFile(true); //mark rdd as hdfs file (for restore)
                writeStatus.setValue(true); //mark for no cache-write on read
                //note: the flag hdfsFile is actually not entirely correct because we still hold an rdd
                //reference to the input not to an rdd of the hdfs file but the resulting behavior is correct
            }
            mb = readBlobFromHDFS(_hdfsFileName);
        }
        else {
            //collect matrix block from binary cell RDD
            mb = SparkExecutionContext.toMatrixBlock(lrdd, rlen, clen, blen, nnz);
        }
    }
    catch(DMLRuntimeException ex) {
        throw new IOException(ex);
    }

    //sanity check correct output
    if( mb == null )
        throw new IOException("Unable to load matrix from rdd.");

    return mb;
}