Java Code Examples for org.apache.commons.lang.mutable.MutableBoolean#booleanValue()

The following examples show how to use org.apache.commons.lang.mutable.MutableBoolean#booleanValue(). They are drawn from open-source projects; the line above each example names its source file, originating project, and license.
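Before the project examples, here is a minimal, self-contained sketch of the API surface the snippets below rely on (the class and variable names are made up for illustration):

import org.apache.commons.lang.mutable.MutableBoolean;

public class BooleanValueDemo {
    public static void main(String[] args) {
        // The no-arg constructor initializes the wrapped value to false.
        MutableBoolean flag = new MutableBoolean();
        System.out.println(flag.booleanValue()); // false

        // setValue mutates the wrapper in place; booleanValue() reads the
        // current state back as a primitive, so no unboxing is involved.
        flag.setValue(true);
        System.out.println(flag.booleanValue()); // true
    }
}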
Example 1
Source File: ContentFunctionsDescriptor.java    From datawave with Apache License 2.0
public JexlNode getIndexQuery(Set<String> termFrequencyFields, Set<String> indexedFields, Set<String> contentFields) {
    
    LinkedList<JexlNode> nodes = Lists.newLinkedList();
    
    // get the cartesian product of all the fields and terms
    MutableBoolean oredFields = new MutableBoolean();
    Set<String>[] fieldsAndTerms = fieldsAndTerms(termFrequencyFields, indexedFields, contentFields, oredFields, true);
    if (!fieldsAndTerms[0].isEmpty()) {
        final JexlNode eq = new ASTEQNode(ParserTreeConstants.JJTEQNODE);
        
        for (String field : fieldsAndTerms[0]) {
            nodes.add(JexlNodeFactory.createNodeTreeFromFieldValues(ContainerType.AND_NODE, eq, null, field, fieldsAndTerms[1]));
        }
    }
    
    // A single field needs no wrapper node.
    if (1 == fieldsAndTerms[0].size()) {
        return nodes.iterator().next();
    } else if (oredFields.booleanValue()) {
        return JexlNodeFactory.createOrNode(nodes);
    } else {
        return JexlNodeFactory.createAndNode(nodes);
    }
}
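Because Java passes object references by value, a callee cannot assign to a caller's local boolean; wrapping the flag in a MutableBoolean lets fieldsAndTerms report back through its oredFields argument, which getIndexQuery then reads with booleanValue(). A minimal sketch of the same out-parameter idiom, with hypothetical names:

import org.apache.commons.lang.mutable.MutableBoolean;

public class OutParamSketch {
    // Hypothetical callee: returns parsed values and reports a side
    // condition to the caller through the out-parameter.
    static int[] parseAll(String[] inputs, MutableBoolean sawNegative) {
        int[] out = new int[inputs.length];
        for (int i = 0; i < inputs.length; i++) {
            out[i] = Integer.parseInt(inputs[i]);
            if (out[i] < 0) {
                sawNegative.setValue(true);
            }
        }
        return out;
    }

    public static void main(String[] args) {
        MutableBoolean sawNegative = new MutableBoolean();
        int[] values = parseAll(new String[] {"3", "-4", "5"}, sawNegative);
        // The caller branches on the flag after the call, just as
        // getIndexQuery branches on oredFields.booleanValue().
        System.out.println(values.length + " values, negative seen: " + sawNegative.booleanValue());
    }
}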
 
Example 2
Source File: BatchScheduler.java    From incubator-nemo with Apache License 2.0
@Override
public void onSpeculativeExecutionCheck() {
  MutableBoolean isNewCloneCreated = new MutableBoolean(false);

  BatchSchedulerUtils.selectEarliestSchedulableGroup(sortedScheduleGroups, planStateManager)
    .ifPresent(scheduleGroup ->
      scheduleGroup.stream().map(Stage::getId).forEach(stageId -> {
        final Stage stage = planStateManager.getPhysicalPlan().getStageDAG().getVertexById(stageId);

        // Only if the ClonedSchedulingProperty is set...
        stage.getPropertyValue(ClonedSchedulingProperty.class).ifPresent(cloneConf -> {
          if (!cloneConf.isUpFrontCloning()) { // Upfront cloning is already handled.
            isNewCloneCreated.setValue(doSpeculativeExecution(stage, cloneConf));
          }
        });
      }));

  if (isNewCloneCreated.booleanValue()) {
    doSchedule(); // Schedule the new clone.
  }
}
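Note why a plain boolean would not work here: local variables captured by a lambda must be effectively final, so the forEach body could not reassign one. The MutableBoolean reference stays final while its wrapped value changes. A minimal sketch of that capture pattern, with made-up names:

import java.util.Arrays;
import org.apache.commons.lang.mutable.MutableBoolean;

public class LambdaFlagSketch {
    public static void main(String[] args) {
        // anyOverdue is effectively final (the reference never changes),
        // so the lambda may capture it; only the wrapped value mutates.
        MutableBoolean anyOverdue = new MutableBoolean(false);

        Arrays.asList(0, 0, 12, 3).forEach(daysLate -> {
            if (daysLate > 0) {
                anyOverdue.setValue(true);
            }
        });

        if (anyOverdue.booleanValue()) {
            System.out.println("at least one item is overdue");
        }
    }
}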
 
Example 3
Source File: QueryConverter.java    From sql-to-mongo-db-query-converter with Apache License 2.0
/**
 * Erases the base table alias and extracts the where part that applies to the main table when joins are present.
 * @param exp the where expression to preprocess
 * @param tholder holder describing the tables in the from clause
 * @return the preprocessed expression, or null when it contains an OR (no match-first step is possible)
 */
private Expression preprocessWhere(Expression exp, FromHolder tholder) {
    if (sqlCommandInfoHolder.getJoins() != null && !sqlCommandInfoHolder.getJoins().isEmpty()) {
        ExpressionHolder partialWhereExpHolder = new ExpressionHolder(null);
        MutableBoolean haveOrExpression = new MutableBoolean(false);
        exp.accept(new WhereVisitorMatchAndLookupPipelineMatchBuilder(tholder.getBaseAliasTable(), partialWhereExpHolder, haveOrExpression));
        if (haveOrExpression.booleanValue()) {
            return null; // with an OR expression we cannot use a match-first step
        }
        exp = partialWhereExpHolder.getExpression();
    }
    if (exp != null) {
        exp.accept(new ExpVisitorEraseAliasTableBaseBuilder(tholder.getBaseAliasTable()));
    }
    return exp;
}
 
Example 4
Source File: ItalianTokenizer.java    From tint with GNU General Public License v3.0
private void addToken(TokenGroup group, int start, int end, String charString, MutableBoolean isNewLine,
                      Token lastToken) {
    Token token = new Token(start, end, charString);
    if (isNewLine.booleanValue()) {
        group.addNewLine(start);
        isNewLine.setValue(false);
    }
    int spaces = 0;
    if (lastToken != null) {
        // The null check must precede any dereference of lastToken.
        token.setPreceedBySpace(start - lastToken.getEnd() > 0);
        if (lastToken.getEnd() != 0) {
            int endLast = lastToken.getEnd();
            spaces = lastToken.getSpaceOffset();
            if (start == endLast) {
                spaces++;
            } else {
                spaces -= Math.max(0, start - endLast - 1);
            }
        }
    }
    token.setSpaceOffset(spaces);

    // Normalization
    String n;
    if (charString.length() == 1) {
        int c = charString.charAt(0);
        n = normalizedChars.get(c);
    } else {
        n = normalizedStrings.get(charString);
    }
    if (n != null) {
        token.setNormForm(n);
    }

    if (lastToken != null) {
        lastToken.updateByToken(token);
    }
    group.addToken(token);
}
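isNewLine above is a consume-and-reset flag: the first token after a line break records the break and immediately clears the flag, so later tokens on the same line do not re-report it. A minimal sketch of that idiom, with hypothetical names:

import org.apache.commons.lang.mutable.MutableBoolean;

public class ConsumeAndResetSketch {
    // Hypothetical sink that reports a line start at most once per line.
    static void emit(String word, int position, MutableBoolean atLineStart) {
        if (atLineStart.booleanValue()) {
            System.out.println("line starts at " + position);
            atLineStart.setValue(false); // consume the flag
        }
        System.out.println("  word: " + word);
    }

    public static void main(String[] args) {
        MutableBoolean atLineStart = new MutableBoolean(true);
        emit("hello", 0, atLineStart); // reports the line start, then clears the flag
        emit("world", 6, atLineStart); // flag already consumed, nothing extra printed
    }
}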
 
Example 5
Source File: CacheableData.java    From systemds with Apache License 2.0
private synchronized T acquireReadIntern() {
	if ( !isAvailableToRead() )
		throw new DMLRuntimeException("MatrixObject not available to read.");
	
	//get object from cache
	if( _data == null )
		getCache();
	
	//call acquireHostRead if the gpuHandle is set and allocated
	if( DMLScript.USE_ACCELERATOR && _gpuObjects != null ) {
		boolean copiedFromGPU = false;
		for (Map.Entry<GPUContext, GPUObject> kv : _gpuObjects.entrySet()) {
			GPUObject gObj = kv.getValue();
			if (gObj != null && copiedFromGPU && gObj.isDirty())
				throw new DMLRuntimeException("Internal Error : Inconsistent internal state, A copy of this CacheableData was dirty on more than 1 GPU");
			else if (gObj != null) {
				copiedFromGPU = gObj.acquireHostRead(null);
				if( _data == null )
					getCache();
			}
		}
	}
	
	//read data from HDFS/RDD if required
	//(probe data for cache_nowrite / jvm_reuse)
	if( _data==null && isEmpty(true) ) {
		try {
			if( DMLScript.STATISTICS )
				CacheStatistics.incrementHDFSHits();
			
			if( getRDDHandle()==null || getRDDHandle().allowsShortCircuitRead() ) {
				//check filename
				if( _hdfsFileName == null )
					throw new DMLRuntimeException("Cannot read matrix for empty filename.");
				
				//read cacheable data from hdfs
				_data = readBlobFromHDFS( _hdfsFileName );
				
				//mark for initial local write despite read operation
				_requiresLocalWrite = CACHING_WRITE_CACHE_ON_READ;
			}
			else {
				//read matrix from rdd (incl execute pending rdd operations)
				MutableBoolean writeStatus = new MutableBoolean();
				_data = readBlobFromRDD( getRDDHandle(), writeStatus );
				
				//mark for initial local write (prevent repeated execution of rdd operations)
				_requiresLocalWrite = writeStatus.booleanValue() ? 
					CACHING_WRITE_CACHE_ON_READ : true;
			}
			
			setDirty(false);
		}
		catch (IOException e) {
			throw new DMLRuntimeException("Reading of " + _hdfsFileName + " ("+hashCode()+") failed.", e);
		}
		_isAcquireFromEmpty = true;
	}
	else if( _data!=null && DMLScript.STATISTICS ) {
		CacheStatistics.incrementMemHits();
	}
	
	//cache status maintenance
	acquire( false, _data==null );
	return _data;
}
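writeStatus is the same out-parameter idiom as oredFields in Example 1: readBlobFromRDD reports through it whether the data was already written out while being read, and the ternary on booleanValue() then picks the write policy; if the write already happened, the configured CACHING_WRITE_CACHE_ON_READ applies, otherwise a local write is forced.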
 
Example 6
Source File: JoinProcessor.java    From sql-to-mongo-db-query-converter with Apache License 2.0
public static List<Document> toPipelineSteps(QueryConverter queryConverter, FromHolder tholder, List<Join> ljoins, Expression whereExpression) throws ParseException, net.sf.jsqlparser.parser.ParseException {
	List<Document> ldoc = new LinkedList<Document>();
	MutableBoolean haveOrExpression = new MutableBoolean();
	for(Join j : ljoins) {
		if(j.isInner() || j.isLeft()) {
			
			if(j.getRightItem() instanceof Table || j.getRightItem() instanceof SubSelect) {
				ExpressionHolder whereExpHolder;
				String joinTableAlias = j.getRightItem().getAlias().getName();
				String joinTableName = tholder.getSQLHolder(j.getRightItem()).getBaseTableName();
				
				whereExpHolder = new ExpressionHolder(null);
				
				if(whereExpression != null) {
					haveOrExpression.setValue(false);
					whereExpression.accept(new WhereVisitorMatchAndLookupPipelineMatchBuilder(joinTableAlias, whereExpHolder, haveOrExpression));
					if(!haveOrExpression.booleanValue() && whereExpHolder.getExpression() != null) {
						whereExpHolder.getExpression().accept(new ExpVisitorEraseAliasTableBaseBuilder(joinTableAlias));
					}
					else {
						whereExpHolder.setExpression(null);
					}
				}
				
				List<Document> subqueryDocs = new LinkedList<>();
				
				if(j.getRightItem() instanceof SubSelect) {
					subqueryDocs = queryConverter.fromSQLCommandInfoHolderToAggregateSteps((SQLCommandInfoHolder)tholder.getSQLHolder(j.getRightItem()));
				}
				
				ldoc.add(generateLookupStep(tholder,joinTableName,joinTableAlias,j.getOnExpression(),whereExpHolder.getExpression(),subqueryDocs));
				ldoc.add(generateUnwindStep(tholder,joinTableAlias,j.isLeft()));
			}
			else {
				throw new ParseException("From join not supported");
			}
		}
		else {
			throw new ParseException("Only inner join and left supported");
		}
		
	}
	if(haveOrExpression.booleanValue()) { //if there is an OR anywhere, this single match step after the joins supports it, and no earlier match steps are used
		ldoc.add(generateMatchAfterJoin(tholder,whereExpression));
	}
	return ldoc;
}
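One subtlety: haveOrExpression is reset with setValue(false) before each join's visit, but booleanValue() is checked only after the loop, so the post-join match step is driven by the most recently visited join's result; presumably the visitor makes this equivalent to "any join saw an OR" for the query shapes this converter supports.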
 
Example 7
Source File: HttpProtocol.java    From storm-crawler with Apache License 2.0
@Override
public ProtocolResponse handleResponse(HttpResponse response)
        throws IOException {

    StatusLine statusLine = response.getStatusLine();
    int status = statusLine.getStatusCode();

    StringBuilder verbatim = new StringBuilder();
    if (storeHTTPHeaders) {
        verbatim.append(statusLine.toString()).append("\r\n");
    }

    Metadata metadata = new Metadata();
    HeaderIterator iter = response.headerIterator();
    while (iter.hasNext()) {
        Header header = iter.nextHeader();
        if (storeHTTPHeaders) {
            verbatim.append(header.toString()).append("\r\n");
        }
        metadata.addValue(header.getName().toLowerCase(Locale.ROOT),
                header.getValue());
    }

    MutableBoolean trimmed = new MutableBoolean();

    byte[] bytes = new byte[] {};

    if (!Status.REDIRECTION.equals(Status.fromHTTPCode(status))) {
        bytes = HttpProtocol.toByteArray(response.getEntity(), maxContent,
                trimmed);
        if (trimmed.booleanValue()) {
            metadata.setValue(ProtocolResponse.TRIMMED_RESPONSE_KEY, "true");
            LOG.warn("HTTP content trimmed to {}", bytes.length);
        }
    }

    if (storeHTTPHeaders) {
        verbatim.append("\r\n");
        metadata.setValue(ProtocolResponse.RESPONSE_HEADERS_KEY,
                verbatim.toString());
    }

    return new ProtocolResponse(bytes, status, metadata);
}