Java Code Examples for com.carrotsearch.hppc.IntHashSet#add()
The following examples show how to use
com.carrotsearch.hppc.IntHashSet#add().
You can vote up the examples you find useful or vote down those you don't,
and follow the links above each example to visit the original project or source file. You may also check out the related API usage on the sidebar.
Example 1
Source File: SortedIntDocSet.java From lucene-solr with Apache License 2.0 | 6 votes |
/**
 * Exposes this doc set as a random-access {@link Bits} view.
 *
 * <p>Copies the doc ids into an {@code IntHashSet} once, so each
 * {@code get(index)} is an O(1) membership check rather than a scan.
 *
 * @return a {@code Bits} whose {@code get} reports doc membership and whose
 *     {@code length} delegates to {@code getLength()}
 */
@Override
public Bits getBits() {
  // Pre-size to the number of docs to avoid rehashing during the fill.
  final IntHashSet docIds = new IntHashSet(docs.length);
  for (int i = 0; i < docs.length; i++) {
    docIds.add(docs[i]);
  }
  return new Bits() {
    @Override
    public boolean get(int index) {
      return docIds.contains(index);
    }

    @Override
    public int length() {
      return getLength();
    }
  };
}
Example 2
Source File: TestRangeQuery.java From lucene-solr with Apache License 2.0 | 6 votes |
/**
 * Asserts that two {@link DocList}s contain the same document ids.
 *
 * <p>Checks equal sizes, then verifies every doc id in {@code a} occurs in
 * {@code b}; combined with the size check this implies set equality
 * (assuming no duplicate ids within a list — TODO confirm DocList contract).
 *
 * @param msg assertion message prefix used on failure
 * @param a   first doc list
 * @param b   second doc list
 * @return {@code true} always (failures surface as assertion errors)
 */
static boolean sameDocs(String msg, DocList a, DocList b) {
  assertEquals(msg, a.size(), b.size());
  // Collect b's ids once for O(1) lookups.
  final IntHashSet idsInB = new IntHashSet(b.size());
  for (DocIterator it = b.iterator(); it.hasNext(); ) {
    idsInB.add(it.nextDoc());
  }
  // Every doc in a must appear in b.
  for (DocIterator it = a.iterator(); it.hasNext(); ) {
    assertTrue(msg, idsInB.contains(it.nextDoc()));
  }
  return true;
}
Example 3
Source File: BulkWriteResult.java From spliceengine with GNU Affero General Public License v3.0 | 6 votes |
/**
 * Kryo deserializer for {@code BulkWriteResult}.
 *
 * <p>Reads, in order: the global {@code WriteResult}, a size-prefixed list of
 * not-run row ids, and a size-prefixed map of failed row id to per-row
 * {@code WriteResult}. The read order presumably mirrors the corresponding
 * {@code write} — verify against the serializer's write method.
 *
 * @param kryo  the Kryo instance performing deserialization
 * @param input source of the serialized bytes
 * @param type  target class (unused beyond the serializer contract)
 * @return the reconstructed {@code BulkWriteResult}
 */
@Override
public BulkWriteResult read(Kryo kryo, Input input, Class<BulkWriteResult> type) {
  final WriteResult globalStatus = kryo.readObject(input, WriteResult.class);

  final int notRunCount = input.readInt();
  final IntHashSet notRunRows = new IntHashSet(notRunCount);
  for (int i = 0; i < notRunCount; i++) {
    notRunRows.add(input.readInt());
  }

  final int failedCount = input.readInt();
  // 0.9f load factor: trade a few extra probes for a denser table.
  final IntObjectHashMap<WriteResult> failedRows = new IntObjectHashMap<>(failedCount, 0.9f);
  for (int i = 0; i < failedCount; i++) {
    final int rowKey = input.readInt();
    failedRows.put(rowKey, kryo.readObject(input, WriteResult.class));
  }

  return new BulkWriteResult(globalStatus, notRunRows, failedRows);
}
Example 4
Source File: IndicesClusterStateService.java From Elasticsearch with Apache License 2.0 | 5 votes |
/**
 * Removes locally-held shards that the new cluster state no longer assigns
 * to this node.
 *
 * <p>For each local index service, collects the shard ids the routing table
 * still places on this node, then removes any local shard not in that set.
 * The removal reason distinguishes a closed index from a shard that is
 * simply no longer allocated here.
 *
 * @param event the cluster-state change being applied
 */
private void applyDeletedShards(final ClusterChangedEvent event) {
    // Routing entries for this node; null means nothing routed here — nothing to delete.
    RoutingNodes.RoutingNodeIterator routingNode = event.state().getRoutingNodes().routingNodeIter(event.state().nodes().localNodeId());
    if (routingNode == null) {
        return;
    }
    // Reused across indices (cleared per iteration) to avoid per-index allocation.
    IntHashSet newShardIds = new IntHashSet();
    for (IndexService indexService : indicesService) {
        String index = indexService.index().name();
        IndexMetaData indexMetaData = event.state().metaData().index(index);
        if (indexMetaData == null) {
            // Index no longer exists in the new state; presumably handled by
            // whole-index deletion elsewhere — TODO confirm.
            continue;
        }
        // now, go over and delete shards that needs to get deleted
        newShardIds.clear();
        // Shard ids of this index that the new routing still assigns to this node.
        for (ShardRouting shard : routingNode) {
            if (shard.index().equals(index)) {
                newShardIds.add(shard.id());
            }
        }
        // Any locally-present shard not in the routed set must be removed.
        for (Integer existingShardId : indexService.shardIds()) {
            if (!newShardIds.contains(existingShardId)) {
                if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("[{}][{}] removing shard (index is closed)", index, existingShardId);
                    }
                    indexService.removeShard(existingShardId, "removing shard (index is closed)");
                } else {
                    // we can just remove the shard, without cleaning it locally, since we will clean it
                    // when all shards are allocated in the IndicesStore
                    if (logger.isDebugEnabled()) {
                        logger.debug("[{}][{}] removing shard (not allocated)", index, existingShardId);
                    }
                    indexService.removeShard(existingShardId, "removing shard (not allocated)");
                }
            }
        }
    }
}
Example 5
Source File: AddAllTest.java From hashmapTest with The Unlicense | 5 votes |
/**
 * Micro-benchmark: fill an {@code IntHashSet} with 10M ints, then copy it via
 * {@code addAll} into a pre-sized set and again into a default-sized set.
 *
 * <p>Prints elapsed wall-clock seconds (including the initial fill) and the
 * final copy's size.
 */
public static void main(String[] args) {
  final long start = System.currentTimeMillis();

  // Populate the source set with ids 0 .. 9_999_999.
  final IntHashSet source = new com.carrotsearch.hppc.IntHashSet();
  for (int i = 0; i < 10000000; i++) {
    source.add(i);
  }

  // Copy once into a pre-sized set, then again into a default-sized one.
  IntHashSet copy = new com.carrotsearch.hppc.IntHashSet(source.size());
  copy.addAll(source);
  copy = new com.carrotsearch.hppc.IntHashSet();
  copy.addAll(source);

  final long time = System.currentTimeMillis() - start;
  System.out.println(time / 1000.0);
  System.out.println(copy.size());
}