Java Code Examples for org.apache.hadoop.mapreduce.Partitioner#getPartition()
The following examples show how to use org.apache.hadoop.mapreduce.Partitioner#getPartition(). Each example notes its source file, the project it comes from, and that project's license.
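As background, org.apache.hadoop.mapreduce.Partitioner declares a single abstract method, getPartition(key, value, numPartitions), which must return a partition number in the range [0, numPartitions). A minimal sketch of a custom implementation (the class and its routing rule are illustrative only, not taken from the projects below):

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Partitioner;

// Illustrative only: routes each record by the first character of its Text key.
public class FirstCharPartitioner extends Partitioner<Text, IntWritable> {
    @Override
    public int getPartition(Text key, IntWritable value, int numPartitions) {
        if (key.getLength() == 0) {
            return 0;
        }
        // Mask the sign bit so the modulo result is never negative.
        return (key.charAt(0) & Integer.MAX_VALUE) % numPartitions;
    }
}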
Example 1
Source File: DelegatingPartitioner.java, from datawave, Apache License 2.0
@Override
public int getPartition(BulkIngestKey key, Value value, int numPartitions) {
    // Delegate partitioning to the partitioner registered for this key's table.
    Text tableName = key.getTableName();
    Partitioner<BulkIngestKey,Value> partitioner = partitionerCache.getPartitioner(tableName);
    int partition = partitioner.getPartition(key, value, numPartitions);
    // Shift by the table's configured offset, if any, wrapping around the reducer space.
    Integer offset = this.tableOffsets.get(tableName);
    if (null != offset) {
        return (offset + partition) % numPartitions;
    } else {
        return partition % numPartitions;
    }
}
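The offset gives each table its own band of reducers while the delegate's result selects a slot within that band; the modulo lets a band wrap past the last reducer. A toy illustration with made-up numbers:

int numPartitions = 10;  // reducers in the job
int offset = 7;          // hypothetical offset assigned to this table
int partition = 5;       // hypothetical result from the delegate partitioner
// (7 + 5) % 10 == 2: the table's band wraps around to the low-numbered reducers.
System.out.println((offset + partition) % numPartitions);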
Example 2
Source File: BalancedShardPartitionerTest.java, from datawave, Apache License 2.0
public static void assertExpectedCollisions(Partitioner partitionerIn, int daysBack, int expectedCollisions) {
    String formattedDay = formatDay(daysBack);
    TreeSet<Integer> partitionsUsed = new TreeSet<>();
    int collisions = 0;
    for (int i = 1; i < SHARDS_PER_DAY; i++) {
        String shardId = formattedDay + ("_" + i);
        int partition = partitionerIn.getPartition(new BulkIngestKey(new Text(TableName.SHARD), new Key(shardId)),
                new Value(), NUM_REDUCE_TASKS);
        if (partitionsUsed.contains(partition)) {
            collisions++;
        }
        partitionsUsed.add(partition);
    }
    // 9 is what we get by hashing the shardId; this has more to do with the random assignment of the tablets.
    Assert.assertTrue("For " + daysBack + " days ago, we had a different number of collisions: " + collisions,
            expectedCollisions >= collisions);
}
Example 3
Source File: TableShardCountCollapserTest.java, from incubator-retired-blur, Apache License 2.0
private void assertData(int totalShardCount) throws IOException {
    Partitioner<IntWritable, IntWritable> partitioner = new HashPartitioner<IntWritable, IntWritable>();
    for (int i = 0; i < totalShardCount; i++) {
        HdfsDirectory directory = new HdfsDirectory(configuration, new Path(path, ShardUtil.getShardName(i)));
        DirectoryReader reader = DirectoryReader.open(directory);
        int numDocs = reader.numDocs();
        for (int d = 0; d < numDocs; d++) {
            Document document = reader.document(d);
            IndexableField field = document.getField("id");
            Integer id = (Integer) field.numericValue();
            int partition = partitioner.getPartition(new IntWritable(id), null, totalShardCount);
            assertEquals(i, partition);
        }
        reader.close();
    }
}
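The assertEquals holds because Hadoop's HashPartitioner computes (key.hashCode() & Integer.MAX_VALUE) % numPartitions, and IntWritable.hashCode() is the wrapped int, so id i maps straight to shard i whenever i < totalShardCount. A minimal self-contained sketch (the class name is ours, not from the project):

import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;

public class HashPartitionerDemo {
    public static void main(String[] args) {
        HashPartitioner<IntWritable, IntWritable> partitioner = new HashPartitioner<IntWritable, IntWritable>();
        int totalShardCount = 8;
        for (int i = 0; i < totalShardCount; i++) {
            // Prints i itself: (i & Integer.MAX_VALUE) % 8 == i for 0 <= i < 8.
            System.out.println(partitioner.getPartition(new IntWritable(i), null, totalShardCount));
        }
    }
}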
Example 4
Source File: TableShardCountCollapserTest.java, from incubator-retired-blur, Apache License 2.0
private static void createShard(Configuration configuration, int i, Path path, int totalShardCount) throws IOException {
    HdfsDirectory hdfsDirectory = new HdfsDirectory(configuration, path);
    IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_43, new KeywordAnalyzer());
    TieredMergePolicy mergePolicy = (TieredMergePolicy) conf.getMergePolicy();
    mergePolicy.setUseCompoundFile(false);
    IndexWriter indexWriter = new IndexWriter(hdfsDirectory, conf);
    Partitioner<IntWritable, IntWritable> partitioner = new HashPartitioner<IntWritable, IntWritable>();
    int partition = partitioner.getPartition(new IntWritable(i), null, totalShardCount);
    assertEquals(i, partition);
    Document doc = getDoc(i);
    indexWriter.addDocument(doc);
    indexWriter.close();
}
Example 5
Source File: MapFileOutputFormat.java, from hadoop, Apache License 2.0
/** Get an entry from output generated by this class. */
public static <K extends WritableComparable<?>, V extends Writable> Writable getEntry(MapFile.Reader[] readers,
        Partitioner<K, V> partitioner, K key, V value) throws IOException {
    int part = partitioner.getPartition(key, value, readers.length);
    return readers[part].get(key, value);
}
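A hedged usage sketch: MapFileOutputFormat.getReaders() opens one MapFile.Reader per part file, and getEntry() must be handed the same partitioner class the writing job used, or it will consult the wrong reader. The output path and key below are hypothetical:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.lib.output.MapFileOutputFormat;
import org.apache.hadoop.mapreduce.lib.partition.HashPartitioner;

public class MapFileLookup {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Hypothetical output directory of a job that wrote MapFiles with HashPartitioner.
        Path outputDir = new Path("/user/hadoop/job-output");
        MapFile.Reader[] readers = MapFileOutputFormat.getReaders(outputDir, conf);
        Text value = new Text();
        Writable hit = MapFileOutputFormat.getEntry(readers, new HashPartitioner<Text, Text>(),
                new Text("some-key"), value);
        System.out.println(hit == null ? "not found" : value.toString());
    }
}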
Example 6
Source File: MapFileOutputFormat.java, from big-c, Apache License 2.0
/** Get an entry from output generated by this class. */
public static <K extends WritableComparable<?>, V extends Writable> Writable getEntry(MapFile.Reader[] readers,
        Partitioner<K, V> partitioner, K key, V value) throws IOException {
    int part = partitioner.getPartition(key, value, readers.length);
    return readers[part].get(key, value);
}
Example 7
Source File: BaseUploader.java, from terrapin, Apache License 2.0
/**
 * Validates that the first non-empty partition hfile has the right partitioning function.
 * It reads several keys, then calculates the partition according to the partitioning function
 * the client offers. If the calculated partition number differs from the actual partition number,
 * an exception is thrown. If all partition hfiles are empty, an exception is thrown.
 *
 * @param parts full absolute path for all partitions
 * @param partitionerType type of partitioning function
 * @param numShards total number of partitions
 * @throws IOException if something goes wrong when reading the hfiles
 * @throws IllegalArgumentException if the partitioner type is wrong or all partitions are empty
 */
public void validate(List<Path> parts, PartitionerType partitionerType, int numShards) throws IOException {
    boolean hasNonEmptyPartition = false;
    HColumnDescriptor columnDescriptor = new HColumnDescriptor();
    // Disable block cache to ensure it reads the actual file content.
    columnDescriptor.setBlockCacheEnabled(false);
    for (int shardIndex = 0; shardIndex < parts.size(); shardIndex++) {
        Path fileToBeValidated = parts.get(shardIndex);
        HFile.Reader reader = null;
        try {
            FileSystem fs = FileSystem.newInstance(fileToBeValidated.toUri(), conf);
            CacheConfig cc = new CacheConfig(conf, columnDescriptor);
            reader = HFile.createReader(fs, fileToBeValidated, cc);
            Partitioner partitioner = PartitionerFactory.getPartitioner(partitionerType);
            byte[] rowKey = reader.getFirstRowKey();
            if (rowKey == null) {
                LOG.warn(String.format("empty partition %s", fileToBeValidated.toString()));
                reader.close();
                continue;
            }
            hasNonEmptyPartition = true;
            BytesWritable key = new BytesWritable(rowKey);
            int partition = partitioner.getPartition(key, null, numShards);
            if (partition != shardIndex) {
                throw new IllegalArgumentException(
                    String.format("wrong partition type %s for key %s in partition %d, expected %d",
                        partitionerType.toString(), new String(key.getBytes()), shardIndex, partition));
            }
        } finally {
            if (reader != null) {
                reader.close();
            }
        }
    }
    if (!hasNonEmptyPartition) {
        throw new IllegalArgumentException("all partitions are empty");
    }
}
Example 8
Source File: HFileGenerator.java, from terrapin, Apache License 2.0
/**
 * Generate hfiles for testing purposes.
 *
 * @param sourceFileSystem source file system
 * @param conf configuration for hfile
 * @param outputFolder output folder for generated hfiles
 * @param partitionerType partitioner type
 * @param numOfPartitions number of partitions
 * @param numOfKeys number of keys
 * @return list of generated hfiles
 * @throws IOException if hfile creation goes wrong
 */
public static List<Path> generateHFiles(FileSystem sourceFileSystem, Configuration conf, File outputFolder,
        PartitionerType partitionerType, int numOfPartitions, int numOfKeys) throws IOException {
    StoreFile.Writer[] writers = new StoreFile.Writer[numOfPartitions];
    for (int i = 0; i < numOfPartitions; i++) {
        writers[i] = new StoreFile.WriterBuilder(conf, new CacheConfig(conf), sourceFileSystem, 4096)
            .withFilePath(new Path(String.format("%s/%s", outputFolder.getAbsoluteFile(),
                TerrapinUtil.formatPartitionName(i))))
            .withCompression(Compression.Algorithm.NONE)
            .build();
    }
    Partitioner partitioner = PartitionerFactory.getPartitioner(partitionerType);
    for (int i = 0; i < numOfKeys; i++) {
        byte[] key = String.format("%06d", i).getBytes();
        byte[] value;
        if (i <= 1) {
            value = "".getBytes();
        } else {
            value = ("v" + (i + 1)).getBytes();
        }
        KeyValue kv = new KeyValue(key, Bytes.toBytes("cf"), Bytes.toBytes(""), value);
        int partition = partitioner.getPartition(new BytesWritable(key), new BytesWritable(value), numOfPartitions);
        writers[partition].append(kv);
    }
    for (int i = 0; i < numOfPartitions; i++) {
        writers[i].close();
    }
    return Lists.transform(Lists.newArrayList(writers), new Function<StoreFile.Writer, Path>() {
        @Override
        public Path apply(StoreFile.Writer writer) {
            return writer.getPath();
        }
    });
}