org.apache.tinkerpop.gremlin.hadoop.structure.io.VertexWritable Java Examples
The following examples show how to use
org.apache.tinkerpop.gremlin.hadoop.structure.io.VertexWritable.
Each example is drawn from an open-source project; the originating source file, project, and license are noted above each snippet.
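Before the project-specific examples, a minimal sketch of the wrapper's contract may be useful. It uses only the constructor and the get()/set() accessors that the examples below exercise; the class name VertexWritableBasics is hypothetical.

// A minimal sketch of the VertexWritable wrapper contract, using only the
// constructor and accessors exercised by the examples that follow.
import org.apache.tinkerpop.gremlin.hadoop.structure.io.VertexWritable;
import org.apache.tinkerpop.gremlin.structure.T;
import org.apache.tinkerpop.gremlin.structure.Vertex;
import org.apache.tinkerpop.gremlin.structure.util.star.StarGraph;

public class VertexWritableBasics {
    public static void main(final String[] args) {
        // create a lone StarGraph vertex and wrap it for Hadoop I/O
        final Vertex vertex = StarGraph.open().addVertex(T.id, 1L, T.label, "person", "age", 29);
        final VertexWritable writable = new VertexWritable(vertex);
        // unwrap the vertex on the other side of a shuffle or record reader
        System.out.println(writable.get().id() + " " + writable.get().label());
        // replace the wrapped vertex in place, as record readers do after filtering
        writable.set(vertex);
    }
}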
Example #1
Source File: GraknSparkExecutor.java From grakn with GNU Affero General Public License v3.0
public static <M> JavaPairRDD<Object, VertexWritable> prepareFinalGraphRDD(
        final JavaPairRDD<Object, VertexWritable> graphRDD,
        final JavaPairRDD<Object, ViewIncomingPayload<M>> viewIncomingRDD,
        final Set<VertexComputeKey> vertexComputeKeys) {
    // the graphRDD and the viewRDD must have the same partitioner
    Preconditions.checkState(!graphRDD.partitioner().isPresent() ||
            graphRDD.partitioner().get().equals(viewIncomingRDD.partitioner().get()));
    final String[] vertexComputeKeysArray = VertexProgramHelper.vertexComputeKeysAsArray(vertexComputeKeys); // the compute keys as an array
    return graphRDD.leftOuterJoin(viewIncomingRDD)
            .mapValues(tuple -> {
                final StarGraph.StarVertex vertex = tuple._1().get();
                vertex.dropVertexProperties(vertexComputeKeysArray); // drop all existing compute keys
                // attach the final computed view to the cached graph
                final List<DetachedVertexProperty<Object>> view = tuple._2().isPresent()
                        ? tuple._2().get().getView()
                        : Collections.emptyList();
                for (final DetachedVertexProperty<Object> property : view) {
                    if (!VertexProgramHelper.isTransientVertexComputeKey(property.key(), vertexComputeKeys)) {
                        property.attach(Attachable.Method.create(vertex));
                    }
                }
                return tuple._1();
            });
}
Example #2
Source File: ScriptRecordWriter.java From tinkerpop with Apache License 2.0
@Override
public void write(final NullWritable key, final VertexWritable vertex) throws IOException {
    if (null != vertex) {
        try {
            final Bindings bindings = this.engine.createBindings();
            bindings.put(VERTEX, vertex.get());
            final String line = (String) engine.eval(WRITE_CALL, bindings);
            if (line != null) {
                this.out.write(line.getBytes(UTF8));
                this.out.write(NEWLINE);
            }
        } catch (final ScriptException e) {
            throw new IOException(e.getMessage(), e);
        }
    }
}
Example #3
Source File: GraphFilterRecordReader.java From tinkerpop with Apache License 2.0
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
    if (null == this.graphFilter) {
        return this.recordReader.nextKeyValue();
    } else {
        while (true) {
            if (this.recordReader.nextKeyValue()) {
                final VertexWritable vertexWritable = this.recordReader.getCurrentValue();
                final Optional<StarGraph.StarVertex> vertex = vertexWritable.get().applyGraphFilter(this.graphFilter);
                if (vertex.isPresent()) {
                    vertexWritable.set(vertex.get());
                    return true;
                }
            } else {
                return false;
            }
        }
    }
}
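The graph filter consumed here is normally supplied through the GraphComputer API rather than constructed by hand. A hedged sketch of that caller side, assuming TinkerPop 3.2+ with the standard SparkGraphComputer, PageRankVertexProgram, and anonymous traversal class __; the properties-file path is hypothetical:

// Hedged sketch: vertex/edge filters that are serialized into GREMLIN_HADOOP_GRAPH_FILTER
// and later picked up by readers such as the one above. The config path is hypothetical.
final Graph graph = GraphFactory.open("conf/hadoop-gryo.properties");
final ComputerResult result = graph.compute(SparkGraphComputer.class)
        .vertices(__.hasLabel("person"))  // keep only "person" vertices
        .edges(__.bothE("knows"))         // keep only "knows" edges on retained vertices
        .program(PageRankVertexProgram.build().create(graph))
        .submit().get();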
Example #4
Source File: PersistedOutputRDD.java From tinkerpop with Apache License 2.0
@Override
public void writeGraphRDD(final Configuration configuration, final JavaPairRDD<Object, VertexWritable> graphRDD) {
    if (!configuration.getBoolean(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, false))
        LOGGER.warn("The SparkContext should be persisted in order for the RDD to persist across jobs. To do so, set " + Constants.GREMLIN_SPARK_PERSIST_CONTEXT + " to true");
    if (!configuration.containsKey(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION))
        throw new IllegalArgumentException("There is no provided " + Constants.GREMLIN_HADOOP_OUTPUT_LOCATION + " to write the persisted RDD to");
    SparkContextStorage.open(configuration).rm(configuration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION)); // this might be bad because it unpersists the job RDD
    // determine which storage level to persist the RDD as, with MEMORY_ONLY being the default cache()
    final StorageLevel storageLevel = StorageLevel.fromString(configuration.getString(Constants.GREMLIN_SPARK_PERSIST_STORAGE_LEVEL, "MEMORY_ONLY"));
    if (!configuration.getBoolean(Constants.GREMLIN_HADOOP_GRAPH_WRITER_HAS_EDGES, true))
        graphRDD.mapValues(vertex -> {
                    vertex.get().dropEdges(Direction.BOTH);
                    return vertex;
                }).setName(Constants.getGraphLocation(configuration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION)))
                .persist(storageLevel)
                .count(); // call an action to eagerly store the RDD
    else
        graphRDD.setName(Constants.getGraphLocation(configuration.getString(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION)))
                .persist(storageLevel)
                .count(); // call an action to eagerly store the RDD
    Spark.refresh(); // necessary to do quickly so the Spark GC doesn't clear out the RDD
}
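A hedged sketch of the caller-side configuration this writer expects: the property keys are the same Constants referenced above, the storage-level string may be any valid Spark StorageLevel name, and "myPersistedGraph" is a hypothetical RDD name (BaseConfiguration is the Apache Commons Configuration implementation TinkerPop uses; the exact Commons Configuration package varies by TinkerPop version).

// Hedged sketch: configuring a job so its graph RDD persists across Spark jobs.
final org.apache.commons.configuration.Configuration conf = new BaseConfiguration();
conf.setProperty(Constants.GREMLIN_SPARK_PERSIST_CONTEXT, true);                    // keep the SparkContext alive
conf.setProperty(Constants.GREMLIN_SPARK_PERSIST_STORAGE_LEVEL, "MEMORY_AND_DISK"); // default is MEMORY_ONLY
conf.setProperty(Constants.GREMLIN_HADOOP_GRAPH_WRITER, PersistedOutputRDD.class.getCanonicalName());
conf.setProperty(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION, "myPersistedGraph");     // hypothetical RDD name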
Example #5
Source File: SparkExecutor.java From tinkerpop with Apache License 2.0
public static <M> JavaPairRDD<Object, VertexWritable> prepareFinalGraphRDD(
        final JavaPairRDD<Object, VertexWritable> graphRDD,
        final JavaPairRDD<Object, ViewIncomingPayload<M>> viewIncomingRDD,
        final Set<VertexComputeKey> vertexComputeKeys) {
    // the graphRDD and the viewRDD must have the same partitioner
    if (graphRDD.partitioner().isPresent())
        assert graphRDD.partitioner().get().equals(viewIncomingRDD.partitioner().get());
    final String[] vertexComputeKeysArray = VertexProgramHelper.vertexComputeKeysAsArray(vertexComputeKeys); // the compute keys as an array
    return graphRDD.leftOuterJoin(viewIncomingRDD)
            .mapValues(tuple -> {
                final StarGraph.StarVertex vertex = tuple._1().get();
                vertex.dropVertexProperties(vertexComputeKeysArray); // drop all existing compute keys
                // attach the final computed view to the cached graph
                final List<DetachedVertexProperty<Object>> view = tuple._2().isPresent()
                        ? tuple._2().get().getView()
                        : Collections.emptyList();
                for (final DetachedVertexProperty<Object> property : view) {
                    if (!VertexProgramHelper.isTransientVertexComputeKey(property.key(), vertexComputeKeys))
                        property.attach(Attachable.Method.create(vertex));
                }
                return tuple._1();
            });
}
Example #6
Source File: InputFormatHadoop.java From grakn with GNU Affero General Public License v3.0
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
    while (reader.nextKeyValue()) {
        // TODO janusgraph05 integration -- the duplicate() call may be unnecessary
        TinkerVertex maybeNullTinkerVertex = deserializer.readHadoopVertex(reader.getCurrentKey(), reader.getCurrentValue());
        if (null != maybeNullTinkerVertex) {
            vertex = new VertexWritable(maybeNullTinkerVertex);
            if (graphFilter == null) {
                return true;
            } else {
                final Optional<StarGraph.StarVertex> vertexWritable = vertex.get().applyGraphFilter(graphFilter);
                if (vertexWritable.isPresent()) {
                    vertex.set(vertexWritable.get());
                    return true;
                }
            }
        }
    }
    return false;
}
Example #7
Source File: SparkExecutor.java From tinkerpop with Apache License 2.0
public static <K, V> JavaPairRDD<K, V> executeMap(
        final JavaPairRDD<Object, VertexWritable> graphRDD,
        final MapReduce<K, V, ?, ?, ?> mapReduce,
        final Configuration graphComputerConfiguration) {
    JavaPairRDD<K, V> mapRDD = graphRDD.mapPartitionsToPair(partitionIterator -> {
        KryoShimServiceLoader.applyConfiguration(graphComputerConfiguration);
        return new MapIterator<>(
                MapReduce.<MapReduce<K, V, ?, ?, ?>>createMapReduce(HadoopGraph.open(graphComputerConfiguration), graphComputerConfiguration),
                partitionIterator);
    });
    if (mapReduce.getMapKeySort().isPresent())
        mapRDD = mapRDD.sortByKey(mapReduce.getMapKeySort().get(), true, 1);
    return mapRDD;
}
Example #8
Source File: PersistedInputRDD.java From tinkerpop with Apache License 2.0
@Override
public JavaPairRDD<Object, VertexWritable> readGraphRDD(final Configuration configuration, final JavaSparkContext sparkContext) {
    if (!configuration.containsKey(Constants.GREMLIN_HADOOP_INPUT_LOCATION))
        throw new IllegalArgumentException("There is no provided " + Constants.GREMLIN_HADOOP_INPUT_LOCATION + " to read the persisted RDD from");
    Spark.create(sparkContext.sc());
    final Optional<String> graphLocation = Constants.getSearchGraphLocation(configuration.getString(Constants.GREMLIN_HADOOP_INPUT_LOCATION), SparkContextStorage.open());
    return graphLocation.isPresent()
            ? JavaPairRDD.fromJavaRDD((JavaRDD) Spark.getRDD(graphLocation.get()).toJavaRDD())
            : JavaPairRDD.fromJavaRDD(sparkContext.emptyRDD());
}
Example #9
Source File: ExampleOutputRDD.java From tinkerpop with Apache License 2.0
@Override
public void writeGraphRDD(final Configuration configuration, final JavaPairRDD<Object, VertexWritable> graphRDD) {
    int totalAge = 0;
    final Iterator<VertexWritable> iterator = graphRDD.values().collect().iterator();
    while (iterator.hasNext()) {
        final Vertex vertex = iterator.next().get();
        if (vertex.label().equals("person"))
            totalAge = totalAge + vertex.<Integer>value("age");
    }
    assertEquals(123, totalAge);
}
Example #10
Source File: InputFormatRDD.java From tinkerpop with Apache License 2.0
@Override
public JavaPairRDD<Object, VertexWritable> readGraphRDD(final Configuration configuration, final JavaSparkContext sparkContext) {
    final org.apache.hadoop.conf.Configuration hadoopConfiguration = ConfUtil.makeHadoopConfiguration(configuration);
    return sparkContext.newAPIHadoopRDD(hadoopConfiguration,
            (Class<InputFormat<NullWritable, VertexWritable>>) hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, InputFormat.class),
            NullWritable.class,
            VertexWritable.class)
            .mapToPair(tuple -> new Tuple2<>(tuple._2().get().id(), new VertexWritable(tuple._2().get())));
}
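A hedged sketch of driving this reader directly: GryoInputFormat and the property keys are standard TinkerPop names, while the input path is hypothetical.

// Hedged sketch: reading a Gryo-formatted graph into a JavaPairRDD via InputFormatRDD.
final org.apache.commons.configuration.Configuration conf = new BaseConfiguration();
conf.setProperty(Constants.GREMLIN_HADOOP_GRAPH_READER, GryoInputFormat.class.getCanonicalName());
conf.setProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION, "data/tinkerpop-modern-v3d0.kryo"); // hypothetical path
final JavaPairRDD<Object, VertexWritable> graphRDD = new InputFormatRDD().readGraphRDD(conf, sparkContext);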
Example #11
Source File: OutputFormatRDD.java From tinkerpop with Apache License 2.0
@Override
public void writeGraphRDD(final Configuration configuration, final JavaPairRDD<Object, VertexWritable> graphRDD) {
    final org.apache.hadoop.conf.Configuration hadoopConfiguration = ConfUtil.makeHadoopConfiguration(configuration);
    final String outputLocation = hadoopConfiguration.get(Constants.GREMLIN_HADOOP_OUTPUT_LOCATION);
    if (null != outputLocation) {
        // map back to a <NullWritable, VertexWritable> stream for output
        graphRDD.mapToPair(tuple -> new Tuple2<>(NullWritable.get(), tuple._2()))
                .saveAsNewAPIHadoopFile(Constants.getGraphLocation(outputLocation),
                        NullWritable.class,
                        VertexWritable.class,
                        (Class<OutputFormat<NullWritable, VertexWritable>>) hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_WRITER, OutputFormat.class),
                        hadoopConfiguration);
    }
}
Example #12
Source File: GryoSerializer.java From tinkerpop with Apache License 2.0
private SparkIoRegistry() {
    try {
        super.register(GryoIo.class, Tuple2.class, new Tuple2Serializer());
        super.register(GryoIo.class, Tuple2[].class, null);
        super.register(GryoIo.class, Tuple3.class, new Tuple3Serializer());
        super.register(GryoIo.class, Tuple3[].class, null);
        super.register(GryoIo.class, CompactBuffer.class, new CompactBufferSerializer());
        super.register(GryoIo.class, CompactBuffer[].class, null);
        super.register(GryoIo.class, CompressedMapStatus.class, null);
        super.register(GryoIo.class, BlockManagerId.class, null);
        super.register(GryoIo.class, HighlyCompressedMapStatus.class, new ExternalizableSerializer()); // Externalizable is implemented, so this is okay
        super.register(GryoIo.class, TorrentBroadcast.class, null);
        super.register(GryoIo.class, PythonBroadcast.class, null);
        super.register(GryoIo.class, BoxedUnit.class, null);
        super.register(GryoIo.class, Class.forName("scala.reflect.ClassTag$$anon$1"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("scala.reflect.ManifestFactory$$anon$1"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("org.apache.spark.internal.io.FileCommitProtocol$TaskCommitMessage"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("org.apache.spark.internal.io.FileCommitProtocol$EmptyTaskCommitMessage$"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("scala.collection.immutable.Map$EmptyMap$"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("scala.collection.immutable.Map"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("scala.None$"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("scala.Some$"), new JavaSerializer());
        super.register(GryoIo.class, Class.forName("scala.Some"), new JavaSerializer());
        super.register(GryoIo.class, WrappedArray.ofRef.class, new WrappedArraySerializer());
        super.register(GryoIo.class, MessagePayload.class, null);
        super.register(GryoIo.class, ViewIncomingPayload.class, null);
        super.register(GryoIo.class, ViewOutgoingPayload.class, null);
        super.register(GryoIo.class, ViewPayload.class, null);
        super.register(GryoIo.class, SerializableConfiguration.class, new JavaSerializer());
        super.register(GryoIo.class, VertexWritable.class, new VertexWritableSerializer());
        super.register(GryoIo.class, ObjectWritable.class, new ObjectWritableSerializer());
    } catch (final ClassNotFoundException e) {
        throw new IllegalStateException(e);
    }
}
Example #13
Source File: HadoopMap.java From tinkerpop with Apache License 2.0
@Override
public void setup(final Mapper<NullWritable, VertexWritable, ObjectWritable, ObjectWritable>.Context context) {
    final Configuration apacheConfiguration = ConfUtil.makeApacheConfiguration(context.getConfiguration());
    KryoShimServiceLoader.applyConfiguration(apacheConfiguration);
    this.mapReduce = MapReduce.createMapReduce(HadoopGraph.open(apacheConfiguration), apacheConfiguration);
    this.mapReduce.workerStart(MapReduce.Stage.MAP);
}
Example #14
Source File: ToyGraphInputRDD.java From tinkerpop with Apache License 2.0
@Override
public JavaPairRDD<Object, VertexWritable> readGraphRDD(final Configuration configuration, final JavaSparkContext sparkContext) {
    KryoShimServiceLoader.applyConfiguration(TinkerGraph.open().configuration());
    final List<VertexWritable> vertices;
    if (configuration.getString(Constants.GREMLIN_HADOOP_INPUT_LOCATION).contains("modern"))
        vertices = IteratorUtils.list(IteratorUtils.map(TinkerFactory.createModern().vertices(), VertexWritable::new));
    else if (configuration.getString(Constants.GREMLIN_HADOOP_INPUT_LOCATION).contains("classic"))
        vertices = IteratorUtils.list(IteratorUtils.map(TinkerFactory.createClassic().vertices(), VertexWritable::new));
    else if (configuration.getString(Constants.GREMLIN_HADOOP_INPUT_LOCATION).contains("crew"))
        vertices = IteratorUtils.list(IteratorUtils.map(TinkerFactory.createTheCrew().vertices(), VertexWritable::new));
    else if (configuration.getString(Constants.GREMLIN_HADOOP_INPUT_LOCATION).contains("sink"))
        vertices = IteratorUtils.list(IteratorUtils.map(TinkerFactory.createKitchenSink().vertices(), VertexWritable::new));
    else if (configuration.getString(Constants.GREMLIN_HADOOP_INPUT_LOCATION).contains("grateful")) {
        try {
            final Graph graph = TinkerGraph.open();
            final GraphReader reader = GryoReader.build().mapper(graph.io(GryoIo.build()).mapper().create()).create();
            try (final InputStream stream = GryoResourceAccess.class.getResourceAsStream("grateful-dead-v3d0.kryo")) {
                reader.readGraph(stream, graph);
            }
            vertices = IteratorUtils.list(IteratorUtils.map(graph.vertices(), VertexWritable::new));
        } catch (final IOException e) {
            throw new IllegalStateException(e.getMessage(), e);
        }
    } else
        throw new IllegalArgumentException("No legal toy graph was provided to load: " + configuration.getProperty(Constants.GREMLIN_HADOOP_INPUT_LOCATION));
    return sparkContext.parallelize(vertices).mapToPair(vertex -> new Tuple2<>(vertex.get().id(), vertex));
}
Example #15
Source File: ExampleInputRDD.java From tinkerpop with Apache License 2.0
@Override
public JavaPairRDD<Object, VertexWritable> readGraphRDD(final Configuration configuration, final JavaSparkContext sparkContext) {
    final List<Vertex> list = new ArrayList<>();
    list.add(StarGraph.open().addVertex(T.id, 1L, T.label, "person", "age", 29));
    list.add(StarGraph.open().addVertex(T.id, 2L, T.label, "person", "age", 27));
    list.add(StarGraph.open().addVertex(T.id, 4L, T.label, "person", "age", 32));
    list.add(StarGraph.open().addVertex(T.id, 6L, T.label, "person", "age", 35));
    return sparkContext.parallelize(list).mapToPair(vertex -> new Tuple2<>(vertex.id(), new VertexWritable(vertex)));
}
Example #16
Source File: GraphFilterRecordReader.java From tinkerpop with Apache License 2.0
@Override
public void initialize(final InputSplit inputSplit, final TaskAttemptContext taskAttemptContext) throws IOException, InterruptedException {
    final Configuration configuration = taskAttemptContext.getConfiguration();
    final InputFormat<NullWritable, VertexWritable> inputFormat = ReflectionUtils.newInstance(
            configuration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, InputFormat.class, InputFormat.class), configuration);
    if (!(inputFormat instanceof GraphFilterAware) && configuration.get(Constants.GREMLIN_HADOOP_GRAPH_FILTER, null) != null)
        this.graphFilter = VertexProgramHelper.deserialize(
                ConfUtil.makeApacheConfiguration(configuration), Constants.GREMLIN_HADOOP_GRAPH_FILTER);
    this.recordReader = inputFormat.createRecordReader(inputSplit, taskAttemptContext);
    this.recordReader.initialize(inputSplit, taskAttemptContext);
}
Example #17
Source File: GraphSONRecordWriter.java From tinkerpop with Apache License 2.0
@Override
public void write(final NullWritable key, final VertexWritable vertex) throws IOException {
    if (null != vertex) {
        if (this.hasEdges) {
            graphsonWriter.writeVertex(this.outputStream, vertex.get(), Direction.BOTH);
            this.outputStream.write(NEWLINE);
        } else {
            graphsonWriter.writeVertex(this.outputStream, vertex.get());
            this.outputStream.write(NEWLINE);
        }
    }
}
Example #18
Source File: GryoRecordWriter.java From tinkerpop with Apache License 2.0
@Override
public void write(final NullWritable key, final VertexWritable vertex) throws IOException {
    if (null != vertex) {
        if (this.hasEdges)
            gryoWriter.writeVertex(this.outputStream, vertex.get(), Direction.BOTH);
        else
            gryoWriter.writeVertex(this.outputStream, vertex.get());
    }
}
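The hasEdges flag above decides whether each star vertex is written with its incident edges (Direction.BOTH) or bare. The same GraphWriter calls work outside a RecordWriter; a minimal sketch with a hypothetical output path:

// Hedged sketch: the writeVertex() calls used above, invoked directly.
final Vertex vertex = StarGraph.open().addVertex(T.id, 1L, T.label, "person", "age", 29);
final GryoWriter gryoWriter = GryoWriter.build().create();
try (final OutputStream outputStream = new FileOutputStream("vertex.kryo")) { // hypothetical path
    gryoWriter.writeVertex(outputStream, vertex, Direction.BOTH); // vertex plus incident edges
    gryoWriter.writeVertex(outputStream, vertex);                 // vertex and its properties only
}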
Example #19
Source File: GraknSparkExecutor.java From grakn with GNU Affero General Public License v3.0
public static <K, V> JavaPairRDD<K, V> executeMap(
        final JavaPairRDD<Object, VertexWritable> graphRDD,
        final MapReduce<K, V, ?, ?, ?> mapReduce,
        final Configuration graphComputerConfiguration) {
    JavaPairRDD<K, V> mapRDD = graphRDD.mapPartitionsToPair(partitionIterator -> {
        KryoShimServiceLoader.applyConfiguration(graphComputerConfiguration);
        return new MapIterator<>(
                MapReduce.<MapReduce<K, V, ?, ?, ?>>createMapReduce(HadoopGraph.open(graphComputerConfiguration), graphComputerConfiguration),
                partitionIterator);
    });
    if (mapReduce.getMapKeySort().isPresent()) {
        mapRDD = mapRDD.sortByKey(mapReduce.getMapKeySort().get(), true, 1);
    }
    return mapRDD;
}
Example #20
Source File: GiraphRecordReader.java From titan1withtp3.1 with Apache License 2.0
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {
    while (reader.nextKeyValue()) {
        // TODO titan05 integration -- the duplicate() call may be unnecessary
        final TinkerVertex maybeNullTinkerVertex = deser.readHadoopVertex(reader.getCurrentKey(), reader.getCurrentValue());
        if (null != maybeNullTinkerVertex) {
            vertex = new VertexWritable(maybeNullTinkerVertex);
            //vertexQuery.filterRelationsOf(vertex); // TODO reimplement vertexquery filtering
            return true;
        }
    }
    return false;
}
Example #21
Source File: ConfUtil.java From tinkerpop with Apache License 2.0
public static InputFormat<NullWritable, VertexWritable> getReaderAsInputFormat(final Configuration hadoopConfiguration) {
    final Class<?> readerClass = hadoopConfiguration.getClass(Constants.GREMLIN_HADOOP_GRAPH_READER, Object.class);
    try {
        return InputFormat.class.isAssignableFrom(readerClass)
                ? (InputFormat<NullWritable, VertexWritable>) readerClass.newInstance()
                : (InputFormat<NullWritable, VertexWritable>) Class.forName("org.apache.tinkerpop.gremlin.spark.structure.io.InputRDDFormat").newInstance();
    } catch (final Exception e) {
        throw new IllegalStateException(e.getMessage(), e);
    }
}
Example #22
Source File: GiraphInputFormat.java From titan1withtp3.1 with Apache License 2.0
@Override
public RecordReader<NullWritable, VertexWritable> createRecordReader(InputSplit split, TaskAttemptContext context) throws IOException, InterruptedException {
    return new GiraphRecordReader(refCounter, inputFormat.createRecordReader(split, context));
}
Example #23
Source File: GraphSONV2d0RecordReaderWriterTest.java From tinkerpop with Apache License 2.0
@Override
protected Class<? extends InputFormat<NullWritable, VertexWritable>> getInputFormat() {
    return GraphSONInputFormat.class;
}
Example #24
Source File: GraphSONV2d0RecordReaderWriterTest.java From tinkerpop with Apache License 2.0
@Override
protected Class<? extends OutputFormat<NullWritable, VertexWritable>> getOutputFormat() {
    return GraphSONOutputFormat.class;
}
Example #25
Source File: VertexWritableSerializer.java From tinkerpop with Apache License 2.0
@Override
public <O extends OutputShim> void write(final KryoShim<?, O> kryo, final O output, final VertexWritable vertexWritable) {
    kryo.writeObject(output, vertexWritable.get().graph());
}
Example #26
Source File: VertexWritableSerializer.java From tinkerpop with Apache License 2.0
@Override
public <I extends InputShim> VertexWritable read(final KryoShim<I, ?> kryo, final I input, final Class<VertexWritable> clazz) {
    return new VertexWritable(kryo.readObject(input, StarGraph.class).getStarVertex());
}
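Together, these two methods round-trip a VertexWritable through Spark's Kryo shim by serializing the underlying StarGraph. Independently of Kryo, the class also honors Hadoop's Writable contract; a hedged sketch of that round trip, assuming the public no-argument constructor that Writable implementations conventionally provide:

// Hedged sketch: the Hadoop Writable round trip (write/readFields) for VertexWritable.
final VertexWritable original = new VertexWritable(StarGraph.open().addVertex(T.id, 1L, T.label, "person"));
final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
original.write(new DataOutputStream(bytes));       // Writable#write
final VertexWritable copy = new VertexWritable();  // assumed no-arg constructor
copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()))); // Writable#readFields
assert copy.get().id().equals(original.get().id());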
Example #27
Source File: ScriptOutputFormat.java From tinkerpop with Apache License 2.0
public RecordWriter<NullWritable, VertexWritable> getRecordWriter(final TaskAttemptContext job, final DataOutputStream outputStream) throws IOException, InterruptedException {
    return new ScriptRecordWriter(outputStream, job);
}
Example #28
Source File: GryoRegistrator.java From tinkerpop with Apache License 2.0
private LinkedHashMap<Class<?>, Serializer<?>> getExtraRegistrations() {
    /* The map returned by this method MUST have a fixed iteration order!
     *
     * The order itself is irrelevant, so long as it is completely stable at runtime.
     *
     * LinkedHashMap satisfies this requirement (its contract specifies
     * iteration in key-insertion-order). */
    final LinkedHashMap<Class<?>, Serializer<?>> m = new LinkedHashMap<>();
    // The following entries were copied from GryoSerializer's constructor.
    // This could be turned into a static collection on GryoSerializer to avoid
    // duplication, but it would be a bit cumbersome to do so without disturbing
    // the ordering of the existing entries in that constructor, since not all
    // of the entries are for TinkerPop (and the ordering is significant).
    try {
        m.put(Class.forName("scala.reflect.ClassTag$$anon$1"), new JavaSerializer());
        m.put(Class.forName("scala.reflect.ManifestFactory$$anon$1"), new JavaSerializer());
        m.put(Class.forName("org.apache.spark.internal.io.FileCommitProtocol$TaskCommitMessage"), new JavaSerializer());
        m.put(Class.forName("org.apache.spark.internal.io.FileCommitProtocol$EmptyTaskCommitMessage$"), new JavaSerializer());
    } catch (final ClassNotFoundException e) {
        throw new IllegalStateException(e.getMessage(), e);
    }
    m.put(WrappedArray.ofRef.class, null);
    m.put(MessagePayload.class, null);
    m.put(ViewIncomingPayload.class, null);
    m.put(ViewOutgoingPayload.class, null);
    m.put(ViewPayload.class, null);
    m.put(VertexWritable.class, new UnshadedSerializerAdapter<>(new VertexWritableSerializer()));
    m.put(ObjectWritable.class, new UnshadedSerializerAdapter<>(new ObjectWritableSerializer<>()));
    //
    m.put(HadoopVertex.class, new UnshadedSerializerAdapter<>(new GryoSerializersV1d0.VertexSerializer()));
    m.put(HadoopVertexProperty.class, new UnshadedSerializerAdapter<>(new GryoSerializersV1d0.VertexPropertySerializer()));
    m.put(HadoopProperty.class, new UnshadedSerializerAdapter<>(new GryoSerializersV1d0.PropertySerializer()));
    m.put(HadoopEdge.class, new UnshadedSerializerAdapter<>(new GryoSerializersV1d0.EdgeSerializer()));
    //
    m.put(ComputerGraph.ComputerVertex.class, new UnshadedSerializerAdapter<>(new GryoSerializersV1d0.VertexSerializer()));
    m.put(ComputerGraph.ComputerVertexProperty.class, new UnshadedSerializerAdapter<>(new GryoSerializersV1d0.VertexPropertySerializer()));
    m.put(ComputerGraph.ComputerProperty.class, new UnshadedSerializerAdapter<>(new GryoSerializersV1d0.PropertySerializer()));
    m.put(ComputerGraph.ComputerEdge.class, new UnshadedSerializerAdapter<>(new GryoSerializersV1d0.EdgeSerializer()));
    //
    m.put(StarGraph.StarEdge.class, new UnshadedSerializerAdapter<>(new GryoSerializersV1d0.EdgeSerializer()));
    m.put(StarGraph.StarVertex.class, new UnshadedSerializerAdapter<>(new GryoSerializersV1d0.VertexSerializer()));
    m.put(StarGraph.StarProperty.class, new UnshadedSerializerAdapter<>(new GryoSerializersV1d0.PropertySerializer()));
    m.put(StarGraph.StarVertexProperty.class, new UnshadedSerializerAdapter<>(new GryoSerializersV1d0.VertexPropertySerializer()));
    //
    m.put(MutablePath.class, new UnshadedSerializerAdapter<>(new GryoSerializersV1d0.PathSerializer()));
    m.put(ImmutablePath.class, new UnshadedSerializerAdapter<>(new GryoSerializersV1d0.PathSerializer()));
    //
    m.put(CompactBuffer[].class, null);
    // TODO: VoidSerializer is a default serializer and thus may not be needed (if it is, you can't use FieldSerializer)
    // TODO: We will have to copy/paste the shaded DefaultSerializer.VoidSerializer into an unshaded form.
    //m.put(void.class, null);
    //m.put(Void.class, null);
    return m;
}
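For these registrations to take effect, Spark must be pointed at the registrator through its standard Kryo properties; a minimal sketch (the application name is hypothetical):

// Hedged sketch: wiring GryoRegistrator into Spark (standard Spark property names).
final SparkConf sparkConf = new SparkConf()
        .setAppName("tinkerpop-gryo") // hypothetical application name
        .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        .set("spark.kryo.registrator", GryoRegistrator.class.getCanonicalName());
final JavaSparkContext sparkContext = new JavaSparkContext(sparkConf);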
Example #29
Source File: ScriptOutputFormat.java From tinkerpop with Apache License 2.0
@Override
public RecordWriter<NullWritable, VertexWritable> getRecordWriter(final TaskAttemptContext job) throws IOException, InterruptedException {
    return getRecordWriter(job, getDataOutputStream(job));
}
Example #30
Source File: GraphSONV3d0RecordReaderWriterTest.java From tinkerpop with Apache License 2.0
@Override
protected Class<? extends OutputFormat<NullWritable, VertexWritable>> getOutputFormat() {
    return GraphSONOutputFormat.class;
}