dnet-hadoop/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/oa/provision/CreateRelatedEntitiesJob_ph...

package eu.dnetlib.dhp.oa.provision;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.HdfsSupport;
import eu.dnetlib.dhp.oa.provision.model.EntityRelEntity;
import eu.dnetlib.dhp.oa.provision.model.TypedRow;
import eu.dnetlib.dhp.schema.common.ModelSupport;
import eu.dnetlib.dhp.schema.oaf.*;
import org.apache.commons.io.IOUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Tuple2;
import scala.collection.JavaConverters;
import scala.collection.Seq;

import java.util.List;
import java.util.Optional;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

/**
 * Joins the graph nodes by resolving the links of distance = 1 to create an adjacency list of linked objects.
 * The operation considers all the entity types (publication, dataset, software, ORP, project, datasource,
 * organization) and all the possible relationships (similarity links produced by the Dedup process are excluded).
 *
 * The operation is implemented by sequentially joining one entity type at a time (E) with the relationships (R),
 * and again by E, finally grouping by E.id.
 *
 * The workflow is organized in different parts aimed at reducing the complexity of the operation:
 * 1) PrepareRelationsJob:
 *    only consider relationships that are not virtually deleted ($.dataInfo.deletedbyinference == false); each entity
 *    can be linked to at most 100 other objects
 *
 * 2) JoinRelationEntityByTargetJob:
 *    (phase 1): prepare tuples [relation - target entity] (R - T):
 *    for each entity type E_i
 *      map E_i as RelatedEntity T_i, to simplify the model and extract only the necessary information
 *      join (R.target = T_i.id)
 *      save the tuples (R_i, T_i)
 *    (phase 2):
 *      create the union of all the entity types E, hash by id
 *      read the tuples (R, T), hash by R.source
 *      join E.id = (R, T).source, where E becomes the Source Entity S
 *      save the tuples (S, R, T)
 *
 * 3) AdjacencyListBuilderJob:
 *    given the tuple (S - R - T), group by S.id -> List [ R - T ] and map the result as JoinedEntity
 *
 * 4) XmlConverterJob:
 *    convert the JoinedEntities as XML records
 */
public class CreateRelatedEntitiesJob_phase2 {

    private static final Logger log = LoggerFactory.getLogger(CreateRelatedEntitiesJob_phase2.class);

    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
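
    /**
     * Parses the job arguments (isSparkSessionManaged, inputRelatedEntitiesPath, inputGraphRootPath, outputPath,
     * numPartitions, as read via parser.get below), configures Kryo serialization for the Oaf model classes and
     * runs the join within a Spark session, overwriting any pre-existing output directory.
     */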
    public static void main(String[] args) throws Exception {

        String jsonConfiguration = IOUtils.toString(
                PrepareRelationsJob.class
                        .getResourceAsStream("/eu/dnetlib/dhp/oa/provision/input_params_related_entities_pahase2.json"));
        final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
        parser.parseArgument(args);

        Boolean isSparkSessionManaged = Optional
                .ofNullable(parser.get("isSparkSessionManaged"))
                .map(Boolean::valueOf)
                .orElse(Boolean.TRUE);
        log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

        String inputRelatedEntitiesPath = parser.get("inputRelatedEntitiesPath");
        log.info("inputRelatedEntitiesPath: {}", inputRelatedEntitiesPath);

        String inputGraphRootPath = parser.get("inputGraphRootPath");
        log.info("inputGraphRootPath: {}", inputGraphRootPath);

        String outputPath = parser.get("outputPath");
        log.info("outputPath: {}", outputPath);

        int numPartitions = Integer.parseInt(parser.get("numPartitions"));
        log.info("numPartitions: {}", numPartitions);

        SparkConf conf = new SparkConf();
        conf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer");
        conf.registerKryoClasses(ModelSupport.getOafModelClasses());

        runWithSparkSession(conf, isSparkSessionManaged,
                spark -> {
                    removeOutputDir(spark, outputPath);
                    joinAllEntities(spark, inputRelatedEntitiesPath, inputGraphRootPath, outputPath, numPartitions);
                });
    }
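
    /**
     * Left-outer joins the union of all graph entities (keyed by entity id) with the (relation, target entity) pairs
     * produced by phase 1 (keyed by the relation source id), building (source entity, relation, target entity)
     * triples. Entities without any outgoing relation are kept, with relation and target left unset. The result is
     * repartitioned, rows without a valid source entity are filtered out, and the output is saved as parquet.
     */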
    private static void joinAllEntities(SparkSession spark, String inputRelatedEntitiesPath, String inputGraphRootPath, String outputPath, int numPartitions) {

        Dataset<Tuple2<String, TypedRow>> entities = readAllEntities(spark, inputGraphRootPath, numPartitions);
        Dataset<Tuple2<String, EntityRelEntity>> relsBySource = readRelatedEntities(spark, inputRelatedEntitiesPath);

        entities
                .joinWith(relsBySource, entities.col("_1").equalTo(relsBySource.col("_1")), "left_outer")
                .map((MapFunction<Tuple2<Tuple2<String, TypedRow>, Tuple2<String, EntityRelEntity>>, EntityRelEntity>) value -> {
                    EntityRelEntity re = new EntityRelEntity();
                    re.setEntity(value._1()._2());
                    Optional<EntityRelEntity> related = Optional.ofNullable(value._2()).map(Tuple2::_2);
                    if (related.isPresent()) {
                        re.setRelation(related.get().getRelation());
                        re.setTarget(related.get().getTarget());
                    }
                    return re;
                }, Encoders.bean(EntityRelEntity.class))
                .repartition(numPartitions)
                .filter((FilterFunction<EntityRelEntity>) value -> value.getEntity() != null && StringUtils.isNotBlank(value.getEntity().getId()))
                .write()
                .mode(SaveMode.Overwrite)
                .parquet(outputPath);
    }
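
    /**
     * Reads every entity type from the graph root path (publication, dataset, otherresearchproduct, software,
     * datasource, organization, project), maps each record to a TypedRow and returns the union keyed by entity id.
     */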
    private static Dataset<Tuple2<String, TypedRow>> readAllEntities(SparkSession spark, String inputGraphPath, int numPartitions) {
        Dataset<TypedRow> publication = readPathEntity(spark, inputGraphPath + "/publication", Publication.class);
        Dataset<TypedRow> dataset = readPathEntity(spark, inputGraphPath + "/dataset", eu.dnetlib.dhp.schema.oaf.Dataset.class);
        Dataset<TypedRow> other = readPathEntity(spark, inputGraphPath + "/otherresearchproduct", OtherResearchProduct.class);
        Dataset<TypedRow> software = readPathEntity(spark, inputGraphPath + "/software", Software.class);
        Dataset<TypedRow> datasource = readPathEntity(spark, inputGraphPath + "/datasource", Datasource.class);
        Dataset<TypedRow> organization = readPathEntity(spark, inputGraphPath + "/organization", Organization.class);
        Dataset<TypedRow> project = readPathEntity(spark, inputGraphPath + "/project", Project.class);

        return publication
                .union(dataset)
                .union(other)
                .union(software)
                .union(datasource)
                .union(organization)
                .union(project)
                .map((MapFunction<TypedRow, Tuple2<String, TypedRow>>)
                        value -> new Tuple2<>(value.getId(), value),
                        Encoders.tuple(Encoders.STRING(), Encoders.kryo(TypedRow.class)))
                .repartition(numPartitions);
    }
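
    /**
     * Loads the (relation, target entity) pairs produced by phase 1 from all the paths found under
     * inputRelatedEntitiesPath and keys each EntityRelEntity by the source id of its relation.
     */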
    private static Dataset<Tuple2<String, EntityRelEntity>> readRelatedEntities(SparkSession spark, String inputRelatedEntitiesPath) {

        log.info("Reading related entities from: {}", inputRelatedEntitiesPath);

        final List<String> paths = HdfsSupport.listFiles(inputRelatedEntitiesPath, spark.sparkContext().hadoopConfiguration());
        log.info("Found paths: {}", String.join(",", paths));

        return spark.read()
                .load(toSeq(paths))
                .as(Encoders.bean(EntityRelEntity.class))
                .map((MapFunction<EntityRelEntity, Tuple2<String, EntityRelEntity>>)
                        value -> new Tuple2<>(value.getRelation().getSource(), value),
                        Encoders.tuple(Encoders.STRING(), Encoders.kryo(EntityRelEntity.class)));
    }
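
    /**
     * Reads one graph table as JSON text, deserializes each line into the given OafEntity subclass and maps it to a
     * TypedRow; the entity type is derived from the last segment of the input path.
     */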
    private static <E extends OafEntity> Dataset<TypedRow> readPathEntity(SparkSession spark, String inputEntityPath, Class<E> entityClazz) {

        log.info("Reading Graph table from: {}", inputEntityPath);
        return spark
                .read()
                .textFile(inputEntityPath)
                .map((MapFunction<String, E>) value -> OBJECT_MAPPER.readValue(value, entityClazz), Encoders.bean(entityClazz))
                .map((MapFunction<E, TypedRow>) value -> getTypedRow(StringUtils.substringAfterLast(inputEntityPath, "/"), value), Encoders.bean(TypedRow.class));
    }
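
    /**
     * Wraps an OafEntity into a TypedRow carrying the entity type, the deletedbyinference flag, the entity id and
     * the entity itself serialized as a JSON string.
     */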
    private static TypedRow getTypedRow(String type, OafEntity entity) throws JsonProcessingException {
        TypedRow t = new TypedRow();
        t.setType(type);
        t.setDeleted(entity.getDataInfo().getDeletedbyinference());
        t.setId(entity.getId());
        t.setOaf(OBJECT_MAPPER.writeValueAsString(entity));
        return t;
    }
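
    /**
     * Removes the given path from HDFS so the job can write fresh results there.
     */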
    private static void removeOutputDir(SparkSession spark, String path) {
        HdfsSupport.remove(path, spark.sparkContext().hadoopConfiguration());
    }
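
    /**
     * Converts a java.util.List into a Scala Seq, so the list of paths can be passed to spark.read().load(...) above.
     */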
    private static Seq<String> toSeq(List<String> list) {
        return JavaConverters.asScalaIteratorConverter(list.iterator()).asScala().toSeq();
    }
}