forked from D-Net/dnet-hadoop
Oozie workflow aimed at building the adjacency-list representation of the graph, needed to build the records to be indexed
parent 6a7bee5e43
commit 7ba586d2e5
@@ -0,0 +1,3 @@
sparkDriverMemory=16G
sparkExecutorMemory=16G
hive_db_name=claudio

@@ -0,0 +1,37 @@
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
    <parent>
        <artifactId>dhp-workflows</artifactId>
        <groupId>eu.dnetlib.dhp</groupId>
        <version>1.0.5-SNAPSHOT</version>
    </parent>
    <modelVersion>4.0.0</modelVersion>

    <artifactId>dhp-graph-provision</artifactId>

    <dependencies>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_2.11</artifactId>
        </dependency>
        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_2.11</artifactId>
        </dependency>

        <dependency>
            <groupId>eu.dnetlib.dhp</groupId>
            <artifactId>dhp-common</artifactId>
            <version>${project.version}</version>
        </dependency>
        <dependency>
            <groupId>eu.dnetlib.dhp</groupId>
            <artifactId>dhp-schemas</artifactId>
            <version>${project.version}</version>
        </dependency>

    </dependencies>

</project>

@@ -0,0 +1,4 @@
package eu.dnetlib.dhp.graph;

public class EntityNode {
}

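EntityNode is committed here as an empty stub, yet SparkGraphIndexingJob below already constructs it with (id, type, payload) and chains setRelatedEntities(...). A minimal sketch of the shape the class would presumably need for that usage to compile; the field names and the List<RelatedEntity> type are inferred assumptions, not part of this commit:

package eu.dnetlib.dhp.graph;

import java.io.Serializable;
import java.util.List;

// Hypothetical completion of the EntityNode stub, inferred from its usage in SparkGraphIndexingJob.
public class EntityNode implements Serializable {

    private String id;
    private String type;
    private String payload;
    private List<RelatedEntity> relatedEntities;

    public EntityNode(String id, String type, String payload) {
        this.id = id;
        this.type = type;
        this.payload = payload;
    }

    // fluent setter, so the job can chain it right after the constructor
    public EntityNode setRelatedEntities(List<RelatedEntity> relatedEntities) {
        this.relatedEntities = relatedEntities;
        return this;
    }

    // note: Encoders.bean(EntityNode.class) would additionally require a public no-arg
    // constructor plus getters/setters for every field; omitted here for brevity
}
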
@@ -0,0 +1,23 @@
package eu.dnetlib.dhp.graph;

import com.google.common.collect.Maps;
import eu.dnetlib.dhp.schema.oaf.*;

import java.util.Map;

public class GraphMappingUtils {

    public final static Map<String, Class> types = Maps.newHashMap();

    static {
        types.put("datasource", Datasource.class);
        types.put("organization", Organization.class);
        types.put("project", Project.class);
        types.put("dataset", Dataset.class);
        types.put("otherresearchproduct", OtherResearchProduct.class);
        types.put("software", Software.class);
        types.put("publication", Publication.class);
        types.put("relation", Relation.class);
    }

}

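The types map is not referenced anywhere else in this commit; presumably it is meant to resolve the OAF model class for the entity-type strings that also appear in the Hive tables and in the job below. An illustrative lookup, nothing more:

// Illustrative only: resolve the model class registered for an entity type string.
Class clazz = GraphMappingUtils.types.get("publication");   // eu.dnetlib.dhp.schema.oaf.Publication
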
@@ -0,0 +1,69 @@
package eu.dnetlib.dhp.graph;

import java.io.Serializable;

public class RelatedEntity implements Serializable {

    private String relType;

    private String subRelType;

    private String relClass;

    private String type;

    private String payload;

    public RelatedEntity(String relType, String subRelType, String relClass, String type, String payload) {
        this.relType = relType;
        this.subRelType = subRelType;
        this.relClass = relClass;
        this.type = type;
        this.payload = payload;
    }

    public String getRelType() {
        return relType;
    }

    public RelatedEntity setRelType(String relType) {
        this.relType = relType;
        return this;
    }

    public String getSubRelType() {
        return subRelType;
    }

    public RelatedEntity setSubRelType(String subRelType) {
        this.subRelType = subRelType;
        return this;
    }

    public String getRelClass() {
        return relClass;
    }

    public RelatedEntity setRelClass(String relClass) {
        this.relClass = relClass;
        return this;
    }

    public String getType() {
        return type;
    }

    public RelatedEntity setType(String type) {
        this.type = type;
        return this;
    }

    public String getPayload() {
        return payload;
    }

    public RelatedEntity setPayload(String payload) {
        this.payload = payload;
        return this;
    }

}

@@ -0,0 +1,102 @@
package eu.dnetlib.dhp.graph;

import com.google.common.collect.Sets;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.schema.common.EntityPayload;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import scala.Tuple2;
import scala.runtime.AbstractFunction1;

import java.util.List;
import java.util.Set;
import java.util.stream.Collectors;

import static org.apache.commons.lang3.StringUtils.substringAfter;
import static org.apache.commons.lang3.StringUtils.substringBefore;
import static org.apache.spark.sql.Encoders.bean;

public class SparkGraphIndexingJob {

    private final static String ENTITY_NODES_PATH = "/tmp/entity_node";
    private static final long LIMIT = 100;

    public static void main(String[] args) throws Exception {

        final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkGraphIndexingJob.class.getResourceAsStream("/eu/dnetlib/dhp/graph/input_graph_parameters.json")));
        parser.parseArgument(args);
        final SparkSession spark = SparkSession
                .builder()
                .appName(SparkGraphIndexingJob.class.getSimpleName())
                .master(parser.get("master"))
                .config("hive.metastore.uris", parser.get("hive_metastore_uris"))
                .config("spark.driver.cores", 1)
                .config("spark.executor.cores", 1)
                .config("spark.yarn.executor.memoryOverhead", "4G")
                .config("spark.yarn.driver.memoryOverhead", "4G")
                .enableHiveSupport()
                .getOrCreate();

        final String hiveDbName = parser.get("hive_db_name");

        final FileSystem fs = FileSystem.get(spark.sparkContext().hadoopConfiguration());
        if (fs.exists(new Path(ENTITY_NODES_PATH))) {
            fs.delete(new Path(ENTITY_NODES_PATH), true);
        }

        // join each entity with its related entities and persist the resulting adjacency lists
        // as gzip-compressed text under ENTITY_NODES_PATH
        spark
            .sql(getJoinEntitiesSQL(hiveDbName))
            .transform(toEntityNode())
            /*
            .map((MapFunction<EntityNode, String>) r -> {
                return null;
            }, bean(String.class))
            */
            .rdd()
            .saveAsTextFile(ENTITY_NODES_PATH, GzipCodec.class);
    }

    private static AbstractFunction1<Dataset<Row>, Dataset<EntityNode>> toEntityNode() {
        return new AbstractFunction1<Dataset<Row>, Dataset<EntityNode>>() {
            @Override
            public Dataset<EntityNode> apply(Dataset<Row> d) {
                return d.map((MapFunction<Row, EntityNode>) r -> {

                    // 'related_entity' holds strings of the form type@@payload (see getJoinEntitiesSQL);
                    // split them and keep at most LIMIT related entities per node
                    final List<String> res = r.getList(r.fieldIndex("related_entity"));
                    final byte[] payload = r.getAs("payload");
                    return new EntityNode(r.getAs("id"), r.getAs("type"), new String(payload))
                            .setRelatedEntities(res
                                    .stream()
                                    .map(re -> new Tuple2<>(substringBefore(re, "@@"), substringAfter(re, "@@")))
                                    .map(re -> new RelatedEntity(r.getAs("reltype"), r.getAs("subreltype"), r.getAs("relclass"), re._1(), re._2()))
                                    .limit(LIMIT)
                                    .collect(Collectors.toList()));

                }, bean(EntityNode.class));
            }
        };
    }

    // one output row per source entity and per (reltype, subreltype, relclass); the related
    // entities are collected into the 'related_entity' array as 'type@@payload' strings
    private static String getJoinEntitiesSQL(String hiveDbName) {
        return String.format(
                "SELECT " +
                        "E_s.id AS id, " +
                        "E_s.type AS type, " +
                        "E_s.payload AS payload, " +
                        "r.reltype AS reltype, r.subreltype AS subreltype, r.relclass AS relclass, " +
                        "collect_list(concat(E_t.type, '@@', E_t.payload)) AS related_entity " +
                        "FROM %s.entities " + "" /*"TABLESAMPLE(0.1 PERCENT) "*/ + "E_s " +
                        "LEFT JOIN %s.relation r ON (r.source = E_s.id) " +
                        "JOIN %s.entities E_t ON (E_t.id = r.target) \n" +
                        "GROUP BY E_s.id, E_s.type, E_s.payload, r.reltype, r.subreltype, r.relclass", hiveDbName, hiveDbName, hiveDbName);
    }

}

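For reference, with hive_db_name=openaire (the default set in config-default.xml below), the format string above renders to the following query:

SELECT
    E_s.id AS id,
    E_s.type AS type,
    E_s.payload AS payload,
    r.reltype AS reltype, r.subreltype AS subreltype, r.relclass AS relclass,
    collect_list(concat(E_t.type, '@@', E_t.payload)) AS related_entity
FROM openaire.entities E_s
LEFT JOIN openaire.relation r ON (r.source = E_s.id)
JOIN openaire.entities E_t ON (E_t.id = r.target)
GROUP BY E_s.id, E_s.type, E_s.payload, r.reltype, r.subreltype, r.relclass

Note that the inner JOIN on E_t drops any entity with no outgoing relations, despite the LEFT JOIN on the relation table.
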
@@ -0,0 +1,5 @@
[
  {"paramName":"mt", "paramLongName":"master", "paramDescription": "should be local or yarn", "paramRequired": true},
  {"paramName":"h", "paramLongName":"hive_metastore_uris", "paramDescription": "the hive metastore uris", "paramRequired": true},
  {"paramName":"db", "paramLongName":"hive_db_name", "paramDescription": "the target hive database name", "paramRequired": true}
]

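These are the three parameters the ArgumentApplicationParser in SparkGraphIndexingJob reads; the Oozie workflow below passes them through the Spark action (-mt, --hive_db_name, --hive_metastore_uris). Purely as an illustration, a local run could look like the sketch below; the wrapper class name and the 'local' master are assumptions, and the metastore URI is the default from config-default.xml:

public class SparkGraphIndexingJobLocalRun {
    public static void main(String[] args) throws Exception {
        // Hypothetical local invocation, mirroring the arguments the workflow passes on the cluster.
        SparkGraphIndexingJob.main(new String[] {
                "-mt", "local",                 // 'master': "should be local or yarn"
                "--hive_metastore_uris", "thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083",
                "--hive_db_name", "openaire"
        });
    }
}
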
@@ -0,0 +1,26 @@
<configuration>
    <property>
        <name>jobTracker</name>
        <value>yarnRM</value>
    </property>
    <property>
        <name>nameNode</name>
        <value>hdfs://nameservice1</value>
    </property>
    <property>
        <name>oozie.use.system.libpath</name>
        <value>true</value>
    </property>
    <property>
        <name>oozie.action.sharelib.for.spark</name>
        <value>spark2</value>
    </property>
    <property>
        <name>hive_metastore_uris</name>
        <value>thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083</value>
    </property>
    <property>
        <name>hive_db_name</name>
        <value>openaire</value>
    </property>
</configuration>

@@ -0,0 +1,46 @@
<workflow-app name="index_infospace_graph" xmlns="uri:oozie:workflow:0.5">
    <parameters>
        <property>
            <name>hive_db_name</name>
            <description>the target hive database name</description>
        </property>
        <property>
            <name>sparkDriverMemory</name>
            <description>memory for driver process</description>
        </property>
        <property>
            <name>sparkExecutorMemory</name>
            <description>memory for individual executor</description>
        </property>
        <property>
            <name>sparkExecutorCores</name>
            <description>number of cores used by single executor</description>
        </property>
    </parameters>

    <start to="GraphJoinEntities"/>

    <kill name="Kill">
        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
    </kill>

    <action name="GraphJoinEntities">
        <spark xmlns="uri:oozie:spark-action:0.2">
            <job-tracker>${jobTracker}</job-tracker>
            <name-node>${nameNode}</name-node>
            <master>yarn-cluster</master>
            <mode>cluster</mode>
            <name>GraphIndexing</name>
            <class>eu.dnetlib.dhp.graph.SparkGraphIndexingJob</class>
            <jar>dhp-graph-provision-${projectVersion}.jar</jar>
            <spark-opts>--executor-memory ${sparkExecutorMemory} --executor-cores ${sparkExecutorCores} --driver-memory=${sparkDriverMemory} --conf spark.extraListeners="com.cloudera.spark.lineage.NavigatorAppListener" --conf spark.sql.queryExecutionListeners="com.cloudera.spark.lineage.NavigatorQueryListener" --conf spark.sql.warehouse.dir="/user/hive/warehouse"</spark-opts>
            <arg>-mt</arg> <arg>yarn-cluster</arg>
            <arg>--hive_db_name</arg><arg>${hive_db_name}</arg>
            <arg>--hive_metastore_uris</arg><arg>${hive_metastore_uris}</arg>
        </spark>
        <ok to="End"/>
        <error to="Kill"/>
    </action>

    <end name="End"/>
</workflow-app>