forked from D-Net/dnet-hadoop
merging with branch beta
commit 74f801b689
@@ -67,6 +67,7 @@ public class AuthorMerger {
 				a -> a
 					.getPid()
 					.stream()
+					.filter(Objects::nonNull)
 					.map(p -> new Tuple2<>(pidToComparableString(p), a)))
 			.collect(Collectors.toMap(Tuple2::_1, Tuple2::_2, (x1, x2) -> x1));
 
@@ -78,6 +79,7 @@ public class AuthorMerger {
 				a -> a
 					.getPid()
 					.stream()
+					.filter(Objects::nonNull)
 					.filter(p -> !basePidAuthorMap.containsKey(pidToComparableString(p)))
 					.map(p -> new Tuple2<>(p, a)))
 			.collect(Collectors.toList());
@@ -150,7 +152,7 @@ public class AuthorMerger {
 	}
 
 	private static boolean hasPid(Author a) {
-		if (a == null || a.getPid() == null || a.getPid().size() == 0)
+		if (a == null || a.getPid() == null || a.getPid().isEmpty())
 			return false;
 		return a.getPid().stream().anyMatch(p -> p != null && StringUtils.isNotBlank(p.getValue()));
 	}
@@ -159,7 +161,10 @@ public class AuthorMerger {
 		if (StringUtils.isNotBlank(author.getSurname())) {
 			return new Person(author.getSurname() + ", " + author.getName(), false);
 		} else {
+			if (StringUtils.isNotBlank(author.getFullname()))
 				return new Person(author.getFullname(), false);
+			else
+				return new Person("", false);
 		}
 	}
 
@@ -98,7 +98,7 @@ public class GraphCleaningFunctions extends CleaningFunctions {
 
 			Result r = (Result) value;
 
-			if (Objects.nonNull(r.getTitle()) && r.getTitle().isEmpty()) {
+			if (Objects.isNull(r.getTitle()) || r.getTitle().isEmpty()) {
 				return false;
 			}
 
@@ -532,11 +532,11 @@ object DataciteToOAFTransformation {
       JField("awardUri", JString(awardUri)) <- fundingReferences
     } yield awardUri
 
+    result.setId(IdentifierFactory.createIdentifier(result))
     var relations: List[Relation] = awardUris.flatMap(a => get_projectRelation(a, result.getId)).filter(r => r != null)
 
 
     fix_figshare(result)
-    result.setId(IdentifierFactory.createIdentifier(result))
     if (result.getId == null)
       return List()
 

File diff suppressed because one or more lines are too long
@@ -38,7 +38,8 @@ import scala.Tuple2;
 /**
  * Groups the graph content by entity identifier to ensure ID uniqueness
  */
-public class GroupEntitiesSparkJob {
+public class
+GroupEntitiesSparkJob {
 
 	private static final Logger log = LoggerFactory.getLogger(GroupEntitiesSparkJob.class);
 
@@ -23,6 +23,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
+import eu.dnetlib.dhp.common.HdfsSupport;
 import eu.dnetlib.dhp.schema.common.EntityType;
 import eu.dnetlib.dhp.schema.common.ModelSupport;
 import eu.dnetlib.dhp.schema.oaf.*;
@@ -77,17 +78,22 @@ public class SparkUpdateEntity extends AbstractSparkAction {
 			(type, clazz) -> {
 				final String outputPath = dedupGraphPath + "/" + type;
 				removeOutputDir(spark, outputPath);
+				final String ip = DedupUtility.createEntityPath(graphBasePath, type.toString());
+				if (HdfsSupport.exists(ip, sc.hadoopConfiguration())) {
 					JavaRDD<String> sourceEntity = sc
 						.textFile(DedupUtility.createEntityPath(graphBasePath, type.toString()));
 
 					if (mergeRelExists(workingPath, type.toString())) {
 
-						final String mergeRelPath = DedupUtility.createMergeRelPath(workingPath, "*", type.toString());
+						final String mergeRelPath = DedupUtility
+							.createMergeRelPath(workingPath, "*", type.toString());
 						final String dedupRecordPath = DedupUtility
 							.createDedupRecordPath(workingPath, "*", type.toString());
 
-						final Dataset<Relation> rel = spark.read().load(mergeRelPath).as(Encoders.bean(Relation.class));
+						final Dataset<Relation> rel = spark
+							.read()
+							.load(mergeRelPath)
+							.as(Encoders.bean(Relation.class));
 
 						final JavaPairRDD<String, String> mergedIds = rel
 							.where("relClass == 'merges'")
@@ -119,6 +125,7 @@ public class SparkUpdateEntity extends AbstractSparkAction {
 					}
 
 					sourceEntity.saveAsTextFile(outputPath, GzipCodec.class);
+				}
 			});
 	}
 
@@ -0,0 +1,42 @@
+package eu.dnetlib.dhp.sx.graph
+
+import com.fasterxml.jackson.databind.ObjectMapper
+import eu.dnetlib.dhp.application.ArgumentApplicationParser
+import eu.dnetlib.dhp.schema.oaf.{Oaf, OtherResearchProduct, Publication, Result, Software, Dataset => OafDataset}
+import org.apache.commons.io.IOUtils
+import org.apache.hadoop.io.compress.GzipCodec
+import org.apache.spark.SparkConf
+import org.apache.spark.sql.{Encoder, Encoders, SparkSession}
+import org.slf4j.{Logger, LoggerFactory}
+
+object SparkConvertDatasetToJsonRDD {
+
+
+  def main(args: Array[String]): Unit = {
+    val log: Logger = LoggerFactory.getLogger(getClass)
+    val conf: SparkConf = new SparkConf()
+    val parser = new ArgumentApplicationParser(IOUtils.toString(getClass.getResourceAsStream("/eu/dnetlib/dhp/sx/graph/convert_dataset_json_params.json")))
+    parser.parseArgument(args)
+    val spark: SparkSession =
+      SparkSession
+        .builder()
+        .config(conf)
+        .appName(getClass.getSimpleName)
+        .master(parser.get("master")).getOrCreate()
+
+    val sourcePath = parser.get("sourcePath")
+    log.info(s"sourcePath -> $sourcePath")
+    val targetPath = parser.get("targetPath")
+    log.info(s"targetPath -> $targetPath")
+
+    val resultObject = List("publication", "dataset", "software", "otherResearchProduct")
+    val mapper = new ObjectMapper()
+    implicit val oafEncoder: Encoder[Result] = Encoders.kryo(classOf[Result])
+
+
+    resultObject.foreach { item =>
+      spark.read.load(s"$sourcePath/$item").as[Result].map(r => mapper.writeValueAsString(r))(Encoders.STRING).rdd.saveAsTextFile(s"$targetPath/${item.toLowerCase}", classOf[GzipCodec])
+    }
+
+  }
+}
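For orientation, a minimal read-back sketch (not part of this commit): the files written above contain one JSON-serialized Result per line, so a single line can be deserialized with the same Jackson ObjectMapper. The sample record and object name below are hypothetical; the sketch assumes the dhp-schemas dependency is on the classpath.

import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.schema.oaf.Result

object ReadBackSketch {
  def main(args: Array[String]): Unit = {
    val mapper = new ObjectMapper()
    // Hypothetical minimal line, as it would appear in a $targetPath/<entity>/part-* file.
    val line = """{"id":"50|doi_________::abc"}"""
    val r: Result = mapper.readValue(line, classOf[Result])
    println(r.getId) // 50|doi_________::abc
  }
}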
@@ -0,0 +1,67 @@
+package eu.dnetlib.dhp.sx.graph
+
+import com.fasterxml.jackson.databind.ObjectMapper
+import eu.dnetlib.dhp.application.ArgumentApplicationParser
+import eu.dnetlib.dhp.schema.oaf.{OtherResearchProduct, Publication, Relation, Result, Software, Dataset => OafDataset}
+import org.apache.commons.io.IOUtils
+import org.apache.spark.SparkConf
+import org.apache.spark.sql.{Encoder, Encoders, SaveMode, SparkSession}
+import org.slf4j.{Logger, LoggerFactory}
+
+object SparkConvertRDDtoDataset {
+
+  def main(args: Array[String]): Unit = {
+
+
+    val log: Logger = LoggerFactory.getLogger(getClass)
+    val conf: SparkConf = new SparkConf()
+    val parser = new ArgumentApplicationParser(IOUtils.toString(getClass.getResourceAsStream("/eu/dnetlib/dhp/sx/graph/convert_dataset_json_params.json")))
+    parser.parseArgument(args)
+    val spark: SparkSession =
+      SparkSession
+        .builder()
+        .config(conf)
+        .appName(getClass.getSimpleName)
+        .master(parser.get("master")).getOrCreate()
+
+    val sourcePath = parser.get("sourcePath")
+    log.info(s"sourcePath -> $sourcePath")
+    val t = parser.get("targetPath")
+    log.info(s"targetPath -> $t")
+
+    val entityPath = s"$t/entities"
+    val relPath = s"$t/relation"
+    val mapper = new ObjectMapper()
+    implicit val datasetEncoder: Encoder[OafDataset] = Encoders.kryo(classOf[OafDataset])
+    implicit val publicationEncoder: Encoder[Publication] = Encoders.kryo(classOf[Publication])
+    implicit val relationEncoder: Encoder[Relation] = Encoders.kryo(classOf[Relation])
+    implicit val orpEncoder: Encoder[OtherResearchProduct] = Encoders.kryo(classOf[OtherResearchProduct])
+    implicit val softwareEncoder: Encoder[Software] = Encoders.kryo(classOf[Software])
+
+
+    log.info("Converting dataset")
+    val rddDataset = spark.sparkContext.textFile(s"$sourcePath/dataset").map(s => mapper.readValue(s, classOf[OafDataset]))
+    spark.createDataset(rddDataset).as[OafDataset].write.mode(SaveMode.Overwrite).save(s"$entityPath/dataset")
+
+
+    log.info("Converting publication")
+    val rddPublication = spark.sparkContext.textFile(s"$sourcePath/publication").map(s => mapper.readValue(s, classOf[Publication]))
+    spark.createDataset(rddPublication).as[Publication].write.mode(SaveMode.Overwrite).save(s"$entityPath/publication")
+
+    log.info("Converting software")
+    val rddSoftware = spark.sparkContext.textFile(s"$sourcePath/software").map(s => mapper.readValue(s, classOf[Software]))
+    spark.createDataset(rddSoftware).as[Software].write.mode(SaveMode.Overwrite).save(s"$entityPath/software")
+
+    log.info("Converting otherresearchproduct")
+    val rddOtherResearchProduct = spark.sparkContext.textFile(s"$sourcePath/otherresearchproduct").map(s => mapper.readValue(s, classOf[OtherResearchProduct]))
+    spark.createDataset(rddOtherResearchProduct).as[OtherResearchProduct].write.mode(SaveMode.Overwrite).save(s"$entityPath/otherresearchproduct")
+
+
+    log.info("Converting Relation")
+
+
+    val rddRelation = spark.sparkContext.textFile(s"$sourcePath/relation").map(s => mapper.readValue(s, classOf[Relation]))
+    spark.createDataset(rddRelation).as[Relation].write.mode(SaveMode.Overwrite).save(s"$relPath")
+
+
+  }
+}
@@ -70,7 +70,7 @@ object SparkCreateInputGraph {
 
     resultObject.foreach { r =>
       log.info(s"Make ${r._1} unique")
-      makeDatasetUnique(s"$targetPath/extracted/${r._1}", s"$targetPath/dedup/${r._1}", spark, r._2)
+      makeDatasetUnique(s"$targetPath/extracted/${r._1}", s"$targetPath/preprocess/${r._1}", spark, r._2)
     }
   }
 
@@ -42,6 +42,7 @@ object SparkCreateScholix {
 
 
     val relationDS: Dataset[(String, Relation)] = spark.read.load(relationPath).as[Relation]
+      .filter(r => (r.getDataInfo == null || r.getDataInfo.getDeletedbyinference == false) && !r.getRelClass.toLowerCase.contains("merge"))
       .map(r => (r.getSource, r))(Encoders.tuple(Encoders.STRING, relEncoder))
 
     val summaryDS: Dataset[(String, ScholixSummary)] = spark.read.load(summaryPath).as[ScholixSummary]
@@ -1,7 +1,7 @@
 package eu.dnetlib.dhp.sx.graph
 
 import eu.dnetlib.dhp.application.ArgumentApplicationParser
-import eu.dnetlib.dhp.schema.oaf.Result
+import eu.dnetlib.dhp.schema.oaf.{Oaf, Result}
 import eu.dnetlib.dhp.schema.sx.summary.ScholixSummary
 import eu.dnetlib.dhp.sx.graph.scholix.ScholixUtils
 import org.apache.commons.io.IOUtils
@@ -29,11 +29,12 @@ object SparkCreateSummaryObject {
     log.info(s"targetPath -> $targetPath")
 
     implicit val resultEncoder: Encoder[Result] = Encoders.kryo[Result]
+    implicit val oafEncoder: Encoder[Oaf] = Encoders.kryo[Oaf]
     implicit val summaryEncoder: Encoder[ScholixSummary] = Encoders.kryo[ScholixSummary]
 
 
-    val ds: Dataset[Result] = spark.read.load(s"$sourcePath/*").as[Result]
+    val ds: Dataset[Result] = spark.read.load(s"$sourcePath/*").as[Result].filter(r => r.getDataInfo == null || r.getDataInfo.getDeletedbyinference == false)
 
     ds.repartition(6000).map(r => ScholixUtils.resultToSummary(r)).filter(s => s != null).write.mode(SaveMode.Overwrite).save(targetPath)
 
@@ -1,10 +1,17 @@
 package eu.dnetlib.dhp.sx.graph
 
+import com.fasterxml.jackson.databind.ObjectMapper
 import eu.dnetlib.dhp.application.ArgumentApplicationParser
 import eu.dnetlib.dhp.schema.oaf.{Relation, Result}
 import org.apache.commons.io.IOUtils
+import org.apache.hadoop.io.compress.GzipCodec
 import org.apache.spark.SparkConf
+import org.apache.spark.rdd.RDD
 import org.apache.spark.sql._
+import org.json4s
+import org.json4s.DefaultFormats
+import org.json4s.JsonAST.{JField, JObject, JString}
+import org.json4s.jackson.JsonMethods.parse
 import org.slf4j.{Logger, LoggerFactory}
 
 import scala.collection.JavaConverters._
@@ -25,60 +32,109 @@ object SparkResolveRelation {
     val relationPath = parser.get("relationPath")
     log.info(s"sourcePath -> $relationPath")
     val entityPath = parser.get("entityPath")
-    log.info(s"targetPath -> $entityPath")
+    log.info(s"entityPath -> $entityPath")
     val workingPath = parser.get("workingPath")
     log.info(s"workingPath -> $workingPath")
 
 
-    implicit val oafEncoder: Encoder[Result] = Encoders.kryo(classOf[Result])
     implicit val relEncoder: Encoder[Relation] = Encoders.kryo(classOf[Relation])
     import spark.implicits._
-    val entities: Dataset[Result] = spark.read.load(s"$entityPath/*").as[Result]
-
-    entities.flatMap(e => e.getPid.asScala
-      .map(p =>
-        convertPidToDNETIdentifier(p.getValue, p.getQualifier.getClassid))
-      .filter(s => s != null)
-      .map(s => (s, e.getId))
-    ).groupByKey(_._1)
-      .reduceGroups((x, y) => if (x._2.startsWith("50|doi") || x._2.startsWith("50|pmid")) x else y)
-      .map(s => s._2)
-      .write
-      .mode(SaveMode.Overwrite)
-      .save(s"$workingPath/resolvedPid")
-
-    val rPid: Dataset[(String, String)] = spark.read.load(s"$workingPath/resolvedPid").as[(String, String)]
+    extractPidResolvedTableFromJsonRDD(spark, entityPath, workingPath)
+
+    val mappper = new ObjectMapper()
+
+    val rPid: Dataset[(String, String)] = spark.read.load(s"$workingPath/relationResolvedPid").as[(String, String)]
 
     val relationDs: Dataset[(String, Relation)] = spark.read.load(relationPath).as[Relation].map(r => (r.getSource.toLowerCase, r))(Encoders.tuple(Encoders.STRING, relEncoder))
 
-    relationDs.joinWith(rPid, relationDs("_1").equalTo(rPid("_1")), "left").map {
+    relationDs.joinWith(rPid, relationDs("_1").equalTo(rPid("_2")), "left").map {
       m =>
        val sourceResolved = m._2
        val currentRelation = m._1._2
-        if (sourceResolved != null && sourceResolved._2.nonEmpty)
-          currentRelation.setSource(sourceResolved._2)
+        if (sourceResolved != null && sourceResolved._1 != null && sourceResolved._1.nonEmpty)
+          currentRelation.setSource(sourceResolved._1)
        currentRelation
     }.write
       .mode(SaveMode.Overwrite)
-      .save(s"$workingPath/resolvedSource")
+      .save(s"$workingPath/relationResolvedSource")
 
 
-    val relationSourceResolved: Dataset[(String, Relation)] = spark.read.load(s"$workingPath/resolvedSource").as[Relation].map(r => (r.getTarget.toLowerCase, r))(Encoders.tuple(Encoders.STRING, relEncoder))
-    relationSourceResolved.joinWith(rPid, relationSourceResolved("_1").equalTo(rPid("_1")), "left").map {
+    val relationSourceResolved: Dataset[(String, Relation)] = spark.read.load(s"$workingPath/relationResolvedSource").as[Relation].map(r => (r.getTarget.toLowerCase, r))(Encoders.tuple(Encoders.STRING, relEncoder))
+    relationSourceResolved.joinWith(rPid, relationSourceResolved("_1").equalTo(rPid("_2")), "left").map {
       m =>
        val targetResolved = m._2
        val currentRelation = m._1._2
-        if (targetResolved != null && targetResolved._2.nonEmpty)
-          currentRelation.setTarget(targetResolved._2)
+        if (targetResolved != null && targetResolved._1.nonEmpty)
+          currentRelation.setTarget(targetResolved._1)
        currentRelation
     }.filter(r => r.getSource.startsWith("50") && r.getTarget.startsWith("50"))
       .write
       .mode(SaveMode.Overwrite)
-      .save(s"$workingPath/resolvedRelation")
+      .save(s"$workingPath/relation_resolved")
+
+    spark.read.load(s"$workingPath/relation_resolved").as[Relation]
+      .map(r => mappper.writeValueAsString(r))
+      .rdd.saveAsTextFile(s"$workingPath/relation", classOf[GzipCodec])
+
   }
+
+
+  private def extractPidsFromRecord(input: String): (String, List[(String, String)]) = {
+    implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
+    lazy val json: json4s.JValue = parse(input)
+    val id: String = (json \ "id").extract[String]
+    val result: List[(String, String)] = for {
+      JObject(pids) <- json \ "pid"
+      JField("value", JString(pidValue)) <- pids
+      JField("qualifier", JObject(qualifier)) <- pids
+      JField("classname", JString(pidType)) <- qualifier
+    } yield (pidValue, pidType)
+    (id, result)
+  }
+
+  private def extractPidResolvedTableFromJsonRDD(spark: SparkSession, entityPath: String, workingPath: String) = {
+    import spark.implicits._
+
+    val d: RDD[(String, String)] = spark.sparkContext.textFile(s"$entityPath/*")
+      .map(i => extractPidsFromRecord(i))
+      .filter(s => s != null && s._1 != null && s._2 != null && s._2.nonEmpty)
+      .flatMap { p =>
+        p._2.map(pid =>
+          (p._1, convertPidToDNETIdentifier(pid._1, pid._2))
+        )
+      }.filter(r => r._1 != null || r._2 != null)
+
+    spark.createDataset(d)
+      .groupByKey(_._2)
+      .reduceGroups((x, y) => if (x._1.startsWith("50|doi") || x._1.startsWith("50|pmid")) x else y)
+      .map(s => s._2)
+      .write
+      .mode(SaveMode.Overwrite)
+      .save(s"$workingPath/relationResolvedPid")
+  }
+
+
+  /*
+    This method should be used once we finally convert everythings in Kryo dataset
+    instead of using rdd of json
+   */
+  private def extractPidResolvedTableFromKryo(spark: SparkSession, entityPath: String, workingPath: String) = {
+    import spark.implicits._
+    implicit val oafEncoder: Encoder[Result] = Encoders.kryo(classOf[Result])
+    val entities: Dataset[Result] = spark.read.load(s"$entityPath/*").as[Result]
+    entities.flatMap(e => e.getPid.asScala
+      .map(p =>
+        convertPidToDNETIdentifier(p.getValue, p.getQualifier.getClassid))
+      .filter(s => s != null)
+      .map(s => (s, e.getId))
+    ).groupByKey(_._1)
+      .reduceGroups((x, y) => if (x._2.startsWith("50|doi") || x._2.startsWith("50|pmid")) x else y)
+      .map(s => s._2)
+      .write
+      .mode(SaveMode.Overwrite)
+      .save(s"$workingPath/relationResolvedPid")
+  }
+
   def convertPidToDNETIdentifier(pid: String, pidType: String): String = {
     if (pid == null || pid.isEmpty || pidType == null || pidType.isEmpty)
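As a standalone illustration (not part of this commit), the json4s query pattern used in extractPidsFromRecord above can be exercised on a hand-written record; the record and object name below are hypothetical and only mirror the fields the method reads (id, pid[].value, pid[].qualifier.classname).

import org.json4s
import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JField, JObject, JString}
import org.json4s.jackson.JsonMethods.parse

object PidExtractionSketch {
  def main(args: Array[String]): Unit = {
    implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
    // Hypothetical unresolved record with a single DOI pid.
    val input = """{"id":"unresolved::x","pid":[{"value":"10.1000/xyz","qualifier":{"classid":"doi","classname":"doi"}}]}"""
    lazy val json: json4s.JValue = parse(input)
    val id: String = (json \ "id").extract[String]
    val pids: List[(String, String)] = for {
      JObject(pid) <- json \ "pid"
      JField("value", JString(pidValue)) <- pid
      JField("qualifier", JObject(qualifier)) <- pid
      JField("classname", JString(pidType)) <- qualifier
    } yield (pidValue, pidType)
    println((id, pids)) // (unresolved::x,List((10.1000/xyz,doi)))
  }
}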
@@ -0,0 +1,5 @@
+[
+  {"paramName":"mt", "paramLongName":"master", "paramDescription": "should be local or yarn", "paramRequired": true},
+  {"paramName":"s", "paramLongName":"sourcePath", "paramDescription": "the source Path", "paramRequired": true},
+  {"paramName":"t", "paramLongName":"targetPath", "paramDescription": "the path of the raw graph", "paramRequired": true}
+]
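A minimal usage sketch (not part of this commit) of how such a parameter file is consumed through ArgumentApplicationParser, following the same pattern as the Spark jobs above; the object name, paths, and argument values are illustrative, and the sketch assumes the module resources are on the classpath.

import eu.dnetlib.dhp.application.ArgumentApplicationParser
import org.apache.commons.io.IOUtils

object ConvertParamsSketch {
  def main(args: Array[String]): Unit = {
    // Load the JSON argument specification added above from the classpath.
    val parser = new ArgumentApplicationParser(
      IOUtils.toString(getClass.getResourceAsStream("/eu/dnetlib/dhp/sx/graph/convert_dataset_json_params.json")))
    // Long parameter names map to command-line switches, as in the Oozie workflows below.
    parser.parseArgument(Array("--master", "local[*]", "--sourcePath", "/tmp/in", "--targetPath", "/tmp/out"))
    println(parser.get("sourcePath")) // prints /tmp/in
  }
}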
@@ -0,0 +1,85 @@
+<workflow-app name="Create Raw Graph Step 1: extract Entities in raw graph" xmlns="uri:oozie:workflow:0.5">
+    <parameters>
+        <property>
+            <name>sourcePath</name>
+            <description>the working dir base path</description>
+        </property>
+        <property>
+            <name>targetPath</name>
+            <description>the graph Raw base path</description>
+        </property>
+    </parameters>
+
+    <start to="ExtractEntities"/>
+
+    <kill name="Kill">
+        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
+    </kill>
+
+    <action name="ExtractEntities">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>Extract entities in raw graph</name>
+            <class>eu.dnetlib.dhp.sx.graph.SparkCreateInputGraph</class>
+            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-memory=${sparkExecutorMemory}
+                --executor-cores=${sparkExecutorCores}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.shuffle.partitions=2000
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+            </spark-opts>
+            <arg>--master</arg><arg>yarn</arg>
+            <arg>--sourcePath</arg><arg>${sourcePath}</arg>
+            <arg>--targetPath</arg><arg>${targetPath}</arg>
+        </spark>
+        <ok to="DropDedupPath"/>
+        <error to="Kill"/>
+    </action>
+
+    <action name="DropDedupPath">
+        <fs>
+            <delete path='${targetPath}/dedup'/>
+            <mkdir path='${targetPath}/dedup/'/>
+        </fs>
+        <ok to="GenerateInputGraphForDedup"/>
+        <error to="Kill"/>
+    </action>
+
+    <action name="GenerateInputGraphForDedup">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>Generate Input Graph for deduplication</name>
+            <class>eu.dnetlib.dhp.sx.graph.SparkConvertDatasetToJsonRDD</class>
+            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-memory=${sparkExecutorMemory}
+                --executor-cores=${sparkExecutorCores}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.shuffle.partitions=3000
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+            </spark-opts>
+            <arg>--master</arg><arg>yarn</arg>
+            <arg>--sourcePath</arg><arg>${targetPath}/preprocess</arg>
+            <arg>--targetPath</arg><arg>${targetPath}/dedup</arg>
+        </spark>
+        <ok to="End"/>
+        <error to="Kill"/>
+    </action>
+
+    <end name="End"/>
+</workflow-app>
@@ -1,4 +1,4 @@
-<workflow-app name="Create Raw Graph Step 1: extract Entities in raw graph" xmlns="uri:oozie:workflow:0.5">
+<workflow-app name="Create Scholix final Graph" xmlns="uri:oozie:workflow:0.5">
     <parameters>
         <property>
             <name>sourcePath</name>
@@ -6,48 +6,22 @@
         </property>
         <property>
             <name>targetPath</name>
-            <description>the graph Raw base path</description>
+            <description>the final graph path</description>
         </property>
     </parameters>
 
-    <start to="ExtractEntities"/>
+    <start to="ImportDatasetEntities"/>
 
     <kill name="Kill">
         <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
     </kill>
 
-    <action name="ExtractEntities">
+    <action name="ImportDatasetEntities">
         <spark xmlns="uri:oozie:spark-action:0.2">
             <master>yarn</master>
             <mode>cluster</mode>
-            <name>Extract entities in raw graph</name>
-            <class>eu.dnetlib.dhp.sx.graph.SparkCreateInputGraph</class>
-            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
-            <spark-opts>
-                --executor-memory=${sparkExecutorMemory}
-                --executor-cores=${sparkExecutorCores}
-                --driver-memory=${sparkDriverMemory}
-                --conf spark.extraListeners=${spark2ExtraListeners}
-                --conf spark.sql.shuffle.partitions=2000
-                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
-                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
-                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
-            </spark-opts>
-            <arg>--master</arg><arg>yarn</arg>
-            <arg>--sourcePath</arg><arg>${sourcePath}</arg>
-            <arg>--targetPath</arg><arg>${targetPath}</arg>
-        </spark>
-        <ok to="ResolveRelations"/>
-        <error to="Kill"/>
-    </action>
-
-
-    <action name="ResolveRelations">
-        <spark xmlns="uri:oozie:spark-action:0.2">
-            <master>yarn</master>
-            <mode>cluster</mode>
-            <name>Resolve Relations in raw graph</name>
-            <class>eu.dnetlib.dhp.sx.graph.SparkResolveRelation</class>
+            <name>Import JSONRDD to Dataset kryo</name>
+            <class>eu.dnetlib.dhp.sx.graph.SparkConvertRDDtoDataset</class>
             <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
             <spark-opts>
                 --executor-memory=${sparkExecutorMemory}
@@ -60,9 +34,8 @@
                 --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
             </spark-opts>
             <arg>--master</arg><arg>yarn</arg>
-            <arg>--relationPath</arg><arg>${targetPath}/extracted/relation</arg>
-            <arg>--workingPath</arg><arg>${targetPath}/resolved/</arg>
-            <arg>--entityPath</arg><arg>${targetPath}/dedup</arg>
+            <arg>--sourcePath</arg><arg>${sourcePath}</arg>
+            <arg>--targetPath</arg><arg>${targetPath}</arg>
         </spark>
         <ok to="CreateSummaries"/>
         <error to="Kill"/>
@@ -87,7 +60,7 @@
                 --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
             </spark-opts>
             <arg>--master</arg><arg>yarn</arg>
-            <arg>--sourcePath</arg><arg>${targetPath}/dedup</arg>
+            <arg>--sourcePath</arg><arg>${targetPath}/entities</arg>
             <arg>--targetPath</arg><arg>${targetPath}/provision/summaries</arg>
         </spark>
         <ok to="CreateScholix"/>
@@ -114,7 +87,7 @@
             <arg>--master</arg><arg>yarn</arg>
             <arg>--summaryPath</arg><arg>${targetPath}/provision/summaries</arg>
             <arg>--targetPath</arg><arg>${targetPath}/provision/scholix</arg>
-            <arg>--relationPath</arg><arg>${targetPath}/resolved/resolvedRelation</arg>
+            <arg>--relationPath</arg><arg>${targetPath}/relation</arg>
 
         </spark>
         <ok to="DropJSONPath"/>
@@ -182,9 +155,5 @@
         <ok to="End"/>
         <error to="Kill"/>
     </action>
-
-
-
-
     <end name="End"/>
 </workflow-app>
@@ -0,0 +1,62 @@
+<workflow-app name="Resolve Relation" xmlns="uri:oozie:workflow:0.5">
+    <parameters>
+        <property>
+            <name>entityPath</name>
+            <description>the path of deduplicate Entities</description>
+        </property>
+        <property>
+            <name>relationPath</name>
+            <description>the path of relation unresolved</description>
+        </property>
+        <property>
+            <name>targetPath</name>
+            <description>the path of relation unresolved</description>
+        </property>
+
+    </parameters>
+
+    <start to="DropRelFolder"/>
+
+    <kill name="Kill">
+        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
+    </kill>
+
+
+    <action name="DropRelFolder">
+        <fs>
+            <delete path='${targetPath}/relation'/>
+            <delete path='${targetPath}/relation_resolved'/>
+            <delete path='${targetPath}/resolvedSource'/>
+            <delete path='${targetPath}/resolvedPid'/>
+
+        </fs>
+        <ok to="ResolveRelations"/>
+        <error to="Kill"/>
+    </action>
+
+    <action name="ResolveRelations">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <master>yarn</master>
+            <mode>cluster</mode>
+            <name>Resolve Relations in raw graph</name>
+            <class>eu.dnetlib.dhp.sx.graph.SparkResolveRelation</class>
+            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-memory=${sparkExecutorMemory}
+                --executor-cores=${sparkExecutorCores}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners=${spark2ExtraListeners}
+                --conf spark.sql.shuffle.partitions=3000
+                --conf spark.sql.queryExecutionListeners=${spark2SqlQueryExecutionListeners}
+                --conf spark.yarn.historyServer.address=${spark2YarnHistoryServerAddress}
+                --conf spark.eventLog.dir=${nameNode}${spark2EventLogDir}
+            </spark-opts>
+            <arg>--master</arg><arg>yarn</arg>
+            <arg>--relationPath</arg><arg>${relationPath}</arg>
+            <arg>--workingPath</arg><arg>${targetPath}</arg>
+            <arg>--entityPath</arg><arg>${entityPath}</arg>
+        </spark>
+        <ok to="End"/>
+        <error to="Kill"/>
+    </action>
+    <end name="End"/>
+</workflow-app>
@@ -1,120 +0,0 @@
-<workflow-app name="Create Raw Graph Step 2: Map XML to OAF Entities" xmlns="uri:oozie:workflow:0.5">
-    <parameters>
-        <property>
-            <name>workingPath</name>
-            <description>the working path</description>
-        </property>
-        <property>
-            <name>sparkDriverMemory</name>
-            <description>memory for driver process</description>
-        </property>
-        <property>
-            <name>sparkExecutorMemory</name>
-            <description>memory for individual executor</description>
-        </property>
-    </parameters>
-
-    <start to="ExtractDLIPublication"/>
-
-    <kill name="Kill">
-        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
-    </kill>
-
-    <action name="ExtractDLIPublication">
-        <spark xmlns="uri:oozie:spark-action:0.2">
-            <job-tracker>${jobTracker}</job-tracker>
-            <name-node>${nameNode}</name-node>
-            <master>yarn-cluster</master>
-            <mode>cluster</mode>
-            <name>Extract DLI Entities (Publication)</name>
-            <class>eu.dnetlib.dhp.sx.graph.SparkSplitOafTODLIEntities</class>
-            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
-            <spark-opts>
-                --executor-memory ${sparkExecutorMemory}
-                --executor-cores=${sparkExecutorCores}
-                --driver-memory=${sparkDriverMemory}
-                --conf spark.sql.shuffle.partitions=5000
-                ${sparkExtraOPT}
-            </spark-opts>
-            <arg>-mt</arg> <arg>yarn-cluster</arg>
-            <arg>--workingPath</arg><arg>${workingPath}</arg>
-            <arg>-e</arg><arg>publication</arg>
-        </spark>
-        <ok to="ExtractDLIDataset"/>
-        <error to="Kill"/>
-    </action>
-
-    <action name="ExtractDLIDataset">
-        <spark xmlns="uri:oozie:spark-action:0.2">
-            <job-tracker>${jobTracker}</job-tracker>
-            <name-node>${nameNode}</name-node>
-            <master>yarn-cluster</master>
-            <mode>cluster</mode>
-            <name>Extract DLI Entities (Dataset)</name>
-            <class>eu.dnetlib.dhp.sx.graph.SparkSplitOafTODLIEntities</class>
-            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
-            <spark-opts>
-                --executor-memory ${sparkExecutorMemory}
-                --executor-cores=${sparkExecutorCores}
-                --driver-memory=${sparkDriverMemory}
-                --conf spark.sql.shuffle.partitions=5000
-                ${sparkExtraOPT}
-            </spark-opts>
-            <arg>-mt</arg> <arg>yarn-cluster</arg>
-            <arg>--workingPath</arg><arg>${workingPath}</arg>
-            <arg>-e</arg><arg>dataset</arg>
-        </spark>
-        <ok to="ExtractDLIUnknown"/>
-        <error to="Kill"/>
-    </action>
-
-    <action name="ExtractDLIUnknown">
-        <spark xmlns="uri:oozie:spark-action:0.2">
-            <job-tracker>${jobTracker}</job-tracker>
-            <name-node>${nameNode}</name-node>
-            <master>yarn-cluster</master>
-            <mode>cluster</mode>
-            <name>Extract DLI Entities (Unknown)</name>
-            <class>eu.dnetlib.dhp.sx.graph.SparkSplitOafTODLIEntities</class>
-            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
-            <spark-opts>
-                --executor-memory ${sparkExecutorMemory}
-                --executor-cores=${sparkExecutorCores}
-                --driver-memory=${sparkDriverMemory}
-                --conf spark.sql.shuffle.partitions=5000
-                ${sparkExtraOPT}
-            </spark-opts>
-            <arg>-mt</arg> <arg>yarn-cluster</arg>
-            <arg>--workingPath</arg><arg>${workingPath}</arg>
-            <arg>-e</arg><arg>unknown</arg>
-        </spark>
-        <ok to="ExtractDLIRelation"/>
-        <error to="Kill"/>
-    </action>
-
-    <action name="ExtractDLIRelation">
-        <spark xmlns="uri:oozie:spark-action:0.2">
-            <job-tracker>${jobTracker}</job-tracker>
-            <name-node>${nameNode}</name-node>
-            <master>yarn-cluster</master>
-            <mode>cluster</mode>
-            <name>Extract DLI Entities (Relation)</name>
-            <class>eu.dnetlib.dhp.sx.graph.SparkSplitOafTODLIEntities</class>
-            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
-            <spark-opts>
-                --executor-memory ${sparkExecutorMemory}
-                --executor-cores=${sparkExecutorCores}
-                --driver-memory=${sparkDriverMemory}
-                --conf spark.sql.shuffle.partitions=5000
-                ${sparkExtraOPT}
-            </spark-opts>
-            <arg>-mt</arg> <arg>yarn-cluster</arg>
-            <arg>--workingPath</arg><arg>${workingPath}</arg>
-            <arg>-e</arg><arg>relation</arg>
-        </spark>
-        <ok to="End"/>
-        <error to="Kill"/>
-    </action>
-
-    <end name="End"/>
-</workflow-app>
@@ -1,61 +0,0 @@
-<workflow-app name="Create Raw Graph Final Step: Construct the Scholexplorer Raw Graph" xmlns="uri:oozie:workflow:0.5">
-    <parameters>
-        <property>
-            <name>sourcePath</name>
-            <description>the source path</description>
-        </property>
-        <property>
-            <name>targetPath</name>
-            <description>the source path</description>
-        </property>
-        <property>
-            <name>sparkDriverMemory</name>
-            <description>memory for driver process</description>
-        </property>
-        <property>
-            <name>sparkExecutorMemory</name>
-            <description>memory for individual executor</description>
-        </property>
-        <property>
-            <name>entity</name>
-            <description>the entity to be merged</description>
-        </property>
-    </parameters>
-
-    <start to="DeleteTargetPath"/>
-
-    <kill name="Kill">
-        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
-    </kill>
-
-    <action name="DeleteTargetPath">
-        <fs>
-            <mkdir path="${targetPath}"/>
-
-            <delete path='${targetPath}/${entity}'/>
-        </fs>
-        <ok to="MergeDLIEntities"/>
-        <error to="Kill"/>
-    </action>
-
-    <action name="MergeDLIEntities">
-        <spark xmlns="uri:oozie:spark-action:0.2">
-            <job-tracker>${jobTracker}</job-tracker>
-            <name-node>${nameNode}</name-node>
-            <master>yarn-cluster</master>
-            <mode>cluster</mode>
-            <name>Merge ${entity}</name>
-            <class>eu.dnetlib.dhp.sx.graph.SparkScholexplorerCreateRawGraphJob</class>
-            <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
-            <spark-opts> --executor-memory ${sparkExecutorMemory} --driver-memory=${sparkDriverMemory} ${sparkExtraOPT}</spark-opts>
-            <arg>-mt</arg> <arg>yarn-cluster</arg>
-            <arg>--sourcePath</arg><arg>${sourcePath}/${entity}</arg>
-            <arg>--targetPath</arg><arg>${targetPath}/${entity}</arg>
-            <arg>--entity</arg><arg>${entity}</arg>
-        </spark>
-        <ok to="End"/>
-        <error to="Kill"/>
-    </action>
-
-    <end name="End"/>
-</workflow-app>