forked from D-Net/dnet-hadoop
[scholix] fixed OpenCitation dump procedure
parent 51ad93e545
commit 2aa16d0432
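The change threads a new optional dumpCitations flag from the provision workflow down to the Scholix jobs: the job parameter spec and the Oozie workflow gain the flag (default false), SparkConvertRDDtoDataset tidies its relation filtering (clearer names, simpler null/blank handling) while keeping the OpenCitations provenance filter, and SparkCreateScholix reads the flag so that, when it is set, summaries whose only links are citation relations are kept in the dump.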
@@ -2,5 +2,6 @@
   {"paramName":"mt", "paramLongName":"master", "paramDescription": "should be local or yarn", "paramRequired": true},
   {"paramName":"r", "paramLongName":"relationPath", "paramDescription": "the relation resolved Path", "paramRequired": true},
   {"paramName":"s", "paramLongName":"summaryPath", "paramDescription": "the summary Path", "paramRequired": true},
-  {"paramName":"t", "paramLongName":"targetPath", "paramDescription": "the target base path of the scholix", "paramRequired": true}
+  {"paramName":"t", "paramLongName":"targetPath", "paramDescription": "the target base path of the scholix", "paramRequired": true},
+  {"paramName":"dc", "paramLongName":"dumpCitations", "paramDescription": "should dump citation relations", "paramRequired": false}
 ]
@@ -16,7 +16,11 @@
         <name>maxNumberOfPid</name>
         <description>filter relation with at least #maxNumberOfPid</description>
     </property>
+    <property>
+        <name>dumpCitations</name>
+        <value>false</value>
+        <description>should dump citation relations</description>
+    </property>
 </parameters>
 
 <start to="ImportDatasetEntities"/>
@@ -98,6 +102,7 @@
     <arg>--summaryPath</arg><arg>${targetPath}/provision/summaries</arg>
     <arg>--targetPath</arg><arg>${targetPath}/provision/scholix</arg>
     <arg>--relationPath</arg><arg>${targetPath}/relation</arg>
+    <arg>--dumpCitations</arg><arg>${dumpCitations}</arg>
 </spark>
 <ok to="DropJSONPath"/>
 <error to="Kill"/>
@@ -1,6 +1,8 @@
 package eu.dnetlib.dhp.sx.graph
 
 import com.fasterxml.jackson.databind.ObjectMapper
+import com.fasterxml.jackson.module.scala.DefaultScalaModule
+import com.fasterxml.jackson.module.scala.experimental.ScalaObjectMapper
 import eu.dnetlib.dhp.application.ArgumentApplicationParser
 import eu.dnetlib.dhp.schema.common.ModelConstants
 import eu.dnetlib.dhp.schema.oaf.{OtherResearchProduct, Publication, Relation, Result, Software, Dataset => OafDataset}
@@ -10,7 +12,8 @@ import org.apache.spark.SparkConf
 import org.apache.spark.sql.{Encoder, Encoders, SaveMode, SparkSession}
 import org.slf4j.{Logger, LoggerFactory}
 
-import scala.collection.JavaConverters._
+import scala.reflect.ClassTag
+import scala.util.Try
 
 object SparkConvertRDDtoDataset {
 
@@ -37,11 +40,12 @@ object SparkConvertRDDtoDataset {
     val t = parser.get("targetPath")
     log.info(s"targetPath -> $t")
 
-    val filterRelation = parser.get("filterRelation")
-    log.info(s"filterRelation -> $filterRelation")
+    val subRelTypeFilter = parser.get("filterRelation")
+    log.info(s"filterRelation -> $subRelTypeFilter")
 
     val entityPath = s"$t/entities"
     val relPath = s"$t/relation"
 
     val mapper = new ObjectMapper()
     implicit val datasetEncoder: Encoder[OafDataset] = Encoders.kryo(classOf[OafDataset])
     implicit val publicationEncoder: Encoder[Publication] = Encoders.kryo(classOf[Publication])
@@ -100,7 +104,7 @@ object SparkConvertRDDtoDataset {
 
     log.info("Converting Relation")
 
-    val relationSemanticFilter = List(
+    val relClassFilter = List(
       ModelConstants.MERGES,
       ModelConstants.IS_MERGED_IN,
       ModelConstants.HAS_AMONG_TOP_N_SIMILAR_DOCS,
@@ -110,22 +114,56 @@ object SparkConvertRDDtoDataset {
     val rddRelation = spark.sparkContext
       .textFile(s"$sourcePath/relation")
       .map(s => mapper.readValue(s, classOf[Relation]))
-      .filter(r => r.getDataInfo != null && r.getDataInfo.getDeletedbyinference == false)
+      .filter(r => r.getDataInfo != null && !r.getDataInfo.getDeletedbyinference)
       .filter(r => r.getSource.startsWith("50") && r.getTarget.startsWith("50"))
+      .filter(r => filterRelations(subRelTypeFilter, relClassFilter, r))
       //filter OpenCitations relations
       .filter(r =>
         r.getDataInfo.getProvenanceaction != null &&
           !"sysimport:crosswalk:opencitations".equals(r.getDataInfo.getProvenanceaction.getClassid)
       )
-      .filter(r => filterRelations(filterRelation, relationSemanticFilter, r))
 
     spark.createDataset(rddRelation).as[Relation].write.mode(SaveMode.Overwrite).save(s"$relPath")
   }
 
-  private def filterRelations(filterRelation: String, relationSemanticFilter: List[String], r: Relation): Boolean = {
-    if (filterRelation != null && StringUtils.isNoneBlank(filterRelation)) {
-      r.getSubRelType != null && r.getSubRelType.equalsIgnoreCase(filterRelation)
+  private def filterRelations(subRelTypeFilter: String, relClassFilter: List[String], r: Relation): Boolean = {
+    if (StringUtils.isNotBlank(subRelTypeFilter)) {
+      subRelTypeFilter.equalsIgnoreCase(r.getSubRelType)
     } else {
-      !relationSemanticFilter.exists(k => k.equalsIgnoreCase(r.getRelClass))
+      !relClassFilter.exists(k => k.equalsIgnoreCase(r.getRelClass))
     }
   }
 
+  /*
+    //TODO: finalise implementation
+    private def processResult[T<: Result](
+      implicit ct: ClassTag[T],
+      log: Logger,
+      spark: SparkSession,
+      sourcePath: String,
+      entityPath: String,
+      clazz: Class[T]
+    ): Unit = {
+      val entityType = clazz.getSimpleName.toLowerCase
+
+      log.info(s"Converting $entityType")
+
+      val mapper = new ObjectMapper() with ScalaObjectMapper
+      mapper.registerModule(DefaultScalaModule)
+
+      val rdd = spark.sparkContext
+        .textFile(s"$sourcePath/$entityType")
+        .map(s => mapper.readValue(s, clazz))
+        .filter(r => r.getDataInfo != null && !r.getDataInfo.getDeletedbyinference);
+
+      implicit val encoder: Encoder[T] = Encoders.kryo(clazz)
+      spark
+        .createDataset(rdd)
+        .as[T]
+        .write
+        .mode(SaveMode.Overwrite)
+        .save(s"$entityPath/$entityType")
+    }
+  */
+
 }
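The reworked filterRelations keeps the explicit subRelType filter as the primary switch and only falls back to the relClass exclusion list when no filter is configured. A minimal standalone sketch of that behaviour, using plain strings in place of Relation fields and illustrative values rather than the ModelConstants (sketch only, not part of the patch):

    object FilterRelationsSketch {
      // keep == true means the relation survives the filter
      def keep(subRelTypeFilter: String, relClassFilter: List[String], subRelType: String, relClass: String): Boolean =
        if (subRelTypeFilter != null && subRelTypeFilter.trim.nonEmpty) // stands in for StringUtils.isNotBlank
          subRelTypeFilter.equalsIgnoreCase(subRelType)                 // keep only the requested subRelType
        else
          !relClassFilter.exists(_.equalsIgnoreCase(relClass))          // drop the listed dedup/similarity classes

      def main(args: Array[String]): Unit = {
        println(keep(null, List("merges", "isMergedIn"), "citation", "Cites"))                    // true
        println(keep(null, List("merges", "isMergedIn"), "dedup", "merges"))                      // false
        println(keep("citation", List("merges", "isMergedIn"), "relationship", "IsSupplementTo")) // false
      }
    }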
@@ -12,6 +12,8 @@ import org.apache.spark.sql.functions.count
 import org.apache.spark.sql._
 import org.slf4j.{Logger, LoggerFactory}
 
+import scala.util.Try
+
 object SparkCreateScholix {
 
   def main(args: Array[String]): Unit = {
@@ -37,6 +39,8 @@ object SparkCreateScholix {
     log.info(s"summaryPath -> $summaryPath")
     val targetPath = parser.get("targetPath")
     log.info(s"targetPath -> $targetPath")
+    val dumpCitations = Try(parser.get("dumpCitations").toBoolean).getOrElse(false)
+    log.info(s"dumpCitations -> $dumpCitations")
 
     implicit val relEncoder: Encoder[Relation] = Encoders.kryo[Relation]
     implicit val summaryEncoder: Encoder[ScholixSummary] = Encoders.kryo[ScholixSummary]
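The dumpCitations argument is optional, so the value is parsed defensively: anything that is not a valid Boolean literal, including a missing value, falls back to false. A minimal sketch of that pattern (helper name and sample values are illustrative):

    import scala.util.Try

    object DumpCitationsFlagSketch {
      def parseFlag(value: String): Boolean = Try(value.toBoolean).getOrElse(false)

      def main(args: Array[String]): Unit = {
        println(parseFlag("true"))  // true
        println(parseFlag("false")) // false
        println(parseFlag(null))    // false: toBoolean throws on null, Try falls back to the default
        println(parseFlag("yes"))   // false: not a Boolean literal
      }
    }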
@@ -138,7 +142,7 @@ object SparkCreateScholix {
     val relatedEntitiesDS: Dataset[RelatedEntities] = spark.read
       .load(s"$targetPath/related_entities")
       .as[RelatedEntities]
-      .filter(r => r.relatedPublication > 0 || r.relatedDataset > 0)
+      .filter(r => dumpCitations || r.relatedPublication > 0 || r.relatedDataset > 0)
 
     relatedEntitiesDS
       .joinWith(summaryDS, relatedEntitiesDS("id").equalTo(summaryDS("_1")), "inner")
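With the flag in place, the related-entities guard changes meaning: when dumpCitations is true every record passes, so summaries whose only links are citation relations still reach the join and end up in the Scholix output. A small sketch of the guard in isolation (helper name and field types are illustrative):

    object RelatedEntitiesGuardSketch {
      def emit(dumpCitations: Boolean, relatedPublication: Long, relatedDataset: Long): Boolean =
        dumpCitations || relatedPublication > 0 || relatedDataset > 0

      def main(args: Array[String]): Unit = {
        println(emit(false, 0, 0)) // false: dropped before the join, as in the previous behaviour
        println(emit(true, 0, 0))  // true: kept when dumping citation relations
      }
    }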