[scholix] fixed OpenCitation dump procedure

Claudio Atzori 2022-08-10 11:57:56 +02:00
parent d85ba3c1a9
commit 51ad93e545
1 changed file with 24 additions and 35 deletions


@@ -2,6 +2,7 @@ package eu.dnetlib.dhp.sx.graph
 import com.fasterxml.jackson.databind.ObjectMapper
 import eu.dnetlib.dhp.application.ArgumentApplicationParser
+import eu.dnetlib.dhp.schema.common.ModelConstants
 import eu.dnetlib.dhp.schema.oaf.{OtherResearchProduct, Publication, Relation, Result, Software, Dataset => OafDataset}
 import org.apache.commons.io.IOUtils
 import org.apache.commons.lang3.StringUtils
@@ -99,44 +100,32 @@ object SparkConvertRDDtoDataset {
     log.info("Converting Relation")
-    if (filterRelation != null && StringUtils.isNoneBlank(filterRelation)) {
-      val rddRelation = spark.sparkContext
-        .textFile(s"$sourcePath/relation")
-        .map(s => mapper.readValue(s, classOf[Relation]))
-        .filter(r => r.getDataInfo != null && r.getDataInfo.getDeletedbyinference == false)
-        .filter(r => r.getSource.startsWith("50") && r.getTarget.startsWith("50"))
-        //filter OpenCitations relations
-        .filter(r =>
-          r.getCollectedfrom != null && r.getCollectedfrom.size() > 0 && !r.getCollectedfrom.asScala.exists(k =>
-            "opencitations".equalsIgnoreCase(k.getValue)
-          )
-        )
-        .filter(r => r.getSubRelType != null && r.getSubRelType.equalsIgnoreCase(filterRelation))
-      spark.createDataset(rddRelation).as[Relation].write.mode(SaveMode.Overwrite).save(s"$relPath")
-    } else {
-      val relationSemanticFilter = List(
-        "merges",
-        "ismergedin",
-        "HasAmongTopNSimilarDocuments",
-        "IsAmongTopNSimilarDocuments"
-      )
-      val rddRelation = spark.sparkContext
-        .textFile(s"$sourcePath/relation")
-        .map(s => mapper.readValue(s, classOf[Relation]))
-        .filter(r => r.getDataInfo != null && r.getDataInfo.getDeletedbyinference == false)
-        .filter(r => r.getSource.startsWith("50") && r.getTarget.startsWith("50"))
-        //filter OpenCitations relations
-        .filter(r =>
-          r.getCollectedfrom != null && r.getCollectedfrom.size() > 0 && !r.getCollectedfrom.asScala.exists(k =>
-            "opencitations".equalsIgnoreCase(k.getValue)
-          )
-        )
-        .filter(r => !relationSemanticFilter.exists(k => k.equalsIgnoreCase(r.getRelClass)))
-      spark.createDataset(rddRelation).as[Relation].write.mode(SaveMode.Overwrite).save(s"$relPath")
-    }
+    val relationSemanticFilter = List(
+      ModelConstants.MERGES,
+      ModelConstants.IS_MERGED_IN,
+      ModelConstants.HAS_AMONG_TOP_N_SIMILAR_DOCS,
+      ModelConstants.IS_AMONG_TOP_N_SIMILAR_DOCS
+    )
+    val rddRelation = spark.sparkContext
+      .textFile(s"$sourcePath/relation")
+      .map(s => mapper.readValue(s, classOf[Relation]))
+      .filter(r => r.getDataInfo != null && r.getDataInfo.getDeletedbyinference == false)
+      .filter(r => r.getSource.startsWith("50") && r.getTarget.startsWith("50"))
+      //filter OpenCitations relations
+      .filter(r =>
+        r.getDataInfo.getProvenanceaction != null &&
+          !"sysimport:crosswalk:opencitations".equals(r.getDataInfo.getProvenanceaction.getClassid)
+      )
+      .filter(r => filterRelations(filterRelation, relationSemanticFilter, r))
+    spark.createDataset(rddRelation).as[Relation].write.mode(SaveMode.Overwrite).save(s"$relPath")
+  }
+
+  private def filterRelations(filterRelation: String, relationSemanticFilter: List[String], r: Relation): Boolean = {
+    if (filterRelation != null && StringUtils.isNoneBlank(filterRelation)) {
+      r.getSubRelType != null && r.getSubRelType.equalsIgnoreCase(filterRelation)
+    } else {
+      !relationSemanticFilter.exists(k => k.equalsIgnoreCase(r.getRelClass))
+    }
   }
 }
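
For reference, the behavioural change this diff makes: OpenCitations relations are no longer excluded by matching "opencitations" against the collectedfrom datasource name, but by checking the provenance action classid (sysimport:crosswalk:opencitations); the subRelType/relClass selection moves into a dedicated filterRelations helper, so a single dataset write path serves both the filtered and unfiltered modes. Below is a minimal, self-contained sketch of the two predicates. The Qualifier/DataInfo/Relation case classes are simplified stand-ins for the real eu.dnetlib.dhp.schema.oaf model, the blacklist values mirror the old hard-coded strings rather than the actual ModelConstants definitions, and a plain blank-check replaces StringUtils.isNoneBlank.

// Illustrative sketch only; types are assumptions, not the real OAF classes.
object RelationFilterSketch {

  case class Qualifier(classid: String)
  case class DataInfo(deletedbyinference: Boolean, provenanceaction: Option[Qualifier])
  case class Relation(
    source: String,
    target: String,
    relClass: String,
    subRelType: String, // may be null, mirroring the Java model
    dataInfo: Option[DataInfo]
  )

  // Stand-ins for the ModelConstants values used in the commit.
  val relationSemanticFilter: List[String] =
    List("merges", "isMergedIn", "HasAmongTopNSimilarDocuments", "IsAmongTopNSimilarDocuments")

  // OpenCitations relations are recognised by the provenance action classid
  // rather than by the collectedfrom datasource name; relations with no
  // provenance action at all are dropped too, matching the diff's predicate.
  def notFromOpenCitations(r: Relation): Boolean =
    r.dataInfo
      .flatMap(_.provenanceaction)
      .exists(p => !"sysimport:crosswalk:opencitations".equals(p.classid))

  // Mirror of the extracted filterRelations helper: an explicit subRelType
  // filter wins; otherwise relations whose relClass is blacklisted are dropped.
  def filterRelations(filterRelation: String, r: Relation): Boolean =
    if (filterRelation != null && filterRelation.trim.nonEmpty)
      r.subRelType != null && r.subRelType.equalsIgnoreCase(filterRelation)
    else
      !relationSemanticFilter.exists(_.equalsIgnoreCase(r.relClass))

  def main(args: Array[String]): Unit = {
    val fromOpenCitations = Relation(
      "50|a", "50|b", "Cites", "citation",
      Some(DataInfo(deletedbyinference = false, Some(Qualifier("sysimport:crosswalk:opencitations"))))
    )
    val fromRepository = fromOpenCitations.copy(
      dataInfo = Some(DataInfo(deletedbyinference = false, Some(Qualifier("sysimport:crosswalk:repository"))))
    )
    println(notFromOpenCitations(fromOpenCitations))     // false -> dropped
    println(notFromOpenCitations(fromRepository))        // true  -> kept
    println(filterRelations("citation", fromRepository)) // true: subRelType matches
    println(filterRelations(null, fromRepository))       // true: "Cites" is not blacklisted
  }
}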