forked from D-Net/dnet-hadoop

Revert to 4094f2bb9a

parent 9c82d670b8
commit bd9a43cefd
@@ -27,8 +27,8 @@ public class GraphCleaningFunctions extends CleaningFunctions {
public static final int ORCID_LEN = 19;
public static final String CLEANING_REGEX = "(?:\\n|\\r|\\t)";
public static final String INVALID_AUTHOR_REGEX = ".*deactivated.*";
public static final String TITLE_FILTER_REGEX = "(test)|\\W|\\d";
public static final int TITLE_FILTER_RESIDUAL_LENGTH = 5;
public static final String TITLE_FILTER_REGEX = "[.*test.*\\W\\d]";
public static final int TITLE_FILTER_RESIDUAL_LENGTH = 10;

public static <T extends Oaf> T fixVocabularyNames(T value) {
if (value instanceof Datasource) {
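Only the two title-filter constants change in this hunk. As a hedged illustration of how constants of this shape are typically used during cleaning (the actual Java method lives elsewhere in GraphCleaningFunctions and is not part of this diff; the object and method names below are hypothetical, and the sketch is written in Scala to match the rest of this changeset):

object TitleFilterSketch {
  val TITLE_FILTER_REGEX = "(test)|\\W|\\d"
  val TITLE_FILTER_RESIDUAL_LENGTH = 5

  // A title is kept only if, after stripping "test" markers, non-word characters and
  // digits, enough meaningful characters remain.
  def isAcceptableTitle(title: String): Boolean = {
    val residual = title.toLowerCase.replaceAll(TITLE_FILTER_REGEX, "")
    residual.length > TITLE_FILTER_RESIDUAL_LENGTH
  }

  def main(args: Array[String]): Unit = {
    println(isAcceptableTitle("Test 123"))                  // false
    println(isAcceptableTitle("Graph cleaning functions"))  // true
  }
}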
@@ -3,7 +3,7 @@ package eu.dnetlib.dhp.actionmanager.scholix
import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.schema.action.AtomicAction
import eu.dnetlib.dhp.schema.oaf.{Dataset => OafDataset, Oaf, Publication, Software, OtherResearchProduct, Relation}
import eu.dnetlib.dhp.schema.oaf.{Oaf, Dataset => OafDataset,Publication, Software, OtherResearchProduct, Relation}
import org.apache.hadoop.io.Text
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.hadoop.mapred.SequenceFileOutputFormat

@@ -3,7 +3,8 @@ package eu.dnetlib.dhp.datacite
import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.collection.CollectionUtils.fixRelations
import eu.dnetlib.dhp.common.Constants.{MDSTORE_DATA_PATH, MDSTORE_SIZE_PATH}
import eu.dnetlib.dhp.common.Constants.MDSTORE_DATA_PATH
import eu.dnetlib.dhp.common.Constants.MDSTORE_SIZE_PATH
import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup
import eu.dnetlib.dhp.schema.mdstore.{MDStoreVersion, MetadataRecord}
import eu.dnetlib.dhp.schema.oaf.Oaf
@@ -7,7 +7,6 @@ import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JField, JObject, JString}
import org.json4s.jackson.JsonMethods.{compact, parse, render}
import collection.JavaConverters._

object BioDBToOAF {

case class EBILinkItem(id: Long, links: String) {}

@@ -1,9 +1,9 @@
package eu.dnetlib.dhp.sx.bio

import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.collection.CollectionUtils
import eu.dnetlib.dhp.schema.oaf.Oaf
import eu.dnetlib.dhp.sx.bio.BioDBToOAF.ScholixResolved
import BioDBToOAF.ScholixResolved
import eu.dnetlib.dhp.collection.CollectionUtils
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Encoder, Encoders, SaveMode, SparkSession}
@@ -36,13 +36,13 @@ object SparkTransformBioDatabaseToOAF {
import spark.implicits._
database.toUpperCase() match {
case "UNIPROT" =>
spark.createDataset(sc.textFile(dbPath).flatMap(i => BioDBToOAF.uniprotToOAF(i))).flatMap(i => CollectionUtils.fixRelations(i)).filter(i => i != null).write.mode(SaveMode.Overwrite).save(targetPath)
spark.createDataset(sc.textFile(dbPath).flatMap(i => BioDBToOAF.uniprotToOAF(i))).flatMap(i=> CollectionUtils.fixRelations(i)).filter(i => i != null).write.mode(SaveMode.Overwrite).save(targetPath)
case "PDB" =>
spark.createDataset(sc.textFile(dbPath).flatMap(i => BioDBToOAF.pdbTOOaf(i))).flatMap(i => CollectionUtils.fixRelations(i)).filter(i => i != null).write.mode(SaveMode.Overwrite).save(targetPath)
spark.createDataset(sc.textFile(dbPath).flatMap(i => BioDBToOAF.pdbTOOaf(i))).flatMap(i=> CollectionUtils.fixRelations(i)).filter(i => i != null).write.mode(SaveMode.Overwrite).save(targetPath)
case "SCHOLIX" =>
spark.read.load(dbPath).as[ScholixResolved].map(i => BioDBToOAF.scholixResolvedToOAF(i)).flatMap(i => CollectionUtils.fixRelations(i)).filter(i => i != null).write.mode(SaveMode.Overwrite).save(targetPath)
spark.read.load(dbPath).as[ScholixResolved].map(i => BioDBToOAF.scholixResolvedToOAF(i)).flatMap(i=> CollectionUtils.fixRelations(i)).filter(i => i != null).write.mode(SaveMode.Overwrite).save(targetPath)
case "CROSSREF_LINKS" =>
spark.createDataset(sc.textFile(dbPath).map(i => BioDBToOAF.crossrefLinksToOaf(i))).flatMap(i => CollectionUtils.fixRelations(i)).filter(i => i != null).write.mode(SaveMode.Overwrite).save(targetPath)
spark.createDataset(sc.textFile(dbPath).map(i => BioDBToOAF.crossrefLinksToOaf(i))).flatMap(i=> CollectionUtils.fixRelations(i)).filter(i => i != null).write.mode(SaveMode.Overwrite).save(targetPath)
}
}
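Every branch of the match above ends with the same fixRelations/filter/write tail; the diff only touches whitespace inside the lambdas. A minimal sketch of that shared tail, assuming fixRelations returns a collection of Oaf as its use above implies (saveAsOaf is a hypothetical helper name, not something present in the codebase):

import eu.dnetlib.dhp.collection.CollectionUtils
import eu.dnetlib.dhp.schema.oaf.Oaf
import org.apache.spark.sql.{Dataset, Encoder, SaveMode}

object SaveOafSketch {
  // Shared tail of the UNIPROT/PDB/SCHOLIX/CROSSREF_LINKS branches shown above.
  def saveAsOaf(ds: Dataset[Oaf], targetPath: String)(implicit enc: Encoder[Oaf]): Unit =
    ds.flatMap(i => CollectionUtils.fixRelations(i))
      .filter(i => i != null)
      .write.mode(SaveMode.Overwrite)
      .save(targetPath)
}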
@@ -3,7 +3,7 @@ package eu.dnetlib.dhp.sx.bio.ebi
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup
import eu.dnetlib.dhp.schema.oaf.Result
import eu.dnetlib.dhp.sx.bio.pubmed._
import eu.dnetlib.dhp.sx.bio.pubmed.{PMArticle, PMAuthor, PMJournal, PMParser, PubMedToOaf}
import eu.dnetlib.dhp.utils.ISLookupClientFactory
import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration

@@ -1,8 +1,9 @@
package eu.dnetlib.dhp.sx.bio.ebi

import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.sx.bio.BioDBToOAF.EBILinkItem
import eu.dnetlib.dhp.sx.bio.pubmed.{PMArticle, PMAuthor, PMJournal}
import eu.dnetlib.dhp.sx.bio.BioDBToOAF.EBILinkItem
import eu.dnetlib.dhp.sx.bio.pubmed.PMJournal
import org.apache.commons.io.IOUtils
import org.apache.http.client.config.RequestConfig
import org.apache.http.client.methods.HttpGet

@@ -1,10 +1,11 @@
package eu.dnetlib.dhp.sx.bio.ebi

import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.collection.CollectionUtils
import eu.dnetlib.dhp.schema.oaf.Oaf
import eu.dnetlib.dhp.sx.bio.BioDBToOAF
import eu.dnetlib.dhp.sx.bio.BioDBToOAF.EBILinkItem
import BioDBToOAF.EBILinkItem
import eu.dnetlib.dhp.collection.CollectionUtils
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkConf
import org.apache.spark.sql._

@@ -37,7 +38,7 @@ object SparkEBILinksToOaf {
ebLinks.flatMap(j => BioDBToOAF.parse_ebi_links(j.links))
.filter(p => BioDBToOAF.EBITargetLinksFilter(p))
.flatMap(p => BioDBToOAF.convertEBILinksToOaf(p))
.flatMap(i => CollectionUtils.fixRelations(i)).filter(i => i != null)
.flatMap(i=> CollectionUtils.fixRelations(i)).filter(i => i != null)
.write.mode(SaveMode.Overwrite).save(targetPath)
}
}
@@ -4,9 +4,10 @@ import eu.dnetlib.dhp.common.vocabulary.VocabularyGroup
import eu.dnetlib.dhp.schema.common.ModelConstants
import eu.dnetlib.dhp.schema.oaf.utils.{GraphCleaningFunctions, IdentifierFactory, OafMapperUtils, PidType}
import eu.dnetlib.dhp.schema.oaf._
import scala.collection.JavaConverters._

import java.util.regex.Pattern
import collection.JavaConverters._

/**
*
*/

@@ -21,10 +22,10 @@ object PubMedToOaf {
val collectedFrom: KeyValue = OafMapperUtils.keyValue(ModelConstants.EUROPE_PUBMED_CENTRAL_ID, "Europe PubMed Central")

/**
* Cleaning the DOI Applying regex in order to
* remove doi starting with URL
*
* @param doi input DOI
* @return cleaned DOI
*/
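The comment above describes stripping a resolver URL prefix from a DOI. A hedged sketch of that kind of cleaning; the regex and object name are illustrative and not the project's exact implementation:

object DoiCleanSketch {
  // Matches an optional leading resolver prefix such as https://doi.org/ or dx.doi.org/
  private val DoiPrefix = "(?i)^\\s*(https?://)?(dx\\.)?doi\\.org/".r

  def cleanDoi(doi: String): String =
    DoiPrefix.replaceFirstIn(doi.trim, "").toLowerCase

  def main(args: Array[String]): Unit = {
    println(cleanDoi("https://doi.org/10.1000/XYZ123"))  // 10.1000/xyz123
    println(cleanDoi("10.1000/xyz123"))                  // 10.1000/xyz123
  }
}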
@@ -48,7 +49,7 @@ object PubMedToOaf {
* starting from OAF instanceType value
*
* @param cobjQualifier OAF instance type
* @param vocabularies All dnet vocabularies
* @param vocabularies All dnet vocabularies
* @return the correct instance
*/
def createResult(cobjQualifier: Qualifier, vocabularies: VocabularyGroup): Result = {

@@ -64,7 +65,7 @@ object PubMedToOaf {
}

/**
* Mapping the Pubmedjournal info into the OAF Journale
* Mapping the Pubmedjournal info into the OAF Journale
*
* @param j the pubmedJournal
* @return the OAF Journal

@@ -90,8 +91,9 @@ object PubMedToOaf {
* Find vocabulary term into synonyms and term in the vocabulary
*
* @param vocabularyName the input vocabulary name
* @param vocabularies all the vocabularies
* @param term the term to find
* @param vocabularies all the vocabularies
* @param term the term to find
*
* @return the cleaned term value
*/
def getVocabularyTerm(vocabularyName: String, vocabularies: VocabularyGroup, term: String): Qualifier = {

@@ -102,9 +104,10 @@ object PubMedToOaf {

/**
* Map the Pubmed Article into the OAF instance
* Map the Pubmed Article into the OAF instance
*
* @param article the pubmed articles
*
* @param article the pubmed articles
* @param vocabularies the vocabularies
* @return The OAF instance if the mapping did not fail
*/

@@ -182,6 +185,7 @@ object PubMedToOaf {
//--------------------------------------------------------------------------------------

// RESULT MAPPING
//--------------------------------------------------------------------------------------
result.setDateofacceptance(OafMapperUtils.field(GraphCleaningFunctions.cleanDate(article.getDate), dataInfo))
@@ -19,7 +19,7 @@ import eu.dnetlib.dhp.broker.oa.matchers.simple.EnrichMissingPublicationDate;
import eu.dnetlib.dhp.broker.oa.util.UpdateInfo;

@ExtendWith(MockitoExtension.class)
public class UpdateMatcherTest {
class UpdateMatcherTest {

UpdateMatcher<String> matcher = new EnrichMissingPublicationDate();

@@ -11,7 +11,7 @@ import org.junit.jupiter.api.Test;

import eu.dnetlib.broker.objects.OaBrokerMainEntity;

public class EnrichMissingPublicationDateTest {
class EnrichMissingPublicationDateTest {

final EnrichMissingPublicationDate matcher = new EnrichMissingPublicationDate();

@@ -8,7 +8,7 @@ import java.util.Arrays;

import org.junit.jupiter.api.Test;

public class SubscriptionUtilsTest {
class SubscriptionUtilsTest {

@Test
void testVerifyListSimilar() {

@@ -9,7 +9,7 @@ import eu.dnetlib.broker.objects.OaBrokerAuthor;
import eu.dnetlib.broker.objects.OaBrokerMainEntity;
import eu.dnetlib.broker.objects.OaBrokerTypedValue;

public class TrustUtilsTest {
class TrustUtilsTest {

private static final double THRESHOLD = 0.95;
@@ -4,19 +4,20 @@ import eu.dnetlib.dhp.schema.common.ModelConstants
import eu.dnetlib.dhp.schema.oaf._
import eu.dnetlib.dhp.schema.oaf.utils.{IdentifierFactory, OafMapperUtils}
import eu.dnetlib.dhp.utils.DHPUtils
import eu.dnetlib.doiboost.DoiBoostMappingUtil
import eu.dnetlib.doiboost.DoiBoostMappingUtil._
import eu.dnetlib.doiboost.DoiBoostMappingUtil.{decideAccessRight, _}
import org.apache.commons.lang.StringUtils
import org.json4s
import org.json4s.DefaultFormats
import org.json4s.JsonAST._
import org.json4s.JsonAST.{JValue, _}
import org.json4s.jackson.JsonMethods._
import org.slf4j.{Logger, LoggerFactory}

import java.util
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.util.matching.Regex
import java.util

import eu.dnetlib.doiboost.DoiBoostMappingUtil

case class CrossrefDT(doi: String, json:String, timestamp: Long) {}

@@ -6,7 +6,7 @@ import org.apache.commons.io.IOUtils
import org.apache.hadoop.io.{IntWritable, Text}
import org.apache.spark.SparkConf
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{Dataset, Encoder, SaveMode, SparkSession}
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SaveMode, SparkSession}
import org.json4s
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.parse
@@ -17,12 +17,12 @@ object CrossrefDataset {
val logger: Logger = LoggerFactory.getLogger(SparkMapDumpIntoOAF.getClass)

def to_item(input: String): CrossrefDT = {
def to_item(input:String):CrossrefDT = {

implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
lazy val json: json4s.JValue = parse(input)
val ts: Long = (json \ "indexed" \ "timestamp").extract[Long]
val doi: String = DoiBoostMappingUtil.normalizeDoi((json \ "DOI").extract[String])
val ts:Long = (json \ "indexed" \ "timestamp").extract[Long]
val doi:String = DoiBoostMappingUtil.normalizeDoi((json \ "DOI").extract[String])
CrossrefDT(doi, input, ts)

}

@@ -30,6 +30,7 @@ object CrossrefDataset {
def main(args: Array[String]): Unit = {

val conf: SparkConf = new SparkConf()
val parser = new ArgumentApplicationParser(IOUtils.toString(CrossrefDataset.getClass.getResourceAsStream("/eu/dnetlib/dhp/doiboost/crossref_to_dataset_params.json")))
parser.parseArgument(args)

@@ -53,7 +54,7 @@ object CrossrefDataset {
return b

if (a.timestamp > b.timestamp) {
if(a.timestamp >b.timestamp) {
return a
}
b

@@ -65,7 +66,7 @@ object CrossrefDataset {
if (a == null)
return b

if (a.timestamp > b.timestamp) {
if(a.timestamp >b.timestamp) {
return a
}
b

@@ -78,20 +79,20 @@ object CrossrefDataset {
override def finish(reduction: CrossrefDT): CrossrefDT = reduction
}

val workingPath: String = parser.get("workingPath")
val workingPath:String = parser.get("workingPath")

val main_ds: Dataset[CrossrefDT] = spark.read.load(s"$workingPath/crossref_ds").as[CrossrefDT]
val main_ds:Dataset[CrossrefDT] = spark.read.load(s"$workingPath/crossref_ds").as[CrossrefDT]

val update =
spark.createDataset(spark.sparkContext.sequenceFile(s"$workingPath/index_update", classOf[IntWritable], classOf[Text])
.map(i => CrossrefImporter.decompressBlob(i._2.toString))
.map(i => to_item(i)))
spark.createDataset(spark.sparkContext.sequenceFile(s"$workingPath/index_update", classOf[IntWritable], classOf[Text])
.map(i =>CrossrefImporter.decompressBlob(i._2.toString))
.map(i =>to_item(i)))

main_ds.union(update).groupByKey(_.doi)
.agg(crossrefAggregator.toColumn)
.map(s => s._2)
.map(s=>s._2)
.write.mode(SaveMode.Overwrite).save(s"$workingPath/crossref_ds_updated")

}
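The block above unions the existing crossref_ds snapshot with the decompressed update records and keeps, per DOI, the entry with the newest indexed timestamp via the aggregator. A hedged sketch of the same keep-latest logic using reduceGroups instead of the project's Aggregator; CrossrefDT is redeclared locally so the sketch is self-contained:

import org.apache.spark.sql.{Dataset, SaveMode, SparkSession}

object CrossrefDedupSketch {
  case class CrossrefDT(doi: String, json: String, timestamp: Long)

  // For each DOI keep the record with the newest "indexed" timestamp.
  def keepLatest(spark: SparkSession, main: Dataset[CrossrefDT],
                 update: Dataset[CrossrefDT], out: String): Unit = {
    import spark.implicits._
    main.union(update)
      .groupByKey(_.doi)
      .reduceGroups((a, b) => if (a.timestamp > b.timestamp) a else b)
      .map(_._2)
      .write.mode(SaveMode.Overwrite).save(out)
  }
}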
@@ -2,12 +2,17 @@ package eu.dnetlib.doiboost.crossref

import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.doiboost.DoiBoostMappingUtil
import eu.dnetlib.doiboost.crossref.CrossrefDataset.to_item
import eu.dnetlib.doiboost.crossref.UnpackCrtossrefEntries.getClass
import org.apache.hadoop.io.{IntWritable, Text}
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Encoder, Encoders, SaveMode, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{Encoder, Encoders, SaveMode, SparkSession}
import org.json4s
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.parse
import org.json4s.JsonAST.JArray
import org.json4s.jackson.JsonMethods.{compact, parse, render}
import org.slf4j.{Logger, LoggerFactory}

import scala.io.Source

@@ -19,10 +24,11 @@ object GenerateCrossrefDataset {
implicit val mrEncoder: Encoder[CrossrefDT] = Encoders.kryo[CrossrefDT]

def crossrefElement(meta: String): CrossrefDT = {
implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
lazy val json: json4s.JValue = parse(meta)
val doi: String = DoiBoostMappingUtil.normalizeDoi((json \ "DOI").extract[String])
val doi:String = DoiBoostMappingUtil.normalizeDoi((json \ "DOI").extract[String])
val timestamp: Long = (json \ "indexed" \ "timestamp").extract[Long]
CrossrefDT(doi, meta, timestamp)

@@ -45,14 +51,14 @@ object GenerateCrossrefDataset {
import spark.implicits._

val tmp: RDD[String] = sc.textFile(sourcePath, 6000)
val tmp : RDD[String] = sc.textFile(sourcePath,6000)

spark.createDataset(tmp)
.map(entry => crossrefElement(entry))
.write.mode(SaveMode.Overwrite).save(targetPath)
// .map(meta => crossrefElement(meta))
// .toDS.as[CrossrefDT]
// .write.mode(SaveMode.Overwrite).save(targetPath)
// .map(meta => crossrefElement(meta))
// .toDS.as[CrossrefDT]
// .write.mode(SaveMode.Overwrite).save(targetPath)

}
@@ -4,8 +4,10 @@ import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.schema.oaf
import eu.dnetlib.dhp.schema.oaf.{Oaf, Publication, Relation, Dataset => OafDataset}
import org.apache.commons.io.IOUtils

import org.apache.spark.SparkConf
import org.apache.spark.sql._

import org.apache.spark.sql.{Dataset, Encoder, Encoders, SaveMode, SparkSession}
import org.slf4j.{Logger, LoggerFactory}

@@ -2,8 +2,8 @@ package eu.dnetlib.doiboost.crossref

import eu.dnetlib.dhp.application.ArgumentApplicationParser
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.spark.sql.SparkSession
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{Encoder, Encoders, SaveMode, SparkSession}
import org.json4s
import org.json4s.DefaultFormats
import org.json4s.JsonAST.JArray

@@ -17,7 +17,9 @@ object UnpackCrtossrefEntries {
val log: Logger = LoggerFactory.getLogger(UnpackCrtossrefEntries.getClass)

def extractDump(input: String): List[String] = {

def extractDump(input:String):List[String] = {
implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
lazy val json: json4s.JValue = parse(input)

@@ -28,6 +30,7 @@ object UnpackCrtossrefEntries {
}

def main(args: Array[String]): Unit = {
val conf = new SparkConf
val parser = new ArgumentApplicationParser(Source.fromInputStream(getClass.getResourceAsStream("/eu/dnetlib/dhp/doiboost/crossref_dump_reader/generate_dataset_params.json")).mkString)

@@ -42,7 +45,7 @@ object UnpackCrtossrefEntries {
.getOrCreate()
val sc: SparkContext = spark.sparkContext

sc.wholeTextFiles(sourcePath, 6000).flatMap(d => extractDump(d._2))
sc.wholeTextFiles(sourcePath,6000).flatMap(d =>extractDump(d._2))
.saveAsTextFile(targetPath, classOf[GzipCodec])
@@ -5,10 +5,10 @@ import eu.dnetlib.dhp.schema.common.ModelConstants
import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory
import eu.dnetlib.dhp.schema.oaf.{Instance, Journal, Publication, StructuredProperty}
import eu.dnetlib.doiboost.DoiBoostMappingUtil
import eu.dnetlib.doiboost.DoiBoostMappingUtil._
import org.json4s
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.parse
import eu.dnetlib.doiboost.DoiBoostMappingUtil._

import scala.collection.JavaConverters._
import scala.collection.mutable

@@ -3,8 +3,8 @@ package eu.dnetlib.doiboost.mag
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkConf
import org.apache.spark.sql.types._
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.sql.types._
import org.slf4j.{Logger, LoggerFactory}

object SparkImportMagIntoDataset {
@@ -24,13 +24,13 @@ object SparkImportMagIntoDataset {
"Affiliations" -> Tuple2("mag/Affiliations.txt", Seq("AffiliationId:long", "Rank:uint", "NormalizedName:string", "DisplayName:string", "GridId:string", "OfficialPage:string", "WikiPage:string", "PaperCount:long", "PaperFamilyCount:long", "CitationCount:long", "Iso3166Code:string", "Latitude:float?", "Longitude:float?", "CreatedDate:DateTime")),
"AuthorExtendedAttributes" -> Tuple2("mag/AuthorExtendedAttributes.txt", Seq("AuthorId:long", "AttributeType:int", "AttributeValue:string")),
"Authors" -> Tuple2("mag/Authors.txt", Seq("AuthorId:long", "Rank:uint", "NormalizedName:string", "DisplayName:string", "LastKnownAffiliationId:long?", "PaperCount:long", "PaperFamilyCount:long", "CitationCount:long", "CreatedDate:DateTime")),
"ConferenceInstances" -> Tuple2("mag/ConferenceInstances.txt", Seq("ConferenceInstanceId:long", "NormalizedName:string", "DisplayName:string", "ConferenceSeriesId:long", "Location:string", "OfficialUrl:string", "StartDate:DateTime?", "EndDate:DateTime?", "AbstractRegistrationDate:DateTime?", "SubmissionDeadlineDate:DateTime?", "NotificationDueDate:DateTime?", "FinalVersionDueDate:DateTime?", "PaperCount:long", "PaperFamilyCount:long", "CitationCount:long", "Latitude:float?", "Longitude:float?", "CreatedDate:DateTime")),
"ConferenceInstances" -> Tuple2("mag/ConferenceInstances.txt", Seq("ConferenceInstanceId:long", "NormalizedName:string", "DisplayName:string", "ConferenceSeriesId:long", "Location:string", "OfficialUrl:string", "StartDate:DateTime?", "EndDate:DateTime?", "AbstractRegistrationDate:DateTime?", "SubmissionDeadlineDate:DateTime?", "NotificationDueDate:DateTime?", "FinalVersionDueDate:DateTime?", "PaperCount:long", "PaperFamilyCount:long" ,"CitationCount:long", "Latitude:float?", "Longitude:float?", "CreatedDate:DateTime")),
"ConferenceSeries" -> Tuple2("mag/ConferenceSeries.txt", Seq("ConferenceSeriesId:long", "Rank:uint", "NormalizedName:string", "DisplayName:string", "PaperCount:long", "PaperFamilyCount:long", "CitationCount:long", "CreatedDate:DateTime")),
"EntityRelatedEntities" -> Tuple2("advanced/EntityRelatedEntities.txt", Seq("EntityId:long", "EntityType:string", "RelatedEntityId:long", "RelatedEntityType:string", "RelatedType:int", "Score:float")),
"FieldOfStudyChildren" -> Tuple2("advanced/FieldOfStudyChildren.txt", Seq("FieldOfStudyId:long", "ChildFieldOfStudyId:long")),
"FieldOfStudyExtendedAttributes" -> Tuple2("advanced/FieldOfStudyExtendedAttributes.txt", Seq("FieldOfStudyId:long", "AttributeType:int", "AttributeValue:string")),
"FieldsOfStudy" -> Tuple2("advanced/FieldsOfStudy.txt", Seq("FieldOfStudyId:long", "Rank:uint", "NormalizedName:string", "DisplayName:string", "MainType:string", "Level:int", "PaperCount:long", "PaperFamilyCount:long", "CitationCount:long", "CreatedDate:DateTime")),
"Journals" -> Tuple2("mag/Journals.txt", Seq("JournalId:long", "Rank:uint", "NormalizedName:string", "DisplayName:string", "Issn:string", "Publisher:string", "Webpage:string", "PaperCount:long", "PaperFamilyCount:long", "CitationCount:long", "CreatedDate:DateTime")),
"Journals" -> Tuple2("mag/Journals.txt", Seq("JournalId:long", "Rank:uint", "NormalizedName:string", "DisplayName:string", "Issn:string", "Publisher:string", "Webpage:string", "PaperCount:long", "PaperFamilyCount:long" ,"CitationCount:long", "CreatedDate:DateTime")),
"PaperAbstractsInvertedIndex" -> Tuple2("nlp/PaperAbstractsInvertedIndex.txt.*", Seq("PaperId:long", "IndexedAbstract:string")),
"PaperAuthorAffiliations" -> Tuple2("mag/PaperAuthorAffiliations.txt", Seq("PaperId:long", "AuthorId:long", "AffiliationId:long?", "AuthorSequenceNumber:uint", "OriginalAuthor:string", "OriginalAffiliation:string")),
"PaperCitationContexts" -> Tuple2("nlp/PaperCitationContexts.txt", Seq("PaperId:long", "PaperReferenceId:long", "CitationContext:string")),

@@ -75,6 +75,7 @@ object SparkImportMagIntoDataset {
.master(parser.get("master")).getOrCreate()

stream.foreach { case (k, v) =>
val s: StructType = getSchema(k)
val df = spark.read
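The table definitions above encode each column as Name:type, with a trailing question mark for nullable columns, and getSchema (not shown in this diff) turns them into a Spark schema. A hedged sketch of such a conversion; the exact type mapping is an assumption:

import org.apache.spark.sql.types._

object MagSchemaSketch {
  def toStructType(fields: Seq[String]): StructType =
    StructType(fields.map { f =>
      val Array(name, rawType) = f.split(":", 2)
      val nullable = rawType.endsWith("?")
      val dataType = rawType.stripSuffix("?") match {
        case "long" | "ulong" => LongType
        case "int" | "uint"   => IntegerType
        case "float"          => FloatType
        case "DateTime"       => DateType
        case _                => StringType // "string" and anything unrecognised
      }
      StructField(name, dataType, nullable)
    })

  def main(args: Array[String]): Unit =
    println(toStructType(Seq("AffiliationId:long", "Rank:uint", "Latitude:float?", "CreatedDate:DateTime")))
}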
@@ -5,16 +5,19 @@ import eu.dnetlib.dhp.schema.oaf.Publication
import eu.dnetlib.doiboost.DoiBoostMappingUtil
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkConf
import org.apache.spark.sql.functions.{col, collect_list, struct}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions._
import org.apache.spark.sql._
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.JavaConverters._

object SparkProcessMAG {

def getDistinctResults(d: Dataset[MagPapers]): Dataset[MagPapers] = {
def getDistinctResults (d:Dataset[MagPapers]):Dataset[MagPapers]={
d.where(col("Doi").isNotNull)
.groupByKey(mp => DoiBoostMappingUtil.normalizeDoi(mp.Doi))(Encoders.STRING)
.reduceGroups((p1: MagPapers, p2: MagPapers) => ConversionUtil.choiceLatestMagArtitcle(p1, p2))
.reduceGroups((p1:MagPapers,p2:MagPapers) => ConversionUtil.choiceLatestMagArtitcle(p1,p2))
.map(_._2)(Encoders.product[MagPapers])
.map(mp => {
new MagPapers(mp.PaperId, mp.Rank, DoiBoostMappingUtil.normalizeDoi(mp.Doi),
@@ -95,13 +98,13 @@ object SparkProcessMAG {

var magPubs: Dataset[(String, Publication)] =
spark.read.load(s"$workingPath/merge_step_2").as[Publication]
.map(p => (ConversionUtil.extractMagIdentifier(p.getOriginalId.asScala), p)).as[(String, Publication)]
.map(p => (ConversionUtil.extractMagIdentifier(p.getOriginalId.asScala), p)).as[(String, Publication)]

val conference = spark.read.load(s"$sourcePath/ConferenceInstances")
.select($"ConferenceInstanceId".as("ci"), $"DisplayName", $"Location", $"StartDate", $"EndDate")
.select($"ConferenceInstanceId".as("ci"), $"DisplayName", $"Location", $"StartDate",$"EndDate" )
val conferenceInstance = conference.joinWith(papers, papers("ConferenceInstanceId").equalTo(conference("ci")))
.select($"_1.ci", $"_1.DisplayName", $"_1.Location", $"_1.StartDate", $"_1.EndDate", $"_2.PaperId").as[MagConferenceInstance]
.select($"_1.ci", $"_1.DisplayName", $"_1.Location", $"_1.StartDate",$"_1.EndDate", $"_2.PaperId").as[MagConferenceInstance]

magPubs.joinWith(conferenceInstance, col("_1").equalTo(conferenceInstance("PaperId")), "left")

@@ -119,7 +122,7 @@ object SparkProcessMAG {

magPubs.joinWith(paperAbstract, col("_1").equalTo(paperAbstract("PaperId")), "left")
.map(item => ConversionUtil.updatePubsWithDescription(item)
).write.mode(SaveMode.Overwrite).save(s"$workingPath/merge_step_4")
).write.mode(SaveMode.Overwrite).save(s"$workingPath/merge_step_4")

logger.info("Phase 7) Enrich Publication with FieldOfStudy")

@@ -145,10 +148,11 @@ object SparkProcessMAG {
spark.read.load(s"$workingPath/mag_publication").as[Publication]
.filter(p => p.getId == null)
.groupByKey(p => p.getId)
.reduceGroups((a: Publication, b: Publication) => ConversionUtil.mergePublication(a, b))
.reduceGroups((a:Publication, b:Publication) => ConversionUtil.mergePublication(a,b))
.map(_._2)
.write.mode(SaveMode.Overwrite).save(s"$targetPath/magPublication")

}
}
@@ -4,16 +4,17 @@ import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.schema.common.ModelConstants
import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory
import eu.dnetlib.dhp.schema.oaf.{Author, DataInfo, Publication}
import eu.dnetlib.dhp.schema.orcid.{AuthorData, OrcidDOI}
import eu.dnetlib.doiboost.DoiBoostMappingUtil
import eu.dnetlib.doiboost.DoiBoostMappingUtil.{createSP, generateDataInfo}
import org.apache.commons.lang.StringUtils
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.JavaConverters._
import org.json4s
import org.json4s.DefaultFormats
import org.json4s.JsonAST._
import org.json4s.jackson.JsonMethods._
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.JavaConverters._

case class ORCIDItem(doi:String, authors:List[OrcidAuthor]){}

@@ -11,10 +11,10 @@ object SparkConvertORCIDToOAF {
val logger: Logger = LoggerFactory.getLogger(SparkConvertORCIDToOAF.getClass)

def run(spark: SparkSession, workingPath: String, targetPath: String): Unit = {
def run(spark:SparkSession, workingPath:String, targetPath:String) :Unit = {
implicit val mapEncoderPubs: Encoder[Publication] = Encoders.kryo[Publication]
import spark.implicits._
val dataset: Dataset[ORCIDItem] = spark.read.load(s"$workingPath/orcidworksWithAuthor").as[ORCIDItem]
val dataset: Dataset[ORCIDItem] =spark.read.load(s"$workingPath/orcidworksWithAuthor").as[ORCIDItem]

logger.info("Converting ORCID to OAF")
dataset.map(o => ORCIDToOAF.convertTOOAF(o)).write.mode(SaveMode.Overwrite).save(targetPath)

@@ -35,8 +35,8 @@ object SparkConvertORCIDToOAF {
val workingPath = parser.get("workingPath")
val targetPath = parser.get("targetPath")

run(spark, workingPath, targetPath)
run(spark,workingPath, targetPath)

}

}
}
@@ -1,45 +1,48 @@
package eu.dnetlib.doiboost.orcid

import com.fasterxml.jackson.databind.{DeserializationFeature, ObjectMapper}
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.oa.merge.AuthorMerger
import eu.dnetlib.dhp.schema.oaf.Publication
import eu.dnetlib.dhp.schema.orcid.OrcidDOI
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.functions.{col, collect_list}
import org.apache.spark.sql._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SaveMode, SparkSession}
import org.slf4j.{Logger, LoggerFactory}

object SparkPreprocessORCID {
val logger: Logger = LoggerFactory.getLogger(SparkConvertORCIDToOAF.getClass)

def fixORCIDItem(item: ORCIDItem): ORCIDItem = {
ORCIDItem(item.doi, item.authors.groupBy(_.oid).map(_._2.head).toList)
def fixORCIDItem(item :ORCIDItem):ORCIDItem = {
ORCIDItem(item.doi, item.authors.groupBy(_.oid).map(_._2.head).toList)

}

def run(spark: SparkSession, sourcePath: String, workingPath: String): Unit = {
def run(spark:SparkSession,sourcePath:String,workingPath:String):Unit = {
import spark.implicits._
implicit val mapEncoderPubs: Encoder[Publication] = Encoders.kryo[Publication]

val inputRDD: RDD[OrcidAuthor] = spark.sparkContext.textFile(s"$sourcePath/authors").map(s => ORCIDToOAF.convertORCIDAuthor(s)).filter(s => s != null).filter(s => ORCIDToOAF.authorValid(s))
val inputRDD:RDD[OrcidAuthor] = spark.sparkContext.textFile(s"$sourcePath/authors").map(s => ORCIDToOAF.convertORCIDAuthor(s)).filter(s => s!= null).filter(s => ORCIDToOAF.authorValid(s))

spark.createDataset(inputRDD).as[OrcidAuthor].write.mode(SaveMode.Overwrite).save(s"$workingPath/author")

val res = spark.sparkContext.textFile(s"$sourcePath/works").flatMap(s => ORCIDToOAF.extractDOIWorks(s)).filter(s => s != null)
val res = spark.sparkContext.textFile(s"$sourcePath/works").flatMap(s => ORCIDToOAF.extractDOIWorks(s)).filter(s => s!= null)

spark.createDataset(res).as[OrcidWork].write.mode(SaveMode.Overwrite).save(s"$workingPath/works")

val authors: Dataset[OrcidAuthor] = spark.read.load(s"$workingPath/author").as[OrcidAuthor]
val authors :Dataset[OrcidAuthor] = spark.read.load(s"$workingPath/author").as[OrcidAuthor]

val works: Dataset[OrcidWork] = spark.read.load(s"$workingPath/works").as[OrcidWork]
val works :Dataset[OrcidWork] = spark.read.load(s"$workingPath/works").as[OrcidWork]

works.joinWith(authors, authors("oid").equalTo(works("oid")))
.map(i => {
.map(i =>{
val doi = i._1.doi
val author = i._2
(doi, author)
}).groupBy(col("_1").alias("doi"))
(doi, author)
}).groupBy(col("_1").alias("doi"))
.agg(collect_list(col("_2")).alias("authors")).as[ORCIDItem]
.map(s => fixORCIDItem(s))
.write.mode(SaveMode.Overwrite).save(s"$workingPath/orcidworksWithAuthor")
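The pipeline above joins works with authors on the ORCID id, groups by DOI and collects the author structs into a list. A hedged sketch of the same shape, with simplified case classes standing in for OrcidWork, OrcidAuthor and ORCIDItem:

import org.apache.spark.sql.{Dataset, SparkSession}
import org.apache.spark.sql.functions.{col, collect_list}

object OrcidGroupSketch {
  case class Work(oid: String, doi: String)
  case class Author(oid: String, name: String)
  case class Item(doi: String, authors: Seq[Author])

  def groupAuthorsByDoi(spark: SparkSession, works: Dataset[Work], authors: Dataset[Author]): Dataset[Item] = {
    import spark.implicits._
    works.joinWith(authors, works("oid") === authors("oid"))
      .map { case (w, a) => (w.doi, a) }          // (doi, author) pairs
      .groupBy(col("_1").alias("doi"))
      .agg(collect_list(col("_2")).alias("authors"))
      .as[Item]
  }
}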
@@ -64,4 +67,4 @@ object SparkPreprocessORCID {

}

}
}

@@ -1,14 +1,16 @@
package eu.dnetlib.doiboost.uw

import eu.dnetlib.dhp.application.ArgumentApplicationParser

import eu.dnetlib.dhp.schema.oaf.Publication
import eu.dnetlib.doiboost.crossref.SparkMapDumpIntoOAF
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql._
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SaveMode, SparkSession}
import org.slf4j.{Logger, LoggerFactory}

object SparkMapUnpayWallToOAF {

def main(args: Array[String]): Unit = {

@@ -30,11 +32,11 @@ object SparkMapUnpayWallToOAF {

val sourcePath = parser.get("sourcePath")
val targetPath = parser.get("targetPath")
val inputRDD: RDD[String] = spark.sparkContext.textFile(s"$sourcePath")
val inputRDD:RDD[String] = spark.sparkContext.textFile(s"$sourcePath")

logger.info("Converting UnpayWall to OAF")

val d: Dataset[Publication] = spark.createDataset(inputRDD.map(UnpayWallToOAF.convertToOAF).filter(p => p != null)).as[Publication]
val d:Dataset[Publication] = spark.createDataset(inputRDD.map(UnpayWallToOAF.convertToOAF).filter(p=>p!=null)).as[Publication]
d.write.mode(SaveMode.Overwrite).save(targetPath)
}
@@ -4,13 +4,14 @@ import eu.dnetlib.dhp.schema.common.ModelConstants
import eu.dnetlib.dhp.schema.oaf.utils.IdentifierFactory
import eu.dnetlib.dhp.schema.oaf.{AccessRight, Instance, OpenAccessRoute, Publication}
import eu.dnetlib.doiboost.DoiBoostMappingUtil
import eu.dnetlib.doiboost.DoiBoostMappingUtil._
import org.json4s
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.parse
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.JavaConverters._
import eu.dnetlib.doiboost.DoiBoostMappingUtil._
import eu.dnetlib.doiboost.uw.UnpayWallToOAF.get_unpaywall_color

@@ -1,4 +1,4 @@
package eu.dnetlib.doiboost
package eu.dnetlib.dhp.doiboost

import eu.dnetlib.dhp.schema.oaf.{Publication, Dataset => OafDataset}
import eu.dnetlib.doiboost.{DoiBoostMappingUtil, HostedByItemType}

@@ -1,4 +1,4 @@
package eu.dnetlib.doiboost
package eu.dnetlib.dhp.doiboost

import eu.dnetlib.doiboost.DoiBoostMappingUtil
import org.junit.jupiter.api.Test
@@ -3,9 +3,9 @@ package eu.dnetlib.doiboost.mag
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Dataset, SparkSession}
import org.codehaus.jackson.map.ObjectMapper
import org.json4s.DefaultFormats
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.Test
import org.json4s.DefaultFormats
import org.slf4j.{Logger, LoggerFactory}

import java.sql.Timestamp

@@ -10,9 +10,10 @@ import org.junit.jupiter.api.io.TempDir
import org.slf4j.{Logger, LoggerFactory}

import java.nio.file.Path
import scala.collection.JavaConversions._
import scala.io.Source

import scala.collection.JavaConversions._

class MappingORCIDToOAFTest {
val logger: Logger = LoggerFactory.getLogger(ORCIDToOAF.getClass)
val mapper = new ObjectMapper()

@@ -3,11 +3,11 @@ package eu.dnetlib.doiboost.uw

import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.schema.oaf.OpenAccessRoute
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.Test
import org.slf4j.{Logger, LoggerFactory}

import scala.io.Source
import org.junit.jupiter.api.Assertions._
import org.slf4j.{Logger, LoggerFactory}

class UnpayWallMappingTest {

@@ -1,8 +1,8 @@
package eu.dnetlib.dhp.oa.graph.hostedbymap

import eu.dnetlib.dhp.oa.graph.hostedbymap.model.EntityInfo
import org.apache.spark.sql.expressions.Aggregator
import org.apache.spark.sql.{Dataset, Encoder, Encoders, TypedColumn}
import org.apache.spark.sql.expressions.Aggregator

case class HostedByItemType(id: String, officialname: String, issn: String, eissn: String, lissn: String, openAccess: Boolean) {}
@@ -2,12 +2,13 @@ package eu.dnetlib.dhp.oa.graph.hostedbymap

import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.oa.graph.hostedbymap.SparkApplyHostedByMapToResult.{applyHBtoPubs, getClass}
import eu.dnetlib.dhp.oa.graph.hostedbymap.model.EntityInfo
import eu.dnetlib.dhp.schema.common.ModelConstants
import eu.dnetlib.dhp.schema.oaf.Datasource
import eu.dnetlib.dhp.schema.oaf.{Datasource, Publication}
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkConf
import org.apache.spark.sql._
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SaveMode, SparkSession}
import org.json4s.DefaultFormats
import org.slf4j.{Logger, LoggerFactory}

@@ -51,18 +52,18 @@ object SparkApplyHostedByMapToDatasource {

val mapper = new ObjectMapper()

val dats: Dataset[Datasource] = spark.read.textFile(graphPath + "/datasource")
val dats : Dataset[Datasource] = spark.read.textFile(graphPath + "/datasource")
.map(r => mapper.readValue(r, classOf[Datasource]))

val pinfo: Dataset[EntityInfo] = Aggregators.datasourceToSingleId(spark.read.textFile(preparedInfoPath)
val pinfo : Dataset[EntityInfo] = Aggregators.datasourceToSingleId( spark.read.textFile(preparedInfoPath)
.map(ei => mapper.readValue(ei, classOf[EntityInfo])))

applyHBtoDats(pinfo, dats).write.mode(SaveMode.Overwrite).option("compression", "gzip").json(outputPath)
applyHBtoDats(pinfo, dats).write.mode(SaveMode.Overwrite).option("compression","gzip").json(outputPath)

spark.read.textFile(outputPath)
.write
.mode(SaveMode.Overwrite)
.option("compression", "gzip")
.option("compression","gzip")
.text(graphPath + "/datasource")
}
@@ -5,13 +5,16 @@ import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.oa.graph.hostedbymap.model.EntityInfo
import eu.dnetlib.dhp.schema.common.ModelConstants
import eu.dnetlib.dhp.schema.oaf.utils.OafMapperUtils
import eu.dnetlib.dhp.schema.oaf.{Instance, OpenAccessRoute, Publication}
import eu.dnetlib.dhp.schema.oaf.{Datasource, Instance, OpenAccessRoute, Publication}
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkConf
import org.apache.spark.sql._
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SaveMode, SparkSession}
import org.json4s.DefaultFormats
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.JavaConverters._

object SparkApplyHostedByMapToResult {

def applyHBtoPubs(join: Dataset[EntityInfo], pubs: Dataset[Publication]) = {

@@ -36,7 +39,6 @@ object SparkApplyHostedByMapToResult {
p
})(Encoders.bean(classOf[Publication]))
}

def main(args: Array[String]): Unit = {

@@ -65,18 +67,18 @@ object SparkApplyHostedByMapToResult {
implicit val mapEncoderEinfo: Encoder[EntityInfo] = Encoders.bean(classOf[EntityInfo])
val mapper = new ObjectMapper()

val pubs: Dataset[Publication] = spark.read.textFile(graphPath + "/publication")
val pubs : Dataset[Publication] = spark.read.textFile(graphPath + "/publication")
.map(r => mapper.readValue(r, classOf[Publication]))

val pinfo: Dataset[EntityInfo] = spark.read.textFile(preparedInfoPath)
.map(ei => mapper.readValue(ei, classOf[EntityInfo]))
val pinfo : Dataset[EntityInfo] = spark.read.textFile(preparedInfoPath)
.map(ei => mapper.readValue(ei, classOf[EntityInfo]))

applyHBtoPubs(pinfo, pubs).write.mode(SaveMode.Overwrite).option("compression", "gzip").json(outputPath)
applyHBtoPubs(pinfo, pubs).write.mode(SaveMode.Overwrite).option("compression","gzip").json(outputPath)

spark.read.textFile(outputPath)
.write
.mode(SaveMode.Overwrite)
.option("compression", "gzip")
.option("compression","gzip")
.text(graphPath + "/publication")
}
@@ -3,58 +3,61 @@ package eu.dnetlib.dhp.oa.graph.hostedbymap
import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.oa.graph.hostedbymap.model.EntityInfo

import eu.dnetlib.dhp.schema.oaf.{Journal, Publication}
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkConf
import org.apache.spark.sql._
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SaveMode, SparkSession}
import org.json4s
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.parse
import org.slf4j.{Logger, LoggerFactory}

object SparkPrepareHostedByInfoToApply {

implicit val mapEncoderPInfo: Encoder[EntityInfo] = Encoders.bean(classOf[EntityInfo])

def getList(id: String, j: Journal, name: String): List[EntityInfo] = {
var lst: List[EntityInfo] = List()
def getList(id: String, j: Journal, name: String ) : List[EntityInfo] = {
var lst:List[EntityInfo] = List()

if (j.getIssnLinking != null && !j.getIssnLinking.equals("")) {
if (j.getIssnLinking != null && !j.getIssnLinking.equals("")){
lst = EntityInfo.newInstance(id, j.getIssnLinking, name) :: lst
}
if (j.getIssnOnline != null && !j.getIssnOnline.equals("")) {
if (j.getIssnOnline != null && !j.getIssnOnline.equals("")){
lst = EntityInfo.newInstance(id, j.getIssnOnline, name) :: lst
}
if (j.getIssnPrinted != null && !j.getIssnPrinted.equals("")) {
if (j.getIssnPrinted != null && !j.getIssnPrinted.equals("")){
lst = EntityInfo.newInstance(id, j.getIssnPrinted, name) :: lst
}
lst
}

def prepareResultInfo(spark: SparkSession, publicationPath: String): Dataset[EntityInfo] = {
def prepareResultInfo(spark:SparkSession, publicationPath:String) : Dataset[EntityInfo] = {
implicit val mapEncoderPubs: Encoder[Publication] = Encoders.bean(classOf[Publication])

val mapper = new ObjectMapper()

val dd: Dataset[Publication] = spark.read.textFile(publicationPath)
val dd : Dataset[Publication] = spark.read.textFile(publicationPath)
.map(r => mapper.readValue(r, classOf[Publication]))

dd.filter(p => p.getJournal != null).flatMap(p => getList(p.getId, p.getJournal, ""))
dd.filter(p => p.getJournal != null ).flatMap(p => getList(p.getId, p.getJournal, ""))

}

def toEntityInfo(input: String): EntityInfo = {
def toEntityInfo(input:String): EntityInfo = {
implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats

lazy val json: json4s.JValue = parse(input)
val c: Map[String, HostedByItemType] = json.extract[Map[String, HostedByItemType]]
val c :Map[String,HostedByItemType] = json.extract[Map[String, HostedByItemType]]
toEntityItem(c.keys.head, c.values.head)
}

def toEntityItem(journal_id: String, hbi: HostedByItemType): EntityInfo = {
def toEntityItem(journal_id: String , hbi: HostedByItemType): EntityInfo = {

EntityInfo.newInstance(hbi.id, journal_id, hbi.officialname, hbi.openAccess)
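toEntityInfo above parses one line of the hostedby map: a JSON object with a single journal-identifier key whose value is a HostedByItemType. A hedged, self-contained sketch of that parsing; the sample line and its values are made up:

import org.json4s._
import org.json4s.jackson.JsonMethods.parse

object HostedByLineSketch {
  case class HostedByItemType(id: String, officialname: String, issn: String,
                              eissn: String, lissn: String, openAccess: Boolean)

  // Returns the single (journalId, item) entry carried by one map line.
  def firstEntry(input: String): (String, HostedByItemType) = {
    implicit val formats: DefaultFormats.type = DefaultFormats
    val m = parse(input).extract[Map[String, HostedByItemType]]
    (m.keys.head, m.values.head)
  }

  def main(args: Array[String]): Unit = {
    val line = """{"1234-5678":{"id":"10|issn___print::abc","officialname":"Some Journal","issn":"1234-5678","eissn":"","lissn":"","openAccess":true}}"""
    println(firstEntry(line))
  }
}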
@@ -64,7 +67,7 @@ object SparkPrepareHostedByInfoToApply {
Aggregators.resultToSingleId(res.joinWith(hbm, res.col("journalId").equalTo(hbm.col("journalId")), "left")
.map(t2 => {
val res: EntityInfo = t2._1
if (t2._2 != null) {
if(t2._2 != null ){
val ds = t2._2
res.setHostedById(ds.getId)
res.setOpenAccess(ds.getOpenAccess)

@@ -104,10 +107,10 @@ object SparkPrepareHostedByInfoToApply {

//STEP1: read the hostedbymap and transform it in EntityInfo
val hostedByInfo: Dataset[EntityInfo] = spark.createDataset(spark.sparkContext.textFile(hostedByMapPath)).map(toEntityInfo)
val hostedByInfo:Dataset[EntityInfo] = spark.createDataset(spark.sparkContext.textFile(hostedByMapPath)).map(toEntityInfo)

//STEP2: create association (publication, issn), (publication, eissn), (publication, lissn)
val resultInfoDataset: Dataset[EntityInfo] = prepareResultInfo(spark, graphPath + "/publication")
//STEP2: create association (publication, issn), (publication, eissn), (publication, lissn)
val resultInfoDataset:Dataset[EntityInfo] = prepareResultInfo(spark, graphPath + "/publication")

//STEP3: left join resultInfo with hostedByInfo on journal_id. Reduction of all the results with the same id in just
//one entry (one result could be associated to issn and eissn and so possivly matching more than once against the map)
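STEP3 above left-joins the per-result journal records with the hostedby records on the journal id and, where a match exists, copies over the datasource id and open-access flag. A hedged sketch of that join; EntityRecord is a simplified stand-in for the project's EntityInfo bean:

import org.apache.spark.sql.{Dataset, SparkSession}

object HostedByJoinSketch {
  case class EntityRecord(resultId: String, journalId: String,
                          hostedById: String = "", openAccess: Boolean = false)

  def join(spark: SparkSession, results: Dataset[EntityRecord], hbm: Dataset[EntityRecord]): Dataset[EntityRecord] = {
    import spark.implicits._
    results.joinWith(hbm, results("journalId") === hbm("journalId"), "left")
      .map { case (res, ds) =>
        // Unmatched rows arrive as null on the right side of the left join.
        if (ds != null) res.copy(hostedById = ds.hostedById, openAccess = ds.openAccess) else res
      }
  }
}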
@@ -1,39 +1,41 @@
package eu.dnetlib.dhp.oa.graph.hostedbymap

import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.oa.graph.hostedbymap.model.{DOAJModel, UnibiGoldModel}
import eu.dnetlib.dhp.schema.oaf.Datasource
import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SparkSession}
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SaveMode, SparkSession}
import org.json4s.DefaultFormats
import org.slf4j.{Logger, LoggerFactory}

import com.fasterxml.jackson.databind.ObjectMapper
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.FileSystem
import org.apache.hadoop.fs.Path
import java.io.PrintWriter

import org.apache.hadoop.io.compress.GzipCodec

object SparkProduceHostedByMap {

implicit val tupleForJoinEncoder: Encoder[(String, HostedByItemType)] = Encoders.tuple(Encoders.STRING, Encoders.product[HostedByItemType])

def toHostedByItemType(input: ((HostedByInfo, HostedByInfo), HostedByInfo)): HostedByItemType = {
def toHostedByItemType(input: ((HostedByInfo, HostedByInfo), HostedByInfo)) : HostedByItemType = {
val openaire: HostedByInfo = input._1._1
val doaj: HostedByInfo = input._1._2
val gold: HostedByInfo = input._2
val isOpenAccess: Boolean = doaj == null && gold == null

openaire.journal_id match {
case Constants.ISSN => HostedByItemType(openaire.id, openaire.officialname, openaire.journal_id, "", "", isOpenAccess)
case Constants.EISSN => HostedByItemType(openaire.id, openaire.officialname, "", openaire.journal_id, "", isOpenAccess)
case Constants.ISSNL => HostedByItemType(openaire.id, openaire.officialname, "", "", openaire.journal_id, isOpenAccess)
case Constants.ISSN => HostedByItemType(openaire.id, openaire.officialname, openaire.journal_id, "", "", isOpenAccess)
case Constants.EISSN => HostedByItemType(openaire.id, openaire.officialname, "", openaire.journal_id, "", isOpenAccess)
case Constants.ISSNL => HostedByItemType(openaire.id, openaire.officialname, "", "", openaire.journal_id, isOpenAccess)

// catch the default with a variable so you can print it
case whoa => null
case whoa => null
}
}

@@ -42,7 +44,7 @@ object SparkProduceHostedByMap {

implicit val formats = org.json4s.DefaultFormats

val map: Map[String, HostedByItemType] = Map(input._1 -> input._2)
val map: Map [String, HostedByItemType] = Map (input._1 -> input._2 )

Serialization.write(map)
@@ -50,33 +52,34 @@ object SparkProduceHostedByMap {
}

def getHostedByItemType(id: String, officialname: String, issn: String, eissn: String, issnl: String, oa: Boolean): HostedByItemType = {
if (issn != null) {
if (eissn != null) {
if (issnl != null) {
HostedByItemType(id, officialname, issn, eissn, issnl, oa)
} else {
HostedByItemType(id, officialname, issn, eissn, "", oa)

def getHostedByItemType(id:String, officialname: String, issn:String, eissn:String, issnl:String, oa:Boolean): HostedByItemType = {
if(issn != null){
if(eissn != null){
if(issnl != null){
HostedByItemType(id, officialname, issn, eissn, issnl , oa)
}else{
HostedByItemType(id, officialname, issn, eissn, "" , oa)
}
} else {
if (issnl != null) {
HostedByItemType(id, officialname, issn, "", issnl, oa)
} else {
HostedByItemType(id, officialname, issn, "", "", oa)
}else{
if(issnl != null){
HostedByItemType(id, officialname, issn, "", issnl , oa)
}else{
HostedByItemType(id, officialname, issn, "", "" , oa)
}
}
} else {
if (eissn != null) {
if (issnl != null) {
HostedByItemType(id, officialname, "", eissn, issnl, oa)
} else {
HostedByItemType(id, officialname, "", eissn, "", oa)
}else{
if(eissn != null){
if(issnl != null){
HostedByItemType(id, officialname, "", eissn, issnl , oa)
}else{
HostedByItemType(id, officialname, "", eissn, "" , oa)
}
} else {
if (issnl != null) {
HostedByItemType(id, officialname, "", "", issnl, oa)
} else {
HostedByItemType("", "", "", "", "", oa)
}else{
if(issnl != null){
HostedByItemType(id, officialname, "", "", issnl , oa)
}else{
HostedByItemType("", "", "", "", "" , oa)
}
}
}
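The nested if/else chain above only decides which of issn, eissn and issnl are non-null and falls back to an empty item when all three are missing. A hedged, equivalent formulation for readers, not a proposed change to the file:

object HostedByItemSketch {
  case class HostedByItemType(id: String, officialname: String, issn: String,
                              eissn: String, lissn: String, openAccess: Boolean)

  def getHostedByItemType(id: String, officialname: String, issn: String,
                          eissn: String, issnl: String, oa: Boolean): HostedByItemType =
    if (issn == null && eissn == null && issnl == null)
      HostedByItemType("", "", "", "", "", oa)     // no identifier at all: empty item
    else
      HostedByItemType(id, officialname,
        Option(issn).getOrElse(""),
        Option(eissn).getOrElse(""),
        Option(issnl).getOrElse(""),
        oa)
}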
@@ -87,10 +90,10 @@ object SparkProduceHostedByMap {

return getHostedByItemType(dats.getId, dats.getOfficialname.getValue, dats.getJournal.getIssnPrinted, dats.getJournal.getIssnOnline, dats.getJournal.getIssnLinking, false)
}
HostedByItemType("", "", "", "", "", false)
HostedByItemType("","","","","",false)
}

def oaHostedByDataset(spark: SparkSession, datasourcePath: String): Dataset[HostedByItemType] = {
def oaHostedByDataset(spark:SparkSession, datasourcePath : String) : Dataset[HostedByItemType] = {

import spark.implicits._

@@ -99,10 +102,10 @@ object SparkProduceHostedByMap {

implicit var encoderD = Encoders.kryo[Datasource]

val dd: Dataset[Datasource] = spark.read.textFile(datasourcePath)
val dd : Dataset[Datasource] = spark.read.textFile(datasourcePath)
.map(r => mapper.readValue(r, classOf[Datasource]))

dd.map { ddt => oaToHostedbyItemType(ddt) }.filter(hb => !(hb.id.equals("")))
dd.map{ddt => oaToHostedbyItemType(ddt)}.filter(hb => !(hb.id.equals("")))

}

@@ -112,17 +115,17 @@ object SparkProduceHostedByMap {
}

def goldHostedByDataset(spark: SparkSession, datasourcePath: String): Dataset[HostedByItemType] = {
def goldHostedByDataset(spark:SparkSession, datasourcePath:String) : Dataset[HostedByItemType] = {
import spark.implicits._

implicit val mapEncoderUnibi: Encoder[UnibiGoldModel] = Encoders.kryo[UnibiGoldModel]

val mapper = new ObjectMapper()

val dd: Dataset[UnibiGoldModel] = spark.read.textFile(datasourcePath)
val dd : Dataset[UnibiGoldModel] = spark.read.textFile(datasourcePath)
.map(r => mapper.readValue(r, classOf[UnibiGoldModel]))

dd.map { ddt => goldToHostedbyItemType(ddt) }.filter(hb => !(hb.id.equals("")))
dd.map{ddt => goldToHostedbyItemType(ddt)}.filter(hb => !(hb.id.equals("")))

}
@@ -131,40 +134,41 @@ object SparkProduceHostedByMap {
return getHostedByItemType(Constants.DOAJ, doaj.getJournalTitle, doaj.getIssn, doaj.getEissn, "", true)
}

def doajHostedByDataset(spark: SparkSession, datasourcePath: String): Dataset[HostedByItemType] = {
def doajHostedByDataset(spark:SparkSession, datasourcePath:String) : Dataset[HostedByItemType] = {
import spark.implicits._

implicit val mapEncoderDOAJ: Encoder[DOAJModel] = Encoders.kryo[DOAJModel]

val mapper = new ObjectMapper()

val dd: Dataset[DOAJModel] = spark.read.textFile(datasourcePath)
val dd : Dataset[DOAJModel] = spark.read.textFile(datasourcePath)
.map(r => mapper.readValue(r, classOf[DOAJModel]))

dd.map { ddt => doajToHostedbyItemType(ddt) }.filter(hb => !(hb.id.equals("")))
dd.map{ddt => doajToHostedbyItemType(ddt)}.filter(hb => !(hb.id.equals("")))

}

def toList(input: HostedByItemType): List[(String, HostedByItemType)] = {
var lst: List[(String, HostedByItemType)] = List()
if (!input.issn.equals("")) {
var lst : List[(String, HostedByItemType)] = List()
if(!input.issn.equals("")){
lst = (input.issn, input) :: lst
}
if (!input.eissn.equals("")) {
if(!input.eissn.equals("")){
lst = (input.eissn, input) :: lst
}
if (!input.lissn.equals("")) {
if(!input.lissn.equals("")){
lst = (input.lissn, input) :: lst
}
lst
}

def writeToHDFS(input: Array[String], outputPath: String, hdfsNameNode: String): Unit = {

def writeToHDFS(input: Array[String], outputPath: String, hdfsNameNode : String):Unit = {
val conf = new Configuration()

conf.set("fs.defaultFS", hdfsNameNode)
val fs = FileSystem.get(conf)
val fs= FileSystem.get(conf)
val output = fs.create(new Path(outputPath))
val writer = new PrintWriter(output)
try {
@ -178,6 +182,7 @@ object SparkProduceHostedByMap {
|
|||
}
|
||||
|
||||
|
||||
|
||||
def main(args: Array[String]): Unit = {
|
||||
|
||||
val logger: Logger = LoggerFactory.getLogger(getClass)
|
||||
|
@ -208,7 +213,7 @@ object SparkProduceHostedByMap {
|
|||
.union(doajHostedByDataset(spark, workingDirPath + "/doaj.json"))
|
||||
.flatMap(hbi => toList(hbi))).filter(hbi => hbi._2.id.startsWith("10|"))
|
||||
.map(hbi => toHostedByMap(hbi))(Encoders.STRING)
|
||||
.rdd.saveAsTextFile(outputPath, classOf[GzipCodec])
|
||||
.rdd.saveAsTextFile(outputPath , classOf[GzipCodec])
|
||||
|
||||
|
||||
}
|
|
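Note: the toList function above keys a single HostedByItemType under every ISSN variant that is present, so the final hosted-by map can be probed by print ISSN, e-ISSN or linking ISSN alike. A minimal sketch of that keying step, using a simplified Item case class as an illustrative stand-in for HostedByItemType (the names below are assumptions, not from the codebase):

case class Item(id: String, issn: String, eissn: String, lissn: String)

def keyByIssn(input: Item): List[(String, Item)] =
  List(input.issn, input.eissn, input.lissn)
    .filter(_.nonEmpty)          // keep only the ISSN variants that are actually set
    .map(code => (code, input))  // one (issn -> item) entry per variant

// keyByIssn(Item("10|doaj::x", "1234-5678", "8765-4321", ""))
// => List(("1234-5678", Item(...)), ("8765-4321", Item(...)))
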
@@ -4,14 +4,20 @@ import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.common.HdfsSupport
import eu.dnetlib.dhp.schema.common.ModelSupport
import eu.dnetlib.dhp.schema.mdstore.MDStoreWithInfo
import eu.dnetlib.dhp.schema.oaf.Oaf
import eu.dnetlib.dhp.utils.DHPUtils
import org.apache.commons.io.IOUtils
import org.apache.commons.lang3.StringUtils
import org.apache.http.client.methods.HttpGet
import org.apache.http.impl.client.HttpClients
import org.apache.spark.sql.{Encoder, Encoders, SaveMode, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}
import org.slf4j.LoggerFactory
import scala.io.Source
import scala.collection.JavaConverters._
import scala.io.Source
object CopyHdfsOafSparkApplication {
def main(args: Array[String]): Unit = {
@@ -53,7 +59,7 @@ object CopyHdfsOafSparkApplication {
if (validPaths.nonEmpty) {
val oaf = spark.read.load(validPaths: _*).as[Oaf]
val mapper = new ObjectMapper()
val l = ModelSupport.oafTypes.entrySet.asScala.map(e => e.getKey).toList
val l =ModelSupport.oafTypes.entrySet.asScala.map(e => e.getKey).toList
l.foreach(
e =>
oaf.filter(o => o.getClass.getSimpleName.equalsIgnoreCase(e))

@@ -2,6 +2,7 @@ package eu.dnetlib.dhp.oa.graph.resolution
import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.common.HdfsSupport
import eu.dnetlib.dhp.schema.common.EntityType
import eu.dnetlib.dhp.schema.oaf.{OtherResearchProduct, Publication, Result, Software, Dataset => OafDataset}
import org.apache.commons.io.IOUtils
@@ -13,7 +14,7 @@ import org.slf4j.{Logger, LoggerFactory}
object SparkResolveEntities {
val mapper = new ObjectMapper()
val entities = List(EntityType.dataset, EntityType.publication, EntityType.software, EntityType.otherresearchproduct)
val entities = List(EntityType.dataset,EntityType.publication, EntityType.software, EntityType.otherresearchproduct)
def main(args: Array[String]): Unit = {
val log: Logger = LoggerFactory.getLogger(getClass)
@@ -50,10 +51,10 @@ object SparkResolveEntities {
fs.rename(new Path(s"$workingPath/resolvedGraph/$e"), new Path(s"$graphBasePath/$e"))
}
}
}
def resolveEntities(spark: SparkSession, workingPath: String, unresolvedPath: String) = {
def resolveEntities(spark: SparkSession, workingPath: String, unresolvedPath: String) = {
implicit val resEncoder: Encoder[Result] = Encoders.kryo(classOf[Result])
import spark.implicits._
@@ -70,22 +71,22 @@ object SparkResolveEntities {
}
def deserializeObject(input: String, entity: EntityType): Result = {
def deserializeObject(input:String, entity:EntityType ) :Result = {
entity match {
case EntityType.publication => mapper.readValue(input, classOf[Publication])
case EntityType.dataset => mapper.readValue(input, classOf[OafDataset])
case EntityType.software => mapper.readValue(input, classOf[Software])
case EntityType.otherresearchproduct => mapper.readValue(input, classOf[OtherResearchProduct])
}
entity match {
case EntityType.publication => mapper.readValue(input, classOf[Publication])
case EntityType.dataset => mapper.readValue(input, classOf[OafDataset])
case EntityType.software=> mapper.readValue(input, classOf[Software])
case EntityType.otherresearchproduct=> mapper.readValue(input, classOf[OtherResearchProduct])
}
}
def generateResolvedEntities(spark: SparkSession, workingPath: String, graphBasePath: String) = {
def generateResolvedEntities(spark:SparkSession, workingPath: String, graphBasePath:String) = {
implicit val resEncoder: Encoder[Result] = Encoders.kryo(classOf[Result])
import spark.implicits._
val re: Dataset[Result] = spark.read.load(s"$workingPath/resolvedEntities").as[Result]
val re:Dataset[Result] = spark.read.load(s"$workingPath/resolvedEntities").as[Result]
entities.foreach {
e =>

@@ -3,7 +3,7 @@ package eu.dnetlib.dhp.oa.graph.resolution
import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.common.HdfsSupport
import eu.dnetlib.dhp.schema.oaf.Relation
import eu.dnetlib.dhp.schema.oaf.{Relation, Result}
import eu.dnetlib.dhp.utils.DHPUtils
import org.apache.commons.io.IOUtils
import org.apache.hadoop.fs.{FileSystem, Path}

@@ -0,0 +1,31 @@
package eu.dnetlib.dhp.oa.sx.graphimport
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
object SparkDataciteToOAF {
def main(args: Array[String]): Unit = {
val conf: SparkConf = new SparkConf()
val parser = new ArgumentApplicationParser(IOUtils.toString(getClass.getResourceAsStream("/eu/dnetlib/dhp/sx/ebi/datacite_to_df_params.json")))
parser.parseArgument(args)
val spark: SparkSession =
SparkSession
.builder()
.config(conf)
.appName(getClass.getSimpleName)
.master(parser.get("master")).getOrCreate()
import spark.implicits._
val sc = spark.sparkContext
val inputPath = parser.get("inputPath")
}
}

@@ -2,7 +2,7 @@ package eu.dnetlib.dhp.sx.graph
import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.schema.oaf.Result
import eu.dnetlib.dhp.schema.oaf.{Oaf, OtherResearchProduct, Publication, Result, Software, Dataset => OafDataset}
import org.apache.commons.io.IOUtils
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.spark.SparkConf
@@ -29,13 +29,13 @@ object SparkConvertDatasetToJsonRDD {
val targetPath = parser.get("targetPath")
log.info(s"targetPath -> $targetPath")
val resultObject = List("publication", "dataset", "software", "otherResearchProduct")
val resultObject = List("publication","dataset","software", "otherResearchProduct")
val mapper = new ObjectMapper()
implicit val oafEncoder: Encoder[Result] = Encoders.kryo(classOf[Result])
implicit val oafEncoder: Encoder[Result] = Encoders.kryo(classOf[Result])
resultObject.foreach { item =>
spark.read.load(s"$sourcePath/$item").as[Result].map(r => mapper.writeValueAsString(r))(Encoders.STRING).rdd.saveAsTextFile(s"$targetPath/${item.toLowerCase}", classOf[GzipCodec])
resultObject.foreach{item =>
spark.read.load(s"$sourcePath/$item").as[Result].map(r=> mapper.writeValueAsString(r))(Encoders.STRING).rdd.saveAsTextFile(s"$targetPath/${item.toLowerCase}", classOf[GzipCodec])
}
}

@@ -5,10 +5,10 @@ import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.schema.sx.scholix.Scholix
import eu.dnetlib.dhp.schema.sx.summary.ScholixSummary
import org.apache.commons.io.IOUtils
import org.apache.hadoop.io.compress.GzipCodec
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SparkSession}
import org.slf4j.{Logger, LoggerFactory}
import org.apache.hadoop.io.compress._
object SparkConvertObjectToJson {
@@ -32,8 +32,8 @@ object SparkConvertObjectToJson {
log.info(s"objectType -> $objectType")
implicit val scholixEncoder: Encoder[Scholix] = Encoders.kryo[Scholix]
implicit val summaryEncoder: Encoder[ScholixSummary] = Encoders.kryo[ScholixSummary]
implicit val scholixEncoder :Encoder[Scholix]= Encoders.kryo[Scholix]
implicit val summaryEncoder :Encoder[ScholixSummary]= Encoders.kryo[ScholixSummary]
val mapper = new ObjectMapper
@@ -42,11 +42,11 @@ object SparkConvertObjectToJson {
case "scholix" =>
log.info("Serialize Scholix")
val d: Dataset[Scholix] = spark.read.load(sourcePath).as[Scholix]
d.map(s => mapper.writeValueAsString(s))(Encoders.STRING).rdd.repartition(6000).saveAsTextFile(targetPath, classOf[GzipCodec])
d.map(s => mapper.writeValueAsString(s))(Encoders.STRING).rdd.repartition(6000).saveAsTextFile(targetPath, classOf[GzipCodec])
case "summary" =>
log.info("Serialize Summary")
val d: Dataset[ScholixSummary] = spark.read.load(sourcePath).as[ScholixSummary]
d.map(s => mapper.writeValueAsString(s))(Encoders.STRING).rdd.repartition(1000).saveAsTextFile(targetPath, classOf[GzipCodec])
d.map(s => mapper.writeValueAsString(s))(Encoders.STRING).rdd.repartition(1000).saveAsTextFile(targetPath, classOf[GzipCodec])
}
}

@@ -2,12 +2,11 @@ package eu.dnetlib.dhp.sx.graph
import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.schema.oaf.{OtherResearchProduct, Publication, Relation, Software, Dataset => OafDataset}
import eu.dnetlib.dhp.schema.oaf.{OtherResearchProduct, Publication, Relation, Result, Software, Dataset => OafDataset}
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Encoder, Encoders, SaveMode, SparkSession}
import org.slf4j.{Logger, LoggerFactory}
object SparkConvertRDDtoDataset {
def main(args: Array[String]): Unit = {
@@ -32,39 +31,39 @@ object SparkConvertRDDtoDataset {
val entityPath = s"$t/entities"
val relPath = s"$t/relation"
val mapper = new ObjectMapper()
implicit val datasetEncoder: Encoder[OafDataset] = Encoders.kryo(classOf[OafDataset])
implicit val publicationEncoder: Encoder[Publication] = Encoders.kryo(classOf[Publication])
implicit val relationEncoder: Encoder[Relation] = Encoders.kryo(classOf[Relation])
implicit val orpEncoder: Encoder[OtherResearchProduct] = Encoders.kryo(classOf[OtherResearchProduct])
implicit val softwareEncoder: Encoder[Software] = Encoders.kryo(classOf[Software])
implicit val datasetEncoder: Encoder[OafDataset] = Encoders.kryo(classOf[OafDataset])
implicit val publicationEncoder: Encoder[Publication] = Encoders.kryo(classOf[Publication])
implicit val relationEncoder: Encoder[Relation] = Encoders.kryo(classOf[Relation])
implicit val orpEncoder: Encoder[OtherResearchProduct] = Encoders.kryo(classOf[OtherResearchProduct])
implicit val softwareEncoder: Encoder[Software] = Encoders.kryo(classOf[Software])
log.info("Converting dataset")
val rddDataset = spark.sparkContext.textFile(s"$sourcePath/dataset").map(s => mapper.readValue(s, classOf[OafDataset]))
val rddDataset =spark.sparkContext.textFile(s"$sourcePath/dataset").map(s => mapper.readValue(s, classOf[OafDataset]))
spark.createDataset(rddDataset).as[OafDataset].write.mode(SaveMode.Overwrite).save(s"$entityPath/dataset")
log.info("Converting publication")
val rddPublication = spark.sparkContext.textFile(s"$sourcePath/publication").map(s => mapper.readValue(s, classOf[Publication]))
val rddPublication =spark.sparkContext.textFile(s"$sourcePath/publication").map(s => mapper.readValue(s, classOf[Publication]))
spark.createDataset(rddPublication).as[Publication].write.mode(SaveMode.Overwrite).save(s"$entityPath/publication")
log.info("Converting software")
val rddSoftware = spark.sparkContext.textFile(s"$sourcePath/software").map(s => mapper.readValue(s, classOf[Software]))
val rddSoftware =spark.sparkContext.textFile(s"$sourcePath/software").map(s => mapper.readValue(s, classOf[Software]))
spark.createDataset(rddSoftware).as[Software].write.mode(SaveMode.Overwrite).save(s"$entityPath/software")
log.info("Converting otherresearchproduct")
val rddOtherResearchProduct = spark.sparkContext.textFile(s"$sourcePath/otherresearchproduct").map(s => mapper.readValue(s, classOf[OtherResearchProduct]))
val rddOtherResearchProduct =spark.sparkContext.textFile(s"$sourcePath/otherresearchproduct").map(s => mapper.readValue(s, classOf[OtherResearchProduct]))
spark.createDataset(rddOtherResearchProduct).as[OtherResearchProduct].write.mode(SaveMode.Overwrite).save(s"$entityPath/otherresearchproduct")
log.info("Converting Relation")
val relationSemanticFilter = List("cites", "iscitedby", "merges", "ismergedin")
val relationSemanticFilter = List("cites", "iscitedby","merges", "ismergedin")
val rddRelation = spark.sparkContext.textFile(s"$sourcePath/relation")
val rddRelation =spark.sparkContext.textFile(s"$sourcePath/relation")
.map(s => mapper.readValue(s, classOf[Relation]))
.filter(r => r.getSource.startsWith("50") && r.getTarget.startsWith("50"))
.filter(r=> r.getSource.startsWith("50") && r.getTarget.startsWith("50"))
.filter(r => !relationSemanticFilter.exists(k => k.equalsIgnoreCase(r.getRelClass)))
spark.createDataset(rddRelation).as[Relation].write.mode(SaveMode.Overwrite).save(s"$relPath")

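Note: both variants of SparkConvertRDDtoDataset above follow the same idiom: read one JSON record per line, bind it to an OAF schema class with Jackson, and persist the result as a Kryo-encoded Dataset. A self-contained sketch of that idiom for the Relation type; the paths, app name and master below are placeholders, not values taken from the workflow configuration.

import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.schema.oaf.Relation
import org.apache.spark.sql.{Encoder, Encoders, SaveMode, SparkSession}

object JsonRddToDatasetSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("sketch").master("local[*]").getOrCreate()
    implicit val relEncoder: Encoder[Relation] = Encoders.kryo(classOf[Relation])
    val mapper = new ObjectMapper() // Jackson's ObjectMapper is serializable, so it can be used inside the map closure

    val rdd = spark.sparkContext
      .textFile("/tmp/relation")                        // one JSON record per line (placeholder path)
      .map(s => mapper.readValue(s, classOf[Relation])) // bind each line to the schema class

    spark.createDataset(rdd).write.mode(SaveMode.Overwrite).save("/tmp/relation_ds") // placeholder output path
    spark.stop()
  }
}
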
@@ -1,12 +1,14 @@
package eu.dnetlib.dhp.sx.graph
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import eu.dnetlib.dhp.schema.oaf.{Dataset => OafDataset, _}
import eu.dnetlib.dhp.schema.oaf.{Oaf, OtherResearchProduct, Publication, Relation, Result, Software, Dataset => OafDataset}
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkConf
import org.apache.spark.sql._
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SaveMode, SparkSession}
import org.slf4j.{Logger, LoggerFactory}
object SparkCreateInputGraph {
def main(args: Array[String]): Unit = {
@@ -31,7 +33,7 @@ object SparkCreateInputGraph {
)
implicit val oafEncoder: Encoder[Oaf] = Encoders.kryo(classOf[Oaf])
implicit val oafEncoder: Encoder[Oaf] = Encoders.kryo(classOf[Oaf])
implicit val publicationEncoder: Encoder[Publication] = Encoders.kryo(classOf[Publication])
implicit val datasetEncoder: Encoder[OafDataset] = Encoders.kryo(classOf[OafDataset])
implicit val softwareEncoder: Encoder[Software] = Encoders.kryo(classOf[Software])
@@ -39,13 +41,16 @@ object SparkCreateInputGraph {
implicit val relEncoder: Encoder[Relation] = Encoders.kryo(classOf[Relation])
val sourcePath = parser.get("sourcePath")
log.info(s"sourcePath -> $sourcePath")
val targetPath = parser.get("targetPath")
log.info(s"targetPath -> $targetPath")
val oafDs: Dataset[Oaf] = spark.read.load(s"$sourcePath/*").as[Oaf]
val oafDs:Dataset[Oaf] = spark.read.load(s"$sourcePath/*").as[Oaf]
log.info("Extract Publication")
@@ -65,27 +70,27 @@ object SparkCreateInputGraph {
resultObject.foreach { r =>
log.info(s"Make ${r._1} unique")
makeDatasetUnique(s"$targetPath/extracted/${r._1}", s"$targetPath/preprocess/${r._1}", spark, r._2)
makeDatasetUnique(s"$targetPath/extracted/${r._1}",s"$targetPath/preprocess/${r._1}",spark, r._2)
}
}
def extractEntities[T <: Oaf](oafDs: Dataset[Oaf], targetPath: String, clazz: Class[T], log: Logger): Unit = {
def extractEntities[T <: Oaf ](oafDs:Dataset[Oaf], targetPath:String, clazz:Class[T], log:Logger) :Unit = {
implicit val resEncoder: Encoder[T] = Encoders.kryo(clazz)
implicit val resEncoder: Encoder[T] = Encoders.kryo(clazz)
log.info(s"Extract ${clazz.getSimpleName}")
oafDs.filter(o => o.isInstanceOf[T]).map(p => p.asInstanceOf[T]).write.mode(SaveMode.Overwrite).save(targetPath)
}
def makeDatasetUnique[T <: Result](sourcePath: String, targetPath: String, spark: SparkSession, clazz: Class[T]): Unit = {
def makeDatasetUnique[T <: Result ](sourcePath:String, targetPath:String, spark:SparkSession, clazz:Class[T]) :Unit = {
import spark.implicits._
implicit val resEncoder: Encoder[T] = Encoders.kryo(clazz)
implicit val resEncoder: Encoder[T] = Encoders.kryo(clazz)
val ds: Dataset[T] = spark.read.load(sourcePath).as[T]
val ds:Dataset[T] = spark.read.load(sourcePath).as[T]
ds.groupByKey(_.getId).reduceGroups { (x, y) =>
ds.groupByKey(_.getId).reduceGroups{(x,y) =>
x.mergeFrom(y)
x
}.map(_._2).write.mode(SaveMode.Overwrite).save(targetPath)

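Note: makeDatasetUnique above deduplicates by grouping on getId and folding each group with mergeFrom. A compact sketch of the same groupByKey/reduceGroups idiom; Rec and the "keep the richer record" merge rule are illustrative stand-ins, not the OAF merge semantics.

import org.apache.spark.sql.{Dataset, SparkSession}

case class Rec(id: String, payload: String)

def dedup(spark: SparkSession, ds: Dataset[Rec]): Dataset[Rec] = {
  import spark.implicits._
  ds.groupByKey(_.id)                                                            // one group per identifier
    .reduceGroups((x, y) => if (x.payload.length >= y.payload.length) x else y)  // keep the richer record
    .map(_._2)                                                                   // drop the grouping key
}
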
@@ -9,7 +9,7 @@ import eu.dnetlib.dhp.sx.graph.scholix.ScholixUtils.RelatedEntities
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkConf
import org.apache.spark.sql.functions.count
import org.apache.spark.sql._
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SaveMode, SparkSession}
import org.slf4j.{Logger, LoggerFactory}
object SparkCreateScholix {
@@ -42,7 +42,7 @@ object SparkCreateScholix {
val relationDS: Dataset[(String, Relation)] = spark.read.load(relationPath).as[Relation]
.filter(r => (r.getDataInfo == null || r.getDataInfo.getDeletedbyinference == false) && !r.getRelClass.toLowerCase.contains("merge"))
.filter(r => (r.getDataInfo== null || r.getDataInfo.getDeletedbyinference == false) && !r.getRelClass.toLowerCase.contains("merge"))
.map(r => (r.getSource, r))(Encoders.tuple(Encoders.STRING, relEncoder))
val summaryDS: Dataset[(String, ScholixSummary)] = spark.read.load(summaryPath).as[ScholixSummary]
@@ -51,54 +51,54 @@ object SparkCreateScholix {
relationDS.joinWith(summaryDS, relationDS("_1").equalTo(summaryDS("_1")), "left")
.map { input: ((String, Relation), (String, ScholixSummary)) =>
if (input._1 != null && input._2 != null) {
if (input._1!= null && input._2!= null) {
val rel: Relation = input._1._2
val source: ScholixSummary = input._2._2
(rel.getTarget, ScholixUtils.scholixFromSource(rel, source))
}
else null
else null
}(Encoders.tuple(Encoders.STRING, scholixEncoder))
.filter(r => r != null)
.filter(r => r!= null)
.write.mode(SaveMode.Overwrite).save(s"$targetPath/scholix_from_source")
val scholixSource: Dataset[(String, Scholix)] = spark.read.load(s"$targetPath/scholix_from_source").as[(String, Scholix)](Encoders.tuple(Encoders.STRING, scholixEncoder))
scholixSource.joinWith(summaryDS, scholixSource("_1").equalTo(summaryDS("_1")), "left")
.map { input: ((String, Scholix), (String, ScholixSummary)) =>
if (input._2 == null) {
if (input._2== null) {
null
} else {
val s: Scholix = input._1._2
val target: ScholixSummary = input._2._2
ScholixUtils.generateCompleteScholix(s, target)
}
}.filter(s => s != null).write.mode(SaveMode.Overwrite).save(s"$targetPath/scholix_one_verse")
}.filter(s => s!= null).write.mode(SaveMode.Overwrite).save(s"$targetPath/scholix_one_verse")
val scholix_o_v: Dataset[Scholix] = spark.read.load(s"$targetPath/scholix_one_verse").as[Scholix]
scholix_o_v.flatMap(s => List(s, ScholixUtils.createInverseScholixRelation(s))).as[Scholix]
.map(s => (s.getIdentifier, s))(Encoders.tuple(Encoders.STRING, scholixEncoder))
.map(s=> (s.getIdentifier,s))(Encoders.tuple(Encoders.STRING, scholixEncoder))
.groupByKey(_._1)
.agg(ScholixUtils.scholixAggregator.toColumn)
.map(s => s._2)
.write.mode(SaveMode.Overwrite).save(s"$targetPath/scholix")
val scholix_final: Dataset[Scholix] = spark.read.load(s"$targetPath/scholix").as[Scholix]
val scholix_final:Dataset[Scholix] = spark.read.load(s"$targetPath/scholix").as[Scholix]
val stats: Dataset[(String, String, Long)] = scholix_final.map(s => (s.getSource.getDnetIdentifier, s.getTarget.getObjectType)).groupBy("_1", "_2").agg(count("_1")).as[(String, String, Long)]
val stats:Dataset[(String,String,Long)]= scholix_final.map(s => (s.getSource.getDnetIdentifier, s.getTarget.getObjectType)).groupBy("_1", "_2").agg(count("_1")).as[(String,String,Long)]
stats
.map(s => RelatedEntities(s._1, if ("dataset".equalsIgnoreCase(s._2)) s._3 else 0, if ("publication".equalsIgnoreCase(s._2)) s._3 else 0))
.map(s => RelatedEntities(s._1, if ("dataset".equalsIgnoreCase(s._2)) s._3 else 0, if ("publication".equalsIgnoreCase(s._2)) s._3 else 0 ))
.groupByKey(_.id)
.reduceGroups((a, b) => RelatedEntities(a.id, a.relatedDataset + b.relatedDataset, a.relatedPublication + b.relatedPublication))
.reduceGroups((a, b) => RelatedEntities(a.id, a.relatedDataset+b.relatedDataset, a.relatedPublication+b.relatedPublication))
.map(_._2)
.write.mode(SaveMode.Overwrite).save(s"$targetPath/related_entities")
val relatedEntitiesDS: Dataset[RelatedEntities] = spark.read.load(s"$targetPath/related_entities").as[RelatedEntities].filter(r => r.relatedPublication > 0 || r.relatedDataset > 0)
val relatedEntitiesDS:Dataset[RelatedEntities] = spark.read.load(s"$targetPath/related_entities").as[RelatedEntities].filter(r => r.relatedPublication>0 || r.relatedDataset > 0)
relatedEntitiesDS.joinWith(summaryDS, relatedEntitiesDS("id").equalTo(summaryDS("_1")), "inner").map { i =>
relatedEntitiesDS.joinWith(summaryDS, relatedEntitiesDS("id").equalTo(summaryDS("_1")), "inner").map{i =>
val re = i._1
val sum = i._2._2

@@ -6,7 +6,7 @@ import eu.dnetlib.dhp.schema.sx.summary.ScholixSummary
import eu.dnetlib.dhp.sx.graph.scholix.ScholixUtils
import org.apache.commons.io.IOUtils
import org.apache.spark.SparkConf
import org.apache.spark.sql._
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SaveMode, SparkSession}
import org.slf4j.{Logger, LoggerFactory}
object SparkCreateSummaryObject {
@@ -28,15 +28,15 @@ object SparkCreateSummaryObject {
val targetPath = parser.get("targetPath")
log.info(s"targetPath -> $targetPath")
implicit val resultEncoder: Encoder[Result] = Encoders.kryo[Result]
implicit val oafEncoder: Encoder[Oaf] = Encoders.kryo[Oaf]
implicit val resultEncoder:Encoder[Result] = Encoders.kryo[Result]
implicit val oafEncoder:Encoder[Oaf] = Encoders.kryo[Oaf]
implicit val summaryEncoder: Encoder[ScholixSummary] = Encoders.kryo[ScholixSummary]
implicit val summaryEncoder:Encoder[ScholixSummary] = Encoders.kryo[ScholixSummary]
val ds: Dataset[Result] = spark.read.load(s"$sourcePath/*").as[Result].filter(r => r.getDataInfo == null || r.getDataInfo.getDeletedbyinference == false)
val ds:Dataset[Result] = spark.read.load(s"$sourcePath/*").as[Result].filter(r=>r.getDataInfo== null || r.getDataInfo.getDeletedbyinference== false)
ds.repartition(6000).map(r => ScholixUtils.resultToSummary(r)).filter(s => s != null).write.mode(SaveMode.Overwrite).save(targetPath)
ds.repartition(6000).map(r => ScholixUtils.resultToSummary(r)).filter(s => s!= null).write.mode(SaveMode.Overwrite).save(targetPath)
}

@@ -5,7 +5,6 @@ import org.apache.spark.sql.{Encoder, Encoders}
import org.json4s
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.parse
import java.util.regex.Pattern
import scala.language.postfixOps
import scala.xml.{Elem, Node, XML}

@@ -2,12 +2,13 @@ package eu.dnetlib.dhp.sx.graph.pangaea
import eu.dnetlib.dhp.application.ArgumentApplicationParser
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{Encoder, Encoders, SaveMode, SparkSession}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.{Encoder, Encoders, SaveMode, SparkSession}
import org.slf4j.{Logger, LoggerFactory}
import scala.io.Source
import scala.collection.JavaConverters._
import scala.io.Source
object SparkGeneratePanagaeaDataset {
@@ -27,17 +28,17 @@ object SparkGeneratePanagaeaDataset {
parser.getObjectMap.asScala.foreach(s => logger.info(s"${s._1} -> ${s._2}"))
logger.info("Converting sequential file into Dataset")
val sc: SparkContext = spark.sparkContext
val sc:SparkContext = spark.sparkContext
val workingPath: String = parser.get("workingPath")
val workingPath:String = parser.get("workingPath")
implicit val pangaeaEncoders: Encoder[PangaeaDataModel] = Encoders.kryo[PangaeaDataModel]
val inputRDD: RDD[PangaeaDataModel] = sc.textFile(s"$workingPath/update").map(s => PangaeaUtils.toDataset(s))
val inputRDD:RDD[PangaeaDataModel] = sc.textFile(s"$workingPath/update").map(s => PangaeaUtils.toDataset(s))
spark.createDataset(inputRDD).as[PangaeaDataModel]
.map(s => (s.identifier, s))(Encoders.tuple(Encoders.STRING, pangaeaEncoders))
.groupByKey(_._1)(Encoders.STRING)
.map(s => (s.identifier,s))(Encoders.tuple(Encoders.STRING, pangaeaEncoders))
.groupByKey(_._1)(Encoders.STRING)
.agg(PangaeaUtils.getDatasetAggregator().toColumn)
.map(s => s._2)
.write.mode(SaveMode.Overwrite).save(s"$workingPath/dataset")
@@ -45,4 +46,7 @@ object SparkGeneratePanagaeaDataset {
}
}

@@ -1,5 +1,6 @@
package eu.dnetlib.dhp.sx.graph.scholix
import eu.dnetlib.dhp.schema.oaf.{Publication, Relation, Result, StructuredProperty}
import eu.dnetlib.dhp.schema.sx.scholix._
import eu.dnetlib.dhp.schema.sx.summary.{CollectedFromType, SchemeValue, ScholixSummary, Typology}
@@ -10,23 +11,22 @@ import org.json4s
import org.json4s.DefaultFormats
import org.json4s.jackson.JsonMethods.parse
import scala.io.Source
import scala.collection.JavaConverters._
import scala.io.Source
import scala.language.postfixOps
object ScholixUtils {
val DNET_IDENTIFIER_SCHEMA: String = "DNET Identifier"
val DATE_RELATION_KEY: String = "RelationDate"
val DATE_RELATION_KEY:String = "RelationDate"
case class RelationVocabulary(original:String, inverse:String){}
case class RelationVocabulary(original: String, inverse: String) {}
case class RelatedEntities(id:String, relatedDataset:Long, relatedPublication:Long){}
case class RelatedEntities(id: String, relatedDataset: Long, relatedPublication: Long) {}
val relations: Map[String, RelationVocabulary] = {
val input = Source.fromInputStream(getClass.getResourceAsStream("/eu/dnetlib/dhp/sx/graph/relations.json")).mkString
val relations:Map[String, RelationVocabulary] = {
val input =Source.fromInputStream(getClass.getResourceAsStream("/eu/dnetlib/dhp/sx/graph/relations.json")).mkString
implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
lazy val json: json4s.JValue = parse(input)
@@ -35,12 +35,12 @@ object ScholixUtils {
}
def extractRelationDate(relation: Relation): String = {
def extractRelationDate(relation: Relation):String = {
if (relation.getProperties == null || !relation.getProperties.isEmpty)
if (relation.getProperties== null || !relation.getProperties.isEmpty)
null
else {
val date = relation.getProperties.asScala.find(p => DATE_RELATION_KEY.equalsIgnoreCase(p.getKey)).map(p => p.getValue)
val date =relation.getProperties.asScala.find(p => DATE_RELATION_KEY.equalsIgnoreCase(p.getKey)).map(p => p.getValue)
if (date.isDefined)
date.get
else
@@ -48,9 +48,9 @@ object ScholixUtils {
}
}
def extractRelationDate(summary: ScholixSummary): String = {
def extractRelationDate(summary: ScholixSummary):String = {
if (summary.getDate == null || summary.getDate.isEmpty)
if(summary.getDate== null || summary.getDate.isEmpty)
null
else {
summary.getDate.get(0)
@@ -59,14 +59,15 @@ object ScholixUtils {
}
def inverseRelationShip(rel: ScholixRelationship): ScholixRelationship = {
def inverseRelationShip(rel:ScholixRelationship):ScholixRelationship = {
new ScholixRelationship(rel.getInverse, rel.getSchema, rel.getName)
}
val statsAggregator: Aggregator[(String, String, Long), RelatedEntities, RelatedEntities] = new Aggregator[(String, String, Long), RelatedEntities, RelatedEntities] with Serializable {
val statsAggregator:Aggregator[(String,String, Long), RelatedEntities, RelatedEntities] = new Aggregator[(String,String, Long), RelatedEntities, RelatedEntities] with Serializable {
override def zero: RelatedEntities = null
override def reduce(b: RelatedEntities, a: (String, String, Long)): RelatedEntities = {
@@ -77,16 +78,17 @@ object ScholixUtils {
if (b == null)
RelatedEntities(a._1, relatedDataset, relatedPublication)
else
RelatedEntities(a._1, b.relatedDataset + relatedDataset, b.relatedPublication + relatedPublication)
RelatedEntities(a._1,b.relatedDataset+ relatedDataset, b.relatedPublication+ relatedPublication )
}
override def merge(b1: RelatedEntities, b2: RelatedEntities): RelatedEntities = {
if (b1 != null && b2 != null)
RelatedEntities(b1.id, b1.relatedDataset + b2.relatedDataset, b1.relatedPublication + b2.relatedPublication)
if (b1!= null && b2!= null)
RelatedEntities(b1.id, b1.relatedDataset+ b2.relatedDataset, b1.relatedPublication+ b2.relatedPublication)
else if (b1 != null)
b1
else
if (b1!= null)
b1
else
b2
}
@@ -102,12 +104,12 @@ object ScholixUtils {
override def zero: Scholix = null
def scholix_complete(s: Scholix): Boolean = {
if (s == null || s.getIdentifier == null) {
def scholix_complete(s:Scholix):Boolean ={
if (s== null || s.getIdentifier==null) {
false
} else if (s.getSource == null || s.getTarget == null) {
false
}
false
}
else if (s.getLinkprovider == null || s.getLinkprovider.isEmpty)
false
else
@@ -119,7 +121,7 @@ object ScholixUtils {
}
override def merge(b1: Scholix, b2: Scholix): Scholix = {
if (scholix_complete(b1)) b1 else b2
if (scholix_complete(b1)) b1 else b2
}
override def finish(reduction: Scholix): Scholix = reduction
@@ -130,7 +132,7 @@ object ScholixUtils {
}
def createInverseScholixRelation(scholix: Scholix): Scholix = {
def createInverseScholixRelation(scholix: Scholix):Scholix = {
val s = new Scholix
s.setPublicationDate(scholix.getPublicationDate)
s.setPublisher(scholix.getPublisher)
@@ -142,33 +144,34 @@ object ScholixUtils {
s
}
def extractCollectedFrom(summary: ScholixSummary): List[ScholixEntityId] = {
if (summary.getDatasources != null && !summary.getDatasources.isEmpty) {
val l: List[ScholixEntityId] = summary.getDatasources.asScala.map {
def extractCollectedFrom(summary:ScholixSummary): List[ScholixEntityId] = {
if (summary.getDatasources!= null && !summary.getDatasources.isEmpty) {
val l: List[ScholixEntityId] = summary.getDatasources.asScala.map{
d => new ScholixEntityId(d.getDatasourceName, List(new ScholixIdentifier(d.getDatasourceId, "DNET Identifier", null)).asJava)
}(collection.breakOut)
l
l
} else List()
}
def extractCollectedFrom(relation: Relation): List[ScholixEntityId] = {
def extractCollectedFrom(relation: Relation) : List[ScholixEntityId] = {
if (relation.getCollectedfrom != null && !relation.getCollectedfrom.isEmpty) {
val l: List[ScholixEntityId] = relation.getCollectedfrom.asScala.map {
c =>
new ScholixEntityId(c.getValue, List(new ScholixIdentifier(c.getKey, DNET_IDENTIFIER_SCHEMA, null)).asJava)
new ScholixEntityId(c.getValue, List(new ScholixIdentifier(c.getKey, DNET_IDENTIFIER_SCHEMA,null)).asJava)
}(collection breakOut)
l
} else List()
}
def generateCompleteScholix(scholix: Scholix, target: ScholixSummary): Scholix = {
def generateCompleteScholix(scholix: Scholix, target:ScholixSummary): Scholix = {
val s = new Scholix
s.setPublicationDate(scholix.getPublicationDate)
s.setPublisher(scholix.getPublisher)
@@ -189,28 +192,29 @@ object ScholixUtils {
r.setObjectType(summaryObject.getTypology.toString)
r.setObjectSubType(summaryObject.getSubType)
if (summaryObject.getTitle != null && !summaryObject.getTitle.isEmpty)
r.setTitle(summaryObject.getTitle.get(0))
if (summaryObject.getTitle!= null && !summaryObject.getTitle.isEmpty)
r.setTitle(summaryObject.getTitle.get(0))
if (summaryObject.getAuthor != null && !summaryObject.getAuthor.isEmpty) {
val l: List[ScholixEntityId] = summaryObject.getAuthor.asScala.map(a => new ScholixEntityId(a, null)).toList
if (summaryObject.getAuthor!= null && !summaryObject.getAuthor.isEmpty){
val l:List[ScholixEntityId] = summaryObject.getAuthor.asScala.map(a => new ScholixEntityId(a,null)).toList
if (l.nonEmpty)
r.setCreator(l.asJava)
}
if (summaryObject.getDate != null && !summaryObject.getDate.isEmpty)
if (summaryObject.getDate!= null && !summaryObject.getDate.isEmpty)
r.setPublicationDate(summaryObject.getDate.get(0))
if (summaryObject.getPublisher != null && !summaryObject.getPublisher.isEmpty) {
val plist: List[ScholixEntityId] = summaryObject.getPublisher.asScala.map(p => new ScholixEntityId(p, null)).toList
if (summaryObject.getPublisher!= null && !summaryObject.getPublisher.isEmpty)
{
val plist:List[ScholixEntityId] =summaryObject.getPublisher.asScala.map(p => new ScholixEntityId(p, null)).toList
if (plist.nonEmpty)
r.setPublisher(plist.asJava)
}
if (summaryObject.getDatasources != null && !summaryObject.getDatasources.isEmpty) {
if (summaryObject.getDatasources!= null && !summaryObject.getDatasources.isEmpty) {
val l: List[ScholixCollectedFrom] = summaryObject.getDatasources.asScala.map(c => new ScholixCollectedFrom(
val l:List[ScholixCollectedFrom] = summaryObject.getDatasources.asScala.map(c => new ScholixCollectedFrom(
new ScholixEntityId(c.getDatasourceName, List(new ScholixIdentifier(c.getDatasourceId, DNET_IDENTIFIER_SCHEMA, null)).asJava)
, "collected", "complete"
@@ -224,9 +228,12 @@ object ScholixUtils {
}
def scholixFromSource(relation: Relation, source: ScholixSummary): Scholix = {
if (relation == null || source == null)
def scholixFromSource(relation:Relation, source:ScholixSummary):Scholix = {
if (relation== null || source== null)
return null
val s = new Scholix
@@ -246,9 +253,9 @@ object ScholixUtils {
s.setPublicationDate(d)
if (source.getPublisher != null && !source.getPublisher.isEmpty) {
if (source.getPublisher!= null && !source.getPublisher.isEmpty) {
val l: List[ScholixEntityId] = source.getPublisher.asScala
.map {
.map{
p =>
new ScholixEntityId(p, null)
}(collection.breakOut)
@@ -258,7 +265,7 @@ object ScholixUtils {
}
val semanticRelation = relations.getOrElse(relation.getRelClass.toLowerCase, null)
if (semanticRelation == null)
if (semanticRelation== null)
return null
s.setRelationship(new ScholixRelationship(semanticRelation.original, "datacite", semanticRelation.inverse))
s.setSource(generateScholixResourceFromSummary(source))
@@ -267,8 +274,8 @@ object ScholixUtils {
}
def findURLForPID(pidValue: List[StructuredProperty], urls: List[String]): List[(StructuredProperty, String)] = {
pidValue.map {
def findURLForPID(pidValue:List[StructuredProperty], urls:List[String]):List[(StructuredProperty, String)] = {
pidValue.map{
p =>
val pv = p.getValue
@@ -278,67 +285,67 @@ object ScholixUtils {
}
def extractTypedIdentifierFromInstance(r: Result): List[ScholixIdentifier] = {
def extractTypedIdentifierFromInstance(r:Result):List[ScholixIdentifier] = {
if (r.getInstance() == null || r.getInstance().isEmpty)
return List()
r.getInstance().asScala.filter(i => i.getUrl != null && !i.getUrl.isEmpty)
.filter(i => i.getPid != null && i.getUrl != null)
r.getInstance().asScala.filter(i => i.getUrl!= null && !i.getUrl.isEmpty)
.filter(i => i.getPid!= null && i.getUrl != null)
.flatMap(i => findURLForPID(i.getPid.asScala.toList, i.getUrl.asScala.toList))
.map(i => new ScholixIdentifier(i._1.getValue, i._1.getQualifier.getClassid, i._2)).distinct.toList
}
def resultToSummary(r: Result): ScholixSummary = {
def resultToSummary(r:Result):ScholixSummary = {
val s = new ScholixSummary
s.setId(r.getId)
if (r.getPid == null || r.getPid.isEmpty)
return null
val persistentIdentifiers: List[ScholixIdentifier] = extractTypedIdentifierFromInstance(r)
val persistentIdentifiers:List[ScholixIdentifier] = extractTypedIdentifierFromInstance(r)
if (persistentIdentifiers.isEmpty)
return null
s.setLocalIdentifier(persistentIdentifiers.asJava)
if (r.isInstanceOf[Publication])
if (r.isInstanceOf[Publication] )
s.setTypology(Typology.publication)
else
s.setTypology(Typology.dataset)
s.setSubType(r.getInstance().get(0).getInstancetype.getClassname)
if (r.getTitle != null && r.getTitle.asScala.nonEmpty) {
val titles: List[String] = r.getTitle.asScala.map(t => t.getValue)(collection breakOut)
if (r.getTitle!= null && r.getTitle.asScala.nonEmpty) {
val titles:List[String] =r.getTitle.asScala.map(t => t.getValue)(collection breakOut)
if (titles.nonEmpty)
s.setTitle(titles.asJava)
else
return null
return null
}
if (r.getAuthor != null && !r.getAuthor.isEmpty) {
val authors: List[String] = r.getAuthor.asScala.map(a => a.getFullname)(collection breakOut)
if(r.getAuthor!= null && !r.getAuthor.isEmpty) {
val authors:List[String] = r.getAuthor.asScala.map(a=> a.getFullname)(collection breakOut)
if (authors nonEmpty)
s.setAuthor(authors.asJava)
}
if (r.getInstance() != null) {
val dt: List[String] = r.getInstance().asScala.filter(i => i.getDateofacceptance != null).map(i => i.getDateofacceptance.getValue)(collection.breakOut)
val dt:List[String] = r.getInstance().asScala.filter(i => i.getDateofacceptance != null).map(i => i.getDateofacceptance.getValue)(collection.breakOut)
if (dt.nonEmpty)
s.setDate(dt.distinct.asJava)
}
if (r.getDescription != null && !r.getDescription.isEmpty) {
val d = r.getDescription.asScala.find(f => f != null && f.getValue != null)
if (r.getDescription!= null && !r.getDescription.isEmpty) {
val d = r.getDescription.asScala.find(f => f!= null && f.getValue!=null)
if (d.isDefined)
s.setDescription(d.get.getValue)
}
if (r.getSubject != null && !r.getSubject.isEmpty) {
val subjects: List[SchemeValue] = r.getSubject.asScala.map(s => new SchemeValue(s.getQualifier.getClassname, s.getValue))(collection breakOut)
if (r.getSubject!= null && !r.getSubject.isEmpty) {
val subjects:List[SchemeValue] =r.getSubject.asScala.map(s => new SchemeValue(s.getQualifier.getClassname, s.getValue))(collection breakOut)
if (subjects.nonEmpty)
s.setSubject(subjects.asJava)
}
if (r.getPublisher != null)
if (r.getPublisher!= null)
s.setPublisher(List(r.getPublisher.getValue).asJava)
if (r.getCollectedfrom != null && !r.getCollectedfrom.isEmpty) {
val cf: List[CollectedFromType] = r.getCollectedfrom.asScala.map(c => new CollectedFromType(c.getValue, c.getKey, "complete"))(collection breakOut)
if (r.getCollectedfrom!= null && !r.getCollectedfrom.isEmpty) {
val cf:List[CollectedFromType] = r.getCollectedfrom.asScala.map(c => new CollectedFromType(c.getValue, c.getKey, "complete"))(collection breakOut)
if (cf.nonEmpty)
s.setDatasources(cf.distinct.asJava)
}

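Note: scholixAggregator and statsAggregator above are typed org.apache.spark.sql.expressions.Aggregator implementations. A minimal, self-contained aggregator in the same style, folding (id, objectType, count) triples into per-id totals; RelatedTotals below is an illustrative case class, not the RelatedEntities used by the job.

import org.apache.spark.sql.{Encoder, Encoders}
import org.apache.spark.sql.expressions.Aggregator

case class RelatedTotals(id: String, datasets: Long, publications: Long)

val relatedTotalsAggregator: Aggregator[(String, String, Long), RelatedTotals, RelatedTotals] =
  new Aggregator[(String, String, Long), RelatedTotals, RelatedTotals] with Serializable {
    override def zero: RelatedTotals = RelatedTotals("", 0L, 0L)

    // fold one (id, objectType, count) triple into the running buffer
    override def reduce(b: RelatedTotals, a: (String, String, Long)): RelatedTotals = {
      val ds  = if ("dataset".equalsIgnoreCase(a._2)) a._3 else 0L
      val pub = if ("publication".equalsIgnoreCase(a._2)) a._3 else 0L
      RelatedTotals(a._1, b.datasets + ds, b.publications + pub)
    }

    // combine partial buffers produced on different partitions
    override def merge(b1: RelatedTotals, b2: RelatedTotals): RelatedTotals =
      RelatedTotals(if (b1.id.nonEmpty) b1.id else b2.id, b1.datasets + b2.datasets, b1.publications + b2.publications)

    override def finish(reduction: RelatedTotals): RelatedTotals = reduction
    override def bufferEncoder: Encoder[RelatedTotals] = Encoders.product[RelatedTotals]
    override def outputEncoder: Encoder[RelatedTotals] = Encoders.product[RelatedTotals]
  }
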
@@ -3,9 +3,13 @@ package eu.dnetlib.dhp.oa.graph.hostedbymap
import com.fasterxml.jackson.databind.ObjectMapper
import eu.dnetlib.dhp.oa.graph.hostedbymap.SparkPrepareHostedByInfoToApply.{joinResHBM, prepareResultInfo, toEntityInfo}
import eu.dnetlib.dhp.oa.graph.hostedbymap.model.EntityInfo
import eu.dnetlib.dhp.schema.oaf.{Datasource, OpenAccessRoute, Publication}
import javax.management.openmbean.OpenMBeanAttributeInfo
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SparkSession}
import org.json4s
import org.json4s.DefaultFormats
import eu.dnetlib.dhp.schema.common.ModelConstants
import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue}
import org.junit.jupiter.api.Test

@@ -4,9 +4,10 @@ import eu.dnetlib.dhp.schema.oaf.Datasource
import org.apache.spark.SparkConf
import org.apache.spark.sql.{Dataset, Encoder, Encoders, SparkSession}
import org.json4s.DefaultFormats
import org.json4s.jackson.Serialization.write
import org.junit.jupiter.api.Assertions._
import org.junit.jupiter.api.Assertions.{assertNotNull, assertTrue}
import org.junit.jupiter.api.Test
import org.junit.jupiter.api.Assertions._
import org.json4s.jackson.Serialization.write
class TestPreprocess extends java.io.Serializable{

@@ -159,7 +159,6 @@ class ResolveEntitiesTest extends Serializable {
val datDS:Dataset[Result] = spark.read.text(s"$workingDir/work/resolvedGraph/dataset").as[String].map(s => SparkResolveEntities.deserializeObject(s, EntityType.dataset))
val td = datDS.filter(p => p.getTitle!=null && p.getSubject!=null).filter(p => p.getTitle.asScala.exists(t => t.getValue.equalsIgnoreCase("FAKETITLE"))).count()

@@ -3,6 +3,7 @@ package eu.dnetlib.dhp.sx.pangaea
import eu.dnetlib.dhp.sx.graph.pangaea.PangaeaUtils
import org.junit.jupiter.api.Test
import java.util.TimeZone
import java.text.SimpleDateFormat
import java.util.Date
import scala.io.Source

File diff suppressed because one or more lines are too long