Split the construction of the crossref dataset into two parts. This one just unpacks the tar entries

Miriam Baglioni 2021-06-14 13:39:40 +02:00
parent cf360d7c97
commit 93efe4de82
1 changed file with 9 additions and 16 deletions


@@ -1,6 +1,7 @@
 package eu.dnetlib.doiboost.crossref
 import eu.dnetlib.dhp.application.ArgumentApplicationParser
+import org.apache.hadoop.io.compress.GzipCodec
 import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.sql.{Encoder, Encoders, SaveMode, SparkSession}
 import org.slf4j.{Logger, LoggerFactory}
@@ -11,12 +12,12 @@ import org.json4s.jackson.JsonMethods._
 import scala.io.Source
 
-object GenerateCrossrefDatasetSpark {
+object UnpackCrossrefDumpEntries {
 
-  val log: Logger = LoggerFactory.getLogger(GenerateCrossrefDatasetSpark.getClass)
+  val log: Logger = LoggerFactory.getLogger(UnpackCrossrefDumpEntries.getClass)
 
-  implicit val mrEncoder: Encoder[CrossrefDT] = Encoders.kryo[CrossrefDT]
 
   def extractDump(input:String):List[String] = {
     implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
@@ -26,14 +27,7 @@ object GenerateCrossrefDatasetSpark {
     a.arr.map(s => compact(render(s)))
   }
 
-  def crossrefElement(meta: String): CrossrefDT = {
-    implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
-    lazy val json: json4s.JValue = parse(meta)
-    val doi:String = (json \ "DOI").extract[String]
-    val timestamp: Long = (json \ "indexed" \ "timestamp").extract[Long]
-    new CrossrefDT(doi, meta, timestamp)
-  }
 
   def main(args: Array[String]): Unit = {
     val conf = new SparkConf
@@ -44,18 +38,17 @@ object GenerateCrossrefDatasetSpark {
     val targetPath = parser.get("targetPath")
 
     val spark: SparkSession = SparkSession.builder().config(conf)
-      .appName(GenerateCrossrefDatasetSpark.getClass.getSimpleName)
+      .appName(UnpackCrossrefDumpEntries.getClass.getSimpleName)
       .master(master)
       .getOrCreate()
-    import spark.implicits._
 
     val sc: SparkContext = spark.sparkContext
 
-    sc.wholeTextFiles(sourcePath,2000).flatMap(d =>extractDump(d._2))
-      .map(meta => crossrefElement(meta))
-      .toDS()
-      .write.mode(SaveMode.Overwrite).save(targetPath)
+    sc.wholeTextFiles(sourcePath,6000).flatMap(d =>extractDump(d._2))
+      .saveAsTextFile(targetPath, classOf[GzipCodec]);
   }
 }
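
The hunk at -26,14 hides the middle of extractDump. From the visible fragments, it presumably parses each dump file as one JSON object and flattens its "items" array into one compact JSON string per record; a minimal sketch under that assumption (parse, compact, render and JArray come from the org.json4s imports already in the file):

    def extractDump(input: String): List[String] = {
      implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
      lazy val json: org.json4s.JValue = parse(input)
      // Assumed layout of a Crossref dump file: {"items": [record, record, ...]}
      val a = (json \ "items").extract[JArray]
      a.arr.map(s => compact(render(s)))  // re-serialize each record as one JSON line
    }

Saving the exploded records as gzipped text (saveAsTextFile with GzipCodec) means the expensive wholeTextFiles pass over the dump runs only once, and the follow-up stage can start from plain JSON lines.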
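The Dataset-building half removed here (crossrefElement plus the toDS/write tail) is not shown in this commit; presumably it moves into a companion job. A minimal sketch of what that second stage could look like, reusing the deleted logic verbatim; the object name GenerateCrossrefDatasetSketch and the run signature are illustrative, not from the source:

    package eu.dnetlib.doiboost.crossref

    import org.apache.spark.sql.{Encoder, Encoders, SaveMode, SparkSession}
    import org.json4s
    import org.json4s.DefaultFormats
    import org.json4s.jackson.JsonMethods.parse

    // Hypothetical second stage: reads the gzipped JSON lines produced by
    // UnpackCrossrefDumpEntries and builds the typed CrossrefDT dataset.
    object GenerateCrossrefDatasetSketch {

      implicit val mrEncoder: Encoder[CrossrefDT] = Encoders.kryo[CrossrefDT]

      // Same logic as the crossrefElement method deleted above:
      // pull the DOI and the indexed timestamp out of each record.
      def crossrefElement(meta: String): CrossrefDT = {
        implicit lazy val formats: DefaultFormats.type = org.json4s.DefaultFormats
        lazy val json: json4s.JValue = parse(meta)
        val doi: String = (json \ "DOI").extract[String]
        val timestamp: Long = (json \ "indexed" \ "timestamp").extract[Long]
        new CrossrefDT(doi, meta, timestamp)
      }

      def run(spark: SparkSession, sourcePath: String, targetPath: String): Unit = {
        import spark.implicits._
        // textFile transparently decompresses the .gz parts written by the unpack job
        spark.sparkContext.textFile(sourcePath)
          .map(crossrefElement)
          .toDS()
          .write.mode(SaveMode.Overwrite).save(targetPath)
      }
    }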