forked from D-Net/dnet-hadoop
[DOIBoost Mapping] removed not needed comments
parent de63d29b6f
commit df7ee77c7a
@@ -114,25 +114,6 @@ object SparkProcessMAG {
       .save(s"$workingPath/merge_step_3")
 
-    //no more needed to add the instance to mag records
-    // magPubs= spark.read.load(s"$workingPath/merge_step_2_conference").as[Publication]
-    //  .map(p => (ConversionUtil.extractMagIdentifier(p.getOriginalId.asScala), p)).as[(String, Publication)]
-    //
-    // val paperUrlDataset = spark.read.load(s"$sourcePath/PaperUrls").as[MagPaperUrl].groupBy("PaperId").agg(collect_list(struct("sourceUrl")).as("instances")).as[MagUrl]
-    //
-    //
-    //
-    // logger.info("Phase 5) enrich publication with URL and Instances")
-    // magPubs.joinWith(paperUrlDataset, col("_1").equalTo(paperUrlDataset("PaperId")), "left")
-    //   .map { a: ((String, Publication), MagUrl) => ConversionUtil.addInstances((a._1._2, a._2)) }
-    //   .write.mode(SaveMode.Overwrite)
-    //   .save(s"$workingPath/merge_step_3")
-
-
-    // logger.info("Phase 6) Enrich Publication with description")
-    // val pa = spark.read.load(s"${parser.get("sourcePath")}/PaperAbstractsInvertedIndex").as[MagPaperAbstract]
-    // pa.map(ConversionUtil.transformPaperAbstract).write.mode(SaveMode.Overwrite).save(s"${parser.get("targetPath")}/PaperAbstract")
-
     val paperAbstract = spark.read.load((s"$workingPath/PaperAbstract")).as[MagPaperAbstract]
 
 
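The commented-out Phase 5 removed above relied on a common Spark pattern: collapse the flat (PaperId, sourceUrl) table into one row per paper with collect_list(struct(...)), then left-join the result back onto the publications keyed by MAG id. A minimal, runnable sketch of that aggregation follows; the PaperUrl, UrlInstance, and PaperUrls case classes and the sample rows are hypothetical stand-ins for the project's MagPaperUrl and MagUrl types, not the actual model.

import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.functions.{col, collect_list, struct}

// Hypothetical stand-ins for MagPaperUrl / MagUrl.
case class PaperUrl(PaperId: Long, sourceUrl: String)
case class UrlInstance(sourceUrl: String)
case class PaperUrls(PaperId: Long, instances: Seq[UrlInstance])

object UrlAggregationSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("url-agg-sketch").getOrCreate()
    import spark.implicits._

    val urls = Seq(
      PaperUrl(1L, "http://example.org/paper1-a"),
      PaperUrl(1L, "http://example.org/paper1-b"),
      PaperUrl(2L, "http://example.org/paper2")
    ).toDS()

    // Collapse the flat (PaperId, sourceUrl) table into one row per paper,
    // collecting every URL into a list of structs -- the same shape as the
    // removed paperUrlDataset line.
    val perPaper = urls
      .groupBy("PaperId")
      .agg(collect_list(struct(col("sourceUrl"))).as("instances"))
      .as[PaperUrls]

    perPaper.show(truncate = false)
    spark.stop()
  }
}

The removed code then attached these collected instances to each publication via a left joinWith on the MAG identifier; the left join matters because it keeps publications that have no URL rows at all.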
@@ -171,11 +152,7 @@ object SparkProcessMAG {
       .map(_._2)
       .write.mode(SaveMode.Overwrite).save(s"$targetPath/magPublication")
 
-    // val s:RDD[Publication] = spark.read.load(s"$workingPath/mag_publication").as[Publication]
-    //   .map(p=>Tuple2(p.getId, p)).rdd.reduceByKey((a:Publication, b:Publication) => ConversionUtil.mergePublication(a,b))
-    //   .map(_._2)
-    //
-    // spark.createDataset(s).as[Publication].write.mode(SaveMode.Overwrite).save(s"$targetPath/magPublication")
-
   }
 }
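The commented-out block deleted in this hunk sketched an RDD-based deduplication: key each publication by id, merge duplicate records pairwise with reduceByKey, and keep only the merged values. A minimal sketch of that pattern follows; Pub and merge are hypothetical stand-ins for Publication and ConversionUtil.mergePublication.

import org.apache.spark.sql.SparkSession

// Hypothetical stand-in for the OAF Publication model.
case class Pub(id: String, title: String, abstractText: Option[String])

object MergeByIdSketch {
  // Hypothetical pairwise merge, in the role of ConversionUtil.mergePublication:
  // prefer a's non-empty fields, fall back to b's.
  def merge(a: Pub, b: Pub): Pub =
    Pub(a.id, if (a.title.nonEmpty) a.title else b.title, a.abstractText.orElse(b.abstractText))

  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().master("local[*]").appName("merge-sketch").getOrCreate()
    import spark.implicits._

    val pubs = Seq(
      Pub("50|mag::1", "Title A", None),
      Pub("50|mag::1", "", Some("An abstract recovered from a duplicate record")),
      Pub("50|mag::2", "Title B", None)
    ).toDS()

    // Same shape as the removed block: key by id, merge duplicates pairwise,
    // then keep only the merged records.
    val merged = pubs
      .map(p => (p.id, p))
      .rdd
      .reduceByKey(merge)
      .map(_._2)

    spark.createDataset(merged).show(truncate = false)
    spark.stop()
  }
}

reduceByKey combines values map-side before the shuffle, which is why this shape is generally preferred over groupByKey followed by a fold when duplicates per key are common.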