[SDG] fixed switch of methods

Miriam Baglioni 2024-07-23 17:12:55 +02:00
parent 62649dc5c4
commit 19806c2ae3
2 changed files with 19 additions and 6 deletions

PrepareSDGSparkJob.java

@@ -73,7 +73,7 @@ public class PrepareSDGSparkJob implements Serializable {
 			});
 	}
 
-	private static void doPrepareoaid(SparkSession spark, String sourcePath, String outputPath) {
+	private static void doPrepare(SparkSession spark, String sourcePath, String outputPath) {
 		Dataset<Row> sdgDataset = spark
 			.read()
 			.format("csv")
@@ -84,7 +84,7 @@ public class PrepareSDGSparkJob implements Serializable {
 			.load(sourcePath);
 
 		sdgDataset
-			.groupByKey((MapFunction<Row, String>) v -> ((String) v.getAs("oaid")).toLowerCase(), Encoders.STRING())
+			.groupByKey((MapFunction<Row, String>) v -> ((String) v.getAs("doi")).toLowerCase(), Encoders.STRING())
 			.mapGroups(
 				(MapGroupsFunction<String, Row, Result>) (k,
 					it) -> getResult(
@@ -100,11 +100,19 @@ public class PrepareSDGSparkJob implements Serializable {
 			.json(outputPath + "/sdg");
 	}
 
-	private static void doPrepare(SparkSession spark, String sourcePath, String outputPath) {
-		Dataset<Row> sdgDataset = spark.read().csv(sourcePath);
+	private static void doPrepareoaid(SparkSession spark, String sourcePath, String outputPath) {
+		Dataset<Row> sdgDataset = spark
+			.read()
+			.format("csv")
+			.option("sep", DEFAULT_DELIMITER)
+			.option("inferSchema", "true")
+			.option("header", "true")
+			.option("quotes", "\"")
+			.load(sourcePath);
+		;
 
 		sdgDataset
-			.groupByKey((MapFunction<Row, String>) r -> ((String) r.getAs("doi")).toLowerCase(), Encoders.STRING())
+			.groupByKey((MapFunction<Row, String>) r -> "50|" + ((String) r.getAs("oaid")), Encoders.STRING())
 			.mapGroups(
 				(MapGroupsFunction<String, Row, Result>) PrepareSDGSparkJob::getResult, Encoders.bean(Result.class))
 			.write()
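
Taken together, the two hunks put each method name back on the body it belongs to: doPrepare now groups the CSV rows by the lowercased "doi" column, while doPrepareoaid groups by the "oaid" column with the "50|" prefix that OpenAIRE uses for result identifiers. A minimal illustrative sketch of the two grouping keys after this commit (the class and field names below are mine, not from the repository):

	import org.apache.spark.api.java.function.MapFunction;
	import org.apache.spark.sql.Row;

	// Illustrative only: the key-extraction logic of doPrepare and
	// doPrepareoaid after the fix, pulled out as standalone functions.
	public class SdgGroupingKeys {

		// doPrepare: DOIs are case-insensitive, so rows are grouped by the
		// lowercased "doi" column.
		static final MapFunction<Row, String> byDoi = r -> ((String) r.getAs("doi")).toLowerCase();

		// doPrepareoaid: OpenAIRE ids are grouped under the "50|" prefix,
		// the namespace used for result entities in the OpenAIRE graph.
		static final MapFunction<Row, String> byOaid = r -> "50|" + (String) r.getAs("oaid");
	}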

CreateActionSetSparkJob.java

@@ -10,6 +10,7 @@ import java.util.Optional;
 
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.Hdfs;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
@@ -23,6 +24,7 @@ import org.slf4j.LoggerFactory;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
 
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
+import eu.dnetlib.dhp.common.HdfsSupport;
 import eu.dnetlib.dhp.schema.action.AtomicAction;
 import eu.dnetlib.dhp.schema.oaf.Result;
 import scala.Tuple2;
@@ -63,7 +65,10 @@ public class CreateActionSetSparkJob implements Serializable {
 		runWithSparkSession(
 			conf,
 			isSparkSessionManaged,
-			spark -> createActionSet(spark, inputPath, outputPath));
+			spark -> {
+				HdfsSupport.remove(outputPath, spark.sparkContext().hadoopConfiguration());
+				createActionSet(spark, inputPath, outputPath);
+			});
 	}
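
The wrapped lambda now clears any previous output before writing the action set, so the job can be re-run without failing on an existing path. A rough sketch of what the added HdfsSupport.remove call accomplishes, written against the plain Hadoop FileSystem API (an assumption about its behaviour; the actual implementation lives in dnet-hadoop's dhp-common module):

	import java.io.IOException;

	import org.apache.hadoop.conf.Configuration;
	import org.apache.hadoop.fs.FileSystem;
	import org.apache.hadoop.fs.Path;

	// Rough equivalent of HdfsSupport.remove(outputPath, hadoopConfiguration):
	// recursively delete the output directory if it already exists, making the
	// Spark job idempotent across re-runs.
	public class RemovePreviousOutput {

		static void remove(String path, Configuration configuration) {
			try {
				FileSystem fs = FileSystem.get(configuration);
				Path output = new Path(path);
				if (fs.exists(output)) {
					fs.delete(output, true); // true = delete recursively
				}
			} catch (IOException e) {
				throw new RuntimeException(e);
			}
		}
	}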