103 lines
3.2 KiB
Java
103 lines
3.2 KiB
Java
|
|
package eu.dnetlib.dhp.oa.graph.dump.eosc;
|
|
|
|
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
|
|
|
|
import java.io.BufferedWriter;
|
|
import java.io.IOException;
|
|
import java.io.OutputStreamWriter;
|
|
import java.io.Serializable;
|
|
import java.nio.charset.StandardCharsets;
|
|
import java.util.Optional;
|
|
|
|
import javax.rmi.CORBA.Util;
|
|
|
|
import org.apache.commons.io.IOUtils;
|
|
import org.apache.hadoop.conf.Configuration;
|
|
import org.apache.hadoop.fs.FSDataOutputStream;
|
|
import org.apache.hadoop.fs.FileSystem;
|
|
import org.apache.hadoop.fs.Path;
|
|
import org.apache.spark.SparkConf;
|
|
import org.apache.spark.api.java.function.FilterFunction;
|
|
import org.apache.spark.api.java.function.MapFunction;
|
|
import org.apache.spark.sql.*;
|
|
import org.apache.spark.sql.catalyst.plans.logical.ColumnStat;
|
|
import org.apache.spark.sql.types.DataTypes;
|
|
import org.apache.spark.sql.types.StructType;
|
|
import org.slf4j.Logger;
|
|
import org.slf4j.LoggerFactory;
|
|
|
|
import com.fasterxml.jackson.databind.ObjectMapper;
|
|
|
|
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
|
|
import eu.dnetlib.dhp.schema.oaf.Datasource;
|
|
|
|
/**
|
|
* @author miriam.baglioni
|
|
* @Date 20/09/23
|
|
*/
|
|
public class EoscDatasourceId implements Serializable {
|
|
|
|
private static final Logger log = LoggerFactory.getLogger(EoscDatasourceId.class);
|
|
|
|
public static void main(String[] args) throws Exception {
|
|
String jsonConfiguration = IOUtils
|
|
.toString(
|
|
SaveCommunityMap.class
|
|
.getResourceAsStream(
|
|
"/eu/dnetlib/dhp/oa/graph/dump/eosc_identifiers_parameters.json"));
|
|
|
|
final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
|
|
parser.parseArgument(args);
|
|
|
|
Boolean isSparkSessionManaged = Optional
|
|
.ofNullable(parser.get("isSparkSessionManaged"))
|
|
.map(Boolean::valueOf)
|
|
.orElse(Boolean.TRUE);
|
|
log.info("isSparkSessionManaged: {}", isSparkSessionManaged);
|
|
|
|
final String outputPath = parser.get("outputPath");
|
|
log.info("outputPath: {}", outputPath);
|
|
|
|
final String inputPath = parser.get("sourcePath");
|
|
log.info("inputPath: {}", inputPath);
|
|
|
|
SparkConf conf = new SparkConf();
|
|
|
|
runWithSparkSession(
|
|
conf,
|
|
isSparkSessionManaged,
|
|
spark -> {
|
|
Utils.removeOutputDir(spark, outputPath);
|
|
mapEoscIdentifier(spark, inputPath, outputPath);
|
|
});
|
|
}
|
|
|
|
private static void mapEoscIdentifier(SparkSession spark, String inputPath, String outputPath) {
|
|
final StructType structureSchema = new StructType()
|
|
.add("masterId", DataTypes.StringType)
|
|
.add("masterName", DataTypes.StringType)
|
|
.add("duplicateId", DataTypes.StringType);
|
|
|
|
org.apache.spark.sql.Dataset<Row> df = spark.read().schema(structureSchema).parquet(inputPath);
|
|
|
|
// spark
|
|
// .read()
|
|
// .schema(Encoders.bean(Datasource.class).schema())
|
|
// .json(inputPath + "/datasource")
|
|
// .withColumn("orId", functions.explode(new Column("originalId")))
|
|
df
|
|
.filter(new Column("duplicateId").startsWith("eosc"))
|
|
.select(
|
|
new Column("masterId").as("graphId"),
|
|
functions.substring_index(new Column("duplicateId"), "::", -1).as("eoscId"))
|
|
.filter((FilterFunction<Row>) r -> !((String) r.getAs("graphId")).startsWith(("10|eosc")))
|
|
.write()
|
|
.mode(SaveMode.Overwrite)
|
|
.option("compression", "gzip")
|
|
.save(outputPath);
|
|
|
|
}
|
|
|
|
}
|