package eu.dnetlib.dhp.skgif;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.oa.graph.dump.Utils;
import eu.dnetlib.dhp.schema.oaf.Datasource;
import eu.dnetlib.dhp.skgif.model.ResearchProduct;
import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.Serializable;
import java.util.Optional;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

/**
 * @author miriam.baglioni
 * @Date 06/02/24
 */
public class JournalsFromDatasources implements Serializable {

	private static final Logger log = LoggerFactory.getLogger(JournalsFromDatasources.class);

	public static void main(String[] args) throws Exception {
		String jsonConfiguration = IOUtils
			.toString(
				JournalsFromDatasources.class
					.getResourceAsStream(
						"/eu/dnetlib/dhp/oa/graph/dump/journals_from_datasource_parameters.json"));

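		// For reference, a minimal sketch of what journals_from_datasource_parameters.json could look
		// like, assuming the usual dnet-hadoop parameter descriptor layout (paramName / paramLongName /
		// paramDescription / paramRequired); the short names and descriptions below are illustrative
		// placeholders, only the long names come from the parser.get(...) calls in this class:
		//
		//	[
		//		{"paramName":"issm", "paramLongName":"isSparkSessionManaged",
		//			"paramDescription":"true if the spark session is managed by the caller", "paramRequired":false},
		//		{"paramName":"sp", "paramLongName":"sourcePath",
		//			"paramDescription":"the path of the research products to enrich", "paramRequired":true},
		//		{"paramName":"out", "paramLongName":"outputPath",
		//			"paramDescription":"the path where the enriched results are written", "paramRequired":true},
		//		{"paramName":"dsp", "paramLongName":"datasourcePath",
		//			"paramDescription":"the path of the graph datasources", "paramRequired":true}
		//	]
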
		final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
		parser.parseArgument(args);

		Boolean isSparkSessionManaged = Optional
			.ofNullable(parser.get("isSparkSessionManaged"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);

		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

		final String inputPath = parser.get("sourcePath");
		log.info("inputPath: {}", inputPath);

		final String outputPath = parser.get("outputPath");
		log.info("outputPath: {}", outputPath);

		final String datasourcePath = parser.get("datasourcePath");
		log.info("datasourcePath: {}", datasourcePath);

		SparkConf conf = new SparkConf();

		runWithSparkSession(
			conf,
			isSparkSessionManaged,
			spark -> {
				Utils.removeOutputDir(spark, outputPath);
				extendResult(spark, inputPath, outputPath, datasourcePath);
			});
	}

	// find the results having a container in the metadata
	// map all the hostedby.key of the instances associated with the result
	// find a correspondence to a datasource which is a journal
	// write the biblio for the result
	public static void extendResult(SparkSession spark, String inputPath, String outputPath, String datasourcePath) {
		// keep only the datasources whose EOSC type identifies them as journal archives
		Dataset<Datasource> datasource = Utils
			.readPath(spark, datasourcePath, Datasource.class)
			.filter(
				(FilterFunction<Datasource>) d -> Optional.ofNullable(d.getEosctype()).isPresent() &&
					d.getEosctype().getClassname().equalsIgnoreCase("Journal archive"));

		Dataset<ResearchProduct> results = Utils.readPath(spark, inputPath, ResearchProduct.class);

	}
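
	// The join between the hostedby keys of the result instances and the journal datasources is still
	// missing above. A minimal, hedged sketch of how the enrichment could continue; it is kept inside a
	// comment because the ResearchProduct accessors it uses (getLocalIdentifier(), getManifestations(),
	// getHostingDatasource(), setBiblio()) are hypothetical placeholders, not the verified SKG-IF model API:
	//
	//	// one row per (result id, hostedby key) taken from the instances of each result
	//	Dataset<Tuple2<String, String>> hostedBy = results
	//		.flatMap((FlatMapFunction<ResearchProduct, Tuple2<String, String>>) r -> r
	//			.getManifestations()
	//			.stream()
	//			.map(m -> new Tuple2<>(r.getLocalIdentifier(), m.getHostingDatasource()))
	//			.iterator(),
	//			Encoders.tuple(Encoders.STRING(), Encoders.STRING()));
	//
	//	// join hostedBy._2 with datasource.col("id"), group back by result id, copy the journal (biblio)
	//	// of the matching datasource into the result, then write the enriched dataset to outputPath,
	//	// e.g. .write().mode(SaveMode.Overwrite).option("compression", "gzip").json(outputPath)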
}