package eu.dnetlib.dhp.oa.graph.dump.csv;

import static org.apache.commons.lang3.StringUtils.split;

import java.io.IOException;
import java.io.StringReader;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.HashMap;

import org.apache.commons.io.FileUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FilterFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.dom4j.Document;
import org.dom4j.DocumentException;
import org.dom4j.Element;
import org.dom4j.Node;
import org.dom4j.io.SAXReader;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.fasterxml.jackson.databind.ObjectMapper;

import eu.dnetlib.dhp.oa.graph.dump.Utils;
import eu.dnetlib.dhp.oa.graph.dump.csv.model.CSVAuthor;
import eu.dnetlib.dhp.oa.graph.dump.csv.model.CSVResult;
import eu.dnetlib.dhp.schema.common.ModelConstants;
import eu.dnetlib.dhp.utils.DHPUtils;

/**
 * @author miriam.baglioni
 * @Date 11/05/23
 */
public class DumpResultTest {

	private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

	private static SparkSession spark;

	private static Path workingDir;

	private static final Logger log = LoggerFactory.getLogger(DumpResultTest.class);

	private static HashMap<String, String> map = new HashMap<>();
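
	// Spins up a local Spark session whose SQL warehouse and Hive metastore live
	// under a throw-away temp directory, so every run starts from a clean slate.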
	@BeforeAll
	public static void beforeAll() throws IOException {
		workingDir = Files.createTempDirectory(DumpResultTest.class.getSimpleName());
		log.info("using work dir {}", workingDir);

		SparkConf conf = new SparkConf();
		conf.setAppName(DumpResultTest.class.getSimpleName());
		conf.setMaster("local[*]");
		conf.set("spark.driver.host", "localhost");
		conf.set("hive.metastore.local", "true");
		conf.set("spark.ui.enabled", "false");
		conf.set("spark.sql.warehouse.dir", workingDir.toString());
		conf.set("hive.metastore.warehouse.dir", workingDir.resolve("warehouse").toString());

		spark = SparkSession
			.builder()
			.appName(DumpResultTest.class.getSimpleName())
			.config(conf)
			.getOrCreate();
	}

	@AfterAll
	public static void afterAll() throws IOException {
		FileUtils.deleteDirectory(workingDir.toFile());
		spark.stop();
	}
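
	// Dumps the publication metadata and verifies the flattened CSVResult fields
	// for known records from the test input graph.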
	@Test
	public void testDumpResult() throws Exception {

		final String sourcePath = getClass()
			.getResource("/eu/dnetlib/dhp/oa/graph/dump/csv/input/")
			.getPath();
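
		// Stage the precomputed result identifiers where the job expects to find them.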
		spark
			.read()
			.text(
				getClass()
					.getResource("/eu/dnetlib/dhp/oa/graph/dump/csv/working/resultIds")
					.getPath())
			.write()
			.text(workingDir.toString() + "/working/resultIds/");
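
		// Run the dump for publications against the test input graph.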
		SparkDumpResults.main(new String[] {
			"-isSparkSessionManaged", Boolean.FALSE.toString(),
			"-workingPath", workingDir.toString() + "/working",
			"-resultType", "publication",
			"-resultTableName", "eu.dnetlib.dhp.schema.oaf.Publication",
			"-sourcePath", sourcePath
		});
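
		// Read back the CSV-ready records the job wrote for publications.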
		Dataset<CSVResult> tmp = Utils
			.readPath(spark, workingDir.toString() + "/working/publication/result", CSVResult.class);

		tmp.show(false);

		Assertions.assertEquals(5, tmp.count());

		CSVResult row = tmp
			.filter(
				(FilterFunction<CSVResult>) r -> r.getId().equals("50|DansKnawCris::0224aae28af558f21768dbc6439c7a95"))
			.first();

		Assertions.assertEquals(ModelConstants.OPEN_ACCESS_RIGHT().getClassid(), row.getAccessright());
		Assertions.assertEquals("FI", row.getCountry());
		Assertions.assertEquals("Lit.opg., bijl.", row.getDescription());
		Assertions.assertEquals(3, split(row.getKeywords(), ", ").length);
		Assertions.assertTrue(row.getKeywords().contains("archeologie"));
		Assertions.assertTrue(row.getKeywords().contains("prospectie"));
		Assertions.assertTrue(row.getKeywords().contains("archaeology"));
		Assertions.assertEquals("nl", row.getLanguage());
		Assertions.assertEquals("2007-01-01", row.getPublication_date());
		Assertions.assertEquals("FakePublisher1", row.getPublisher());
		Assertions
			.assertEquals(
				"Inventariserend veldonderzoek d.m.v. boringen (karterende fase) : Raadhuisstraat te Dirkshorn, gemeente Harenkarspel",
				row.getTitle());
		Assertions.assertEquals("publication", row.getType());

		row = tmp
			.filter(
				(FilterFunction<CSVResult>) r -> r.getId().equals("50|doi_________::715fec7723208e6f17e855c204656e2f"))
			.first();

		System.out.println(row.getPublisher());
		String a = row.getPublisher().replace("\\n", " ");
		System.out.println(a);

//		row = tmp
//			.where("id = '50|DansKnawCris::20c414a3b1c742d5dd3851f1b67df2d9'")
//			.first();
//		Assertions.assertEquals(ModelConstants.OPEN_ACCESS_RIGHT().getClassid(), row.getAs("accessright"));
//		Assertions.assertEquals(2, split(row.getAs("country"), ", ").length);
//		Assertions.assertNull(row.getAs("description"));
//		Assertions.assertEquals(2, split(row.getAs("keywords"), ", ").length);
//		Assertions.assertTrue(row.getAs("keywords").toString().contains("archeologie"));
//		Assertions.assertTrue(row.getAs("keywords").toString().contains("archaeology"));
//		Assertions.assertEquals("UNKNOWN", row.getAs("language"));
//		Assertions.assertNull(row.getAs("publication_date"));
//		Assertions.assertNull(row.getAs("publisher"));
//		Assertions.assertEquals("None", row.getAs("title"));
//		Assertions.assertEquals("publication", row.getAs("type"));
//
//		row = tmp
//			.where("id = '50|DansKnawCris::26780065282e607306372abd0d808245'")
//			.first();
//		Assertions.assertEquals(ModelConstants.OPEN_ACCESS_RIGHT().getClassid(), row.getAs("accessright"));
//		Assertions.assertNull(row.getAs("country"));
//		Assertions.assertNull(row.getAs("description"));
//		Assertions.assertEquals(2, split(row.getAs("keywords"), ", ").length);
//		Assertions.assertTrue(row.getAs("keywords").toString().contains("archeologie"));
//		Assertions.assertTrue(row.getAs("keywords").toString().contains("archaeology"));
//		Assertions.assertEquals("UNKNOWN", row.getAs("language"));
//		Assertions.assertNull(row.getAs("publication_date"));
//		Assertions.assertNull(row.getAs("publisher"));
//		Assertions.assertEquals("None", row.getAs("title"));
//		Assertions.assertEquals("publication", row.getAs("type"));
	}
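
	// Dumps the authors of the publications and checks how author identifiers
	// are minted.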
	@Test
	public void testDumpAuthor() throws Exception {

		final String sourcePath = getClass()
			.getResource("/eu/dnetlib/dhp/oa/graph/dump/csv/input/")
			.getPath();

		spark
			.read()
			.text(
				getClass()
					.getResource("/eu/dnetlib/dhp/oa/graph/dump/csv/working/resultIds")
					.getPath())
			.write()
			.text(workingDir.toString() + "/working/resultIds/");

		SparkDumpResults.main(new String[] {
			"-isSparkSessionManaged", Boolean.FALSE.toString(),
			"-workingPath", workingDir.toString() + "/working",
			"-resultType", "publication",
			"-resultTableName", "eu.dnetlib.dhp.schema.oaf.Publication",
			"-sourcePath", sourcePath
		});

		Dataset<CSVAuthor> tmp = Utils
			.readPath(spark, workingDir.toString() + "/working/publication/author", CSVAuthor.class);

		Assertions.assertEquals(13, tmp.count());

		Assertions.assertEquals(1, tmp.where("firstName == 'Maryam'").count());
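
		// Authors with an ORCID are keyed by the hash of the ORCID itself; authors
		// without one appear to be keyed by the result id plus their position.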
		Assertions
			.assertEquals(
				DHPUtils.md5("50|DansKnawCris::0224aae28af558f21768dbc6439c7a951"),
				tmp.where("firstName == 'Maryam'").first().getId());
		Assertions
			.assertEquals(DHPUtils.md5("0000-0003-2914-2734"), tmp.where("firstName == 'Michael'").first().getId());
		Assertions
			.assertEquals(
				DHPUtils.md5("0000-0002-6660-5673"),
				tmp.where("firstName == 'Mikhail'").first().getId());
	}
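
	// Dumps the result-author relations and verifies the links for an author who
	// contributed to two publications.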
	@Test
	public void testDumpResultAuthorRelations() throws Exception {

		final String sourcePath = getClass()
			.getResource("/eu/dnetlib/dhp/oa/graph/dump/csv/input/")
			.getPath();

		spark
			.read()
			.text(
				getClass()
					.getResource("/eu/dnetlib/dhp/oa/graph/dump/csv/working/resultIds")
					.getPath())
			.write()
			.text(workingDir.toString() + "/working/resultIds/");

		SparkDumpResults.main(new String[] {
			"-isSparkSessionManaged", Boolean.FALSE.toString(),
			"-outputPath", workingDir.toString() + "/output",
			"-workingPath", workingDir.toString() + "/working",
			"-resultType", "publication",
			"-resultTableName", "eu.dnetlib.dhp.schema.oaf.Publication",
			"-sourcePath", sourcePath
		});
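
		// result_author holds one SEP-delimited row per (result_id, author_id) pair.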
		Dataset<Row> tmp = spark
			.read()
			.option("header", "true")
			.option("delimiter", Constants.SEP)
			.csv(workingDir.toString() + "/working/publication/result_author");

		Assertions.assertEquals(6, tmp.count());

		Assertions.assertEquals(2, tmp.where("author_id == '" + DHPUtils.md5("0000-0003-2914-2734") + "'").count());
		Assertions
			.assertEquals(
				1, tmp
					.where("author_id == '" + DHPUtils.md5("0000-0003-2914-2734") + "'")
					.where("result_id == '50|DansKnawCris::0224aae28af558f21768dbc6439c7a95'")
					.count());
		Assertions
			.assertEquals(
				1, tmp
					.where("author_id == '" + DHPUtils.md5("0000-0003-2914-2734") + "'")
					.where("result_id == '50|DansKnawCris::20c414a3b1c742d5dd3851f1b67df2d9'")
					.count());
	}
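
	// Dumps the persistent identifiers attached to each result and checks the DOI
	// of a known record.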
	@Test
	public void testDumpResultPid() throws Exception {

		final String sourcePath = getClass()
			.getResource("/eu/dnetlib/dhp/oa/graph/dump/csv/input/")
			.getPath();

		spark
			.read()
			.text(
				getClass()
					.getResource("/eu/dnetlib/dhp/oa/graph/dump/csv/working/resultIds")
					.getPath())
			.write()
			.text(workingDir.toString() + "/working/resultIds/");

		SparkDumpResults.main(new String[] {
			"-isSparkSessionManaged", Boolean.FALSE.toString(),
			"-outputPath", workingDir.toString() + "/output",
			"-workingPath", workingDir.toString() + "/working",
			"-resultType", "publication",
			"-resultTableName", "eu.dnetlib.dhp.schema.oaf.Publication",
			"-sourcePath", sourcePath
		});
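
		// result_pid holds one SEP-delimited row per (result_id, type, pid) triple.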
		Dataset<Row> tmp = spark
			.read()
			.option("header", "true")
			.option("delimiter", Constants.SEP)
			.csv(workingDir.toString() + "/working/publication/result_pid");

		tmp.show(false);
		Assertions.assertEquals(4, tmp.count());

		Assertions
			.assertEquals(2, tmp.where("result_id == '50|DansKnawCris::0224aae28af558f21768dbc6439c7a95'").count());
		Assertions
			.assertEquals(
				"10.1023/fakedoi",
				tmp
					.where("result_id == '50|DansKnawCris::0224aae28af558f21768dbc6439c7a95' and type == 'doi'")
					.first()
					.getAs("pid"));
	}
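
	// "prova" (Italian for "test") exercises the XML-to-CSV conversion used for
	// community metadata: it parses a community element and builds the
	// SEP-delimited line of md5(id), label, id and description.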
	@Test
	public void prova() throws DocumentException {
		String input = "<community id=\"dh-ch\" label=\"Digital Humanities and Cultural Heritage\">" +
			" <description>This community gathers research results, data, scientific publications and projects related to the domain of Digital Humanities. This broad definition includes Humanities, Cultural Heritage, History, Archaeology and related fields.</description>" +
			"</community>";

		final Document doc;
		final SAXReader reader = new SAXReader();

		doc = reader.read(new StringReader(input));
		Element root = doc.getRootElement();
		StringBuilder builder = new StringBuilder();
		builder.append(DHPUtils.md5(root.attribute("id").getValue()));
		builder.append(Constants.SEP);
		builder.append(root.attribute("label").getValue());
		builder.append(Constants.SEP);
		builder.append(root.attribute("id").getValue());
		builder.append(Constants.SEP);
		builder.append(((Node) (root.selectNodes("//description").get(0))).getText());
		System.out.println(builder.toString());
	}
}