person_through_the_graph_newDevelopments #497

Merged
claudio.atzori merged 11 commits from person_through_the_graph_newDevelopments into person_through_the_graph 2024-10-25 10:20:41 +02:00
6 changed files with 95 additions and 54 deletions
Showing only changes of commit 821540f94a

View File

@@ -61,8 +61,7 @@ public class CoAuthorshipIterator implements Iterator<Relation> {
 	private Relation getRelation(String orcid1, String orcid2) {
 		String source = PERSON_PREFIX + IdentifierFactory.md5(orcid1);
 		String target = PERSON_PREFIX + IdentifierFactory.md5(orcid2);
-		Relation relation =
-			OafMapperUtils
+		Relation relation = OafMapperUtils
 			.getRelation(
 				source, target, ModelConstants.PERSON_PERSON_RELTYPE,
 				ModelConstants.PERSON_PERSON_SUBRELTYPE,
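A note for reviewers on the id scheme this hunk leans on: both endpoints of the co-authorship relation are deterministic, obtained by hashing the ORCID iD and prepending the person prefix, so reruns produce the same ids. A minimal sketch of the idea, assuming IdentifierFactory.md5 is a plain lower-case hex MD5; the prefix value below is hypothetical (the real one is the PERSON_PREFIX constant, which may already contain the separator):

import java.math.BigInteger;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

public class PersonIdSketch {

	// Hypothetical value; the actual PERSON_PREFIX constant is defined in the production class.
	private static final String PERSON_PREFIX = "30|orcid_______::";

	// Assumed to behave like IdentifierFactory.md5: lower-case hex MD5 of the input.
	static String md5(final String s) {
		try {
			final MessageDigest md = MessageDigest.getInstance("MD5");
			md.update(s.getBytes(StandardCharsets.UTF_8));
			return String.format("%032x", new BigInteger(1, md.digest()));
		} catch (final Exception e) {
			throw new IllegalStateException(e);
		}
	}

	public static void main(final String[] args) {
		// The same ORCID pair always maps to the same source/target pair,
		// so co-authorship relations stay stable across runs.
		System.out.println(PERSON_PREFIX + md5("0000-0002-1825-0097"));
		System.out.println(PERSON_PREFIX + md5("0000-0001-5109-3700"));
	}
}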

View File

@@ -13,9 +13,12 @@ import java.sql.SQLException;
 import java.util.*;
 import java.util.stream.Collectors;
-import eu.dnetlib.dhp.common.DbClient;
 import org.apache.commons.cli.ParseException;
 import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.compress.BZip2Codec;
 import org.apache.hadoop.mapred.SequenceFileOutputFormat;
@@ -27,13 +30,13 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.spark_project.jetty.util.StringUtil;
 import com.fasterxml.jackson.databind.ObjectMapper;
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.collection.orcid.model.Author;
 import eu.dnetlib.dhp.collection.orcid.model.Employment;
 import eu.dnetlib.dhp.collection.orcid.model.Work;
+import eu.dnetlib.dhp.common.DbClient;
 import eu.dnetlib.dhp.common.HdfsSupport;
 import eu.dnetlib.dhp.schema.action.AtomicAction;
 import eu.dnetlib.dhp.schema.common.ModelConstants;
@@ -49,11 +52,6 @@ import eu.dnetlib.dhp.schema.oaf.utils.PidType;
 import eu.dnetlib.dhp.utils.DHPUtils;
 import scala.Tuple2;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 public class ExtractPerson implements Serializable {
 	private static final Logger log = LoggerFactory.getLogger(ExtractPerson.class);
 	private static final String QUERY = "SELECT * FROM project_person WHERE pid_type = 'ORCID'";
@@ -79,7 +77,6 @@ public class ExtractPerson implements Serializable {
 	public static List<KeyValue> collectedfromOpenAIRE = OafMapperUtils
 		.listKeyValues(OPENAIRE_DATASOURCE_ID, OPENAIRE_DATASOURCE_NAME);
 	public static final DataInfo DATAINFO = OafMapperUtils
 		.dataInfo(
 			false,
@@ -136,7 +133,8 @@
 			spark -> {
 				HdfsSupport.remove(outputPath, spark.sparkContext().hadoopConfiguration());
 				extractInfoForActionSetFromORCID(spark, inputPath, workingDir);
-				extractInfoForActionSetFromProjects(spark, inputPath, workingDir, dbUrl, dbUser, dbPassword, workingDir + "/project", hdfsNameNode);
+				extractInfoForActionSetFromProjects(
+					spark, inputPath, workingDir, dbUrl, dbUser, dbPassword, workingDir + "/project", hdfsNameNode);
 				createActionSet(spark, outputPath, workingDir);
 			});
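Worth spelling out what the first line of the lambda buys: the output path is deleted before anything is written, so a failed or repeated run cannot leave stale records in the action set. A sketch of what such a helper typically does, assuming HdfsSupport.remove is essentially a guarded recursive delete (the actual implementation may differ):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CleanOutputSketch {

	// Guarded recursive delete: a no-op when the path does not exist yet.
	public static void remove(final String path, final Configuration conf) {
		try {
			final FileSystem fs = FileSystem.get(conf);
			final Path p = new Path(path);
			if (fs.exists(p)) {
				fs.delete(p, true); // true = recursive
			}
		} catch (final IOException e) {
			throw new RuntimeException(e);
		}
	}
}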
@@ -164,7 +162,8 @@
 	public static Relation getRelationWithProject(ResultSet rs) {
 		try {
-			return getProjectRelation(rs.getString("project"), rs.getString("pid"),
+			return getProjectRelation(
+				rs.getString("project"), rs.getString("pid"),
 				rs.getString("role"));
 		} catch (final SQLException e) {
 			throw new RuntimeException(e);
@@ -174,7 +173,7 @@
 	private static Relation getProjectRelation(String project, String orcid, String role) {
 		String source = PERSON_PREFIX + "::" + IdentifierFactory.md5(orcid);
-		String target = project.substring(0,14)
+		String target = project.substring(0, 14)
 			+ IdentifierFactory.md5(project.substring(15));
 		List<KeyValue> properties = new ArrayList<>();
@@ -194,12 +193,10 @@
 			properties.add(kv);
 		}
 		if (!properties.isEmpty())
 			relation.setProperties(properties);
 		return relation;
 	}
 	protected static void writeRelation(final Relation relation, BufferedWriter writer) {
@@ -211,7 +208,7 @@
 		}
 	}
-	private static void createActionSet(SparkSession spark,String outputPath, String workingDir) {
+	private static void createActionSet(SparkSession spark, String outputPath, String workingDir) {
 		Dataset<Person> people;
 		people = spark
@@ -276,7 +273,7 @@
 			.joinWith(authors, employmentDataset.col("orcid").equalTo(authors.col("orcid")))
 			.map((MapFunction<Tuple2<Employment, Author>, Employment>) t2 -> t2._1(), Encoders.bean(Employment.class));
-		//Mapping all the orcid profiles even if the profile has no visible works
+		// Mapping all the orcid profiles even if the profile has no visible works
 		authors.map((MapFunction<Author, Person>) op -> {
 			Person person = new Person();
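The postgres coordinates threaded through above ultimately serve the QUERY constant declared at the top of the class. Stripped of the DbClient wrapper, the round trip amounts to the following self-contained JDBC sketch (an assumption about what the wrapper does internally; in the job each row becomes a person-project Relation via getRelationWithProject(rs) rather than being printed):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ProjectPersonDumpSketch {

	public static void main(final String[] args) throws Exception {
		// args: url, user, password; e.g. the values from the job properties file
		try (Connection con = DriverManager.getConnection(args[0], args[1], args[2]);
			Statement stm = con.createStatement();
			ResultSet rs = stm.executeQuery("SELECT * FROM project_person WHERE pid_type = 'ORCID'")) {
			while (rs.next()) {
				// the job maps each row to a Relation instead of printing it
				System.out.println(
					rs.getString("pid") + " -> " + rs.getString("project") + " (" + rs.getString("role") + ")");
			}
		}
	}
}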

View File

@@ -21,5 +21,30 @@
     "paramLongName": "workingDir",
     "paramDescription": "the hdfs name node",
     "paramRequired": false
-  }
+  },
+  {
+    "paramName": "pu",
+    "paramLongName": "postgresUrl",
+    "paramDescription": "the postgres database URL",
+    "paramRequired": false
+  },
+  {
+    "paramName": "ps",
+    "paramLongName": "postgresUser",
+    "paramDescription": "the postgres user name",
+    "paramRequired": false
+  },
+  {
+    "paramName": "pp",
+    "paramLongName": "postgresPassword",
+    "paramDescription": "the postgres password",
+    "paramRequired": false
+  },
+  {
+    "paramName": "nn",
+    "paramLongName": "hdfsNameNode",
+    "paramDescription": "the hdfs name node",
+    "paramRequired": false
+  }
 ]
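For completeness, these definitions are what ArgumentApplicationParser matches against the --postgresUrl/--postgresUser/--postgresPassword/--hdfsNameNode arguments added to the workflow below. A minimal sketch of the consuming side; the resource path is a placeholder, since the actual file location is not visible in this diff:

public static void main(final String[] args) throws Exception {
	final ArgumentApplicationParser parser = new ArgumentApplicationParser(
		IOUtils
			.toString(
				// placeholder path: substitute the real location of this parameters file
				ExtractPerson.class.getResourceAsStream("/eu/dnetlib/dhp/.../as_parameters.json")));
	parser.parseArgument(args);

	// all four parameters are declared with paramRequired = false, so they may come back null
	final String dbUrl = parser.get("postgresUrl");
	final String dbUser = parser.get("postgresUser");
	final String dbPassword = parser.get("postgresPassword");
	final String hdfsNameNode = parser.get("hdfsNameNode");
}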

View File

@@ -1,2 +1,5 @@
 inputPath=/data/orcid_2023/tables/
 outputPath=/user/miriam.baglioni/peopleAS
+postgresUrl=jdbc:postgresql://beta.services.openaire.eu:5432/dnet_openaireplus
+postgresUser=dnet
+postgresPassword=dnetPwd

View File

@@ -9,6 +9,18 @@
     <name>outputPath</name>
     <description>the path where to store the actionset</description>
   </property>
+  <property>
+    <name>postgresUrl</name>
+    <description>the postgres database URL</description>
+  </property>
+  <property>
+    <name>postgresUser</name>
+    <description>the postgres user name</description>
+  </property>
+  <property>
+    <name>postgresPassword</name>
+    <description>the postgres password</description>
+  </property>
   <property>
     <name>sparkDriverMemory</name>
     <description>memory for driver process</description>
@@ -102,6 +114,10 @@
       <arg>--inputPath</arg><arg>${inputPath}</arg>
       <arg>--outputPath</arg><arg>${outputPath}</arg>
       <arg>--workingDir</arg><arg>${workingDir}</arg>
+      <arg>--hdfsNameNode</arg><arg>${nameNode}</arg>
+      <arg>--postgresUrl</arg><arg>${postgresUrl}</arg>
+      <arg>--postgresUser</arg><arg>${postgresUser}</arg>
+      <arg>--postgresPassword</arg><arg>${postgresPassword}</arg>
    </spark>
    <ok to="End"/>
    <error to="Kill"/>

View File

@@ -31,6 +31,7 @@ class ORCIDAuthorMatchersTest {
     assertTrue(matchOrderedTokenAndAbbreviations("孙林 Sun Lin", "Sun Lin"))
     // assertTrue(AuthorsMatchRevised.compare("孙林 Sun Lin", "孙林")); // not yet implemented
   }

   @Test def testDocumentationNames(): Unit = {
+    assertTrue(matchOrderedTokenAndAbbreviations("James C. A. Miller-Jones", "James Antony Miller-Jones"))
   }
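For reviewers unfamiliar with the matcher: the new case exercises ordered-token matching where a dotted initial may stand in for a full given name ("A." for "Antony") and a surplus initial ("C.") is tolerated. The production logic lives in the Scala ORCIDAuthorMatchers; the Java sketch below is only an illustrative re-implementation of that idea, not the actual algorithm:

import java.util.Arrays;
import java.util.List;

public class AbbreviationMatchSketch {

	// A single-letter token (initial) matches any token sharing its first letter.
	static boolean tokenMatches(final String a, final String b) {
		final String x = a.replace(".", "").toLowerCase();
		final String y = b.replace(".", "").toLowerCase();
		if (x.isEmpty() || y.isEmpty()) {
			return false;
		}
		if (x.length() == 1 || y.length() == 1) {
			return x.charAt(0) == y.charAt(0);
		}
		return x.equals(y);
	}

	// Align the shorter token list against the longer one, in order,
	// skipping longer-side tokens with no counterpart (e.g. extra middle initials).
	static boolean matchOrdered(final String n1, final String n2) {
		final List<String> t1 = Arrays.asList(n1.trim().split("\\s+"));
		final List<String> t2 = Arrays.asList(n2.trim().split("\\s+"));
		final List<String> shorter = t1.size() <= t2.size() ? t1 : t2;
		final List<String> longer = t1.size() <= t2.size() ? t2 : t1;
		int j = 0;
		for (final String tok : shorter) {
			while (j < longer.size() && !tokenMatches(tok, longer.get(j))) {
				j++;
			}
			if (j == longer.size()) {
				return false;
			}
			j++;
		}
		return true;
	}

	public static void main(final String[] args) {
		System.out.println(matchOrdered("James C. A. Miller-Jones", "James Antony Miller-Jones")); // true
	}
}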