forked from D-Net/dnet-hadoop

commit fe88904df0 (parent a6df01d329)
changed the wf definition
eu/dnetlib/dhp/oa/graph/dump/community/CommunitySplitS3.java (file deleted)
@@ -1,88 +0,0 @@
-package eu.dnetlib.dhp.oa.graph.dump.community;
-
-import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
-
-import java.io.Serializable;
-import java.util.Optional;
-import java.util.Set;
-import java.util.stream.Collectors;
-
-import org.apache.spark.SparkConf;
-import org.apache.spark.SparkContext;
-import org.apache.spark.sql.Dataset;
-import org.apache.spark.sql.SaveMode;
-import org.apache.spark.sql.SparkSession;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import eu.dnetlib.dhp.oa.graph.dump.Utils;
-import eu.dnetlib.dhp.schema.dump.oaf.community.CommunityResult;
-
-public class CommunitySplitS3 implements Serializable {
-
-	private static final Logger log = LoggerFactory.getLogger(CommunitySplitS3.class);
-
-	public void run(Boolean isSparkSessionManaged, String inputPath, String outputPath, String communityMapPath) {
-		SparkConf conf = new SparkConf();
-		runWithSparkSession(
-			conf,
-			isSparkSessionManaged,
-			spark -> {
-				SparkContext sc = spark.sparkContext();
-				sc.hadoopConfiguration().set("fs.s3.impl", "org.apache.hadoop.fs.s3native.NativeS3FileSystem");
-				sc.hadoopConfiguration().set("fs.s3.awsAccessKeyId", "AK0MM6C2BYA0K1PNJYYX");
-				sc.hadoopConfiguration().set("fs.s3.awsSecretAccessKey", "fpeiqUUpKAUOtO6JWMWLTxxlSxJ+yGYwHozm3jHK");
-				sc.hadoopConfiguration().set("fs.s3.endpoint", "s3.acm.edu.pl");
-				execSplit(spark, inputPath, outputPath, communityMapPath);
-			});
-	}
-
-	private static void execSplit(SparkSession spark, String inputPath, String outputPath,
-		String communityMapPath) {
-		Set<String> communities = Utils.getCommunityMap(spark, communityMapPath).keySet();
-
-		Dataset<CommunityResult> result = Utils
-			.readPath(spark, inputPath + "/publication", CommunityResult.class)
-			.union(Utils.readPath(spark, inputPath + "/dataset", CommunityResult.class))
-			.union(Utils.readPath(spark, inputPath + "/orp", CommunityResult.class))
-			.union(Utils.readPath(spark, inputPath + "/software", CommunityResult.class));
-
-		communities
-			.stream()
-			.forEach(c -> printResult(c, result, outputPath));
-	}
-
-	private static void printResult(String c, Dataset<CommunityResult> result, String outputPath) {
-		Dataset<CommunityResult> community_products = result
-			.filter(r -> containsCommunity(r, c));
-
-		if (community_products.count() > 0) {
-			log.info("Writing dump for community: {} ", c);
-			community_products
-				.repartition(1)
-				.write()
-				.option("compression", "gzip")
-				.mode(SaveMode.Overwrite)
-				.json(outputPath + "/" + c);
-		}
-	}
-
-	private static boolean containsCommunity(CommunityResult r, String c) {
-		if (Optional.ofNullable(r.getContext()).isPresent()) {
-			return r
-				.getContext()
-				.stream()
-				.filter(con -> con.getCode().equals(c))
-				.collect(Collectors.toList())
-				.size() > 0;
-		}
-		return false;
-	}
-}
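Note on the removed class above: it wires S3 access by mutating the Hadoop configuration of the live SparkContext, with the access key, secret, and endpoint hardcoded as string literals. Below is a minimal sketch of the same wiring with the credentials read from environment variables instead; the variable names DUMP_S3_ACCESS_KEY and DUMP_S3_SECRET_KEY are hypothetical, not taken from the commit.

// Sketch only: the same fs.s3.* keys as the removed CommunitySplitS3, but the
// credentials come from (hypothetical) environment variables rather than
// being committed to source control.
import org.apache.spark.SparkContext;
import org.apache.spark.sql.SparkSession;

public class S3ConfigSketch {

	public static void configureS3(SparkSession spark, String endpoint) {
		SparkContext sc = spark.sparkContext();
		// use the native S3 filesystem implementation for the fs.s3 scheme
		sc.hadoopConfiguration().set("fs.s3.impl", "org.apache.hadoop.fs.s3native.NativeS3FileSystem");
		sc.hadoopConfiguration().set("fs.s3.awsAccessKeyId", System.getenv("DUMP_S3_ACCESS_KEY"));
		sc.hadoopConfiguration().set("fs.s3.awsSecretAccessKey", System.getenv("DUMP_S3_SECRET_KEY"));
		// non-AWS endpoint, e.g. the s3.acm.edu.pl endpoint used by the removed code
		sc.hadoopConfiguration().set("fs.s3.endpoint", endpoint);
	}
}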
eu/dnetlib/dhp/oa/graph/dump/community/SparkSplitForCommunityS3.java (file deleted)
@@ -1,64 +0,0 @@
-package eu.dnetlib.dhp.oa.graph.dump.community;
-
-import java.io.Serializable;
-import java.util.Optional;
-
-import org.apache.commons.io.IOUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import eu.dnetlib.dhp.application.ArgumentApplicationParser;
-import eu.dnetlib.dhp.utils.ISLookupClientFactory;
-import eu.dnetlib.enabling.is.lookup.rmi.ISLookUpService;
-
-public class SparkSplitForCommunityS3 implements Serializable {
-
-	private static final Logger log = LoggerFactory.getLogger(SparkSplitForCommunityS3.class);
-
-	public static void main(String[] args) throws Exception {
-		String jsonConfiguration = IOUtils
-			.toString(
-				SparkSplitForCommunityS3.class
-					.getResourceAsStream(
-						"/eu/dnetlib/dhp/oa/graph/dump/split_parameters.json"));
-
-		final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
-		parser.parseArgument(args);
-
-		Boolean isSparkSessionManaged = Optional
-			.ofNullable(parser.get("isSparkSessionManaged"))
-			.map(Boolean::valueOf)
-			.orElse(Boolean.TRUE);
-		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);
-
-		final String inputPath = parser.get("sourcePath");
-		log.info("inputPath: {}", inputPath);
-
-		final String outputPath = parser.get("outputPath");
-		log.info("outputPath: {}", outputPath);
-
-		final String communityMapPath = parser.get("communityMapPath");
-
-		final String isLookUpUrl = parser.get("isLookUpUrl");
-		log.info("isLookUpUrl: {}", isLookUpUrl);
-
-		CommunitySplitS3 split = new CommunitySplitS3();
-
-		split.run(isSparkSessionManaged, inputPath, outputPath, communityMapPath);
-	}
-
-	public static ISLookUpService getIsLookUpService(String isLookUpUrl) {
-		return ISLookupClientFactory.getLookUpService(isLookUpUrl);
-	}
-}
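Both removed classes delegate session handling to SparkSessionSupport.runWithSparkSession: the helper builds (or reuses) a session, runs the given function, and stops the session only when the helper manages it. The following is a simplified sketch of that contract, not the actual eu.dnetlib.dhp.common.SparkSessionSupport source (the real helper accepts a throwing consumer).

// Simplified sketch of the runWithSparkSession contract relied on above.
import java.util.function.Consumer;

import org.apache.spark.SparkConf;
import org.apache.spark.sql.SparkSession;

public class SparkSessionSupportSketch {

	public static void runWithSparkSession(SparkConf conf, Boolean isSparkSessionManaged,
		Consumer<SparkSession> fn) {
		SparkSession spark = SparkSession.builder().config(conf).getOrCreate();
		try {
			fn.accept(spark); // the caller's job, e.g. CommunitySplitS3's split logic
		} finally {
			if (Boolean.TRUE.equals(isSparkSessionManaged)) {
				spark.stop(); // close only sessions this helper owns
			}
		}
	}
}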
oozie workflow definition (modified)
@@ -13,6 +13,26 @@
         <name>outputPath</name>
         <description>the output path</description>
     </property>
+    <property>
+        <name>accessToken</name>
+        <description>the access token used for the deposition in Zenodo</description>
+    </property>
+    <property>
+        <name>connectionUrl</name>
+        <description>the connection url for Zenodo</description>
+    </property>
+    <property>
+        <name>metadata</name>
+        <description>the metadata associated with the deposition</description>
+    </property>
+    <property>
+        <name>newDeposition</name>
+        <description>true if it is a brand new deposition, false for a new version of an old deposition</description>
+    </property>
+    <property>
+        <name>conceptRecordId</name>
+        <description>for a new version, the id of the record of the old deposition</description>
+    </property>
     <property>
         <name>hiveDbName</name>
         <description>the target hive database name</description>
@@ -81,7 +101,7 @@
     </configuration>
 </global>

-<start to="save_community_map"/>
+<start to="reset_outputpath"/>

 <kill name="Kill">
     <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
@@ -135,7 +155,6 @@
     <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Publication</arg>
     <arg>--outputPath</arg><arg>${workingDir}/publication</arg>
     <arg>--communityMapPath</arg><arg>${workingDir}/communityMap</arg>
-    <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
 </spark>
 <ok to="join_dump"/>
 <error to="Kill"/>
@@ -162,7 +181,6 @@
     <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Dataset</arg>
     <arg>--outputPath</arg><arg>${workingDir}/dataset</arg>
     <arg>--communityMapPath</arg><arg>${workingDir}/communityMap</arg>
-    <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
 </spark>
 <ok to="join_dump"/>
 <error to="Kill"/>
@@ -189,7 +207,6 @@
     <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.OtherResearchProduct</arg>
     <arg>--outputPath</arg><arg>${workingDir}/otherresearchproduct</arg>
     <arg>--communityMapPath</arg><arg>${workingDir}/communityMap</arg>
-    <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
 </spark>
 <ok to="join_dump"/>
 <error to="Kill"/>
@@ -216,7 +233,6 @@
     <arg>--resultTableName</arg><arg>eu.dnetlib.dhp.schema.oaf.Software</arg>
     <arg>--outputPath</arg><arg>${workingDir}/software</arg>
     <arg>--communityMapPath</arg><arg>${workingDir}/communityMap</arg>
-    <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
 </spark>
 <ok to="join_dump"/>
 <error to="Kill"/>
@@ -360,7 +376,7 @@
 <master>yarn</master>
 <mode>cluster</mode>
 <name>Split dumped result for community</name>
-<class>eu.dnetlib.dhp.oa.graph.dump.community.SparkSplitForCommunityS3</class>
+<class>eu.dnetlib.dhp.oa.graph.dump.community.SparkSplitForCommunity</class>
 <jar>dhp-graph-mapper-${projectVersion}.jar</jar>
 <spark-opts>
     --executor-memory=${sparkExecutorMemory}
@@ -375,22 +391,22 @@
     <arg>--sourcePath</arg><arg>${workingDir}/ext</arg>
     <arg>--outputPath</arg><arg>${outputPath}</arg>
     <arg>--communityMapPath</arg><arg>${workingDir}/communityMap</arg>
-    <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
 </spark>
-<ok to="End"/>
+<ok to="send_zenodo"/>
 <error to="Kill"/>
 </action>

 <action name="send_zenodo">
     <java>
-        <main-class>eu.dnetlib.dhp.oa.graph.dump.SendToZenodo</main-class>
+        <main-class>eu.dnetlib.dhp.oa.graph.dump.SendToZenodoHDFS</main-class>
         <arg>--hdfsPath</arg><arg>${outputPath}</arg>
         <arg>--hdfsNameNode</arg><arg>${nameNode}</arg>
         <arg>--accessToken</arg><arg>${accessToken}</arg>
         <arg>--connectionUrl</arg><arg>${connectionUrl}</arg>
         <arg>--metadata</arg><arg>${metadata}</arg>
         <arg>--communityMapPath</arg><arg>${workingDir}/communityMap</arg>
-        <arg>--isLookUpUrl</arg><arg>${isLookUpUrl}</arg>
+        <arg>--conceptRecordId</arg><arg>${conceptRecordId}</arg>
+        <arg>--newDeposition</arg><arg>${newDeposition}</arg>
     </java>
     <ok to="End"/>
     <error to="Kill"/>
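On the send_zenodo step introduced above: an Oozie java action passes each <arg> element verbatim as one entry of the String[] handed to the main class, so the new --conceptRecordId and --newDeposition flags surface as ordinary CLI arguments. The following is a hedged sketch of how SendToZenodoHDFS could pick them up, assuming it follows the same ArgumentApplicationParser pattern as the classes removed above; the resource name upload_zenodo.json is a hypothetical placeholder, not taken from the commit.

// Sketch only, not the actual SendToZenodoHDFS source: shows how the new
// workflow <arg>s would be parsed with the module's ArgumentApplicationParser.
import java.util.Optional;

import org.apache.commons.io.IOUtils;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;

public class ZenodoArgsSketch {

	public static void main(String[] args) throws Exception {
		// the parameter-definition resource name below is a placeholder
		String jsonConfiguration = IOUtils
			.toString(ZenodoArgsSketch.class.getResourceAsStream("/eu/dnetlib/dhp/oa/graph/dump/upload_zenodo.json"));

		ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
		parser.parseArgument(args); // args arrive exactly as listed in the <java> action

		// true -> brand new deposition; false -> new version of an old deposition
		Boolean newDeposition = Boolean.valueOf(parser.get("newDeposition"));

		// only meaningful for a new version: the record id of the old deposition
		String conceptRecordId = Optional.ofNullable(parser.get("conceptRecordId")).orElse(null);

		String accessToken = parser.get("accessToken"); // Zenodo deposition token
		String connectionUrl = parser.get("connectionUrl"); // Zenodo API url
	}
}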