refactoring

This commit is contained in:
Miriam Baglioni 2020-04-27 10:36:21 +02:00
parent e000754c92
commit e5a177f0a7
2 changed files with 258 additions and 193 deletions

PrepareResultInstRepoAssociation.java

@@ -1,5 +1,8 @@
package eu.dnetlib.dhp.resulttoorganizationfrominstrepo;

import static eu.dnetlib.dhp.PropagationConstant.*;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession;

import com.fasterxml.jackson.databind.ObjectMapper;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.schema.oaf.Datasource;
@@ -9,28 +12,26 @@ import org.apache.commons.io.IOUtils;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SaveMode;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class PrepareResultInstRepoAssociation {

    private static final Logger log =
            LoggerFactory.getLogger(PrepareResultInstRepoAssociation.class);

    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    public static void main(String[] args) throws Exception {

        String jsonConfiguration =
                IOUtils.toString(
                        PrepareResultInstRepoAssociation.class.getResourceAsStream(
                                "/eu/dnetlib/dhp/resulttoorganizationfrominstrepo/input_prepareresultorg_parameters.json"));

        final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);

        parser.parseArgument(args);
@@ -49,77 +50,89 @@ public class PrepareResultInstRepoAssociation {
        SparkConf conf = new SparkConf();
        conf.set("hive.metastore.uris", parser.get("hive_metastore_uris"));

        runWithSparkHiveSession(
                conf,
                isSparkSessionManaged,
                spark -> {
                    readNeededResources(spark, inputPath);
                    prepareDatasourceOrganizationAssociations(
                            spark, datasourceOrganizationPath, alreadyLinkedPath);
                    prepareAlreadyLinkedAssociation(spark, alreadyLinkedPath);
                });
    }
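    // Collects, for each result, the set of organization ids it is already linked to,
    // presumably so the propagation step does not recreate relations that already exist.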
    private static void prepareAlreadyLinkedAssociation(
            SparkSession spark, String alreadyLinkedPath) {
        String query =
                "Select source resultId, collect_set(target) organizationSet "
                        + "from relation "
                        + "where datainfo.deletedbyinference = false "
                        + "and relClass = '"
                        + RELATION_RESULT_ORGANIZATION_REL_CLASS
                        + "' "
                        + "group by source";

        spark.sql(query)
                .as(Encoders.bean(ResultOrganizationSet.class))
                .toJavaRDD()
                .map(r -> OBJECT_MAPPER.writeValueAsString(r))
                .saveAsTextFile(alreadyLinkedPath, GzipCodec.class);
        //        .as(Encoders.bean(ResultOrganizationSet.class))
        //        .toJSON()
        //        .write()
        //        .mode(SaveMode.Overwrite)
        //        .option("compression","gzip")
        //        .text(alreadyLinkedPath);
    }
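    // Loads the datasource, relation and organization JSON dumps and registers them as
    // temporary views for the SQL steps below.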
    private static void readNeededResources(SparkSession spark, String inputPath) {
        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());

        org.apache.spark.sql.Dataset<Datasource> datasource =
                spark.createDataset(
                        sc.textFile(inputPath + "/datasource")
                                .map(item -> new ObjectMapper().readValue(item, Datasource.class))
                                .rdd(),
                        Encoders.bean(Datasource.class));

        org.apache.spark.sql.Dataset<Relation> relation =
                spark.createDataset(
                        sc.textFile(inputPath + "/relation")
                                .map(item -> new ObjectMapper().readValue(item, Relation.class))
                                .rdd(),
                        Encoders.bean(Relation.class));

        org.apache.spark.sql.Dataset<Organization> organization =
                spark.createDataset(
                        sc.textFile(inputPath + "/organization")
                                .map(item -> new ObjectMapper().readValue(item, Organization.class))
                                .rdd(),
                        Encoders.bean(Organization.class));

        datasource.createOrReplaceTempView("datasource");
        relation.createOrReplaceTempView("relation");
        organization.createOrReplaceTempView("organization");
    }
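    // Joins institutional repositories (datasourcetype.classid = INSTITUTIONAL_REPO_TYPE)
    // with the datasource-organization relations, producing the (datasourceId, organizationId)
    // pairs the propagation is based on.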
    private static void prepareDatasourceOrganizationAssociations(
            SparkSession spark, String datasourceOrganizationPath, String alreadyLinkedPath) {

        String query =
                "SELECT source datasourceId, target organizationId "
                        + "FROM ( SELECT id "
                        + "FROM datasource "
                        + "WHERE datasourcetype.classid = '"
                        + INSTITUTIONAL_REPO_TYPE
                        + "' "
                        + "AND datainfo.deletedbyinference = false ) d "
                        + "JOIN ( SELECT source, target "
                        + "FROM relation "
                        + "WHERE relclass = '"
                        + RELATION_DATASOURCE_ORGANIZATION_REL_CLASS
                        + "' "
                        + "AND datainfo.deletedbyinference = false ) rel "
                        + "ON d.id = rel.source ";

        spark.sql(query)
                .as(Encoders.bean(DatasourceOrganization.class))
                .toJSON()
                .write()
                .mode(SaveMode.Overwrite)
                .option("compression", "gzip")
                .text(datasourceOrganizationPath);
    }
}

SparkResultToOrganizationFromIstRepoJob2.java

@@ -1,17 +1,14 @@
package eu.dnetlib.dhp.resulttoorganizationfrominstrepo;

import static eu.dnetlib.dhp.PropagationConstant.*;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession;

import com.fasterxml.jackson.databind.ObjectMapper;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.schema.oaf.*;
import java.util.*;
import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.broadcast.Broadcast;
@@ -19,27 +16,23 @@ import org.apache.spark.sql.*;
import org.apache.spark.sql.Dataset;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Tuple2;

public class SparkResultToOrganizationFromIstRepoJob2 {

    private static final Logger log =
            LoggerFactory.getLogger(SparkResultToOrganizationFromIstRepoJob2.class);

    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    public static void main(String[] args) throws Exception {

        String jsonConfiguration =
                IOUtils.toString(
                        SparkResultToOrganizationFromIstRepoJob2.class.getResourceAsStream(
                                "/eu/dnetlib/dhp/resulttoorganizationfrominstrepo/input_propagationresulaffiliationfrominstrepo_parameters.json"));

        final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);

        parser.parseArgument(args);
@@ -49,7 +42,6 @@ public class SparkResultToOrganizationFromIstRepoJob2 {
        String inputPath = parser.get("sourcePath");
        log.info("inputPath: {}", inputPath);

        final String outputPath = parser.get("outputPath");
        log.info("outputPath: {}", outputPath);
@@ -62,119 +54,163 @@ public class SparkResultToOrganizationFromIstRepoJob2 {
        final String resultClassName = parser.get("resultTableName");
        log.info("resultTableName: {}", resultClassName);

        final String resultType =
                resultClassName.substring(resultClassName.lastIndexOf(".") + 1).toLowerCase();
        log.info("resultType: {}", resultType);

        final Boolean writeUpdates =
                Optional.ofNullable(parser.get("writeUpdate"))
                        .map(Boolean::valueOf)
                        .orElse(Boolean.TRUE);
        log.info("writeUpdate: {}", writeUpdates);

        final Boolean saveGraph =
                Optional.ofNullable(parser.get("saveGraph"))
                        .map(Boolean::valueOf)
                        .orElse(Boolean.TRUE);
        log.info("saveGraph: {}", saveGraph);

        Class<? extends Result> resultClazz =
                (Class<? extends Result>) Class.forName(resultClassName);

        SparkConf conf = new SparkConf();
        conf.set("hive.metastore.uris", parser.get("hive_metastore_uris"));

        runWithSparkHiveSession(
                conf,
                isSparkSessionManaged,
                spark -> {
                    if (isTest(parser)) {
                        removeOutputDir(spark, outputPath);
                    }
                    execPropagation(
                            spark,
                            datasourceorganization,
                            alreadylinked,
                            inputPath,
                            outputPath,
                            resultClazz,
                            resultType,
                            writeUpdates,
                            saveGraph);
                });
    }
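    // Core propagation step: broadcasts the datasource-organization associations produced
    // by the preparation job, computes the candidate result-organization links, and,
    // depending on the writeUpdates / saveGraph flags, writes the raw updates and/or
    // appends the new relations to the graph output.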
    private static void execPropagation(
            SparkSession spark,
            String datasourceorganization,
            String alreadylinked,
            String inputPath,
            String outputPath,
            Class<? extends Result> resultClazz,
            String resultType,
            Boolean writeUpdates,
            Boolean saveGraph) {
        final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());

        org.apache.spark.sql.Dataset<DatasourceOrganization> datasourceorganizationassoc =
                readAssocDatasourceOrganization(spark, datasourceorganization);

        // broadcasting the result of the preparation step
        Broadcast<org.apache.spark.sql.Dataset<DatasourceOrganization>>
                broadcast_datasourceorganizationassoc = sc.broadcast(datasourceorganizationassoc);

        org.apache.spark.sql.Dataset<ResultOrganizationSet> potentialUpdates =
                getPotentialRelations(
                                spark,
                                inputPath,
                                resultClazz,
                                broadcast_datasourceorganizationassoc)
                        .as(Encoders.bean(ResultOrganizationSet.class));

        if (writeUpdates) {
            createUpdateForRelationWrite(potentialUpdates, outputPath + "/" + resultType);
        }

        if (saveGraph) {
            getNewRelations(
                            spark.read()
                                    .textFile(alreadylinked)
                                    .map(
                                            value ->
                                                    OBJECT_MAPPER.readValue(
                                                            value, ResultOrganizationSet.class),
                                            Encoders.bean(ResultOrganizationSet.class)),
                            potentialUpdates)
                    .toJSON()
                    .write()
                    .mode(SaveMode.Append)
                    .option("compression", "gzip")
                    .text(outputPath);
            //        .toJavaRDD()
            //        .map(r -> OBJECT_MAPPER.writeValueAsString(r))
            //        .saveAsTextFile(outputPath , GzipCodec.class);
        }
    }
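    // Left-outer-joins the candidate updates with the already linked organization sets on
    // resultId, removes organizations the result is already linked to, and emits a pair of
    // relations (organization -> result and result -> organization) for each remaining one.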
    private static Dataset<Relation> getNewRelations(
            Dataset<ResultOrganizationSet> alreadyLinked,
            Dataset<ResultOrganizationSet> potentialUpdates) {

        return potentialUpdates
                .joinWith(
                        alreadyLinked,
                        potentialUpdates.col("resultId").equalTo(alreadyLinked.col("resultId")),
                        "left_outer")
                .flatMap(
                        (FlatMapFunction<
                                        Tuple2<ResultOrganizationSet, ResultOrganizationSet>,
                                        Relation>)
                                value -> {
                                    List<Relation> new_relations = new ArrayList<>();
                                    ResultOrganizationSet potential_update = value._1();
                                    Optional<ResultOrganizationSet> already_linked =
                                            Optional.ofNullable(value._2());
                                    List<String> organization_list =
                                            potential_update.getOrganizationSet();
                                    if (already_linked.isPresent()) {
                                        already_linked.get().getOrganizationSet().stream()
                                                .forEach(
                                                        rId -> {
                                                            if (organization_list.contains(rId)) {
                                                                organization_list.remove(rId);
                                                            }
                                                        });
                                    }
                                    String resultId = potential_update.getResultId();
                                    organization_list.stream()
                                            .forEach(
                                                    orgId -> {
                                                        new_relations.add(
                                                                getRelation(
                                                                        orgId,
                                                                        resultId,
                                                                        RELATION_ORGANIZATION_RESULT_REL_CLASS,
                                                                        RELATION_RESULTORGANIZATION_REL_TYPE,
                                                                        RELATION_RESULTORGANIZATION_SUBREL_TYPE,
                                                                        PROPAGATION_DATA_INFO_TYPE,
                                                                        PROPAGATION_RELATION_RESULT_ORGANIZATION_INST_REPO_CLASS_ID,
                                                                        PROPAGATION_RELATION_RESULT_ORGANIZATION_INST_REPO_CLASS_NAME));
                                                        new_relations.add(
                                                                getRelation(
                                                                        resultId,
                                                                        orgId,
                                                                        RELATION_RESULT_ORGANIZATION_REL_CLASS,
                                                                        RELATION_RESULTORGANIZATION_REL_TYPE,
                                                                        RELATION_RESULTORGANIZATION_SUBREL_TYPE,
                                                                        PROPAGATION_DATA_INFO_TYPE,
                                                                        PROPAGATION_RELATION_RESULT_ORGANIZATION_INST_REPO_CLASS_ID,
                                                                        PROPAGATION_RELATION_RESULT_ORGANIZATION_INST_REPO_CLASS_NAME));
                                                    });
                                    return new_relations.iterator();
                                },
                        Encoders.bean(Relation.class));
    }
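    // Registers the result entities as a temporary view, derives the cfhb view for them
    // (cf/hb appear to be the collectedfrom/hostedby datasource ids), and returns the
    // candidate result-organization associations.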
    private static <R extends Result>
            org.apache.spark.sql.Dataset<ResultOrganizationSet> getPotentialRelations(
                    SparkSession spark,
                    String inputPath,
                    Class<R> resultClazz,
                    Broadcast<org.apache.spark.sql.Dataset<DatasourceOrganization>>
                            broadcast_datasourceorganizationassoc) {

        org.apache.spark.sql.Dataset<R> result = readPathEntity(spark, inputPath, resultClazz);
        result.createOrReplaceTempView("result");
        createCfHbforresult(spark);
@@ -182,59 +218,75 @@ public class SparkResultToOrganizationFromIstRepoJob2 {
        return organizationPropagationAssoc(spark, broadcast_datasourceorganizationassoc);
    }
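    // Reads the (datasource, organization) associations written by the preparation job.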
    private static org.apache.spark.sql.Dataset<DatasourceOrganization>
            readAssocDatasourceOrganization(
                    SparkSession spark, String datasourcecountryorganization) {
        return spark.read()
                .textFile(datasourcecountryorganization)
                .map(
                        value -> OBJECT_MAPPER.readValue(value, DatasourceOrganization.class),
                        Encoders.bean(DatasourceOrganization.class));
    }
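    // Materializes the potential updates as relations in both directions, without checking
    // them against the already linked organizations; used when writeUpdates is set.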
    private static void createUpdateForRelationWrite(
            Dataset<ResultOrganizationSet> toupdaterelation, String outputPath) {
        toupdaterelation
                .flatMap(
                        s -> {
                            List<Relation> relationList = new ArrayList<>();
                            List<String> orgs = s.getOrganizationSet();
                            String resId = s.getResultId();
                            for (String org : orgs) {
                                relationList.add(
                                        getRelation(
                                                org,
                                                resId,
                                                RELATION_ORGANIZATION_RESULT_REL_CLASS,
                                                RELATION_RESULTORGANIZATION_REL_TYPE,
                                                RELATION_RESULTORGANIZATION_SUBREL_TYPE,
                                                PROPAGATION_DATA_INFO_TYPE,
                                                PROPAGATION_RELATION_RESULT_ORGANIZATION_INST_REPO_CLASS_ID,
                                                PROPAGATION_RELATION_RESULT_ORGANIZATION_INST_REPO_CLASS_NAME));
                                relationList.add(
                                        getRelation(
                                                resId,
                                                org,
                                                RELATION_RESULT_ORGANIZATION_REL_CLASS,
                                                RELATION_RESULTORGANIZATION_REL_TYPE,
                                                RELATION_RESULTORGANIZATION_SUBREL_TYPE,
                                                PROPAGATION_DATA_INFO_TYPE,
                                                PROPAGATION_RELATION_RESULT_ORGANIZATION_INST_REPO_CLASS_ID,
                                                PROPAGATION_RELATION_RESULT_ORGANIZATION_INST_REPO_CLASS_NAME));
                            }
                            return relationList.iterator();
                        },
                        Encoders.bean(Relation.class))
                .toJSON()
                .write()
                .mode(SaveMode.Overwrite)
                .option("compression", "gzip")
                .text(outputPath);
    }
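    // Associates each result id with the organizations of the institutional repositories it
    // was collected from (cf) or is hosted by (hb). As a hypothetical example, a result with
    // cf = repo1, hb = repo2 and rels rows (repo1, orgA), (repo2, orgB) would yield
    // { resultId, organizationSet = [orgA, orgB] }; the repo/org names here are illustrative.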
    private static org.apache.spark.sql.Dataset<ResultOrganizationSet> organizationPropagationAssoc(
            SparkSession spark,
            Broadcast<org.apache.spark.sql.Dataset<DatasourceOrganization>>
                    broadcast_datasourceorganizationassoc) {
        org.apache.spark.sql.Dataset<DatasourceOrganization> datasourceorganization =
                broadcast_datasourceorganizationassoc.value();
        datasourceorganization.createOrReplaceTempView("rels");

        String query =
                "SELECT id resultId, collect_set(organizationId) organizationSet "
                        + "FROM ( SELECT id, organizationId "
                        + "FROM rels "
                        + "JOIN cfhb "
                        + " ON cf = datasourceId "
                        + "UNION ALL "
                        + "SELECT id , organizationId "
                        + "FROM rels "
                        + "JOIN cfhb "
                        + " ON hb = datasourceId ) tmp "
                        + "GROUP BY id";

        return spark.sql(query).as(Encoders.bean(ResultOrganizationSet.class));
    }
}