master #11

Manually merged
claudio.atzori merged 275 commits from :master into enrichment_wfs 2020-05-11 15:14:56 +02:00
2 changed files with 228 additions and 196 deletions
Showing only changes of commit fa2ff5c6f5

File: PrepareDatasourceCountryAssociation.java (package eu.dnetlib.dhp.countrypropagation)

@@ -1,47 +1,43 @@
 package eu.dnetlib.dhp.countrypropagation;
 
+import static eu.dnetlib.dhp.PropagationConstant.*;
+import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession;
+
 import com.fasterxml.jackson.databind.ObjectMapper;
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.schema.oaf.*;
+import java.util.Arrays;
+import java.util.List;
 import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.spark.SparkConf;
 import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.sql.Dataset;
 import org.apache.spark.sql.Encoders;
 import org.apache.spark.sql.SparkSession;
-import org.apache.spark.sql.SaveMode;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Optional;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static eu.dnetlib.dhp.PropagationConstant.*;
-import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession;
-
-/**
- * For the association of the country to the datasource
- * The association is computed only for datasource of specific type or having whitelisted ids
- * The country is registered in the Organization associated to the Datasource, so the
- * relation provides between Datasource and Organization is exploited to get the country for the datasource
- */
+/**
+ * For the association of the country to the datasource The association is computed only for
+ * datasource of specific type or having whitelisted ids The country is registered in the
+ * Organization associated to the Datasource, so the relation provides between Datasource and
+ * Organization is exploited to get the country for the datasource
+ */
 public class PrepareDatasourceCountryAssociation {
 
-    private static final Logger log = LoggerFactory.getLogger(PrepareDatasourceCountryAssociation.class);
+    private static final Logger log =
+            LoggerFactory.getLogger(PrepareDatasourceCountryAssociation.class);
 
     private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
 
     public static void main(String[] args) throws Exception {
-        String jsonConfiguration = IOUtils.toString(PrepareDatasourceCountryAssociation.class
-            .getResourceAsStream("/eu/dnetlib/dhp/countrypropagation/input_prepareassoc_parameters.json"));
+        String jsonConfiguration =
+                IOUtils.toString(
+                        PrepareDatasourceCountryAssociation.class.getResourceAsStream(
+                                "/eu/dnetlib/dhp/countrypropagation/input_prepareassoc_parameters.json"));
 
-        final ArgumentApplicationParser parser = new ArgumentApplicationParser(
-            jsonConfiguration);
+        final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
 
         parser.parseArgument(args);
@@ -57,73 +53,83 @@ public class PrepareDatasourceCountryAssociation {
         SparkConf conf = new SparkConf();
         conf.set("hive.metastore.uris", parser.get("hive_metastore_uris"));
 
-        runWithSparkHiveSession(conf, isSparkSessionManaged,
+        runWithSparkHiveSession(
+                conf,
+                isSparkSessionManaged,
                 spark -> {
                     removeOutputDir(spark, outputPath);
-                    prepareDatasourceCountryAssociation(spark,
+                    prepareDatasourceCountryAssociation(
+                            spark,
                             Arrays.asList(parser.get("whitelist").split(";")),
                             Arrays.asList(parser.get("allowedtypes").split(";")),
                             inputPath,
                             outputPath);
                 });
     }
 
-    private static void prepareDatasourceCountryAssociation(SparkSession spark,
-        List<String> whitelist,
-        List<String> allowedtypes,
-        String inputPath,
-        String outputPath) {
+    private static void prepareDatasourceCountryAssociation(
+            SparkSession spark,
+            List<String> whitelist,
+            List<String> allowedtypes,
+            String inputPath,
+            String outputPath) {
         String whitelisted = "";
-        for (String i : whitelist){
+        for (String i : whitelist) {
             whitelisted += " OR id = '" + i + "'";
         }
         final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
 
-        Dataset<Datasource> datasource = spark.createDataset(sc.textFile(inputPath + "/datasource")
-            .map(item -> OBJECT_MAPPER.readValue(item, Datasource.class)).rdd(), Encoders.bean(Datasource.class));
+        Dataset<Datasource> datasource =
+                spark.createDataset(
+                        sc.textFile(inputPath + "/datasource")
+                                .map(item -> OBJECT_MAPPER.readValue(item, Datasource.class))
+                                .rdd(),
+                        Encoders.bean(Datasource.class));
 
-        Dataset<Relation> relation = spark.createDataset(sc.textFile(inputPath + "/relation")
-            .map(item -> OBJECT_MAPPER.readValue(item, Relation.class)).rdd(), Encoders.bean(Relation.class));
+        Dataset<Relation> relation =
+                spark.createDataset(
+                        sc.textFile(inputPath + "/relation")
+                                .map(item -> OBJECT_MAPPER.readValue(item, Relation.class))
+                                .rdd(),
+                        Encoders.bean(Relation.class));
 
-        Dataset<Organization> organization = spark.createDataset(sc.textFile(inputPath + "/organization")
-            .map(item -> OBJECT_MAPPER.readValue(item, Organization.class)).rdd(), Encoders.bean(Organization.class));
+        Dataset<Organization> organization =
+                spark.createDataset(
+                        sc.textFile(inputPath + "/organization")
+                                .map(item -> OBJECT_MAPPER.readValue(item, Organization.class))
+                                .rdd(),
+                        Encoders.bean(Organization.class));
 
         datasource.createOrReplaceTempView("datasource");
         relation.createOrReplaceTempView("relation");
         organization.createOrReplaceTempView("organization");
 
-        String query = "SELECT source dataSourceId, named_struct('classid', country.classid, 'classname', country.classname) country " +
-            "FROM ( SELECT id " +
-            "       FROM datasource " +
-            "       WHERE (datainfo.deletedbyinference = false " + whitelisted + ") " +
-            getConstraintList("datasourcetype.classid = '", allowedtypes) + ") d " +
-            "JOIN ( SELECT source, target " +
-            "       FROM relation " +
-            "       WHERE relclass = '" + RELATION_DATASOURCE_ORGANIZATION_REL_CLASS + "' " +
-            "       AND datainfo.deletedbyinference = false ) rel " +
-            "ON d.id = rel.source " +
-            "JOIN (SELECT id, country " +
-            "      FROM organization " +
-            "      WHERE datainfo.deletedbyinference = false " +
-            "      AND length(country.classid)>0) o " +
-            "ON o.id = rel.target";
+        String query =
+                "SELECT source dataSourceId, named_struct('classid', country.classid, 'classname', country.classname) country "
+                        + "FROM ( SELECT id "
+                        + "       FROM datasource "
+                        + "       WHERE (datainfo.deletedbyinference = false "
+                        + whitelisted
+                        + ") "
+                        + getConstraintList("datasourcetype.classid = '", allowedtypes)
+                        + ") d "
+                        + "JOIN ( SELECT source, target "
+                        + "       FROM relation "
+                        + "       WHERE relclass = '"
+                        + RELATION_DATASOURCE_ORGANIZATION_REL_CLASS
+                        + "' "
+                        + "       AND datainfo.deletedbyinference = false ) rel "
+                        + "ON d.id = rel.source "
+                        + "JOIN (SELECT id, country "
+                        + "      FROM organization "
+                        + "      WHERE datainfo.deletedbyinference = false "
+                        + "      AND length(country.classid)>0) o "
+                        + "ON o.id = rel.target";
 
         spark.sql(query)
                 .as(Encoders.bean(DatasourceCountry.class))
                 .toJavaRDD()
                 .map(c -> OBJECT_MAPPER.writeValueAsString(c))
                 .saveAsTextFile(outputPath, GzipCodec.class);
     }
 }
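For reference, a minimal standalone sketch of how the two string fragments above (the whitelist OR-chain and the allowed-types constraint) combine into the datasource filter of the query. The getConstraintList below is a hypothetical stand-in for the helper statically imported from PropagationConstant, and the ids and type names are invented sample values:

import java.util.Arrays;
import java.util.List;

public class WhitelistClauseSketch {

    // Hypothetical stand-in for PropagationConstant.getConstraintList:
    // chains "prefix<value>'" terms into an AND-ed OR group, e.g.
    // AND (datasourcetype.classid = 'pubsrepository::institutional' OR ...)
    static String getConstraintList(String prefix, List<String> allowedtypes) {
        StringBuilder sb = new StringBuilder(" AND (" + prefix + allowedtypes.get(0) + "'");
        for (String type : allowedtypes.subList(1, allowedtypes.size())) {
            sb.append(" OR " + prefix + type + "'");
        }
        sb.append(") ");
        return sb.toString();
    }

    public static void main(String[] args) {
        // invented sample values; a real run takes these from the parsed
        // job arguments ("whitelist" and "allowedtypes", split on ';')
        List<String> whitelist = Arrays.asList("10|sample______::1234", "10|sample______::5678");
        List<String> allowedtypes =
                Arrays.asList("pubsrepository::institutional", "datarepository::unknown");

        // same accumulation loop as in prepareDatasourceCountryAssociation
        String whitelisted = "";
        for (String i : whitelist) {
            whitelisted += " OR id = '" + i + "'";
        }

        // the WHERE fragment of the datasource sub-select in the query above
        String filter =
                "WHERE (datainfo.deletedbyinference = false "
                        + whitelisted
                        + ") "
                        + getConstraintList("datasourcetype.classid = '", allowedtypes);
        System.out.println(filter);
    }
}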

File: SparkCountryPropagationJob2.java (package eu.dnetlib.dhp.countrypropagation)

@@ -1,28 +1,22 @@
 package eu.dnetlib.dhp.countrypropagation;
 
+import static eu.dnetlib.dhp.PropagationConstant.*;
+import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession;
+
 import com.fasterxml.jackson.databind.ObjectMapper;
 import eu.dnetlib.dhp.application.ArgumentApplicationParser;
 import eu.dnetlib.dhp.schema.oaf.*;
+import java.util.*;
 import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.GzipCodec;
 import org.apache.spark.SparkConf;
-import org.apache.spark.api.java.JavaRDD;
 import org.apache.spark.api.java.JavaSparkContext;
 import org.apache.spark.api.java.function.MapFunction;
 import org.apache.spark.broadcast.Broadcast;
 import org.apache.spark.sql.*;
 import org.apache.spark.sql.Dataset;
-import scala.Tuple2;
-
-import java.util.*;
-
-import static eu.dnetlib.dhp.PropagationConstant.*;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import scala.Tuple2;
-
-import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkHiveSession;
 
 public class SparkCountryPropagationJob2 {
@@ -30,14 +24,14 @@ public class SparkCountryPropagationJob2 {
     private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
 
     public static void main(String[] args) throws Exception {
-        String jsonConfiguration = IOUtils.toString(SparkCountryPropagationJob2.class
-            .getResourceAsStream("/eu/dnetlib/dhp/countrypropagation/input_countrypropagation_parameters.json"));
+        String jsonConfiguration =
+                IOUtils.toString(
+                        SparkCountryPropagationJob2.class.getResourceAsStream(
+                                "/eu/dnetlib/dhp/countrypropagation/input_countrypropagation_parameters.json"));
 
-        final ArgumentApplicationParser parser = new ArgumentApplicationParser(
-            jsonConfiguration);
+        final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
 
         parser.parseArgument(args);
@@ -56,182 +50,214 @@ public class SparkCountryPropagationJob2 {
         final String resultClassName = parser.get("resultTableName");
         log.info("resultTableName: {}", resultClassName);
 
-        final String resultType = resultClassName.substring(resultClassName.lastIndexOf(".") + 1).toLowerCase();
+        final String resultType =
+                resultClassName.substring(resultClassName.lastIndexOf(".") + 1).toLowerCase();
         log.info("resultType: {}", resultType);
 
-        final Boolean writeUpdates = Optional
-            .ofNullable(parser.get("writeUpdate"))
-            .map(Boolean::valueOf)
-            .orElse(Boolean.TRUE);
+        final Boolean writeUpdates =
+                Optional.ofNullable(parser.get("writeUpdate"))
+                        .map(Boolean::valueOf)
+                        .orElse(Boolean.TRUE);
         log.info("writeUpdate: {}", writeUpdates);
 
-        final Boolean saveGraph = Optional
-            .ofNullable(parser.get("saveGraph"))
-            .map(Boolean::valueOf)
-            .orElse(Boolean.TRUE);
+        final Boolean saveGraph =
+                Optional.ofNullable(parser.get("saveGraph"))
+                        .map(Boolean::valueOf)
+                        .orElse(Boolean.TRUE);
         log.info("saveGraph: {}", saveGraph);
 
-        Class<? extends Result> resultClazz = (Class<? extends Result>) Class.forName(resultClassName);
+        Class<? extends Result> resultClazz =
+                (Class<? extends Result>) Class.forName(resultClassName);
 
         SparkConf conf = new SparkConf();
         conf.set("hive.metastore.uris", parser.get("hive_metastore_uris"));
 
-        runWithSparkHiveSession(conf, isSparkSessionManaged,
+        runWithSparkHiveSession(
+                conf,
+                isSparkSessionManaged,
                 spark -> {
-                    //createOutputDirs(outputPath, FileSystem.get(spark.sparkContext().hadoopConfiguration()));
+                    // createOutputDirs(outputPath,
+                    // FileSystem.get(spark.sparkContext().hadoopConfiguration()));
                     removeOutputDir(spark, outputPath);
-                    execPropagation(spark, datasourcecountrypath, inputPath, outputPath, resultClazz, resultType,
-                        writeUpdates, saveGraph);
+                    execPropagation(
+                            spark,
+                            datasourcecountrypath,
+                            inputPath,
+                            outputPath,
+                            resultClazz,
+                            resultType,
+                            writeUpdates,
+                            saveGraph);
                 });
     }
 
-    private static <R extends Result> void execPropagation(SparkSession spark, String datasourcecountrypath,
-        String inputPath, String outputPath, Class<R> resultClazz, String resultType,
-        boolean writeUpdates, boolean saveGraph){
+    private static <R extends Result> void execPropagation(
+            SparkSession spark,
+            String datasourcecountrypath,
+            String inputPath,
+            String outputPath,
+            Class<R> resultClazz,
+            String resultType,
+            boolean writeUpdates,
+            boolean saveGraph) {
         final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
 
-        //Load parque file with preprocessed association datasource - country
-        Dataset<DatasourceCountry> datasourcecountryassoc = readAssocDatasourceCountry(spark, datasourcecountrypath);
-        //broadcasting the result of the preparation step
-        Broadcast<Dataset<DatasourceCountry>> broadcast_datasourcecountryassoc = sc.broadcast(datasourcecountryassoc);
+        // Load parque file with preprocessed association datasource - country
+        Dataset<DatasourceCountry> datasourcecountryassoc =
+                readAssocDatasourceCountry(spark, datasourcecountrypath);
+        // broadcasting the result of the preparation step
+        Broadcast<Dataset<DatasourceCountry>> broadcast_datasourcecountryassoc =
+                sc.broadcast(datasourcecountryassoc);
 
-        Dataset<ResultCountrySet> potentialUpdates = getPotentialResultToUpdate(spark, inputPath, resultClazz,
-            broadcast_datasourcecountryassoc).as(Encoders.bean(ResultCountrySet.class));
+        Dataset<ResultCountrySet> potentialUpdates =
+                getPotentialResultToUpdate(
+                                spark, inputPath, resultClazz, broadcast_datasourcecountryassoc)
+                        .as(Encoders.bean(ResultCountrySet.class));
 
-        if(writeUpdates){
+        if (writeUpdates) {
             writeUpdates(potentialUpdates, outputPath + "/update_" + resultType);
         }
 
-        if(saveGraph){
+        if (saveGraph) {
             updateResultTable(spark, potentialUpdates, inputPath, resultClazz, outputPath);
         }
     }
 
-    private static <R extends Result> void updateResultTable(SparkSession spark, Dataset<ResultCountrySet> potentialUpdates,
-        String inputPath,
-        Class<R> resultClazz,
-        String outputPath) {
+    private static <R extends Result> void updateResultTable(
+            SparkSession spark,
+            Dataset<ResultCountrySet> potentialUpdates,
+            String inputPath,
+            Class<R> resultClazz,
+            String outputPath) {
 
         log.info("Reading Graph table from: {}", inputPath);
         Dataset<R> result = readPathEntity(spark, inputPath, resultClazz);
 
-        Dataset<Tuple2<String, R>> result_pair = result
-            .map(r -> new Tuple2<>(r.getId(), r),
-                Encoders.tuple(Encoders.STRING(), Encoders.bean(resultClazz)));
+        Dataset<Tuple2<String, R>> result_pair =
+                result.map(
+                        r -> new Tuple2<>(r.getId(), r),
+                        Encoders.tuple(Encoders.STRING(), Encoders.bean(resultClazz)));
 
-//        Dataset<Tuple2<String, ResultCountrySet>> potential_update_pair = potentialUpdates.map(pu -> new Tuple2<>(pu.getResultId(),
-//                pu),
-//                Encoders.tuple(Encoders.STRING(), Encoders.bean(ResultCountrySet.class)));
+        //        Dataset<Tuple2<String, ResultCountrySet>> potential_update_pair =
+        // potentialUpdates.map(pu -> new Tuple2<>(pu.getResultId(),
+        //                pu),
+        //                Encoders.tuple(Encoders.STRING(), Encoders.bean(ResultCountrySet.class)));
 
-        Dataset<R> new_table = result_pair
-            .joinWith(potentialUpdates, result_pair.col("_1").equalTo(potentialUpdates.col("resultId")), "left_outer")
-            .map((MapFunction<Tuple2<Tuple2<String, R>, ResultCountrySet>, R>) value -> {
-                R r = value._1()._2();
-                Optional<ResultCountrySet> potentialNewCountries = Optional.ofNullable(value._2());
-                if (potentialNewCountries.isPresent()) {
-                    HashSet<String> countries = new HashSet<>();
-                    for (Qualifier country : r.getCountry()) {
-                        countries.add(country.getClassid());
-                    }
-                    Result res = new Result();
-                    res.setId(r.getId());
-                    List<Country> countryList = new ArrayList<>();
-                    for (CountrySbs country : potentialNewCountries.get().getCountrySet()) {
-                        if (!countries.contains(country.getClassid())) {
-                            countryList.add(getCountry(country.getClassid(), country.getClassname()));
-                        }
-                    }
-                    res.setCountry(countryList);
-                    r.mergeFrom(res);
-                }
-                return r;
-            }, Encoders.bean(resultClazz));
+        Dataset<R> new_table =
+                result_pair
+                        .joinWith(
+                                potentialUpdates,
+                                result_pair.col("_1").equalTo(potentialUpdates.col("resultId")),
+                                "left_outer")
+                        .map(
+                                (MapFunction<Tuple2<Tuple2<String, R>, ResultCountrySet>, R>)
+                                        value -> {
+                                            R r = value._1()._2();
+                                            Optional<ResultCountrySet> potentialNewCountries =
+                                                    Optional.ofNullable(value._2());
+                                            if (potentialNewCountries.isPresent()) {
+                                                HashSet<String> countries = new HashSet<>();
+                                                for (Qualifier country : r.getCountry()) {
+                                                    countries.add(country.getClassid());
+                                                }
+                                                Result res = new Result();
+                                                res.setId(r.getId());
+                                                List<Country> countryList = new ArrayList<>();
+                                                for (CountrySbs country :
+                                                        potentialNewCountries
+                                                                .get()
+                                                                .getCountrySet()) {
+                                                    if (!countries.contains(country.getClassid())) {
+                                                        countryList.add(
+                                                                getCountry(
+                                                                        country.getClassid(),
+                                                                        country.getClassname()));
+                                                    }
+                                                }
+                                                res.setCountry(countryList);
+                                                r.mergeFrom(res);
+                                            }
+                                            return r;
+                                        },
+                                Encoders.bean(resultClazz));
 
         log.info("Saving graph table to path: {}", outputPath);
-        //log.info("number of saved recordsa: {}", new_table.count());
-        new_table
-            .toJSON()
-            .write()
-            .option("compression", "gzip")
-            .text(outputPath);
-//            .toJavaRDD()
-//            .map(r -> OBJECT_MAPPER.writeValueAsString(r))
-//            .saveAsTextFile(outputPath , GzipCodec.class);
+        // log.info("number of saved recordsa: {}", new_table.count());
+        new_table.toJSON().write().option("compression", "gzip").text(outputPath);
+        //                .toJavaRDD()
+        //                .map(r -> OBJECT_MAPPER.writeValueAsString(r))
+        //                .saveAsTextFile(outputPath , GzipCodec.class);
     }
 
-    private static <R extends Result> Dataset<Row> getPotentialResultToUpdate(SparkSession spark, String inputPath,
-        Class<R> resultClazz,
-        Broadcast<Dataset<DatasourceCountry>> broadcast_datasourcecountryassoc) {
+    private static <R extends Result> Dataset<Row> getPotentialResultToUpdate(
+            SparkSession spark,
+            String inputPath,
+            Class<R> resultClazz,
+            Broadcast<Dataset<DatasourceCountry>> broadcast_datasourcecountryassoc) {
         Dataset<R> result = readPathEntity(spark, inputPath, resultClazz);
         result.createOrReplaceTempView("result");
-        //log.info("number of results: {}", result.count());
+        // log.info("number of results: {}", result.count());
         createCfHbforresult(spark);
         return countryPropagationAssoc(spark, broadcast_datasourcecountryassoc);
     }
 
-//    private static void createCfHbforresult(SparkSession spark) {
-//        String query;
-//        query = "SELECT id, inst.collectedfrom.key cf , inst.hostedby.key hb " +
-//                "FROM ( SELECT id, instance " +
-//                "FROM result " +
-//                " WHERE datainfo.deletedbyinference = false) ds " +
-//                "LATERAL VIEW EXPLODE(instance) i AS inst";
-//        Dataset<Row> cfhb = spark.sql(query);
-//        cfhb.createOrReplaceTempView("cfhb");
-//        //log.info("cfhb_number : {}", cfhb.count());
-//    }
+    //    private static void createCfHbforresult(SparkSession spark) {
+    //        String query;
+    //        query = "SELECT id, inst.collectedfrom.key cf , inst.hostedby.key hb " +
+    //                "FROM ( SELECT id, instance " +
+    //                "FROM result " +
+    //                " WHERE datainfo.deletedbyinference = false) ds " +
+    //                "LATERAL VIEW EXPLODE(instance) i AS inst";
+    //        Dataset<Row> cfhb = spark.sql(query);
+    //        cfhb.createOrReplaceTempView("cfhb");
+    //        //log.info("cfhb_number : {}", cfhb.count());
+    //    }
 
-    private static Dataset<Row> countryPropagationAssoc(SparkSession spark,
-        Broadcast<Dataset<DatasourceCountry>> broadcast_datasourcecountryassoc){
+    private static Dataset<Row> countryPropagationAssoc(
+            SparkSession spark,
+            Broadcast<Dataset<DatasourceCountry>> broadcast_datasourcecountryassoc) {
         Dataset<DatasourceCountry> datasource_country = broadcast_datasourcecountryassoc.value();
         datasource_country.createOrReplaceTempView("datasource_country");
-        log.info("datasource_country number : {}",datasource_country.count());
+        log.info("datasource_country number : {}", datasource_country.count());
 
-        String query = "SELECT id resultId, collect_set(country) countrySet "+
-            "FROM ( SELECT id, country " +
-            "FROM datasource_country " +
-            "JOIN cfhb " +
-            " ON cf = dataSourceId " +
-            "UNION ALL " +
-            "SELECT id , country " +
-            "FROM datasource_country " +
-            "JOIN cfhb " +
-            " ON hb = dataSourceId ) tmp " +
-            "GROUP BY id";
+        String query =
+                "SELECT id resultId, collect_set(country) countrySet "
+                        + "FROM ( SELECT id, country "
+                        + "FROM datasource_country "
+                        + "JOIN cfhb "
+                        + " ON cf = dataSourceId "
+                        + "UNION ALL "
+                        + "SELECT id , country "
+                        + "FROM datasource_country "
+                        + "JOIN cfhb "
+                        + " ON hb = dataSourceId ) tmp "
+                        + "GROUP BY id";
 
         Dataset<Row> potentialUpdates = spark.sql(query);
-        //log.info("potential update number : {}", potentialUpdates.count());
+        // log.info("potential update number : {}", potentialUpdates.count());
         return potentialUpdates;
     }
 
-    private static Dataset<DatasourceCountry> readAssocDatasourceCountry(SparkSession spark, String relationPath) {
-        return spark
-            .read()
-            .textFile(relationPath)
-            .map(value -> OBJECT_MAPPER.readValue(value, DatasourceCountry.class), Encoders.bean(DatasourceCountry.class));
+    private static Dataset<DatasourceCountry> readAssocDatasourceCountry(
+            SparkSession spark, String relationPath) {
+        return spark.read()
+                .textFile(relationPath)
+                .map(
+                        value -> OBJECT_MAPPER.readValue(value, DatasourceCountry.class),
+                        Encoders.bean(DatasourceCountry.class));
     }
 
-    private static void writeUpdates(Dataset<ResultCountrySet> potentialUpdates, String outputPath){
+    private static void writeUpdates(
+            Dataset<ResultCountrySet> potentialUpdates, String outputPath) {
         potentialUpdates
                 .toJSON()
                 .write()
                 .mode(SaveMode.Overwrite)
                 .option("compression", "gzip")
                 .text(outputPath);
-//            map(u -> OBJECT_MAPPER.writeValueAsString(u))
-//            .saveAsTextFile(outputPath, GzipCodec.class);
+        //            map(u -> OBJECT_MAPPER.writeValueAsString(u))
+        //            .saveAsTextFile(outputPath, GzipCodec.class);
     }
 }
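As a reading aid for the joinWith/map block in updateResultTable, a minimal sketch of the merge rule it applies: a country proposed by the propagation is added only when the result does not already carry that country code. Plain strings stand in for the oaf model classes (Qualifier, CountrySbs, Country), and the country codes are invented sample values:

import java.util.*;

public class CountryMergeSketch {

    public static void main(String[] args) {
        // countries already present on the result record (r.getCountry() in the job)
        List<String> existing = new ArrayList<>(Arrays.asList("IT", "NL"));
        // countrySet coming from potentialUpdates for the same resultId
        List<String> proposed = Arrays.asList("NL", "DE");

        // collect the classids already seen, mirroring the HashSet<String> countries loop
        Set<String> seen = new HashSet<>(existing);
        List<String> added = new ArrayList<>();
        for (String classid : proposed) {
            if (!seen.contains(classid)) { // same guard as !countries.contains(country.getClassid())
                added.add(classid);
            }
        }

        // merging the new countries back, the role played by r.mergeFrom(res)
        existing.addAll(added);
        System.out.println(existing); // [IT, NL, DE] -> only DE was actually new
    }
}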