[aggregator graph] handle paths including wildcards

Claudio Atzori 2023-03-08 12:43:00 +01:00
parent 588aca5ce4
commit 7fd89566c2
3 changed files with 25 additions and 12 deletions

View File: GenerateEntitiesApplication.java

@@ -9,6 +9,7 @@ import java.util.Objects;
 import java.util.Optional;
 import java.util.stream.Collectors;
 
+import eu.dnetlib.dhp.oa.graph.raw.common.AbstractMigrationApplication;
 import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.io.Text;
@@ -36,7 +37,7 @@ import eu.dnetlib.dhp.utils.ISLookupClientFactory;
 import eu.dnetlib.enabling.is.lookup.rmi.ISLookUpService;
 import scala.Tuple2;
 
-public class GenerateEntitiesApplication {
+public class GenerateEntitiesApplication extends AbstractMigrationApplication {
 
     private static final Logger log = LoggerFactory.getLogger(GenerateEntitiesApplication.class);
@@ -112,15 +113,12 @@ public class GenerateEntitiesApplication {
         final boolean shouldHashId,
         final Mode mode) {
 
-        final JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
-
-        final List<String> existingSourcePaths = Arrays
-            .stream(sourcePaths.split(","))
-            .filter(p -> HdfsSupport.exists(p, sc.hadoopConfiguration()))
-            .collect(Collectors.toList());
+        final List<String> existingSourcePaths = listEntityPaths(spark, sourcePaths);
 
         log.info("Generate entities from files:");
         existingSourcePaths.forEach(log::info);
 
+        final JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
+
         JavaRDD<Oaf> inputRdd = sc.emptyRDD();
 
         for (final String sp : existingSourcePaths) {
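The behavioural difference introduced here is that wildcard source paths are no longer silently dropped: the old inline filter kept only paths for which HdfsSupport.exists(...) returned true, which is typically false for a literal glob string containing "/*", while the shared listEntityPaths helper also lets wildcard entries through. A minimal, self-contained sketch of the two filters; PathChecker stands in for HdfsSupport.exists(path, hadoopConfiguration) so the sketch has no cluster dependency, and all paths are made up:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

// Illustrative comparison of the old and new filtering behaviour; not part of the commit.
class WildcardFilterExample {

    // Stand-in for HdfsSupport.exists(path, hadoopConfiguration).
    interface PathChecker {
        boolean check(String path);
    }

    // Old behaviour: a glob entry such as "/graph/*/publication" is dropped,
    // because the literal path typically does not exist on the filesystem.
    static List<String> keepExistingOnly(final String sourcePaths, final PathChecker exists) {
        return Arrays
            .stream(sourcePaths.split(","))
            .filter(exists::check)
            .collect(Collectors.toList());
    }

    // New behaviour: blank entries are removed, and wildcard entries are kept
    // even though the existence check fails for them.
    static List<String> keepExistingOrWildcard(final String sourcePaths, final PathChecker exists) {
        return Arrays
            .stream(sourcePaths.split(","))
            .filter(p -> !p.trim().isEmpty())
            .filter(p -> exists.check(p) || p.contains("/*"))
            .collect(Collectors.toList());
    }
}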

View File: VerifyRecordsApplication.java

@@ -9,6 +9,7 @@ import java.util.Objects;
 import java.util.Optional;
 import java.util.stream.Collectors;
 
+import eu.dnetlib.dhp.oa.graph.raw.common.AbstractMigrationApplication;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.spark.SparkConf;
@@ -28,7 +29,7 @@ import eu.dnetlib.dhp.utils.ISLookupClientFactory;
 import eu.dnetlib.enabling.is.lookup.rmi.ISLookUpService;
 import scala.Tuple2;
 
-public class VerifyRecordsApplication {
+public class VerifyRecordsApplication extends AbstractMigrationApplication {
 
     private static final Logger log = LoggerFactory.getLogger(VerifyRecordsApplication.class);
@@ -69,15 +70,13 @@ public class VerifyRecordsApplication {
     private static void validateRecords(SparkSession spark, String sourcePaths, String invalidPath,
         VocabularyGroup vocs) {
 
-        final JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
-
-        final List<String> existingSourcePaths = Arrays
-            .stream(sourcePaths.split(","))
-            .filter(p -> HdfsSupport.exists(p, sc.hadoopConfiguration()))
-            .collect(Collectors.toList());
+        final List<String> existingSourcePaths = listEntityPaths(spark, sourcePaths);
 
         log.info("Verify records in files:");
         existingSourcePaths.forEach(log::info);
 
+        final JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
+
         for (final String sp : existingSourcePaths) {
             RDD<String> invalidRecords = sc
                 .sequenceFile(sp, Text.class, Text.class)
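A wildcard entry kept by the new helper is handed to sc.sequenceFile unchanged; Hadoop's input path handling expands glob patterns at read time, which is what makes the pass-through safe. A small sketch under that assumption, with a made-up path that is not from the commit:

import org.apache.hadoop.io.Text;
import org.apache.spark.api.java.JavaPairRDD;
import org.apache.spark.api.java.JavaSparkContext;

// Sketch only: sequenceFile accepts Hadoop glob patterns, so a path such as
// "/data/odf_records/*/store" (hypothetical) resolves to every matching directory.
class WildcardReadExample {

    static long countRecords(final JavaSparkContext sc) {
        final JavaPairRDD<Text, Text> raw = sc
            .sequenceFile("/data/odf_records/*/store", Text.class, Text.class);
        return raw.count();
    }
}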

View File: AbstractMigrationApplication.java

@@ -3,9 +3,14 @@ package eu.dnetlib.dhp.oa.graph.raw.common;
 import java.io.Closeable;
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
 
+import eu.dnetlib.dhp.common.HdfsSupport;
+import org.apache.commons.lang3.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -18,6 +23,8 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import eu.dnetlib.dhp.schema.oaf.Oaf;
 import eu.dnetlib.dhp.utils.DHPUtils;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.sql.SparkSession;
 
 public class AbstractMigrationApplication implements Closeable {
@@ -94,6 +101,15 @@ public class AbstractMigrationApplication implements Closeable {
         }
     }
 
+    protected static List<String> listEntityPaths(final SparkSession spark, final String paths) {
+        final JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
+        return Arrays
+            .stream(paths.split(","))
+            .filter(StringUtils::isNotBlank)
+            .filter(p -> HdfsSupport.exists(p, sc.hadoopConfiguration()) || p.contains("/*"))
+            .collect(Collectors.toList());
+    }
+
     public ObjectMapper getObjectMapper() {
         return objectMapper;
     }
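Taken together, listEntityPaths turns the comma-separated sourcePaths argument into the list of inputs to read: blank entries are discarded, literal paths are kept only if they exist on HDFS, and anything containing "/*" is kept for Hadoop's glob expansion to resolve later. A rough usage sketch, written as a hypothetical subclass so the protected helper is visible; the class, paths, and Spark setup are illustrative only and assume the parent class exposes an accessible no-argument constructor:

import java.util.List;

import org.apache.spark.sql.SparkSession;

import eu.dnetlib.dhp.oa.graph.raw.common.AbstractMigrationApplication;

// Hypothetical example class, not part of the commit.
public class ListEntityPathsExample extends AbstractMigrationApplication {

    public static void main(final String[] args) {
        final SparkSession spark = SparkSession
            .builder()
            .appName("listEntityPaths-example")
            .master("local[*]")
            .getOrCreate();

        // "/data/claims" survives only if it exists on HDFS;
        // the empty entry between the double commas is dropped as blank;
        // "/data/odf_records/*/store" is kept because it contains a wildcard.
        final List<String> kept = listEntityPaths(spark, "/data/claims,,/data/odf_records/*/store");
        kept.forEach(System.out::println);

        spark.stop();
    }
}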