dnet-hadoop/dhp-workflows/dhp-bmuse/src/main/java/eu/dnetlib/dhp/bmuse/bioschema/SparkScraper.java

package eu.dnetlib.dhp.bmuse.bioschema;

import static eu.dnetlib.dhp.bmuse.utils.SparkSessionSupport.runWithSparkSession;

import java.util.Objects;
import java.util.Optional;
import java.util.function.Function;
import java.util.stream.Stream;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.GzipCodec;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.util.LongAccumulator;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.bmuse.utils.ArgumentApplicationParser;
import eu.dnetlib.dhp.bmuse.utils.BMUSEScraper;
import eu.dnetlib.dhp.bmuse.utils.FunctionalInterfaceSupport;
import eu.dnetlib.dhp.bmuse.utils.UrlParser;
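
/**
 * Spark job that scrapes Bioschemas markup from the pages listed in a sitemap.
 * Every URL found in the sitemap is passed to {@link BMUSEScraper}, and the result is
 * appended to a gzip-compressed SequenceFile on HDFS (key = page URL, value = scraped content).
 * Successes and failures are counted with Spark accumulators and reported at the end of the run.
 */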
public class SparkScraper {

private static final Logger logger = LoggerFactory.getLogger(SparkScraper.class);

public static void main(String[] args) throws Exception {
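// Load the job's argument specification (bundled as a JSON resource) and parse the CLI arguments.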
final ArgumentApplicationParser parser = new ArgumentApplicationParser(
IOUtils
.toString(
SparkScraper.class
.getResourceAsStream(
"/eu/dnetlib/dhp/bmuse/bioschema/generate_dataset.json")));
parser.parseArgument(args);
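// When true (the default) the SparkSession is created and closed by this job; otherwise the caller is expected to manage its lifecycle.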
Boolean isSparkSessionManaged = Optional
.ofNullable(parser.get("isSparkSessionManaged"))
.map(Boolean::valueOf)
.orElse(Boolean.TRUE);
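// Job parameters: HDFS name node and working/output paths, the sitemap URL and the element key listing its page URLs, the scraping mode and an optional page limit.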
final String nameNode = parser.get("nameNode");
final String workingPath = parser.get("workingPath");
final String rdfOutput = parser.get("rdfOutput");
final String sitemapUrl = parser.get("sitemapUrl");
final String sitemapURLKey = parser.get("sitemapURLKey");
final String dynamic = parser.get("dynamic");
final String maxScrapedPages = parser.get("maxScrapedPages");
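// The 'dynamic' flag toggles between BMUSE's dynamic (browser-rendered) and static scraping; it defaults to true when the parameter is absent.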
final boolean scrapingType = Objects.isNull(dynamic) || Boolean.parseBoolean(dynamic);
SparkConf conf = new SparkConf();
runWithSparkSession(
conf,
isSparkSessionManaged,
spark -> {
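// Accumulators reporting how many pages were scraped successfully and how many failed.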
final LongAccumulator scraped = spark.sparkContext().longAccumulator("scraped");
final LongAccumulator errors = spark.sparkContext().longAccumulator("errors");
JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
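// Empty the ChromeDriver IP whitelist so the driver accepts connections from any address
// (assumption: dynamic scraping drives a local headless Chrome via Selenium).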
System.setProperty("webdriver.chrome.whitelistedIps", "");
BMUSEScraper scraper = new BMUSEScraper();
String url = sitemapUrl.toLowerCase();
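// Download the sitemap and extract the elements (identified by sitemapURLKey) that list the pages to scrape.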
Elements urls = UrlParser.getSitemapList(url, sitemapURLKey);
long total = urls.size();
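// Output file on HDFS: nameNode + workingPath + rdfOutput.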
Path output = new Path(
nameNode
.concat(workingPath)
.concat(rdfOutput));
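// One record per page is appended to a block-compressed (gzip) SequenceFile: key = page URL, value = scraped output.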
try (SequenceFile.Writer writer = SequenceFile
.createWriter(
sc.hadoopConfiguration(),
SequenceFile.Writer.file(output),
SequenceFile.Writer.keyClass(Text.class),
SequenceFile.Writer.valueClass(Text.class),
SequenceFile.Writer.compression(SequenceFile.CompressionType.BLOCK, new GzipCodec()))) {
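// Optionally cap the number of pages to scrape.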
final Stream<Element> urlStream = Objects.nonNull(maxScrapedPages)
? urls.stream().limit(Long.parseLong(maxScrapedPages))
: urls.stream();
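// Scrape each URL; failures are logged and counted, but do not stop the job.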
urlStream.forEach(u -> {
try {
final Text key = new Text(u.text());
final Text value = new Text(scraper.scrapeUrl(u.text(), scrapingType));
writer.append(key, value);
scraped.add(1L);
} catch (Exception e) {
logger.error(u.text(), e);
errors.add(1L);
}
});
}
logger
.info(
"Total pages to scrape: {} Scraped: {} Errors: {}",
total, scraped.value(), errors.value());
});
}
}