dnet-hadoop/dhp-workflows/dhp-bmuse/src/main/java/eu/dnetlib/dhp/bmuse/bioschema/ScrapingJob.java

package eu.dnetlib.dhp.bmuse.bioschema;

import java.nio.charset.Charset;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.Objects;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Stream;

import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.GzipCodec;
import org.jsoup.nodes.Element;
import org.jsoup.select.Elements;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.bmuse.utils.BMUSEScraper;
import eu.dnetlib.dhp.bmuse.utils.UrlParser;
import eu.dnetlib.dhp.utils.DHPUtils;
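/**
 * Standalone job that reads a sitemap, scrapes every listed URL with {@link BMUSEScraper}
 * and appends the result to a block-compressed (gzip) {@link SequenceFile}, using the
 * page URL as key and the scraped content as value.
 * <p>
 * Arguments (see generate_dataset.json): nameNode, workingPath, rdfOutput, sitemapUrl,
 * sitemapURLKey, dynamic (optional, defaults to true) and maxScrapedPages (optional).
 */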
public class ScrapingJob {

	private static final Logger logger = LoggerFactory.getLogger(ScrapingJob.class);
	private static final SimpleDateFormat formatter = new SimpleDateFormat("yyyy-MM-dd 'at' HH:mm:ss z");

	public static void main(String[] args) throws Exception {
		final ArgumentApplicationParser parser = new ArgumentApplicationParser(
			IOUtils
				.toString(
					ScrapingJob.class
						.getResourceAsStream(
							"/eu/dnetlib/dhp/bmuse/bioschema/generate_dataset.json")));
		parser.parseArgument(args);

		final String nameNode = parser.get("nameNode");
		final String workingPath = parser.get("workingPath");
		final String rdfOutput = parser.get("rdfOutput");
		final String sitemapUrl = parser.get("sitemapUrl");
		final String sitemapURLKey = parser.get("sitemapURLKey");
		final String dynamic = parser.get("dynamic");
		final String maxScrapedPages = parser.get("maxScrapedPages");
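
		// The "dynamic" argument toggles dynamic scraping; it defaults to true when not provided.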
		boolean dynamicValue = true;
		if (Objects.nonNull(dynamic)) {
			dynamicValue = Boolean.parseBoolean(dynamic);
		}
		final boolean scrapingType = dynamicValue;

		AtomicLong scraped = new AtomicLong(0L);
		AtomicLong errors = new AtomicLong(0L);

		logger
			.info(
				"*************************** STARTING SCRAPE: "
					+ formatter.format(new Date()));
		logger.info("Default charset: {}", Charset.defaultCharset());

		BMUSEScraper scraper = new BMUSEScraper();
		String url = sitemapUrl.toLowerCase();
		Elements urls = UrlParser.getSitemapList(url, sitemapURLKey);
		long total = urls.size();
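
		// Output SequenceFile location: <nameNode><workingPath><rdfOutput>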
		Path output = new Path(
			nameNode
				.concat(workingPath)
				.concat(rdfOutput));
		Configuration conf = DHPUtils.getHadoopConfiguration(nameNode);

		try (SequenceFile.Writer writer = SequenceFile
			.createWriter(
				conf,
				SequenceFile.Writer.file(output),
				SequenceFile.Writer.keyClass(Text.class),
				SequenceFile.Writer.valueClass(Text.class),
				SequenceFile.Writer.compression(SequenceFile.CompressionType.BLOCK, new GzipCodec()))) {
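
			// Optionally cap the number of pages to scrape via the maxScrapedPages argument.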
			Stream<Element> urlStream = Objects.nonNull(maxScrapedPages)
				? urls.stream().limit(Long.parseLong(maxScrapedPages))
				: urls.stream();

			urlStream.forEach(u -> {
				try {
					final Text key = new Text(u.text());
					final Text value = new Text(scraper.scrapeUrl(u.text(), scrapingType));
					writer.append(key, value);
					scraped.getAndIncrement();
				} catch (Exception e) {
					// Failures on individual pages are logged and counted, but do not stop the job.
					logger.error(u.text(), e);
					errors.getAndIncrement();
				}
			});
		}

		logger
			.info(
				"*************************** ENDING SCRAPE: " + formatter.format(new Date()));
		logger
			.info(
				"Total pages to scrape: {} Scraped: {} Errors: {}",
				total, scraped.get(), errors.get());
	}
}