dnet-hadoop/dhp-workflows/dhp-graph-provision/src/main/java/eu/dnetlib/dhp/sx/provision/SparkIndexCollectionOnSOLR....

package eu.dnetlib.dhp.sx.provision;

import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;

import java.io.IOException;
import java.util.Objects;
import java.util.Optional;

import org.apache.commons.cli.ParseException;
import org.apache.commons.io.IOUtils;
import org.apache.solr.common.SolrInputDocument;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.sql.Encoder;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.lucidworks.spark.util.SolrSupport;

import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.oa.provision.ProvisionConstants;
import eu.dnetlib.dhp.oa.provision.model.SerializableSolrInputDocument;
import eu.dnetlib.dhp.oa.provision.scholix.ScholixToSolr;
import eu.dnetlib.dhp.oa.provision.utils.ISLookupClient;
import eu.dnetlib.dhp.utils.ISLookupClientFactory;
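
/**
 * Spark job that indexes Scholix records on a SOLR cluster: it reads the records from the given
 * inputPath, converts each one into a SolrInputDocument and sends the documents to the collection
 * associated with the requested index format, resolved through the IS lookup service.
 * The expected arguments (their exact option names are defined in index_solr_parameters.json) are:
 * cluster, format, isURL, inputPath and, optionally, isSparkSessionManaged and batchSize.
 */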
public class SparkIndexCollectionOnSOLR {

	// default number of documents sent to Solr in a single indexing batch
	private static final Integer DEFAULT_BATCH_SIZE = 1000;

	private static final Logger log = LoggerFactory.getLogger(SparkIndexCollectionOnSOLR.class);
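
	/**
	 * Parses the job arguments, resolves the Solr ZooKeeper host (zkHost) and the target collection
	 * name, and triggers the indexing of the Scholix records found at inputPath.
	 */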
	public static void main(String[] args) throws IOException, ParseException {
		final ArgumentApplicationParser parser = new ArgumentApplicationParser(
			IOUtils
				.toString(
					Objects
						.requireNonNull(
							SparkIndexCollectionOnSOLR.class
								.getResourceAsStream("/eu/dnetlib/dhp/sx/provision/index_solr_parameters.json"))));
		parser.parseArgument(args);
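
		// job arguments: target Solr cluster, index format name, IS lookup service URL and input path of the Scholix records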
		final String cluster = parser.get("cluster");
		log.info("Cluster is {}", cluster);

		final String format = parser.get("format");
		log.info("Index format name is {}", format);

		final String isLookupUrl = parser.get("isURL");
		log.info("isURL is {}", isLookupUrl);

		final String inputPath = parser.get("inputPath");
		log.info("inputPath: {}", inputPath);

		Boolean isSparkSessionManaged = Optional
			.ofNullable(parser.get("isSparkSessionManaged"))
			.map(Boolean::valueOf)
			.orElse(Boolean.TRUE);
		log.info("isSparkSessionManaged: {}", isSparkSessionManaged);

		final Integer batchSize = Optional
			.ofNullable(parser.get("batchSize"))
			.map(Integer::valueOf)
			.orElse(DEFAULT_BATCH_SIZE);
		log.info("batchSize: {}", batchSize);
		final SparkConf conf = new SparkConf();
		conf.registerKryoClasses(new Class[] {
			SerializableSolrInputDocument.class
		});
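
		// within the (possibly managed) Spark session: resolve zkHost and collection via the IS lookup service, then index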
		runWithSparkSession(
			conf,
			isSparkSessionManaged,
			spark -> {
				final ISLookupClient isLookup = new ISLookupClient(ISLookupClientFactory.getLookUpService(isLookupUrl));
				final String zkHost = isLookup.getZkHost();
				log.info("zkHost: {}", zkHost);

				final String collection = ProvisionConstants.getCollectionName(format);
				log.info("collection: {}", collection);

				feedScholixToSOLRIndex(spark, inputPath, collection, batchSize, zkHost);
			});
	}
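
	/**
	 * Reads the Scholix records stored as text at inputPath (one serialized record per line), maps
	 * each line to a SolrInputDocument through ScholixToSolr and indexes the resulting documents on
	 * the given Solr collection, sending them in batches of batchSize documents.
	 */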
	public static void feedScholixToSOLRIndex(final SparkSession spark, final String inputPath, final String collection,
		Integer batchSize, final String zkHost) {
		final JavaRDD<SolrInputDocument> docs = spark
			.read()
			.text(inputPath)
			.as(Encoders.STRING())
			.map(new ScholixToSolr(), Encoders.kryo(SolrInputDocument.class))
			.toJavaRDD();

		SolrSupport.indexDocs(zkHost, collection, batchSize, docs.rdd());
	}
}