// dnet-and/dnet-and-test/src/main/java/eu/dnetlib/jobs/SparkLDATuning.java

package eu.dnetlib.jobs;

import eu.dnetlib.featureextraction.FeatureTransformer;
import eu.dnetlib.featureextraction.util.Utilities;
import eu.dnetlib.support.ArgumentApplicationParser;
import org.apache.spark.SparkConf;
import org.apache.spark.ml.clustering.LDAModel;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Tuple2;

import java.io.IOException;
import java.util.*;
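
/**
 * LDA hyperparameter tuning job: trains one LDA model for each candidate number of topics,
 * stores every model and its perplexity under the working path, and saves the model with the
 * lowest perplexity to the configured output path.
 */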
public class SparkLDATuning extends AbstractSparkJob {

    private static final Logger log = LoggerFactory.getLogger(SparkLDATuning.class);

    public SparkLDATuning(ArgumentApplicationParser parser, SparkSession spark) {
        super(parser, spark);
    }

    public static void main(String[] args) throws Exception {
        ArgumentApplicationParser parser = new ArgumentApplicationParser(
                readResource("/jobs/parameters/ldaTuning_parameters.json", SparkLDATuning.class)
        );
        parser.parseArgument(args);

        SparkConf conf = new SparkConf();

        new SparkLDATuning(
                parser,
                getSparkSession(conf)
        ).run();
    }

    @Override
    public void run() throws IOException {

        // read oozie parameters
        final String workingPath = parser.get("workingPath");
        final int maxIterations = Integer.parseInt(parser.get("maxIterations"));
        final double trainRatio = Double.parseDouble(parser.get("trainRatio"));
        final int[] numTopics = Arrays.stream(parser.get("numTopics").split(",")).mapToInt(Integer::parseInt).toArray();
        final String outputModelPath = parser.get("outputModelPath");
        // numPartitions is parsed and logged but not otherwise used by this job
        final int numPartitions = Optional
                .ofNullable(parser.get("numPartitions"))
                .map(Integer::valueOf)
                .orElse(NUM_PARTITIONS);

        log.info("workingPath: '{}'", workingPath);
        log.info("numPartitions: '{}'", numPartitions);
        log.info("maxIterations: '{}'", maxIterations);
        log.info("numTopics: '{}'", Arrays.toString(numTopics));
        log.info("trainRatio: '{}'", trainRatio);
        log.info("outputModelPath: '{}'", outputModelPath);
        Dataset<Row> inputFeaturesDS = spark.read().load(workingPath + "/countVectorized");
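
        // ldaTuning is expected to split the data according to trainRatio, fit one LDA model per
        // candidate number of topics (capped at maxIterations), and return k -> (model, perplexity)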
        Map<Integer, Tuple2<LDAModel, Double>> ldaModels =
                FeatureTransformer.ldaTuning(inputFeaturesDS, trainRatio, numTopics, maxIterations);

        // track the best (lowest-perplexity) model across all values of k
        double bestPerplexity = Double.MAX_VALUE;
        LDAModel bestModel = null;

        List<String> stats = new ArrayList<>();
        stats.add("k,perplexity,path");

        for (Integer k : ldaModels.keySet()) {
            // save the LDA model trained with k topics
            ldaModels.get(k)._1().write().overwrite().save(workingPath + "/lda_model_k" + k);
            // add a report line: k, perplexity, model path
            stats.add(k + "," + ldaModels.get(k)._2() + "," + workingPath + "/lda_model_k" + k);
            // keep the model with the lowest perplexity seen so far
            bestModel = (ldaModels.get(k)._2() <= bestPerplexity) ? ldaModels.get(k)._1() : bestModel;
            bestPerplexity = Math.min(ldaModels.get(k)._2(), bestPerplexity);
        }

        // persist the best (lowest-perplexity) model to the configured output path
        bestModel.write().overwrite().save(outputModelPath);
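
        // write the per-k report as a small CSV: each line is "k,<perplexity>,<model path>"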
        Utilities.writeLinesToHDFSFile(stats, workingPath + "/perplexity_stats.csv");
    }
}