pom fixing and dependencies cleaning

parent 3fd4350213
commit 6202c3e108
@@ -1,2 +1,2 @@
# Tue Nov 05 10:06:34 CET 2024
# Wed Nov 20 10:38:53 CET 2024
projectPropertyKey=projectPropertyValue
@@ -13,68 +13,46 @@
    <artifactId>dhp-raid</artifactId>
    <version>1.0.0-SNAPSHOT</version>

    <properties>
        <java.version>1.8</java.version>
        <scala.binary.version>2.11</scala.binary.version>
        <scala.version>2.11.0</scala.version>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
        <scalatest.version>2.2.6</scalatest.version>
        <encoding>UTF-8</encoding>
        <hadoop-version>2.7.3</hadoop-version>
        <spark.version>2.3.2</spark.version>
        <redis.version>2.8.1</redis.version>
        <dhp-schemas.version>8.0.1</dhp-schemas.version>

    </properties>

    <dependencies>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-core_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-sql_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <dependency>
            <groupId>org.apache.spark</groupId>
            <artifactId>spark-mllib_${scala.binary.version}</artifactId>
            <version>${spark.version}</version>
        </dependency>

        <dependency>
            <groupId>org.scala-lang</groupId>
            <artifactId>scala-library</artifactId>
            <version>${scala.version}</version>
        </dependency>

        <dependency>
            <groupId>org.junit.jupiter</groupId>
            <artifactId>junit-jupiter</artifactId>
        </dependency>
        <dependency>
            <groupId>org.junit.jupiter</groupId>
            <artifactId>junit-jupiter</artifactId>
            <version>RELEASE</version>
            <scope>test</scope>
        </dependency>

        <dependency>
            <groupId>com.jayway.jsonpath</groupId>
            <artifactId>json-path</artifactId>
            <version>2.4.0</version>
        </dependency>

        <dependency>
            <groupId>org.json4s</groupId>
            <artifactId>json4s-jackson_2.11</artifactId>
        </dependency>

        <dependency>
            <groupId>eu.dnetlib.dhp</groupId>
            <artifactId>dhp-schemas</artifactId>
            <version>${dhp-schemas.version}</version>
        </dependency>

        <dependency>
@@ -127,7 +127,8 @@ public class SparkCreateDocuments extends AbstractSparkJob{

        Dataset<Row> documents = nodeDS
                .join(labels, nodeDS.col(LONG_ID_COL).equalTo(labels.col(LONG_ID_COL)), "left")
                .select(col(STRING_ID_COL), col(LABELS_COL));
                .select(col(STRING_ID_COL), col(LABELS_COL))
                .repartition(numPartitions);

        log.info("Labels generated: {}", documents.count());
@@ -1,39 +1,23 @@
package eu.dnetlib.raid.jobs;

import com.jayway.jsonpath.JsonPath;
import eu.dnetlib.dhp.schema.oaf.Relation;
import eu.dnetlib.raid.graph.GraphUtil;
import eu.dnetlib.raid.support.ArgumentApplicationParser;
import eu.dnetlib.raid.support.EdgeParam;
import eu.dnetlib.raid.support.RAiDConfig;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.graphx.Edge;
import org.apache.spark.ml.feature.Normalizer;
import org.apache.spark.ml.feature.Word2Vec;
import org.apache.spark.rdd.RDD;
import org.apache.spark.sql.*;
import org.apache.spark.sql.catalyst.encoders.RowEncoder;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.StructField;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.Tuple2;

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

import static org.apache.spark.sql.functions.*;

import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.functions.size;

public class SparkCreateEmbeddings extends AbstractSparkJob{

    private static final Logger log = LoggerFactory.getLogger(SparkCreateEmbeddings.class);
    private static final String ID_PATH = "$.id";
    private static final String DELETEDBYINFERENCE_PATH = "$.dataInfo.deletedbyinference";
    private static final Encoder<Relation> REL_BEAN_ENC = Encoders.bean(Relation.class);

    public SparkCreateEmbeddings(ArgumentApplicationParser parser, SparkSession spark) {
        super(parser, spark);
@@ -47,7 +31,9 @@ public class SparkCreateEmbeddings extends AbstractSparkJob{

        parser.parseArgument(args);

        SparkConf conf = new SparkConf();
        SparkConf conf = new SparkConf()
                .set("spark.kryoserializer.buffer", "64k")
                .set("spark.kryoserializer.buffer.max", "512m");

        new SparkCreateEmbeddings(
                parser,
@@ -77,7 +63,7 @@ public class SparkCreateEmbeddings extends AbstractSparkJob{
        JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
        RAiDConfig config = loadRAiDConfig(raidConfPath);

        Dataset<Row> documents = spark.read().load(documentsPath);
        Dataset<Row> documents = spark.read().load(documentsPath).filter(size(col(LABELS_COL)).gt(0)).filter(col(LABELS_COL).isNotNull());

        Dataset<Row> word2vecEmbeddings = new Word2Vec()
                .setInputCol(LABELS_COL)
@@ -1,213 +0,0 @@
package eu.dnetlib.raid.jobs;

import com.jayway.jsonpath.JsonPath;
import eu.dnetlib.dhp.schema.oaf.Relation;
import eu.dnetlib.raid.graph.GraphUtil;
import eu.dnetlib.raid.support.ArgumentApplicationParser;
import eu.dnetlib.raid.support.EdgeParam;
import eu.dnetlib.raid.support.RAiDConfig;
import eu.dnetlib.raid.support.RandomWalkParam;
import eu.dnetlib.raid.walker.RandomWalk;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.ml.feature.Normalizer;
import org.apache.spark.ml.feature.Word2Vec;
import org.apache.spark.ml.feature.Word2VecModel;

import org.apache.spark.rdd.RDD;
import org.apache.spark.sql.*;
import org.apache.spark.sql.catalyst.encoders.RowEncoder;
import org.apache.spark.sql.types.DataTypes;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

import static org.apache.spark.sql.functions.col;
import static org.apache.spark.sql.functions.lit;

public class SparkCreateEmbeddingsW2V extends AbstractSparkJob{

    private static final Logger log = LoggerFactory.getLogger(SparkCreateEmbeddingsW2V.class);

    private static final String ID_PATH = "$.id";
    private static final String DELETEDBYINFERENCE_PATH = "$.dataInfo.deletedbyinference";
    private static final Encoder<Relation> REL_BEAN_ENC = Encoders.bean(Relation.class);

    public SparkCreateEmbeddingsW2V(ArgumentApplicationParser parser, SparkSession spark) {
        super(parser, spark);
    }

    public static void main(String[] args) throws Exception {

        ArgumentApplicationParser parser = new ArgumentApplicationParser(
                readResource("/jobs/parameters/createEmbeddings_parameters.json", SparkCreateEmbeddingsW2V.class)
        );

        parser.parseArgument(args);

        SparkConf conf = new SparkConf();

        new SparkCreateEmbeddingsW2V(
                parser,
                getSparkSession(conf)
        ).run();
    }

    @Override
    public void run() throws Exception {

        // read oozie parameters
        final String graphBasePath = parser.get("graphBasePath");
        final String workingPath = parser.get("workingPath");
        final String outputPath = parser.get("outputPath");
        final String raidConfPath = parser.get("raidConfPath");
        final int numPartitions = Optional
                .ofNullable(parser.get("numPartitions"))
                .map(Integer::valueOf)
                .orElse(NUM_PARTITIONS);

        log.info("graphBasePath: '{}'", graphBasePath);
        log.info("workingPath: '{}'", workingPath);
        log.info("outputPath: '{}'", outputPath);
        log.info("raidConfPath: '{}'", raidConfPath);
        log.info("numPartitions: '{}'", numPartitions);

        JavaSparkContext sc = JavaSparkContext.fromSparkContext(spark.sparkContext());
        RAiDConfig config = loadRAiDConfig(raidConfPath);

        // create nodes
        JavaRDD<String> nodes = sc.emptyRDD();
        for (String nodeType: config.getNodes()) {
            nodes = nodes.union(
                    sc.textFile(graphBasePath + "/" + nodeType)
                            .filter(json -> !((boolean) JsonPath.read(json, DELETEDBYINFERENCE_PATH)))
                            .map(json -> JsonPath.read(json, ID_PATH)));
        }
        RDD<Row> nodeRDD = nodes.zipWithIndex().map(n -> RowFactory.create(n._1(), n._2())).rdd();

        Dataset<Row> nodeDS = spark.createDataset(nodeRDD, RowEncoder.apply(
                new StructType(
                        new StructField[]{
                                new StructField(STRING_ID_COL, DataTypes.StringType, false, Metadata.empty()),
                                new StructField(LONG_ID_COL, DataTypes.LongType, false, Metadata.empty())
                        })
        ));
        log.info("Number of nodes: {}", nodeDS.count());

        Dataset<Row> relations = spark
                .read()
                .schema(REL_BEAN_ENC.schema())
                .json(graphBasePath + "/relation");
        log.info("Number of relations: {}", relations.count());

        // create random walks
        Dataset<Row> randomWalks = spark.createDataFrame(
                new ArrayList<>(), new StructType(
                        new StructField[]{
                                new StructField(RANDOM_WALK_COL, DataTypes.createArrayType(DataTypes.IntegerType), false, Metadata.empty())
                        }));
        for(EdgeParam edgeParam: config.getEdges()) {

            log.info("Creating '{}' edges with '{}' metapath", edgeParam.getName(), edgeParam.getMetapath());
            Dataset<Row> edges = createEdges(nodeDS, relations, edgeParam.getMetapath());
            log.info("Number of '{}' edges: {}", edgeParam.getName(), edges.count());


            RandomWalkParam randomWalkParam = config.getRandomWalks().get(edgeParam.getName());

            Dataset<Row> randomWalk = new RandomWalk()
                    .setNumWalk(randomWalkParam.getNumWalks())
                    .setWalkLength(randomWalkParam.getWalkLength())
                    .setQ(randomWalkParam.getQ())
                    .setP(randomWalkParam.getP())
                    .setOutputCol(RANDOM_WALK_COL)
                    .randomWalk(edges);
            log.info("Number of random walks for '{}' edges: {}", edgeParam.getName(), randomWalk.count());

            randomWalks = randomWalks.union(randomWalk);

        }

        randomWalks.cache();

        log.info("Creating embeddings");
        Word2VecModel word2VecModel = new Word2Vec()
                .setMaxSentenceLength(config.getParams().getOrDefault("maxWalkLength", 100).intValue())
                .setMinCount(config.getParams().getOrDefault("minCount", 2).intValue())
                .setWindowSize(config.getParams().getOrDefault("windowSize", 5).intValue())
                .setVectorSize(config.getParams().getOrDefault("embeddingSize", 128).intValue())
                .setMaxIter(config.getParams().getOrDefault("maxIter", 20).intValue())
                .setStepSize(config.getParams().getOrDefault("learningRate", 0.1).doubleValue())
                .setSeed(SEED)
                .setNumPartitions(numPartitions)
                .setInputCol(RANDOM_WALK_COL)
                .setOutputCol(EMBEDDING_COL)
                .fit(randomWalks);

        Dataset<Row> embeddings = word2VecModel
                .getVectors()
                .toDF(LONG_ID_COL, EMBEDDING_COL);

        Normalizer normalizer = new Normalizer()
                .setInputCol(EMBEDDING_COL)
                .setOutputCol("normalized_" + EMBEDDING_COL)
                .setP(2.0);

        embeddings = normalizer
                .transform(embeddings)
                .select(col(LONG_ID_COL), col("normalized_" + EMBEDDING_COL).as(EMBEDDING_COL));

        Dataset<Row> result = nodeDS
                .join(embeddings, nodeDS.col(LONG_ID_COL).equalTo(embeddings.col(LONG_ID_COL)))
                .select(col(STRING_ID_COL), col(EMBEDDING_COL));

        log.info("Number of generated embeddings: {}", result.count());

        result.write().save(outputPath);

    }

    public Dataset<Row> createEdges(Dataset<Row> nodeDS, Dataset<Row> relations, List<String> metapath) throws Exception {

        Dataset<Row> edges;
        switch(metapath.size()) {
            case 1:
                edges = relations
                        .where(col("relClass").equalTo(metapath.get(0)))
                        .select(col("source").as("src"), col("target").as("dst"));
                break;
            case 2:
                Dataset<Row> edges_1 = relations
                        .where(col("relClass").equalTo(metapath.get(0)))
                        .select(col("source").as("source_1"), col("target").as("target_1"));
                Dataset<Row> edges_2 = relations
                        .where(col("relClass").equalTo(metapath.get(1)))
                        .select(col("source").as("source_2"), col("target").as("target_2"));

                edges = edges_1
                        .join(edges_2, edges_1.col("target_1").equalTo(edges_2.col("source_2")))
                        .select(col("source_1").as("src"), col("target_2").as("dst"));
                break;
            default:
                throw new Exception("Metapath size not allowed");
        }

        // join with nodes to get longs instead of string ids
        edges = edges
                .join(nodeDS, edges.col("src").equalTo(nodeDS.col(STRING_ID_COL)))
                .select(col(LONG_ID_COL).as("src"), col("dst"))
                .join(nodeDS, edges.col("dst").equalTo(nodeDS.col(STRING_ID_COL)))
                .select(col("src"), col(LONG_ID_COL).as("dst"))
                .withColumn("weight", lit(1.0));

        return GraphUtil.preProcess(edges, "src", "dst", "weight");
    }

}
@@ -83,7 +83,7 @@
        </configuration>
    </global>

    <start to="CreateDocuments"/>
    <start to="CreateClusters"/>

    <kill name="Kill">
        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
pom.xml (140 changed lines)
@@ -233,12 +233,19 @@

        <google.gson.version>2.2.2</google.gson.version>
        <google.guava.version>15.0</google.guava.version>

        <spark.version>2.2.0</spark.version>
        <!--<jackson.version>2.9.6</jackson.version>-->
        <jackson.version>2.6.5</jackson.version>
        <spark.version>2.4.0.cloudera2</spark.version>
        <jackson.version>2.14.2</jackson.version>
        <mockito-core.version>3.3.3</mockito-core.version>
        <dhp-schemas.version>[8.0.1]</dhp-schemas.version>
        <json4s.version>3.5.3</json4s.version>

        <java.version>1.8</java.version>
        <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
        <project.reporting.outputEncoding>UTF-8</project.reporting.outputEncoding>
        <scalatest.version>2.2.6</scalatest.version>
        <encoding>UTF-8</encoding>
        <hadoop-version>2.7.3</hadoop-version>
        <redis.version>2.8.1</redis.version>

        <commons.lang.version>3.5</commons.lang.version>
        <commons.io.version>2.4</commons.io.version>
@@ -246,7 +253,8 @@
        <commons.logging.version>1.1.3</commons.logging.version>

        <junit.version>4.9</junit.version>
        <scala.version>2.11.8</scala.version>
        <scala.binary.version>2.11</scala.binary.version>
        <scala.version>2.11.12</scala.version>

        <maven.javadoc.failOnError>false</maven.javadoc.failOnError>
        <maven.compiler.plugin.version>3.6.0</maven.compiler.plugin.version>
@@ -275,119 +283,31 @@

    <dependencyManagement>
        <dependencies>
            <dependency>
                <groupId>edu.cmu</groupId>
                <artifactId>secondstring</artifactId>
                <version>1.0.0</version>
            </dependency>
            <dependency>
                <groupId>org.antlr</groupId>
                <artifactId>stringtemplate</artifactId>
                <version>3.2</version>
            </dependency>

            <dependency>
                <groupId>com.fasterxml.jackson.core</groupId>
                <artifactId>jackson-databind</artifactId>
                <version>${jackson.version}</version>
                <scope>provided</scope>
            </dependency>
            <dependency>
                <groupId>com.fasterxml.jackson.dataformat</groupId>
                <artifactId>jackson-dataformat-xml</artifactId>
                <version>${jackson.version}</version>
                <scope>provided</scope>
            </dependency>
            <dependency>
                <groupId>com.fasterxml.jackson.module</groupId>
                <artifactId>jackson-module-jsonSchema</artifactId>
                <version>${jackson.version}</version>
                <scope>provided</scope>
            </dependency>
            <dependency>
                <groupId>com.fasterxml.jackson.core</groupId>
                <artifactId>jackson-core</artifactId>
                <version>${jackson.version}</version>
                <scope>provided</scope>
            </dependency>
            <dependency>
                <groupId>com.fasterxml.jackson.core</groupId>
                <artifactId>jackson-annotations</artifactId>
                <version>${jackson.version}</version>
                <scope>provided</scope>
            </dependency>

            <dependency>
                <groupId>org.mockito</groupId>
                <artifactId>mockito-core</artifactId>
                <version>${mockito-core.version}</version>
                <scope>test</scope>
            </dependency>

            <dependency>
                <groupId>org.mockito</groupId>
                <artifactId>mockito-junit-jupiter</artifactId>
                <version>${mockito-core.version}</version>
                <scope>test</scope>
            </dependency>

            <dependency>
                <groupId>org.apache.commons</groupId>
                <artifactId>commons-math3</artifactId>
                <version>3.6.1</version>
            </dependency>

            <dependency>
                <groupId>com.google.guava</groupId>
                <artifactId>guava</artifactId>
                <version>${google.guava.version}</version>
            </dependency>
            <dependency>
                <groupId>com.google.code.gson</groupId>
                <artifactId>gson</artifactId>
                <version>${google.gson.version}</version>
            </dependency>

            <dependency>
                <groupId>org.apache.commons</groupId>
                <artifactId>commons-lang3</artifactId>
                <version>${commons.lang.version}</version>
            </dependency>

            <dependency>
                <groupId>commons-io</groupId>
                <artifactId>commons-io</artifactId>
                <version>${commons.io.version}</version>
            </dependency>
            <dependency>
                <groupId>commons-collections</groupId>
                <artifactId>commons-collections</artifactId>
                <version>${commons.collections.version}</version>
            </dependency>
            <dependency>
                <groupId>commons-logging</groupId>
                <artifactId>commons-logging</artifactId>
                <version>${commons.logging.version}</version>
            </dependency>
            <dependency>
                <groupId>org.apache.spark</groupId>
                <artifactId>spark-core_2.11</artifactId>
                <artifactId>spark-core_${scala.binary.version}</artifactId>
                <version>${spark.version}</version>
                <scope>provided</scope>
            </dependency>
            <dependency>
                <groupId>org.apache.spark</groupId>
                <artifactId>spark-graphx_2.11</artifactId>
                <artifactId>spark-graphx_${scala.binary.version}</artifactId>
                <version>${spark.version}</version>
                <scope>provided</scope>
            </dependency>
            <dependency>
                <groupId>org.apache.spark</groupId>
                <artifactId>spark-sql_2.11</artifactId>
                <artifactId>spark-sql_${scala.binary.version}</artifactId>
                <version>${spark.version}</version>
                <scope>provided</scope>
            </dependency>

            <dependency>
                <groupId>org.apache.spark</groupId>
                <artifactId>spark-mllib_${scala.binary.version}</artifactId>
                <version>${spark.version}</version>
            </dependency>

            <dependency>
                <groupId>org.junit.jupiter</groupId>
                <artifactId>junit-jupiter</artifactId>
@@ -409,13 +329,7 @@
            <dependency>
                <groupId>com.jayway.jsonpath</groupId>
                <artifactId>json-path</artifactId>
                <version>2.4.0</version>
            </dependency>

            <dependency>
                <groupId>com.ibm.icu</groupId>
                <artifactId>icu4j</artifactId>
                <version>70.1</version>
                <version>2.9.0</version>
            </dependency>

            <dependency>
@@ -433,7 +347,13 @@
            <dependency>
                <groupId>com.github.haifengl</groupId>
                <artifactId>smile-core</artifactId>
                <version>2.5.3</version> <!-- use the most recent available version -->
                <version>2.5.3</version>
            </dependency>

            <dependency>
                <groupId>org.json4s</groupId>
                <artifactId>json4s-jackson_2.11</artifactId>
                <version>${json4s.version}</version>
            </dependency>

        </dependencies>