diff --git a/dhp-workflows/dhp-bulktag/src/main/resources/eu/dnetlib/dhp/input_bulkTag_parameters.json b/dhp-workflows/dhp-bulktag/src/main/resources/eu/dnetlib/dhp/input_bulkTag_parameters.json
new file mode 100644
index 0000000000..3221924bf3
--- /dev/null
+++ b/dhp-workflows/dhp-bulktag/src/main/resources/eu/dnetlib/dhp/input_bulkTag_parameters.json
@@ -0,0 +1,27 @@
+[
+ {
+ "paramName":"is",
+ "paramLongName":"isLookupUrl",
+ "paramDescription": "URL of the isLookUp Service",
+ "paramRequired": true
+ },
+ {
+ "paramName":"mt",
+ "paramLongName":"master",
+ "paramDescription": "should be local or yarn",
+ "paramRequired": true
+ },
+ {
+ "paramName":"s",
+ "paramLongName":"sourcePath",
+ "paramDescription": "the path of the sequencial file to read",
+ "paramRequired": true
+ },
+ {
+ "paramName": "pm",
+ "paramLongName":"protoMap",
+ "paramDescription": "the json path associated to each selection field",
+ "paramRequired": true
+ }
+
+]
\ No newline at end of file
diff --git a/dhp-workflows/dhp-bulktag/src/main/resources/eu/dnetlib/dhp/oozie_app/config-default.xml b/dhp-workflows/dhp-bulktag/src/main/resources/eu/dnetlib/dhp/oozie_app/config-default.xml
new file mode 100644
index 0000000000..ea3a4d9223
--- /dev/null
+++ b/dhp-workflows/dhp-bulktag/src/main/resources/eu/dnetlib/dhp/oozie_app/config-default.xml
@@ -0,0 +1,22 @@
+<configuration>
+    <property>
+        <name>jobTracker</name>
+        <value>yarnRM</value>
+    </property>
+    <property>
+        <name>nameNode</name>
+        <value>hdfs://nameservice1</value>
+    </property>
+    <property>
+        <name>oozie.use.system.libpath</name>
+        <value>true</value>
+    </property>
+    <property>
+        <name>oozie.action.sharelib.for.spark</name>
+        <value>spark2</value>
+    </property>
+    <property>
+        <name>hive_metastore_uris</name>
+        <value>thrift://iis-cdh5-test-m3.ocean.icm.edu.pl:9083</value>
+    </property>
+</configuration>
\ No newline at end of file
diff --git a/dhp-workflows/dhp-bulktag/src/main/resources/eu/dnetlib/dhp/oozie_app/workflow.xml b/dhp-workflows/dhp-bulktag/src/main/resources/eu/dnetlib/dhp/oozie_app/workflow.xml
new file mode 100644
index 0000000000..1866bb0a01
--- /dev/null
+++ b/dhp-workflows/dhp-bulktag/src/main/resources/eu/dnetlib/dhp/oozie_app/workflow.xml
@@ -0,0 +1,61 @@
+<workflow-app name="ResultToCommunitySemRelPropagation" xmlns="uri:oozie:workflow:0.5">
+    <parameters>
+        <property>
+            <name>sourcePath</name>
+            <description>the source path</description>
+        </property>
+        <property>
+            <name>allowedsemrels</name>
+            <description>the semantic relationships allowed for propagation</description>
+        </property>
+        <property>
+            <name>sparkDriverMemory</name>
+            <description>memory for driver process</description>
+        </property>
+        <property>
+            <name>sparkExecutorMemory</name>
+            <description>memory for individual executor</description>
+        </property>
+        <property>
+            <name>sparkExecutorCores</name>
+            <description>number of cores used by single executor</description>
+        </property>
+        <property>
+            <name>isLookupUrl</name>
+            <description>the isLookup service endpoint</description>
+        </property>
+    </parameters>
+
+    <start to="ResultToCommunitySemRelPropagation"/>
+
+    <kill name="Kill">
+        <message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
+    </kill>
+
+    <action name="ResultToCommunitySemRelPropagation">
+        <spark xmlns="uri:oozie:spark-action:0.2">
+            <job-tracker>${jobTracker}</job-tracker>
+            <name-node>${nameNode}</name-node>
+            <master>yarn-cluster</master>
+            <mode>cluster</mode>
+            <name>ResultToCommunitySemRelPropagation</name>
+            <class>eu.dnetlib.dhp.resulttoorganizationfrominstrepo.SparkResultToOrganizationFromIstRepoJob</class>
+            <jar>dhp-propagation-${projectVersion}.jar</jar>
+            <spark-opts>
+                --executor-memory ${sparkExecutorMemory}
+                --executor-cores ${sparkExecutorCores}
+                --driver-memory=${sparkDriverMemory}
+                --conf spark.extraListeners="com.cloudera.spark.lineage.NavigatorAppListener"
+                --conf spark.sql.queryExecutionListeners="com.cloudera.spark.lineage.NavigatorQueryListener"
+            </spark-opts>
+            <arg>-mt</arg> <arg>yarn-cluster</arg>
+            <arg>--sourcePath</arg><arg>${sourcePath}</arg>
+            <arg>--hive_metastore_uris</arg><arg>${hive_metastore_uris}</arg>
+        </spark>
+        <ok to="End"/>
+        <error to="Kill"/>
+    </action>
+
+    <end name="End"/>
+</workflow-app>
\ No newline at end of file
diff --git a/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/Author.java b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/Author.java
new file mode 100644
index 0000000000..18332bc8f1
--- /dev/null
+++ b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/Author.java
@@ -0,0 +1,4 @@
+package eu.dnetlib.dhp.orcidtoresultfromsemrel;
+
+public class Author {
+}
diff --git a/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/AutoritativeAuthor.java b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/AutoritativeAuthor.java
new file mode 100644
index 0000000000..7e496c7cf8
--- /dev/null
+++ b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/AutoritativeAuthor.java
@@ -0,0 +1,4 @@
+package eu.dnetlib.dhp.orcidtoresultfromsemrel;
+
+public class AutoritativeAuthor {
+}
diff --git a/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/ResultWithOrcid.java b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/ResultWithOrcid.java
new file mode 100644
index 0000000000..49fbea567f
--- /dev/null
+++ b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/ResultWithOrcid.java
@@ -0,0 +1,4 @@
+package eu.dnetlib.dhp.orcidtoresultfromsemrel;
+
+public class ResultWithOrcid {
+}
diff --git a/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/SparkOrcidToResultFromSemRelJob2.java b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/SparkOrcidToResultFromSemRelJob2.java
new file mode 100644
index 0000000000..73b8895e1d
--- /dev/null
+++ b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/orcidtoresultfromsemrel/SparkOrcidToResultFromSemRelJob2.java
@@ -0,0 +1,317 @@
+package eu.dnetlib.dhp.orcidtoresultfromsemrel;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import eu.dnetlib.dhp.TypedRow;
+import eu.dnetlib.dhp.application.ArgumentApplicationParser;
+import eu.dnetlib.dhp.schema.oaf.*;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.Text;
+import org.apache.spark.api.java.JavaPairRDD;
+import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.api.java.function.Function;
+import org.apache.spark.api.java.function.PairFunction;
+import org.apache.spark.sql.SparkSession;
+import scala.Tuple2;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.Stream;
+
+import static eu.dnetlib.dhp.PropagationConstant.*;
+
+public class SparkOrcidToResultFromSemRelJob2 {
+ public static void main(String[] args) throws Exception {
+
+        final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkOrcidToResultFromSemRelJob2.class.getResourceAsStream("/eu/dnetlib/dhp/orcidtoresultfromremrel/input_orcidtoresult_parameters.json")));
+ parser.parseArgument(args);
+ final SparkSession spark = SparkSession
+ .builder()
+                .appName(SparkOrcidToResultFromSemRelJob2.class.getSimpleName())
+ .master(parser.get("master"))
+ .enableHiveSupport()
+ .getOrCreate();
+
+ final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
+ final String inputPath = parser.get("sourcePath");
+ final String outputPath = "/tmp/provision/propagation/orcidtoresult";
+
+        final List<String> allowedsemrel = Arrays.asList(parser.get("allowedsemrels").split(";"));
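+        //writeUpdate dumps the computed updates to <outputPath>/<type>_update; saveGraph writes the enriched results to <outputPath>/<type>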
+ boolean writeUpdate = TRUE.equals(parser.get("writeUpdate"));
+ boolean saveGraph = TRUE.equals(parser.get("saveGraph"));
+
+ createOutputDirs(outputPath, FileSystem.get(spark.sparkContext().hadoopConfiguration()));
+
+        JavaRDD<Relation> relations = sc.textFile(inputPath + "/relation")
+                .map(item -> new ObjectMapper().readValue(item, Relation.class)).cache();
+
+        JavaPairRDD<String, TypedRow> result_result = getResultResultSemRel(allowedsemrel, relations);
+
+        JavaRDD<Publication> publications = sc.textFile(inputPath + "/publication")
+                .map(item -> new ObjectMapper().readValue(item, Publication.class));
+        JavaRDD<Dataset> datasets = sc.textFile(inputPath + "/dataset")
+                .map(item -> new ObjectMapper().readValue(item, Dataset.class));
+        JavaRDD<Software> software = sc.textFile(inputPath + "/software")
+                .map(item -> new ObjectMapper().readValue(item, Software.class));
+        JavaRDD<OtherResearchProduct> other = sc.textFile(inputPath + "/otherresearchproduct")
+                .map(item -> new ObjectMapper().readValue(item, OtherResearchProduct.class));
+
+ //get the results having at least one author pid we are interested in
+        JavaPairRDD<String, TypedRow> resultswithorcid = publications.map(p -> getTypedRow(p))
+ .filter(p -> !(p == null))
+ .mapToPair(toPair())
+ .union(datasets.map(p -> getTypedRow(p))
+ .filter(p -> !(p == null))
+ .mapToPair(toPair()))
+ .union(software.map(p -> getTypedRow(p))
+ .filter(p -> !(p == null))
+ .mapToPair(toPair()))
+ .union(other.map(p -> getTypedRow(p))
+ .filter(p -> !(p == null))
+ .mapToPair(toPair()));
+
+
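+        //for every allowed semantic relation, the ORCID-bearing authors of the source result become candidates to enrich the authors of the target result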
+        JavaPairRDD<String, TypedRow> to_add_orcid_to_result = resultswithorcid.join(result_result)
+                .map(p -> p._2()._1().setSourceId(p._2()._2().getTargetId())) //assign to the typed row (authors with ORCID from the source result) the id of the target result, i.e. the result to be enriched
+ .mapToPair(toPair());
+
+        JavaPairRDD<String, Result> pubs = publications.mapToPair(p -> new Tuple2<>(p.getId(), p));
+        JavaPairRDD<String, Result> dss = datasets.mapToPair(p -> new Tuple2<>(p.getId(), p));
+        JavaPairRDD<String, Result> sfw = software.mapToPair(p -> new Tuple2<>(p.getId(), p));
+        JavaPairRDD<String, Result> orp = other.mapToPair(p -> new Tuple2<>(p.getId(), p));
+
+ if(writeUpdate){
+ writeResult(pubs, to_add_orcid_to_result, outputPath, "publication");
+ writeResult(dss, to_add_orcid_to_result, outputPath, "dataset");
+ writeResult(sfw, to_add_orcid_to_result, outputPath, "software");
+ writeResult(orp, to_add_orcid_to_result, outputPath, "otherresearchproduct");
+ }
+
+ if (saveGraph){
+ updateResult(pubs, to_add_orcid_to_result, outputPath, "publication");
+ updateResult(dss, to_add_orcid_to_result, outputPath, "dataset");
+ updateResult(sfw, to_add_orcid_to_result, outputPath, "software");
+ updateResult(orp, to_add_orcid_to_result, outputPath, "otherresearchproduct");
+ }
+
+
+ }
+
+
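+    //an authoritative author matches an author of the result when the surnames are equal (case-insensitive) and the names are equal or share the same initial; on match the propagated pid is copied onto the author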
+    private static Author enrichAuthor(Author autoritative_author, Author author) {
+        boolean toaddpid = false;
+
+        if (StringUtils.isNoneEmpty(autoritative_author.getSurname())) {
+            if (StringUtils.isNoneEmpty(author.getSurname())) {
+                if (autoritative_author.getSurname().trim().equalsIgnoreCase(author.getSurname().trim())) {
+
+                    //the two authors have the same surname: check the name
+                    if (StringUtils.isNoneEmpty(autoritative_author.getName())) {
+                        if (StringUtils.isNoneEmpty(author.getName())) {
+                            if (autoritative_author.getName().trim().equalsIgnoreCase(author.getName().trim())) {
+                                toaddpid = true;
+                            }
+                            //the name could be written differently in the two records (e.g. only the initial in one of the two)
+                            if (autoritative_author.getName().trim().substring(0, 1).equalsIgnoreCase(author.getName().trim().substring(0, 1))) {
+                                toaddpid = true;
+                            }
+                        }
+                    }
+                }
+            }
+        }
+        if (toaddpid) {
+            for (StructuredProperty sp : autoritative_author.getPid()) {
+                if (PROPAGATION_AUTHOR_PID.equals(sp.getQualifier().getClassid())) {
+                    StructuredProperty pid = new StructuredProperty();
+                    pid.setValue(sp.getValue());
+                    pid.setQualifier(getQualifier(sp.getQualifier().getClassid(), sp.getQualifier().getClassname()));
+                    pid.setDataInfo(getDataInfo(PROPAGATION_DATA_INFO_TYPE, PROPAGATION_ORCID_TO_RESULT_FROM_SEM_REL_CLASS_ID, PROPAGATION_ORCID_TO_RESULT_FROM_SEM_REL_CLASS_NAME));
+                    if (author.getPid() == null) {
+                        author.setPid(new ArrayList<>(Arrays.asList(pid)));
+                    } else {
+                        author.getPid().add(pid);
+                    }
+                }
+            }
+            return author;
+        }
+        return null;
+    }
+
+
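+    //enriches each author with the pid of the first matching authoritative author; when filter is true, authors that already carry one of the allowed pids are left untouched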
+    private static List<Author> enrichAuthors(List<Author> autoritative_authors, List<Author> to_enrich_authors, boolean filter) {
+
+        return to_enrich_authors
+                .stream()
+                .map(a -> {
+                    if (filter) {
+                        if (containsAllowedPid(a)) {
+                            return a;
+                        }
+                    }
+
+                    List<Author> lst = autoritative_authors.stream()
+                            .map(aa -> enrichAuthor(aa, a)).filter(au -> !(au == null)).collect(Collectors.toList());
+                    if (lst.size() == 0) {
+                        return a;
+                    }
+                    return lst.get(0); //each author can be enriched at most once: it cannot match several different authoritative authors
+
+                }).collect(Collectors.toList());
+    }
+
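+    //writes to outputPath/<type>_update only the results having a potential update (inner join with the update set), leaving the rest of the graph untouched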
+    private static void writeResult(JavaPairRDD<String, Result> results, JavaPairRDD<String, TypedRow> toupdateresult,
+                                    String outputPath, String type) {
+
+        results.join(toupdateresult)
+                .map(p -> {
+                    Result r = p._2()._1();
+
+                    List<Author> autoritative_authors = p._2()._2().getAuthors();
+                    List<Author> to_enrich_authors = r.getAuthor();
+
+                    r.setAuthor(enrichAuthors(autoritative_authors, to_enrich_authors, false));
+
+ return r;
+ })
+ .map(p -> new ObjectMapper().writeValueAsString(p))
+ .saveAsTextFile(outputPath + "/" + type + "_update");
+ }
+
+
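+    //writes the whole set of results of the given type to outputPath/<type>, enriching those that have a potential update (the left outer join keeps the others unchanged)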
+    private static void updateResult(JavaPairRDD<String, Result> results, JavaPairRDD<String, TypedRow> toupdateresult,
+                                     String outputPath, String type) {
+        results.leftOuterJoin(toupdateresult)
+                .map(p -> {
+                    Result r = p._2()._1();
+                    if (p._2()._2().isPresent()) {
+                        List<Author> autoritative_authors = p._2()._2().get().getAuthors();
+                        List<Author> to_enrich_authors = r.getAuthor();
+
+                        r.setAuthor(enrichAuthors(autoritative_authors, to_enrich_authors, true));
+ }
+ return r;
+ })
+ .map(p -> new ObjectMapper().writeValueAsString(p))
+ .saveAsTextFile(outputPath+"/"+type);
+ }
+
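+    //builds a TypedRow carrying the result id and the authors holding at least one pid of the type to be propagated; returns null when no such author exists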
+    private static TypedRow getTypedRow(Result p) {
+        TypedRow tp = new TypedRow();
+        tp.setSourceId(p.getId());
+        List<Author> authorList = p.getAuthor()
+                .stream()
+                .filter(a -> a.getPid().stream()
+                        .anyMatch(pid -> PROPAGATION_AUTHOR_PID.equals(pid.getQualifier().getClassid())))
+                .collect(Collectors.toList());
+        tp.setAuthors(authorList);
+        if (authorList.size() > 0) {
+            return tp;
+        }
+        return null;
+    }
+
+    private static boolean containsAllowedPid(Author a) {
+        return a.getPid().stream()
+                .anyMatch(pid -> PROPAGATION_AUTHOR_PID.equals(pid.getQualifier().getClassid()));
+    }
+
+}
+
+
+/*private ResultProtos.Result.Metadata.Builder searchMatch(List author_list){
+ ResultProtos.Result.Metadata.Builder metadataBuilder = ResultProtos.Result.Metadata.newBuilder();
+ boolean updated = false;
+
+ for (FieldTypeProtos.Author a: author_list){
+ FieldTypeProtos.Author.Builder author = searchAuthor(a, autoritative_authors);
+ if(author != null){
+ updated = true;
+ metadataBuilder.addAuthor(author);
+ }else{
+ metadataBuilder.addAuthor(FieldTypeProtos.Author.newBuilder(a));
+ }
+ }
+ if(updated)
+ return metadataBuilder;
+ return null;
+ }
+ private FieldTypeProtos.Author.Builder searchAuthor(FieldTypeProtos.Author a, List author_list){
+ if(containsOrcid(a.getPidList()))
+ return null;
+ for(FieldTypeProtos.Author autoritative_author : author_list) {
+ if (equals(autoritative_author, a)) {
+ if(!containsOrcid(a.getPidList()))
+ return update(a, autoritative_author);
+ }
+ }
+ return null;
+
+ }
+
+ private boolean containsOrcid(List pidList){
+ if(pidList == null)
+ return false;
+ return pidList
+ .stream()
+ .filter(kv -> kv.getKey().equals(PropagationConstants.AUTHOR_PID))
+ .collect(Collectors.toList()).size() > 0;
+ }
+ */
\ No newline at end of file
diff --git a/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/projecttoresult/SparkResultToProjectThroughSemRelJob2.java b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/projecttoresult/SparkResultToProjectThroughSemRelJob2.java
new file mode 100644
index 0000000000..563fcb3bcb
--- /dev/null
+++ b/dhp-workflows/dhp-propagation/src/main/java/eu/dnetlib/dhp/projecttoresult/SparkResultToProjectThroughSemRelJob2.java
@@ -0,0 +1,222 @@
+package eu.dnetlib.dhp.projecttoresult;
+
+import com.fasterxml.jackson.databind.ObjectMapper;
+import eu.dnetlib.dhp.TypedRow;
+import eu.dnetlib.dhp.application.ArgumentApplicationParser;
+import eu.dnetlib.dhp.schema.oaf.*;
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.io.Text;
+import org.apache.spark.SparkConf;
+import org.apache.spark.api.java.JavaPairRDD;
+import org.apache.spark.api.java.JavaRDD;
+import org.apache.spark.api.java.JavaSparkContext;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Encoders;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.SparkSession;
+import scala.Tuple2;
+
+import java.io.File;
+import java.util.*;
+
+import static eu.dnetlib.dhp.PropagationConstant.*;
+import static eu.dnetlib.dhp.PropagationConstant.toPair;
+
+public class SparkResultToProjectThroughSemRelJob2 {
+ public static void main(String[] args) throws Exception {
+
+        final ArgumentApplicationParser parser = new ArgumentApplicationParser(IOUtils.toString(SparkResultToProjectThroughSemRelJob2.class.getResourceAsStream("/eu/dnetlib/dhp/projecttoresult/input_projecttoresult_parameters.json")));
+ parser.parseArgument(args);
+ SparkConf conf = new SparkConf();
+ conf.set("hive.metastore.uris", parser.get("hive_metastore_uris"));
+ final SparkSession spark = SparkSession
+ .builder()
+                .appName(SparkResultToProjectThroughSemRelJob2.class.getSimpleName())
+ .master(parser.get("master"))
+ .config(conf)
+ .enableHiveSupport()
+ .getOrCreate();
+
+ final JavaSparkContext sc = new JavaSparkContext(spark.sparkContext());
+ final String inputPath = parser.get("sourcePath");
+ final String outputPath = "/tmp/provision/propagation/projecttoresult";
+
+        final List<String> allowedsemrel = Arrays.asList(parser.get("allowedsemrels").split(";"));
+
+ createOutputDirs(outputPath, FileSystem.get(spark.sparkContext().hadoopConfiguration()));
+
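+        //read all relations and keep only those not deleted by inference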
+        JavaRDD<Relation> all_relations = sc.textFile(inputPath + "/relation")
+                .map(item -> new ObjectMapper().readValue(item, Relation.class));
+
+        JavaRDD<Relation> relations = all_relations.filter(r -> !r.getDataInfo().getDeletedbyinference()).cache();
+
+        JavaRDD<Relation> result_result = relations
+                .filter(r -> allowedsemrel.contains(r.getRelClass()) && RELATION_RESULTRESULT_REL_TYPE.equals(r.getRelType()));
+
+        org.apache.spark.sql.Dataset<Relation> resres_relation = spark.createDataset(result_result.rdd(),
+                Encoders.bean(Relation.class));
+
+        JavaRDD<Relation> result_project = relations
+                .filter(r -> RELATION_RESULT_PROJECT_REL_CLASS.equals(r.getRelClass())
+                        && RELATION_RESULTPROJECT_REL_TYPE.equals(r.getRelType()));
+
+        org.apache.spark.sql.Dataset<Relation> resproj_relation = spark.createDataset(result_project.rdd(),
+                Encoders.bean(Relation.class));
+
+ resres_relation.createOrReplaceTempView("resres_relation");
+ resproj_relation.createOrReplaceTempView("resproj_relation");
+
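+        //for each project, collect the set of results reachable via an allowed semantic relation from a result already linked to that project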
+ String query ="SELECT proj, collect_set(r1target) result_set " +
+ "FROM (" +
+ " SELECT r1.source as sourcer, r1.relclass as r1rel, r1.target as r1target, r2.target as proj " +
+ " FROM resres_relation r1 " +
+ " JOIN resproj_relation r2 " +
+ " ON r1.source = r2.source " +
+ " ) tmp " +
+ "GROUP BY proj ";
+
+        Dataset<Row> toaddrelations = spark.sql(query);
+
+
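+        //for each project, accumulate the results already linked to it (project -> result relations); this is later joined with the candidate set, presumably to skip links that already exist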
+        JavaPairRDD<String, TypedRow> project_resultlist = relations
+ .filter(r -> RELATION_PROJECT_RESULT_REL_CLASS.equals(r.getRelClass()))
+ .map(r -> {
+ TypedRow tp = new TypedRow();
+ tp.setSourceId(r.getSource());
+ tp.add(r.getTarget());
+ return tp;
+ }).mapToPair(toPair())
+ .reduceByKey((a, b) -> {
+ if (a == null) {
+ return b;
+ }
+ if (b == null) {
+ return a;
+ }
+
+ a.addAll(b.getAccumulator());
+ return a;
+ }).cache();
+
+
+        JavaRDD<Relation> new_relations = toaddrelations.toJavaRDD().mapToPair(r -> new Tuple2<>(r.getString(0), r.getList(1)))
+ .leftOuterJoin(project_resultlist)
+ .flatMap(c -> {
+ List