
[ECclassification] new implementation first try

Miriam Baglioni 2023-02-28 14:44:00 +01:00
parent 9e4ec0023c
commit 4f2df876cd
14 changed files with 1700 additions and 36 deletions

View File

@@ -3,12 +3,23 @@ package eu.dnetlib.dhp.actionmanager.project;
import static eu.dnetlib.dhp.common.SparkSessionSupport.runWithSparkSession;
import java.io.BufferedOutputStream;
import java.io.IOException;
import java.util.*;
import java.util.zip.GZIPOutputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.function.FlatMapFunction;
import org.apache.spark.api.java.function.MapFunction;
import org.apache.spark.rdd.RDD;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SaveMode;
@@ -19,6 +30,7 @@ import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import eu.dnetlib.dhp.actionmanager.project.utils.model.CSVProject;
import eu.dnetlib.dhp.actionmanager.project.utils.model.Project;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
import eu.dnetlib.dhp.common.HdfsSupport;
import scala.Tuple2;
@@ -54,6 +66,9 @@ public class PrepareProjects {
final String projectPath = parser.get("projectPath");
log.info("projectPath {}: ", projectPath);
final String workingPath = parser.get("workingPath");
log.info("workingPath {}: ", workingPath);
final String outputPath = parser.get("outputPath");
log.info("outputPath {}: ", outputPath);
@@ -76,7 +91,7 @@ public class PrepareProjects {
}
private static void exec(SparkSession spark, String projectPath, String dbProjectPath, String outputPath) {
Dataset<CSVProject> project = readPath(spark, projectPath, CSVProject.class);
Dataset<Project> project = readPath(spark, projectPath, Project.class);
Dataset<ProjectSubset> dbProjects = readPath(spark, dbProjectPath, ProjectSubset.class);
dbProjects
@@ -90,14 +105,14 @@ public class PrepareProjects {
}
private static FlatMapFunction<Tuple2<ProjectSubset, CSVProject>, CSVProject> getTuple2CSVProjectFlatMapFunction() {
private static FlatMapFunction<Tuple2<ProjectSubset, Project>, CSVProject> getTuple2CSVProjectFlatMapFunction() {
return value -> {
Optional<CSVProject> csvProject = Optional.ofNullable(value._2());
List<CSVProject> csvProjectList = new ArrayList<>();
if (csvProject.isPresent()) {
if (Optional.ofNullable(value._2()).isPresent()) {
Project project = value._2();
String[] programme = csvProject.get().getProgramme().split(";");
String topic = csvProject.get().getTopics();
String[] programme = project.getLegalBasis().split(";");
String topic = project.getTopics();
Arrays
.stream(programme)
@@ -106,7 +121,7 @@ public class PrepareProjects {
proj.setTopics(topic);
proj.setProgramme(p);
proj.setId(csvProject.get().getId());
proj.setId(project.getId());
csvProjectList.add(proj);
});
}
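Taken together, the hunks above replace the CSV-based input (CSVProject) with the new JSON-based Project model, deriving the programme code from the new legalBasis field. A minimal self-contained sketch of the resulting flatMap, assuming only the accessors visible in this diff (the method name toCsvProjects is illustrative, not the committed one):

private static FlatMapFunction<Tuple2<ProjectSubset, Project>, CSVProject> toCsvProjects() {
	return value -> {
		List<CSVProject> csvProjectList = new ArrayList<>();
		if (Optional.ofNullable(value._2()).isPresent()) {
			Project project = value._2();
			// one CSVProject per ';'-separated legalBasis entry, keeping id and topics
			for (String p : project.getLegalBasis().split(";")) {
				CSVProject proj = new CSVProject();
				proj.setTopics(project.getTopics());
				proj.setProgramme(p);
				proj.setId(project.getId());
				csvProjectList.add(proj);
			}
		}
		return csvProjectList.iterator();
	};
}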

View File

@@ -0,0 +1,126 @@
package eu.dnetlib.dhp.actionmanager.project.utils;
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.zip.GZIPOutputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import eu.dnetlib.dhp.actionmanager.project.PrepareProjects;
import eu.dnetlib.dhp.actionmanager.project.utils.model.Project;
import eu.dnetlib.dhp.application.ArgumentApplicationParser;
/**
* @author miriam.baglioni
* @Date 28/02/23
*/
public class ExtractProjects implements Serializable {
private static final Logger log = LoggerFactory.getLogger(ExtractProjects.class);
private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();
public static void main(String[] args) throws Exception {
String jsonConfiguration = IOUtils
.toString(
PrepareProjects.class
.getResourceAsStream(
"/eu/dnetlib/dhp/actionmanager/project/extract_project_parameters.json"));
final ArgumentApplicationParser parser = new ArgumentApplicationParser(jsonConfiguration);
parser.parseArgument(args);
final String projectPath = parser.get("projectPath");
log.info("projectPath {}: ", projectPath);
final String workingPath = parser.get("workingPath");
log.info("workingPath {}: ", workingPath);
final String hdfsNameNode = parser.get("hdfsNameNode");
log.info("hdfsNameNode {}", hdfsNameNode);
Configuration conf = new Configuration();
conf.set("fs.defaultFS", hdfsNameNode);
FileSystem fs = FileSystem.get(conf);
doExtract(projectPath, workingPath, fs);
readProjects(workingPath + "json/project.json", workingPath + "projects/h2020_projects_nld.json", fs);
}
private static void doExtract(String inputFile, String workingPath, FileSystem fileSystem)
throws IOException {
final Path path = new Path(inputFile);
FSDataInputStream project_zip = fileSystem.open(path);
try (ZipInputStream zis = new ZipInputStream(project_zip)) {
ZipEntry entry = null;
while ((entry = zis.getNextEntry()) != null) {
if (!entry.isDirectory()) {
String fileName = entry.getName();
byte buffer[] = new byte[1024];
int count;
try (
FSDataOutputStream out = fileSystem
.create(new Path(workingPath + fileName))) {
while ((count = zis.read(buffer, 0, buffer.length)) != -1)
out.write(buffer, 0, count);
}
}
}
}
}
private static void readProjects(String inputFile, String workingPath, FileSystem fs) throws IOException {
Path hdfsreadpath = new Path(inputFile);
FSDataInputStream inputStream = fs.open(hdfsreadpath);
ArrayList<Project> projects = OBJECT_MAPPER
.readValue(
IOUtils.toString(inputStream, "UTF-8"),
new TypeReference<List<Project>>() {
});
Path hdfsWritePath = new Path(workingPath);
if (fs.exists(hdfsWritePath)) {
fs.delete(hdfsWritePath, false);
}
FSDataOutputStream fos = fs.create(hdfsWritePath);
try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(fos, StandardCharsets.UTF_8))) {
for (Project p : projects) {
writer.write(OBJECT_MAPPER.writeValueAsString(p));
writer.newLine();
}
}
}
}
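Two steps happen here: doExtract unzips the CORDIS dump into the working path, and readProjects re-serializes the extracted JSON array as one Project object per line, since Spark's JSON/text readers expect line-delimited records unless the multiline option is set. A hedged sketch of how the rewritten file can then be consumed (the SparkSession spark, MapFunction, and Encoders imports are assumptions, not part of this commit):

Dataset<Project> projects = spark
	.read()
	.textFile(workingPath + "projects/h2020_projects_nld.json")
	.map(
		(MapFunction<String, Project>) value -> new ObjectMapper().readValue(value, Project.class),
		Encoders.bean(Project.class));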

View File

@@ -13,7 +13,7 @@ public class CSVProject implements Serializable {
@CsvBindByName(column = "id")
private String id;
@CsvBindByName(column = "programme")
@CsvBindByName(column = "legalBasis")
private String programme;
@CsvBindByName(column = "topics")

View File

@@ -17,9 +17,27 @@ public class EXCELTopic implements Serializable {
private String title;
private String shortTitle;
private String objective;
private String subjects;
private String keywords;
private String legalBasis;
private String call;
private String id;
private String contentUpdateDate;
public String getContentUpdateDate() {
return contentUpdateDate;
}
public void setContentUpdateDate(String contentUpdateDate) {
this.contentUpdateDate = contentUpdateDate;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getRcn() {
return rcn;
@@ -101,12 +119,12 @@ public class EXCELTopic implements Serializable {
this.objective = objective;
}
public String getSubjects() {
return subjects;
public String getKeywords() {
return keywords;
}
public void setSubjects(String subjects) {
this.subjects = subjects;
public void setKeywords(String keywords) {
this.keywords = keywords;
}
public String getLegalBasis() {

View File

@@ -0,0 +1,191 @@
package eu.dnetlib.dhp.actionmanager.project.utils.model;
import java.io.Serializable;
/**
* @author miriam.baglioni
* @Date 24/02/23
*/
public class Project implements Serializable {
private String acronym;
private String contentUpdateDate;
private String ecMaxContribution;
private String ecSignatureDate;
private String endDate;
private String frameworkProgramme;
private String fundingScheme;
private String grantDoi;
private String id;
private String legalBasis;
private String masterCall;
private String nature;
private String objective;
private String rcn;
private String startDate;
private String status;
private String subCall;
private String title;
private String topics;
private String totalCost;
public String getAcronym() {
return acronym;
}
public void setAcronym(String acronym) {
this.acronym = acronym;
}
public String getContentUpdateDate() {
return contentUpdateDate;
}
public void setContentUpdateDate(String contentUpdateDate) {
this.contentUpdateDate = contentUpdateDate;
}
public String getEcMaxContribution() {
return ecMaxContribution;
}
public void setEcMaxContribution(String ecMaxContribution) {
this.ecMaxContribution = ecMaxContribution;
}
public String getEcSignatureDate() {
return ecSignatureDate;
}
public void setEcSignatureDate(String ecSignatureDate) {
this.ecSignatureDate = ecSignatureDate;
}
public String getEndDate() {
return endDate;
}
public void setEndDate(String endDate) {
this.endDate = endDate;
}
public String getFrameworkProgramme() {
return frameworkProgramme;
}
public void setFrameworkProgramme(String frameworkProgramme) {
this.frameworkProgramme = frameworkProgramme;
}
public String getFundingScheme() {
return fundingScheme;
}
public void setFundingScheme(String fundingScheme) {
this.fundingScheme = fundingScheme;
}
public String getGrantDoi() {
return grantDoi;
}
public void setGrantDoi(String grantDoi) {
this.grantDoi = grantDoi;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getLegalBasis() {
return legalBasis;
}
public void setLegalBasis(String legalBasis) {
this.legalBasis = legalBasis;
}
public String getMasterCall() {
return masterCall;
}
public void setMasterCall(String masterCall) {
this.masterCall = masterCall;
}
public String getNature() {
return nature;
}
public void setNature(String nature) {
this.nature = nature;
}
public String getObjective() {
return objective;
}
public void setObjective(String objective) {
this.objective = objective;
}
public String getRcn() {
return rcn;
}
public void setRcn(String rcn) {
this.rcn = rcn;
}
public String getStartDate() {
return startDate;
}
public void setStartDate(String startDate) {
this.startDate = startDate;
}
public String getStatus() {
return status;
}
public void setStatus(String status) {
this.status = status;
}
public String getSubCall() {
return subCall;
}
public void setSubCall(String subCall) {
this.subCall = subCall;
}
public String getTitle() {
return title;
}
public void setTitle(String title) {
this.title = title;
}
public String getTopics() {
return topics;
}
public void setTopics(String topics) {
this.topics = topics;
}
public String getTotalCost() {
return totalCost;
}
public void setTotalCost(String totalCost) {
this.totalCost = totalCost;
}
}
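The bean mirrors, field for field, the CORDIS H2020 JSON dump added as a fixture later in this commit (projects.json). A quick illustrative round trip with Jackson, using a record trimmed down from that fixture (not committed code):

Project p = new ObjectMapper().readValue(
	"{\"id\": \"894593\", \"legalBasis\": \"H2020-EU.1.3.\", \"topics\": \"MSCA-IF-2019\"}",
	Project.class);
// fields absent from the input simply stay null; unknown input fields would
// fail by default, which is why the bean maps the full set of dump columns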

View File

@@ -83,9 +83,12 @@ public class SparkAtomicActionUsageJob implements Serializable {
private static void prepareData(String dbname, SparkSession spark, String workingPath, String tableName,
String attribute_name) {
spark
.sql(String.format(
.sql(
String
.format(
"select %s as id, sum(downloads) as downloads, sum(views) as views " +
"from %s.%s group by %s", attribute_name, dbname, tableName, attribute_name))
"from %s.%s group by %s",
attribute_name, dbname, tableName, attribute_name))
.as(Encoders.bean(UsageStatsModel.class))
.write()
.mode(SaveMode.Overwrite)
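The hunk above only re-wraps an existing String.format call to satisfy line-length rules; the query itself is unchanged. For illustration, with made-up argument values the rendered SQL reads:

// hypothetical values, for illustration only
String query = String.format(
	"select %s as id, sum(downloads) as downloads, sum(views) as views " +
		"from %s.%s group by %s",
	"repository_id", "usagestats", "datasource", "repository_id");
// -> select repository_id as id, sum(downloads) as downloads, sum(views) as views
//    from usagestats.datasource group by repository_id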

View File

@@ -0,0 +1,23 @@
[
{
"paramName": "pjp",
"paramLongName": "projectPath",
"paramDescription": "the path where the projects are stored ",
"paramRequired": true
},
{
"paramName": "wp",
"paramLongName": "workingPath",
"paramDescription": "the path for the extracted folder",
"paramRequired": true
},
{
"paramName": "hnn",
"paramLongName": "hdfsNameNode",
"paramDescription": "the hdfs namenode",
"paramRequired": true
}
]
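These three parameters match the ExtractProjects entry point added above. A hypothetical stand-alone invocation, with placeholder namenode URI and paths (the Oozie workflow below wires in the equivalent arguments):

String[] args = {
	"--hdfsNameNode", "hdfs://nn1.example.org:8020", // placeholder
	"--projectPath", "/tmp/h2020/cordis-h2020projects-json_.zip", // placeholder
	"--workingPath", "/tmp/h2020/working/" // placeholder
};
eu.dnetlib.dhp.actionmanager.project.utils.ExtractProjects.main(args);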

View File

@@ -1,25 +1,25 @@
<workflow-app name="H2020Classification" xmlns="uri:oozie:workflow:0.5">
<parameters>
<property>
<name>projectFileURL</name>
<description>the url where to get the projects file</description>
</property>
<property>
<name>programmeFileURL</name>
<value>noneed</value>
<description>the url where to get the programme file</description>
</property>
<property>
<name>topicFileURL</name>
<value>noneed</value>
<description>the url where to get the topic file</description>
</property>
<property>
<name>outputPath</name>
<value>noneed</value>
<description>path where to store the action set</description>
</property>
<property>
<name>sheetName</name>
<value>noneed</value>
<description>the name of the sheet to read</description>
</property>
</parameters>
@@ -35,7 +35,7 @@
<delete path='${workingDir}'/>
<mkdir path='${workingDir}'/>
</fs>
<ok to="fork_get_info"/>
<ok to="fork_get_projects"/>
<error to="Kill"/>
</action>
@@ -48,19 +48,21 @@
</fork>
<fork name="fork_get_projects">
<path start="get_project_file"/>
<path start="extract_projects"/>
<path start="read_projects"/>
</fork>
<action name="get_project_file">
<action name="extract_projects">
<java>
<main-class>eu.dnetlib.dhp.actionmanager.project.utils.ReadCSV</main-class>
<main-class>eu.dnetlib.dhp.actionmanager.project.utils.ExtractProjects</main-class>
<arg>--hdfsNameNode</arg><arg>${nameNode}</arg>
<arg>--fileURL</arg><arg>${projectFileURL}</arg>
<arg>--hdfsPath</arg><arg>${workingDir}/projects</arg>
<arg>--classForName</arg><arg>eu.dnetlib.dhp.actionmanager.project.utils.model.CSVProject</arg>
<arg>--projectPath</arg><arg>/tmp/miriam/cordis-h2020projects-json_.zip</arg>
<!-- <arg>&#45;&#45;workingPath</arg><arg>/tmp/miriam/cordis_h2020/</arg>-->
<!-- <arg>&#45;&#45;projectPath</arg><arg>${projectPath}</arg>-->
<arg>--workingPath</arg><arg>${workingDir}/</arg>
</java>
<ok to="wait_projects"/>
<!-- <ok to="End"/>-->
<error to="Kill"/>
</action>
@@ -152,7 +154,8 @@
<arg>--outputPath</arg><arg>${workingDir}/preparedProjects</arg>
<arg>--dbProjectPath</arg><arg>${workingDir}/dbProjects</arg>
</spark>
<ok to="wait"/>
<!-- <ok to="wait"/>-->
<ok to="End"/>
<error to="Kill"/>
</action>

View File

@@ -1,12 +1,10 @@
package eu.dnetlib.dhp.actionmanager.project;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.BufferedReader;
import java.io.File;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.*;
import java.nio.file.Files;
import org.apache.commons.io.FileUtils;
@@ -95,7 +93,7 @@ public class DownloadCsvTest {
count += 1;
}
Assertions.assertEquals(767, count);
assertEquals(767, count);
}
@Disabled
@@ -137,7 +135,7 @@ public class DownloadCsvTest {
count += 1;
}
Assertions.assertEquals(34957, count);
assertEquals(34957, count);
}
@AfterAll
@@ -145,4 +143,61 @@ public class DownloadCsvTest {
FileUtils.deleteQuietly(new File(workingDir));
}
@Test
void getLocalProgrammeFileTest() throws Exception {
GetCSV
.getCsv(
fs, new BufferedReader(
new FileReader(
getClass().getResource("/eu/dnetlib/dhp/actionmanager/project/h2020_programme.csv").getPath())),
workingDir + "/programme",
CSVProgramme.class.getName(), ';');
BufferedReader in = new BufferedReader(new InputStreamReader(fs.open(new Path(workingDir + "/programme"))));
String line;
int count = 0;
while ((line = in.readLine()) != null) {
CSVProgramme csvp = new ObjectMapper().readValue(line, CSVProgramme.class);
if (count == 528) {
assertEquals("H2020-EU.5.f.", csvp.getCode());
assertTrue(
csvp
.getTitle()
.startsWith(
"Develop the governance for the advancement of responsible research and innovation by all stakeholders"));
assertTrue(csvp.getTitle().endsWith("promote an ethics framework for research and innovation"));
assertTrue(csvp.getShortTitle().equals(""));
assertTrue(csvp.getLanguage().equals("en"));
}
if (count == 11) {
assertEquals("H2020-EU.3.5.4.", csvp.getCode());
assertTrue(
csvp
.getTitle()
.equals(
"Grundlagen für den Übergang zu einer umweltfreundlichen Wirtschaft und Gesellschaft durch Öko-Innovation"));
assertTrue(csvp.getShortTitle().equals("A green economy and society through eco-innovation"));
assertTrue(csvp.getLanguage().equals("de"));
}
if (count == 34) {
assertTrue(csvp.getCode().equals("H2020-EU.3.2."));
assertTrue(
csvp
.getTitle()
.equals(
"SOCIETAL CHALLENGES - Food security, sustainable agriculture and forestry, marine, maritime and inland water research, and the bioeconomy"));
assertTrue(
csvp.getShortTitle().equals("Food, agriculture, forestry, marine research and bioeconomy"));
assertTrue(csvp.getLanguage().equals("en"));
}
assertTrue(csvp.getCode() != null);
assertTrue(csvp.getCode().startsWith("H2020"));
count += 1;
}
assertEquals(769, count);
}
}

View File

@ -1,6 +1,8 @@
package eu.dnetlib.dhp.actionmanager.project;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
@@ -43,4 +45,21 @@ public class EXCELParserTest {
Assertions.assertEquals(3878, pl.size());
}
@Test
void test2() throws IOException, ClassNotFoundException, InvalidFormatException, IllegalAccessException,
InstantiationException {
EXCELParser excelParser = new EXCELParser();
List<Object> pl = excelParser
.parse(
new FileInputStream(
getClass().getResource("/eu/dnetlib/dhp/actionmanager/project/h2020_topic.xlsx").getPath()),
"eu.dnetlib.dhp.actionmanager.project.utils.model.EXCELTopic",
"DATA");
Assertions.assertEquals(3905, pl.size());
}
}

View File

@@ -4,12 +4,14 @@ package eu.dnetlib.dhp.actionmanager.project;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.api.java.function.ForeachFunction;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.SparkSession;
@@ -20,9 +22,12 @@ import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import eu.dnetlib.dhp.actionmanager.project.utils.model.CSVProject;
import eu.dnetlib.dhp.actionmanager.project.utils.model.Project;
public class PrepareProjectTest {
@@ -74,7 +79,7 @@ public class PrepareProjectTest {
"-isSparkSessionManaged",
Boolean.FALSE.toString(),
"-projectPath",
getClass().getResource("/eu/dnetlib/dhp/actionmanager/project/projects_subset.json").getPath(),
getClass().getResource("/eu/dnetlib/dhp/actionmanager/project/projects.json").getPath(),
"-outputPath",
workingDir.toString() + "/preparedProjects",
"-dbProjectPath",
@@ -96,4 +101,41 @@ public class PrepareProjectTest {
Assertions.assertEquals(0, verificationDataset.filter("length(programme) = 0").count());
}
@Test
void readJsonNotMultiline() throws IOException {
String projects = IOUtils
.toString(
PrepareProjectTest.class
.getResourceAsStream(("/eu/dnetlib/dhp/actionmanager/project/projects.json")));
ArrayList<Project> a = OBJECT_MAPPER.readValue(projects, new TypeReference<List<Project>>() {
});
a.forEach(p -> {
try {
OBJECT_MAPPER.writeValueAsString(p);
} catch (JsonProcessingException e) {
e.printStackTrace();
}
});
JavaRDD<Project> b = new JavaSparkContext(spark.sparkContext()).parallelize(a);
// System.out.println("pr");
// Dataset<Project> prova = spark
// .read()
// .textFile(inputPath)
// .map((MapFunction<String, Project>) value -> OBJECT_MAPPER.readValue(value, new TypeReference<List<Project>>() {
// }), Encoders.bean(Project.class));
// prova.foreach(
// (ForeachFunction<Project>) p -> System.out.println(OBJECT_MAPPER.writeValueAsString(p)));
// objectMapper.readValue(jsonArray, new TypeReference<List<Student>>() {})
// Dataset<Project> p = readPath(spark, inputPath, Projects.class)
// .flatMap((FlatMapFunction<Projects, Project>) ps -> ps.getProjects().iterator(), Encoders.bean(Project.class
// ));
//import com.fasterxml.jackson.core.type.TypeReference;
// System.out.println(p.count());
}
}

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1,399 @@
[{"acronym": "GiSTDS",
"contentUpdateDate": "2022-10-08 18:28:27",
"ecMaxContribution": 203149.44,
"ecSignatureDate": "2020-03-16",
"endDate": "2022-11-30",
"frameworkProgramme": "H2020",
"fundingScheme": "MSCA-IF-EF-SE",
"grantDoi": "10.3030/886988",
"id": 894593,
"legalBasis": "H2020-EU.1.3.",
"masterCall": "H2020-MSCA-IF-2019",
"nature": "",
"objective": "Coordination of different players in active distribution systems by increasing the penetration of distributed energy resources and rapid advances on the aggregators, microgrids and prosumers with private territory individuals establishes new challenges in control and management systems from the owners point of views. Undertaking digitalization of future distribution systems, GiSTDS introduces an edge computing framework based on GridEye, the core production of DEPsys, which provides real time visibility and monitoring. Relevant drawbacks in the distribution system management platforms in handling the scalability of players, look ahead preventive management systems regarding contingency condition and lack of physical boundaries for third party entities (aggregators) will be addressed by GiSTDS. The main novelties of this project in comparison to the GridEye are: 1) Developed P2P trading module provides automated double auction negotiation in real time fashion which enables all private entities with and without specific physical boundaries to participate in local and flexible electricity markets. 2) Modification of GridEyes modules to address the scalability and resilient operation in both the normal and contingency conditions. 3) To present a look ahead energy managements schemes for the operators, GiSTDS will be equipped to the forecasting module based on auto-regressive with exogenous variables (ARX) and machine learning techniques such as long short term memory (LSTM) and recursive neural network (RNN). Therefore, GiSTDS based on modified and developed modules explores comprehensive distributed framework for control, monitoring and operation of energy systems with multiple dispersed players in different scales. The edge computing solutions in GiSTDS eectively digitalis energy systems and creates major opportunities in terms of avoiding big data concerns and getting a bottom-up monitoring approach for the network supervision.",
"rcn": 227870,
"startDate": "2020-12-01",
"status": "TERMINATED",
"subCall": "H2020-MSCA-IF-2019",
"title": "GridEye Scalable Transactive Distribution Systems",
"topics": "MSCA-IF-2019",
"totalCost": 203149.44
},{
"acronym": "REAL",
"contentUpdateDate": "2022-04-27 21:10:20",
"ecMaxContribution": 1498830,
"ecSignatureDate": "2020-09-29",
"endDate": "2026-03-31",
"frameworkProgramme": "H2020",
"fundingScheme": "ERC-STG",
"grantDoi": "10.3030/947908",
"id": 897004,
"legalBasis": "H2020-EU.1.1.",
"masterCall": "ERC-2020-STG",
"nature": "",
"objective": "In the last decade, machine learning (ML) has become a fundamental tool with a growing impact in many disciplines, from science to industry. However, nowadays, the scenario is changing: data are exponentially growing compared to the computational resources (post Moore's law era), and ML algorithms are becoming crucial building blocks in complex systems for decision making, engineering, science. Current machine learning is not suitable for the new scenario, both from a theoretical and a practical viewpoint: (a) the lack of cost-effectiveness of the algorithms impacts directly the economic/energetic costs of large scale ML, making it barely affordable by universities or research institutes; (b) the lack of reliability of the predictions affects critically the safety of the systems where ML is employed. To deal with the challenges posed by the new scenario, REAL will lay the foundations of a solid theoretical and algorithmic framework for reliable and cost-effective large scale machine learning on modern computational architectures. In particular, REAL will extend the classical ML framework to provide algorithms with two additional guarantees: (a) the predictions will be reliable, i.e., endowed with explicit bounds on their uncertainty guaranteed by the theory; (b) the algorithms will be cost-effective, i.e., they will be naturally adaptive to the new architectures and will provably achieve the desired reliability and accuracy level, by using minimum possible computational resources. The algorithms resulting from REAL will be released as open-source libraries for distributed and multi-GPU settings, and their effectiveness will be extensively tested on key benchmarks from computer vision, natural language processing, audio processing, and bioinformatics. The methods and the techniques developed in this project will help machine learning to take the next step and become a safe, effective, and fundamental tool in science and engineering for large scale data problems.",
"rcn": 231448,
"startDate": "2021-04-01",
"status": "SIGNED",
"subCall": "ERC-2020-STG",
"title": "Reliable and cost-effective large scale machine learning",
"topics": "ERC-2020-STG",
"totalCost": 1498830
},{
"acronym": "CARL-PdM",
"contentUpdateDate": "2022-08-09 09:09:33",
"ecMaxContribution": 50000,
"ecSignatureDate": "2017-07-13",
"endDate": "2018-01-31",
"frameworkProgramme": "H2020",
"fundingScheme": "SME-1",
"grantDoi": "10.3030/781123",
"id": 896300,
"legalBasis": "H2020-EU.2.1.1.",
"masterCall": "H2020-SMEInst-2016-2017",
"nature": "",
"objective": "\"\"\"Industry 4.0 preaches a complete revolution of industrial process and promises huge efficiency gains by a complete virtualization of the factory, numerical design tools, automation of the logistics and the routing of the parts, smart machines, 3D printing, cyber-physical systems, predictive maintenance and control of the whole factory by an intelligent system. \nIn the next 10 years, industry 4.0 is expected to change the way we operate our factories and to create 1250 Billion € of additional value added in Europe.\nAlso , according to ARC Advisory Group, the predictive maintenance market is estimated to grow from 1,404.3M€ in 2016 to 4,904.0M€ by 2021.\nCARL-PdM is a innovative IIoT data powered predictive maintenance platform encompass the core of \"\"Industry 4.0\"\" with a new maintenance paradigm : maintenance is a production function whose aim should be to optimize production output and quality.\nWe will leverage the IoT revolution to achieve these goal.\nThis software solution, CARL-PdM, provides many core capabilities in industrial scenarios, including edge analytics who provide a way to pre-process the data so that only the pertinent information is sent to the predictive layer (Auto Classification and Machine learning).\nThe predictive layer will categorize data into abstract class which represent technical assets behavior. It is a reliable and reproducible approach.\nCompetitive advantages: \n- Reduce failure by 50%, maintenance cost by 30%, production stops by 70%, energetic consumption by 20%, Time To Repair by 30%\n- Increase production flexibility\n- System agnostic to machines\n- Machine-learning algorithm that compares the fault prediction and sensor data with historical data, predicting best maintenance activity regarding to production and quality objectives \n\nThe solution will be implemented at a global scale, starting in European markets: France, Italy, Belgium for early market uptake and testing; and then the biggest EU markets (Germany, UK, Poland and Spain).\n\"",
"rcn": 211479,
"startDate": "2017-08-01",
"status": "CLOSED",
"subCall": "H2020-SMEINST-1-2016-2017",
"title": "Next Generation Holistic Predictive Maintenance Software",
"topics": "SMEInst-01-2016-2017",
"totalCost": 71429
},{
"acronym": "OPTIMAL",
"contentUpdateDate": "2022-11-02 12:00:16",
"ecMaxContribution": 772800,
"ecSignatureDate": "2020-12-01",
"endDate": "2025-12-31",
"frameworkProgramme": "H2020",
"fundingScheme": "MSCA-RISE",
"grantDoi": "10.3030/101007963",
"id": 892890,
"legalBasis": "H2020-EU.1.3.",
"masterCall": "H2020-MSCA-RISE-2020",
"nature": "",
"objective": "The proposed project is to develop and maintain long term collaborations between Europe and China towards CO2 neutral Olefin production. We will realize this objective by carrying out joint research in big data and artificial intelligence (AI) for ethylene plants integrated with carbon capture and CO2 utilisation. Specifically this requires a universal set of skills such as pilot scale experimental study, process modelling and analysis, optimisation, catalysis and reaction kinetics that will be strengthened by the individual mobility of researchers between Europe and China. There are 12 partners involved in OPTIMAL with 3 industrial partners. These partners are world leading in their respective research areas. OPTIMAL is planned to start from Aug. 2021 and will continue for 48 months. There will be 28 experienced and 35 early stage researchers participating in OPTIMAL with exchange visits of 262 person months. The funding of €772,800 will be requested from European Commission to support these planned secondments. The European beneficiaries are experts at catalysis, CO2 utilisation, intensified carbon capture, reaction mechanism and kinetics & CFD studies, hybrid modelling, molecular simulation and dynamic optimisation, whilst the Chinese partners are experts at exergy analysis, process control and optimisation, solvent-based carbon capture & data-driven model development, deep reinforced learning based model free control, intelligent predictive control, physics-based reduced order model development, soft exergy sensor development and optimisation under uncertainty. Transfer of knowledge will take place through these exchange visits. We will generate at least 25 Journal publications and 25 Conference papers. 2 Special Issues will be established in leading journals such as Applied Energy. 2 Workshops and 2 Special Sessions in major international conferences will also be organised to disseminate project results.",
"rcn": 232682,
"startDate": "2021-08-01",
"status": "SIGNED",
"subCall": "H2020-MSCA-RISE-2020",
"title": "Smart and CO2 neutral Olefin Production by arTificial Intelligence and MAchine Learning",
"topics": "MSCA-RISE-2020",
"totalCost": 1205200
},{
"acronym": "e-DNA BotStop",
"contentUpdateDate": "2022-08-15 14:18:25",
"ecMaxContribution": 50000,
"ecSignatureDate": "2019-04-11",
"endDate": "2019-10-31",
"frameworkProgramme": "H2020",
"fundingScheme": "SME-1",
"grantDoi": "10.3030/854460",
"id": 886828,
"legalBasis": "H2020-EU.2.3.",
"masterCall": "H2020-EIC-SMEInst-2018-2020",
"nature": "",
"objective": "In the last decade there has been an explosion in Online Travel Agents (OTAs) worldwide. OTAs undertake the mammoth task of undercutting the flight prices of major airlines through the use of Bots (an internet Bot, also known as web robot, WWW robot or simply bot, is a software application that runs automated tasks (scripts) over the Internet.). Bots are used to scrape airlines for valuable data to benchmark aggregate flight costs, which drives down prices for the consumer.\n\nWhilst beneficial to consumers, scraping harms travel companies because:\n•\tBots can engage with a websites server hardware and cause website traffic to run slower, in some cases causing server downtime and Direct Denial of Service (DDoS)\n•\tLong term Search Engine Optimization (SEO) damage; distorting analytical marketing metrics.\n•\tDiverting customers to purchase products via third party resellers, limiting chances for up-sell and cross sell opportunities. \n\nThis problem is tackled by anti-scrape approaches. However, current anti-scrape/booking bot solutions are only capable of distinguishing between human traffic and bot traffic through supervised algorithms that do not work to the degree of efficacy required. \n\n\nOur proposed solution is BotStop an algorithmic approach to identifying Bots and scrapers and to policing malicious application traffic. eDNA will provide a solution which reintroduces transparency into the process of purchasing flights and will streamline customer website experience to ensure a more stress-free experience",
"rcn": 223866,
"startDate": "2019-05-01",
"status": "CLOSED",
"subCall": "H2020-SMEInst-2018-2020-1",
"title": "e-DNA BotStop",
"topics": "EIC-SMEInst-2018-2020",
"totalCost": 71429
},{
"acronym": "NAUTIC",
"contentUpdateDate": "2022-08-25 21:32:49",
"ecMaxContribution": 184707.84,
"ecSignatureDate": "2021-04-27",
"endDate": "2023-09-30",
"frameworkProgramme": "H2020",
"fundingScheme": "MSCA-IF-EF-ST",
"grantDoi": "10.3030/101033666",
"id": 8867767,
"legalBasis": "H2020-EU.1.3.",
"masterCall": "H2020-MSCA-IF-2020",
"nature": "",
"objective": "Bringing a new drug to the European market takes at least 10 years and 2.5 BEUR of R&D effort. Computational methods significantly shorten this journey but they require knowledge of the structure and interactions of the involved biomolecules - most often proteins. In recent years, a tremendous progress has been made in the field of a single protein 3D structure prediction. However, predicting protein assemblies -the most crucial step - still remains very challenging. The aim of this IF project is to revolutionise protein complexes prediction methods. This will be achieved first by developing novel, effective and fast approaches for the calculation of the vibrational entropy, key to protein-protein docking mechanisms. Then, in an innovative and multi-disciplinary approach, the Experienced Researcher (ER) aims to combine advanced physics-based models with machine learning methods using data from structural and sequence databases. Finally, this project will link all the pieces together and release them in the form of a web-server in order to allow the community to benefit from the results of this research.\nThe ER will carry out the fellowship in the Centre National de la Recherche Scientifique - CNRS in Grenoble, France. CNRS carries out research in all scientific fields of knowledge and the Supervisor is a renowned expert in data science, computing, and software engineering. Through a well-thought two-way knowledge transfer and training plan, this project will benefit both the host institution and the ER in terms of scientific knowledge, network and open the path for new applications to potentially exploit at the European or global level. The project will also place the ER as a highly visible researcher in the field and ideally set her as a valuable resource for European industrial actors.",
"rcn": 235804,
"startDate": "2021-07-01",
"status": "TERMINATED",
"subCall": "H2020-MSCA-IF-2020",
"title": "Novel computational avenues in protein-protein docking",
"topics": "MSCA-IF-2020",
"totalCost": 184707.84
},{
"acronym": "EnzVolNet",
"contentUpdateDate": "2022-08-15 12:50:20",
"ecMaxContribution": 158121.6,
"ecSignatureDate": "2017-02-14",
"endDate": "2019-04-30",
"frameworkProgramme": "H2020",
"fundingScheme": "MSCA-IF-EF-ST",
"grantDoi": "10.3030/753045",
"id": 101003374,
"legalBasis": "H2020-EU.1.3.",
"masterCall": "H2020-MSCA-IF-2016",
"nature": "",
"objective": "Natural enzymes have evolved to perform their functions under complex selective pressures, being capable of accelerating reactions by several orders of magnitude. In particular, heteromeric enzyme complexes catalyze an enormous array of useful reactions that are often allosterically regulated by different protein partners. Unfortunately, the underlying physical principles of this regulation are still under debate, which makes the alteration of enzyme structure towards useful isolated subunits a tremendous challenge for modern chemical biology. Exploitation of isolated enzyme subunits, however, is advantageous for biosynthetic applications as it reduces the metabolic stress on the host cell and greatly simplifies efforts to engineer specific properties of the enzyme. Current approaches to alter natural enzyme complexes are based on the evaluation of thousands of variants, which make them economically unviable and the resulting catalytic efficiencies lag far behind their natural counterparts. The revolutionary nature of EnzVolNet relies on the application of conformational network models (e.g Markov State Models) to extract the essential functional protein dynamics and key conformational states, reducing the complexity of the enzyme design paradigm and completely reformulating previous computational design approaches. Initial mutations are extracted from costly random mutagenesis experiments and chemoinformatic tools are used to identify beneficial mutations leading to more proficient enzymes. This new strategy will be applied to develop stand-alone enzymes from heteromeric protein complexes, with advantageous biosynthetic properties and improve activity and substrate scope. Experimental evaluation of our computational predictions will finally elucidate the potential of the present approach for mimicking Natures rules of evolution.",
"rcn": 208408,
"startDate": "2017-05-01",
"status": "CLOSED",
"subCall": "H2020-MSCA-IF-2016",
"title": "COMPUTATIONAL EVOLUTION OF ENZYME VARIANTS THROUGH CONFORMATIONAL NETWORKS",
"topics": "MSCA-IF-2016",
"totalCost": 158121.6
},{
"acronym": "FASTPARSE",
"contentUpdateDate": "2022-08-18 09:56:14",
"ecMaxContribution": 1481747,
"ecSignatureDate": "2016-12-08",
"endDate": "2022-07-31",
"frameworkProgramme": "H2020",
"fundingScheme": "ERC-STG",
"grantDoi": "10.3030/714150",
"id": 886776,
"legalBasis": "H2020-EU.1.1.",
"masterCall": "ERC-2016-STG",
"nature": "",
"objective": "The popularization of information technology and the Internet has resulted in an unprecedented growth in the scale at which individuals and institutions generate, communicate and access information. In this context, the effective leveraging of the vast amounts of available data to discover and address people's needs is a fundamental problem of modern societies.\n\nSince most of this circulating information is in the form of written or spoken human language, natural language processing (NLP) technologies are a key asset for this crucial goal. NLP can be used to break language barriers (machine translation), find required information (search engines, question answering), monitor public opinion (opinion mining), or digest large amounts of unstructured text into more convenient forms (information extraction, summarization), among other applications.\n\nThese and other NLP technologies rely on accurate syntactic parsing to extract or analyze the meaning of sentences. Unfortunately, current state-of-the-art parsing algorithms have high computational costs, processing less than a hundred sentences per second on standard hardware. While this is acceptable for working on small sets of documents, it is clearly prohibitive for large-scale processing, and thus constitutes a major roadblock for the widespread application of NLP.\n\nThe goal of this project is to eliminate this bottleneck by developing fast parsers that are suitable for web-scale processing. To do so, FASTPARSE will improve the speed of parsers on several fronts: by avoiding redundant calculations through the reuse of intermediate results from previous sentences; by applying a cognitively-inspired model to compress and recode linguistic information; and by exploiting regularities in human language to find patterns that the parsers can take for granted, avoiding their explicit calculation. The joint application of these techniques will result in much faster parsers that can power all kinds of web-scale NLP applications.",
"rcn": 206936,
"startDate": "2017-02-01",
"status": "SIGNED",
"subCall": "ERC-2016-STG",
"title": "Fast Natural Language Parsing for Large-Scale NLP",
"topics": "ERC-2016-STG",
"totalCost": 1481747
},{
"acronym": "StarLink",
"contentUpdateDate": "2022-08-10 09:42:53",
"ecMaxContribution": 50000,
"ecSignatureDate": "2018-05-04",
"endDate": "2018-08-31",
"frameworkProgramme": "H2020",
"fundingScheme": "SME-1",
"grantDoi": "10.3030/815698",
"id": 815698,
"legalBasis": "H2020-EU.2.3.",
"masterCall": "H2020-EIC-SMEInst-2018-2020",
"nature": "",
"objective": "Vacuum pumps are used in thousands of industrial applications, playing a vital role in food processing, semiconductors, chemicals, pharmaceuticals and many other manufacturing and assembly processes. However, todays pumps are currently unable to provide any type of insights that could help users anticipate a pump malfunction, plan maintenance procedures or setting the adjustments. Pump malfunctions or breakdowns, due to unplanned maintenance or improper settings, cost millions of euros in lost revenues every year as production and logistic lines lie idle waiting for pumps to be fixed, and when they are not optimized their productivity decrease or their energy consumption go up. \n\nBut now, DVP, a vacuum pump manufacturer, has developed the solution to these challenges through StarLink, the worlds first intelligent vacuum pump system. StarLink is a patent-pending system that uses data analytics and machine learning to identify pump malfunctions before they happen, propose actions to be taken, and automatically adjust the operation parameters if the problem relates to the setting. This will reduce pump downtime-related costs by 30%, increase their productivity by 50% and make easier the operation manager tasks. \n\nThe combination of our deep knowledge of vacuum pumps needs with the machine learning expertise of the university of Ferrara will create the most intelligent device to improve the competitiveness of European companies. Additionally, StarLink will contribute to DVPs growth in terms of employees and product portfolio since we will be able to offer a wider range of products and services related to vacuum pumps, which will allow us to enter new markets and sell more units. By 2023, it will generate €3M in yearly revenue with net profits of €2M to our company.",
"rcn": 217721,
"startDate": "2018-05-01",
"status": "CLOSED",
"subCall": "H2020-SMEInst-2018-2020-1",
"title": "StarLink: The World's First Intelligent Vacuum Pump System",
"topics": "EIC-SMEInst-2018-2020",
"totalCost": 71429
},{
"acronym": "ARMOUR",
"contentUpdateDate": "2022-08-18 16:42:12",
"ecMaxContribution": 191149.44,
"ecSignatureDate": "2020-03-16",
"endDate": "2022-10-14",
"frameworkProgramme": "H2020",
"fundingScheme": "MSCA-IF-EF-SE",
"grantDoi": "10.3030/890844",
"id": 890844,
"legalBasis": "H2020-EU.1.3.",
"masterCall": "H2020-MSCA-IF-2019",
"nature": "",
"objective": "General awareness about the smart grid technologies has improved in the last decade due to various energy liberalization actions taken by the European Union. However, the lack of well-developed technologies, has been main cause of slow acceptance of smart grids. This calls for the identification of unexplored research areas in smart grids. Positive outcomes of the research can help in laying down new and well-defined standards for the smart grids and associated intelligent technologies. A convenient and easily integrable product can also help in encouraging various distribution system operators to accept the new technologies. Massive amount of data is already being collected from the distribution networks using smart meters. Rapid advancements in machine learning research have opened up new avenues for data utilization in smart grid. \nForerunners like DEPsys (a smart grid technology company based in Switzerland), have now simplified the distribution system data for further analysis and research. A critical concern raised by DEPsys customers, is their inability to trace the source of power quality issues in the distribution network, which in-turn leads to both energy and economic losses over time. This project builds up on existing infrastructure of DEPsys and aims to be an AMROUR (by improving robustness) for distribution networks against power quality events. The main objectives are: (i) leveraging machine learning for condition monitoring and tracing power quality events, and (ii) to develop a smart grid technology which assists the distribution system operators in prevention and diagnosis of power quality events.",
"rcn": 227886,
"startDate": "2020-10-15",
"status": "SIGNED",
"subCall": "H2020-MSCA-IF-2019",
"title": "smARt Monitoring Of distribUtion netwoRks for robust power quality",
"topics": "MSCA-IF-2019",
"totalCost": 191149.44
},{
"acronym": "Target5LO",
"contentUpdateDate": "2022-08-16 11:09:20",
"ecMaxContribution": 195454.8,
"ecSignatureDate": "2018-03-19",
"endDate": "2020-02-29",
"frameworkProgramme": "H2020",
"fundingScheme": "MSCA-IF-EF-CAR",
"grantDoi": "10.3030/792495",
"id": 792495,
"legalBasis": "H2020-EU.1.3.",
"masterCall": "H2020-MSCA-IF-2017",
"nature": "",
"objective": "Drug efficacy is cornerstone for successful drug discovery programs. Considering that, on average, FDA-approved drugs modulate dozens of off-targets it remains imperative to find strategies to overcome adverse drug reactions correlated with pernicious polypharmacology. In fact, several chemical entities displaying promising anticancer are discontinued from drug development pipelines due to narrow therapeutic windows in pre-clinical models. Here, we propose the development of antibody-drug conjugates exploring the unique bioactivity profile of the naphthoquinone natural product-lapachone (Lp) against acute myeloid leukemia (AML), an unmet medical need. Using a machine learning method, we disclosed Lp as an allosteric modulator of 5-lipoxygenase (5-LO), correlated its anticancer activity with 5-LO expression in blood cancers and showed its efficacy in a disseminated mouse model of AML.\n\nIn this project, a comprehensive investigation of novel means for the targeted delivery of Lp to leukaemia cells is sought after, considering both the promising bioactivity profile but also the significant toxicity in untargeted dosage forms. We apply state-of-the-art synthetic medicinal chemistry to design and access cleavable linkers, and site-specifically conjugate Lp to an anti-IL7R antibody, a validated biomarker in AML and other leukaemias. We aim at employing biophysical and chemical biology approaches to validate quantitative and fast release of Lp with accurate spatiotemporal control in in vitro disease models. Finally, we will validate the deployment of the constructs through preclinical in vivo models of AML. We foresee broad applicability of the developed technology, which may have profound implications in drug discovery. Upon successful completion of this research program, we hope to yield a new targeted drug to treat AML patients with improved efficacy and reduced side-effects.",
"rcn": 215065,
"startDate": "2018-03-01",
"status": "CLOSED",
"subCall": "H2020-MSCA-IF-2017",
"title": "Targeting 5-lipoxygenase in the context of Acute Myeloid Leukemia",
"topics": "MSCA-IF-2017",
"totalCost": 195454.8
},{
"acronym": "Smart Library",
"contentUpdateDate": "2022-08-11 19:59:53",
"ecMaxContribution": 1200000,
"ecSignatureDate": "2017-02-26",
"endDate": "2018-12-31",
"frameworkProgramme": "H2020",
"fundingScheme": "SME-2",
"grantDoi": "10.3030/756826",
"id": 756826,
"legalBasis": "H2020-EU.3.6.",
"masterCall": "H2020-SMEInst-2016-2017",
"nature": "",
"objective": "Children today are natives of technology, having frequent access to digital devices both at home and at school. Digital devices are today even more used than TV. Worryingly, the offering of high quality educational apps is very limited and expensive. Parents and educators are concerned about this and are actively searching for better alternatives.\n\nTo help resolve these issues, Smile and Learn places technology at the service of education with the mission of helping children 2 to 12 years old learn while having fun using digital devices. Like the north American educational philosopher John Dewey, we believe that “if we teach todays students as we taught yesterdays, we rob them of tomorrow.” Our vision is to become the global leader in Edutainment (Entertainment plus Education). To do so we have developed the Smart Digital Library, a single platform of interactive games and stories that, as of today, provides access to up to 30 individual proprietary apps (100 apps by end 2018). The “Library” can be used at home, on the go or at school and provides “smart” recommendations to children, their parents and educators.\n\nIn August 2016, Smile and Learn successfully completed phase I of SME Instrument, finalizing our first release of the Smart Library rolled out in real production environments both at pilot schools (today more than 100 schools use the Library, including 10 special education schools) and with families (+7,000 active users) in different markets, including the US, Spain, the UK, France, Mexico and Colombia, with very positive feedback. We already have more than 30,000 users worldwide with no marketing expenditure.\n\nWe are now moving forward to make the Smart Library a global state-of-the-art product in the edutainment industry by scaling it up and rolling out a powerful dissemination plan, that we expect to conduct with the support of Phase 2 H2020",
"rcn": 208757,
"startDate": "2017-03-01",
"status": "CLOSED",
"subCall": "H2020-SMEINST-2-2016-2017",
"title": "Smart Library of Edutainment: technology and gamification at the service of Education",
"topics": "SMEInst-12-2016-2017",
"totalCost": 1827500
},{
"acronym": "PALGLAC",
"contentUpdateDate": "2022-08-25 10:28:12",
"ecMaxContribution": 2425298.75,
"ecSignatureDate": "2018-05-14",
"endDate": "2024-09-30",
"frameworkProgramme": "H2020",
"fundingScheme": "ERC-ADG",
"grantDoi": "10.3030/787263",
"id": 787263,
"legalBasis": "H2020-EU.1.1.",
"masterCall": "ERC-2017-ADG",
"nature": "",
"objective": "Ice sheets regulate Earths climate by reflecting sunlight away, enabling suitable temperatures for human habitation. Warming is reducing these ice masses and raising sea level. Glaciologists predict ice loss using computational ice sheet models which interact with climate and oceans, but with caveats that highlight processes are inadequately encapsulated. Weather forecasting made a leap in skill by comparing modelled forecasts with actual outcomes to improve physical realism of their models. This project sets out an ambitious programme to adopt this data-modelling approach in ice sheet modelling. Given their longer timescales (100-1000s years) we will use geological and geomorphological records of former ice sheets to provide the evidence; the rapidly growing field of palaeoglaciology.\n\nFocussing on the most numerous and spatially-extensive records of palaeo ice sheet activity - glacial landforms - the project aims to revolutionise understanding of past, present and future ice sheets. Our mapping campaign (Work-Package 1), including by machine learning techniques (WP2), should vastly increase the evidence-base. Resolution of how subglacial landforms are generated and how hydrological networks develop (WP3) would be major breakthroughs leading to possible inversions to information on ice thickness or velocity, and with key implications for ice flow models and hydrological effects on ice dynamics. By pioneering techniques and coding for combining ice sheet models with landform data (WP4) we will improve knowledge of the role of palaeo-ice sheets in Earth system change. Trialling of numerical models in these data-rich environments will highlight deficiencies in process-formulations, leading to better models. Applying our coding to combine landforms and geochronology to optimise modelling (WP4) of the retreat of the Greenland and Antarctic ice sheets since the last glacial will provide spin up glaciological conditions for models that forecast sea level rise.",
"rcn": 216167,
"startDate": "2018-10-01",
"status": "SIGNED",
"subCall": "ERC-2017-ADG",
"title": "Palaeoglaciological advances to understand Earths ice sheets by landform analysis",
"topics": "ERC-2017-ADG",
"totalCost": 2425298.75
},{
"acronym": "Konetik eLCV",
"contentUpdateDate": "2022-08-10 09:21:56",
"ecMaxContribution": 50000,
"ecSignatureDate": "2018-11-29",
"endDate": "2019-01-31",
"frameworkProgramme": "H2020",
"fundingScheme": "SME-1",
"grantDoi": "10.3030/837614",
"id": 837614,
"legalBasis": "H2020-EU.2.3.",
"masterCall": "H2020-EIC-SMEInst-2018-2020",
"nature": "",
"objective": "Light Commercial vehicle fleets are important for the EV adoption A LCV is a business tool, so the utilisation rate and ensuring business continuity are key. Integrating and managing electric LCV is challenging due to the limited driving range and charging infrastructure.\n\nIn this project, our aim is to make a feasibility study of developing the first AI based charging assistant for Light Commercial Vehicle fleets. As part of the project aim is to research into the technical feasibility of analyzing vehicle charging data from the electric LCVs and combine that with consumption data from public, home and office chargers to ensure business continuity of eLCV fleets and save money on charging and reducing idle time.\n\nAccording to the IEA, EV/HEVs stock is projected to reach 200 Million units by 2030. The total EV/HEV market is expected to grow up 233EUR bn by 2021 growing at a 40.65%\n\nThe project will allow us to facilitate the market spread of eLCVs with the first machine learning based smart charging assistant tool based on our unique algorithm that combines advanced energy management and telematics. This will imply to disrupt into the European and international market by saving significant money on eLCV charging and reducing downtimes for our client while generating 5,1 M€ profit until 2022 and a generation of 42 new direct jobs on the company level for Konetik.\n\nKonetik is a telematics company focusing on products helping the widespread of electric vehicles. Konetik serves 300+ companies 3 energy utilities already engaged (NKM, ENGIE, EnBW) regarding a pilot program. Selected as one of the top 100 Berlin based startups",
"rcn": 219747,
"startDate": "2018-11-01",
"status": "CLOSED",
"subCall": "H2020-SMEInst-2018-2020-1",
"title": "Artificial Intelligence based Smart Charging Assistant for Electric Light Commercial Vehicle Fleets",
"topics": "EIC-SMEInst-2018-2020",
"totalCost": 71429
},{
"acronym": "INSENSION",
"contentUpdateDate": "2022-09-04 01:10:17",
"ecMaxContribution": 2255875,
"ecSignatureDate": "2017-11-07",
"endDate": "2021-10-31",
"frameworkProgramme": "H2020",
"fundingScheme": "RIA",
"grantDoi": "10.3030/780819",
"id": 780819,
"legalBasis": "H2020-EU.2.1.1.",
"masterCall": "H2020-ICT-2016-2017",
"nature": "",
"objective": "The INSENSION project will create an ICT platform that enables persons with profound and multiple learning disabilities (PMLD) to use digital applications and services that can enhance the quality of their lives, increase their ability to self-determination and enrich their lives. The target end users of the proposed solution are capable of using only nonconventional, nonsymbolic means of interaction with their environment. Therefore, the platform aims to provide technological means for seamless, and adaptable recognition of a range of highly individual nonsymbolic behavioral signals of people with PMLD to detect behavioral patterns happening in the context of specific situations. These patterns are translated into the affective intents of the end user (their approval or disapproval to the given situation) and allow to communicate them to assistive services. This way an individual with PMLD gains a possibility to seamlessly influence their living environment, through new means of communication with other people, changing conditions of their environment or use new types of assistive digital applications. The project employs recent advances in a range of ICT disciplines equipping the proposed assistive ICT platform with natural behavior recognition mechanisms based on gesture, facial expression and vocalization recognition technologies. This is complemented by novel techniques of artificial intelligence and state-of-the-art Internet of Things models. The research and development of the project is conducted within the inclusive design paradigm, with individual with PMLD and their caregivers directly participating in the R+D process throughout the whole duration of the project. This process links a highly interdisciplinary team of experts of ICT specialists and researchers and practitioners of disability studies and care, with due participation of an assistive technology industry representatives.",
"rcn": 213171,
"startDate": "2018-01-01",
"status": "SIGNED",
"subCall": "H2020-ICT-2017-1",
"title": "Personalized intelligent platform enabling interaction with digital services to individuals with profound and multiple learning disabilities",
"topics": "ICT-23-2017",
"totalCost": 2255875
},{
"acronym": "MANET",
"contentUpdateDate": "2022-06-13 17:36:10",
"ecMaxContribution": 171473.28,
"ecSignatureDate": "2021-04-30",
"endDate": "2024-06-30",
"frameworkProgramme": "H2020",
"fundingScheme": "MSCA-IF-EF-ST",
"grantDoi": "10.3030/101033173",
"id": 101033173,
"legalBasis": "H2020-EU.1.3.",
"masterCall": "H2020-MSCA-IF-2020",
"nature": "",
"objective": "Curbing greenhouse gas emissions is a challenge of the utmost importance for our society future and requires urgent decisions on the implementation of clear-cut climate economic policies. Integrated Assessment Models (IAMs) allow to explore alternative energy scenarios in the next 30-70 years. They are key to support the design of climate policies as they highlight the nexus between climate modelling, social science, and energy systems. However, the use of IAMs to inform climate policies does not come free of controversial aspects. Primarily, the inherent uncertainty of IAMs long-term outputs has created several difficulties for the integration of the modelling insights in the policy design. Modelling outputs diverge across IAMs models quite dramatically when they are asked for example to quantify the uptake of key technologies for the decarbonisation, such as renewables and carbon capture and storage. Uncertainty in IAMs descends from lack of knowledge of the future and from IAMs incomplete representations of the future. Uncertainty cannot be removed, but reduced, understood, and conveyed appropriately to policy makers to avoid that different projections cause delayed actions. \nThis project aims to fill this gap providing a methodology which defines the sources of uncertainty, either due to IAMs inputs or IAMs structure, and quantify their relative importance. The methodology will be embodied in an emulator of IAMs, MANET (the eMulAtor of iNtegratAd assEssmenT models) formulated using machine learning techniques to reproduce IAMs outputs. The project will provide a proof of concept of MANET focusing on the uptake of key decarbonisation technologies. The emulator will provide a simplified version of the IAM outputs as a response surface of the model to any variation of the inputs. MANET will be a flexible tool for policy makers and scientists for a direct comparison of IAMs with no limitation of the solution domain.",
"rcn": 235834,
"startDate": "2022-07-01",
"status": "SIGNED",
"subCall": "H2020-MSCA-IF-2020",
"title": "Climate economic policies: assessing values and costs of uncertainty in energy scenarios",
"topics": "MSCA-IF-2020",
"totalCost": 171473.28
},{
"acronym": "PRINTOUT",
"contentUpdateDate": "2022-11-12 14:18:08",
"ecMaxContribution": 183473.28,
"ecSignatureDate": "2020-04-21",
"endDate": "2022-06-14",
"frameworkProgramme": "H2020",
"fundingScheme": "MSCA-IF-EF-ST",
"grantDoi": "10.3030/892757",
"id": 892757,
"legalBasis": "H2020-EU.1.3.",
"masterCall": "H2020-MSCA-IF-2019",
"nature": "",
"objective": "With the extensive range of document generation devices nowadays, the establishment of computational techniques to find manipulation, detect illegal copies and link documents to their source are useful because (i) finding manipulation can help to detect fake news and manipulated documents; (ii) exposing illegal copies can avoid frauds and copyright violation; and (iii) indicating the owner of an illegal document can provide strong arguments to the prosecution of a suspect. Different machine learning techniques have been proposed in the scientific literature to act in these problems, but many of them are limited as: (i) there is a lack of methodology, which may require different experts to solve different problems; (ii) the limited range of known elements being considered for multi-class classification problems such as source attribution, which do not consider unknown classes in a real-world testing; and (iii) they dont consider adversarial attacks from an experienced forger. In this research project, we propose to address these problems on two fronts: resilient characterization and classification. In the characterization front, we intend to use multi-analysis approaches. Proposed by the candidate in his Ph.D. research, it is a methodology to fuse/ensemble machine learning approaches by considering several investigative scenarios, creating robust classifiers that minimize the risk of attacks. Additionally, we aim at proposing the use of open-set classifiers, which are trained to avoid misclassification of classes not included in the classifier training. We envision solutions to several printed document forensics applications with this setup: source attribution, forgery of documents and illegal copies detection. All the approaches we aim at creating in this project will be done in partnership with a document authentication company, which will provide real-world datasets and new applications.",
"rcn": 229161,
"startDate": "2020-06-15",
"status": "CLOSED",
"subCall": "H2020-MSCA-IF-2019",
"title": "Printed Documents Authentication",
"topics": "MSCA-IF-2019",
"totalCost": 183473.28
},{
"acronym": "SKIDLESS",
"contentUpdateDate": "2022-08-16 00:57:32",
"ecMaxContribution": 50000,
"ecSignatureDate": "2019-01-21",
"endDate": "2019-07-31",
"frameworkProgramme": "H2020",
"fundingScheme": "SME-1",
"grantDoi": "10.3030/855496",
"id": 855496,
"legalBasis": "H2020-EU.2.3.",
"masterCall": "H2020-EIC-SMEInst-2018-2020",
"nature": "",
"objective": "When we drive, our safety is protected by a set of technologies that silently watch over the cars behaviour, intervening to\nminimise the risk of accidents. The Electronic Stability Control (ESC) is by far the most impactful safety technology in cars,\nhaving reduced by around 40% the number of fatal accidents caused by the vehicles loss of control. Although effective, any\nESC on the market suffer from one significant flaw: it cannot directly measure the sideslip angle, which is the key indicator of\nskidding, namely the situation when the car deviates from the drivers intended direction. The result is that present ESC can\ndetect up to 80% of skidding events, thus still leaving room for improvements that can save lives. To address this issue and\ncatch a huge market opportunity, Modelway has developed a machine learning technology able to accurately estimate the\nvehicles sideslip angle in real time. And without adding any new sensor to the car. The key to obtain this result is the\nproprietary and patented Direct Virtual Sensor technology, which can be embedded in standard ESC units to further improve\nthe vehicles capacity to detect a skidding event. The DVS technology has been prototyped and extensive tests have been\ncarried out with car manufacturers and their Tier-1 suppliers, showing that the performances are already in line with the\nexpectations of a highly regulated industry as automotive. Now the development roadmap focuses on understanding the\nfeasibility of the integration of the DVS technology in commercial ESC units (Phase 1), to enable a co-development effort\nwith global ESC manufacturers (e.g. Bosch, Magneti Marelli) leading to a pre-commercial validation test-bed (Phase 2). In\nterms of business potential, with around 100 million cars sold each year globally and around 50 in Europe and the US where\nthe use of ESC is mandatory since 2014, we target more than 4 million DSV installed in cars by 2025, leading to more than\n28 M€ of revenues.",
"rcn": 220470,
"startDate": "2019-02-01",
"status": "CLOSED",
"subCall": "H2020-SMEInst-2018-2020-1",
"title": "Enhancing car safety through accurate and real time side-slip angle assessment",
"topics": "EIC-SMEInst-2018-2020",
"totalCost": 71429
},{
"acronym": "Z-Fact0r",
"contentUpdateDate": "2022-08-18 09:44:24",
"ecMaxContribution": 4206252.88,
"ecSignatureDate": "2016-08-09",
"endDate": "2020-03-31",
"frameworkProgramme": "H2020",
"fundingScheme": "IA",
"grantDoi": "10.3030/723906",
"id": 723906,
"legalBasis": "H2020-EU.2.1.5.",
"masterCall": "H2020-IND-CE-2016-17",
"nature": "",
"objective": "Manufacturing represents approximately 21 % of the EUs GDP and 20 % of its employment, providing more than 30 million jobs in 230 000 enterprises, mostly SMEs. Moreover, each job in industry is considered to be linked to two more in related services. European manufacturing is also a dominant element in international trade, leading the world in areas such as automotive, machinery and agricultural engineering. Already threatened by both the lower-wage economies and other high-tech rivals, the situation of EU companies was even made more difficult by the downturn.\nThe Z-Fact0r consortium has conducted an extensive state-of-the-art research (see section 1.4) and realised that although a number of activities (see section 1.3) have been trying to address the need for zero-defect manufacturing, still there is a vast business opportunity for innovative, high-ROI (Return on Investment) solutions to ensure, better quality and higher productivity in the European manufacturing industries.\nThe Z-Fact0r solution comprises the introduction of five (5) multi-stage production-based strategies targeting (i) the early detection of the defect (Z-DETECT), (ii) the prediction of the defect generation (Z-PREDICT), (iii) the prevention of defect generation by recalibrating the production line (multi-stage), as well as defect propagation in later stages of the production (Z-PREVENT), (iv) the reworking/remanufacturing of the product, if this is possible, using additive and subtractive manufacturing techniques (Z-REPAIR) and (v) the management of the aforementioned strategies through event modelling, KPI (key performance indicators) monitoring and real-time decision support (Z-MANAGE).\nTo do that we have brought together a total of thirteen (13) EU-based partners, representing both industry and academia, having ample experience in cutting-edge technologies and active presence in the EU manufacturing.",
"rcn": 205465,
"startDate": "2016-10-01",
"status": "CLOSED",
"subCall": "H2020-FOF-2016",
"title": "Zero-defect manufacturing strategies towards on-line production management for European factories",
"topics": "FOF-03-2016",
"totalCost": 6063018.75
}]
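
For reference, a minimal sketch of how one record from this fixture can be deserialized with Jackson. The ProjectRecord class below is a hypothetical stand-in that mirrors a subset of the JSON keys above (acronym, id, legalBasis, topics); it is not the repository's own model, and only jackson-databind is assumed on the classpath.

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ProjectFixtureReaderSketch {

	// Hypothetical stand-in mirroring a subset of the fixture's fields;
	// unknown keys (objective, rcn, totalCost, ...) are skipped.
	@JsonIgnoreProperties(ignoreUnknown = true)
	public static class ProjectRecord {
		public String acronym;
		public String id; // numeric in the fixture; Jackson coerces it to String here
		public String legalBasis;
		public String topics;
	}

	public static void main(String[] args) throws Exception {
		// One record abridged from the fixture above.
		String json = "{\"acronym\":\"MANET\",\"id\":101033173,"
			+ "\"legalBasis\":\"H2020-EU.1.3.\",\"topics\":\"MSCA-IF-2020\"}";
		ProjectRecord p = new ObjectMapper().readValue(json, ProjectRecord.class);
		System.out.println(p.acronym + " -> " + p.legalBasis + " / " + p.topics);
	}
}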