diff --git a/dnet-ariadneplus-graphdb-publisher/src/main/java/eu/dnetlib/ariadneplus/graphdb/GraphDBClient.java b/dnet-ariadneplus-graphdb-publisher/src/main/java/eu/dnetlib/ariadneplus/graphdb/GraphDBClient.java
index 67dc991..3b876f9 100644
--- a/dnet-ariadneplus-graphdb-publisher/src/main/java/eu/dnetlib/ariadneplus/graphdb/GraphDBClient.java
+++ b/dnet-ariadneplus-graphdb-publisher/src/main/java/eu/dnetlib/ariadneplus/graphdb/GraphDBClient.java
@@ -348,7 +348,7 @@ public class GraphDBClient {
         String recordsIndexReport = "";
         String collectionIndexReport = "";
         try {
-            log.info("Start indexing from "+ datasource + " " + collectionId);
+            log.debug("Start indexing from "+ datasource + " " + collectionId);
             runSPQRLQuery.setupConnection( getWriterUser(), getWriterPwd(), this.graphDBServerUrl, getRepository());
             runSPQRLQuery.setParser(parseRDFJSON);
             runSPQRLQuery.setResourceManager(resourceManager);
@@ -409,7 +409,7 @@ public class GraphDBClient {
             return "empty identifier";
         }
         List identifiers = Arrays.asList(identifier);
-        log.info("Start indexing from "+ datasource + " " + collectionId);
+        log.info("Indexing "+ datasource + " " + collectionId + " " + identifier);
         runSPQRLQuery.setupConnection( getWriterUser(), getWriterPwd(), this.graphDBServerUrl, getRepository());
         runSPQRLQuery.setParser(parseRDFJSON);
         runSPQRLQuery.setResourceManager(resourceManager);
diff --git a/dnet-ariadneplus-graphdb-publisher/src/main/java/eu/dnetlib/ariadneplus/reader/RunSPARQLQueryService.java b/dnet-ariadneplus-graphdb-publisher/src/main/java/eu/dnetlib/ariadneplus/reader/RunSPARQLQueryService.java
index 6ddd598..50241ca 100644
--- a/dnet-ariadneplus-graphdb-publisher/src/main/java/eu/dnetlib/ariadneplus/reader/RunSPARQLQueryService.java
+++ b/dnet-ariadneplus-graphdb-publisher/src/main/java/eu/dnetlib/ariadneplus/reader/RunSPARQLQueryService.java
@@ -77,7 +77,7 @@ public class RunSPARQLQueryService {
         recordIds.forEach(recordId -> {
             int waitAmount=1;
             int retryResult = 0;
-            int operationResult = executeQueryGraph(selectQueryTemplate, recordId, isCollection);
+            int operationResult = executeQueryGraph(selectQueryTemplate, datasource, collectionId, recordId, isCollection);
             if (operationResult!=200) {
                 log.error(recordId + " error_code: "+ operationResult);
                 if (operationResult==-5) {
@@ -90,7 +90,7 @@ public class RunSPARQLQueryService {
                     } catch (InterruptedException ie) {
                         log.error(ie);
                     }
-                    retryResult = executeQueryGraph(selectQueryTemplate, recordId, isCollection);
+                    retryResult = executeQueryGraph(selectQueryTemplate, datasource, collectionId, recordId, isCollection);
                     log.debug("retryResult: " + retryResult);
                 } while (retryResult!=200);
                 operationResult = retryResult;
@@ -125,11 +125,11 @@ public class RunSPARQLQueryService {
         String report = "Total indexed records: "+ successCodesCount.get(0).intValue() +
                 " , " +
                 "Total errors: "+ errorCodesCount.get(0).intValue();
-        log.info(report);
+        log.debug(report);
         return report;
     }
 
-    private int executeQueryGraph(String selectQueryTemplate, String recordId, boolean isCollection){
+    private int executeQueryGraph(String selectQueryTemplate, String datasource, String collectionId, String recordId, boolean isCollection){
         // decrease queries execution rate to avoid heap overload on graphdb
         try {
             Thread.sleep(50);
@@ -149,6 +149,7 @@ public class RunSPARQLQueryService {
             long end = System.currentTimeMillis();
             int triples = resultsModel.size();
             if (resultsModel.size()==0) {
+                log.error("No data found on graphdb for "+datasource+" "+collectionId+" "+recordId);
                 return -2;
             }
             recordWriter = new StringWriter();
@@ -156,23 +157,27 @@ public class RunSPARQLQueryService {
             Rio.write(resultsModel, rdfRecordWriter);
             parser.setCollection(isCollection);
             String bufferedRecord = recordWriter.toString();
-//            log.debug(bufferedRecord);
             int size = parser.parse(bufferedRecord);
             log.debug(recordId+" is_coll: "+isCollection+" query_time(sec): "+(end-start)/1000 +" triples: "+triples +" json: "+size);
             if (size==-1) {
+                log.error("RDF parsing failed for "+datasource+" "+collectionId+" "+recordId);
                 return -4;
             }
             resourceManager.manage(parser);
-            return bulkUpload.index(resourceManager, isCollection);
+            int ret = bulkUpload.index(resourceManager, isCollection);
+            if (ret!=200) {
+                log.error("Indexing error for "+datasource+" "+collectionId+" "+recordId);
+            }
+            return ret;
         } catch (HTTPQueryEvaluationException qe) {
-            log.error(qe);
+            log.error("HTTPQueryEvaluationException for "+datasource+" "+collectionId+" "+recordId, qe);
             if (qe.getMessage()!=null && qe.getMessage().contains("Insufficient free Heap Memory")) {
                 return -5;
             }
             return -6;
         } catch(Exception e){
-            log.error(e);
+            log.error("Generic error for "+datasource+" "+collectionId+" "+recordId, e);
             return -1;
         } finally{
             closeConnection();
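
Note for reviewers: the result codes of executeQueryGraph are only visible as bare int literals scattered across the hunks above. The sketch below collects them in one place for reference; the class and constant names are hypothetical (the patched method keeps returning plain ints), and the meanings are read directly off the hunks.

// Hypothetical reference only: this class does not exist in the codebase.
// executeQueryGraph returns bare int literals with these meanings.
final class IndexingResultCodes {
    static final int OK = 200;                    // record fetched, parsed, and indexed successfully
    static final int GENERIC_ERROR = -1;          // any other exception ("Generic error for ...")
    static final int NO_DATA_FOUND = -2;          // SPARQL query returned an empty model
    static final int PARSE_FAILED = -4;           // parser.parse(...) returned -1
    static final int HEAP_EXHAUSTED = -5;         // "Insufficient free Heap Memory" on GraphDB; retried in the caller's do/while loop
    static final int QUERY_EVALUATION_ERROR = -6; // other HTTPQueryEvaluationException

    private IndexingResultCodes() {
    }
}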