diff --git a/dnet-ariadneplus-graphdb-publisher/src/main/java/eu/dnetlib/ariadneplus/reader/RunSPARQLQueryService.java b/dnet-ariadneplus-graphdb-publisher/src/main/java/eu/dnetlib/ariadneplus/reader/RunSPARQLQueryService.java
index b29be6e..96222b4 100644
--- a/dnet-ariadneplus-graphdb-publisher/src/main/java/eu/dnetlib/ariadneplus/reader/RunSPARQLQueryService.java
+++ b/dnet-ariadneplus-graphdb-publisher/src/main/java/eu/dnetlib/ariadneplus/reader/RunSPARQLQueryService.java
@@ -7,6 +7,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.eclipse.rdf4j.model.Model;
 import org.eclipse.rdf4j.query.*;
+import org.eclipse.rdf4j.repository.http.HTTPQueryEvaluationException;
 import org.eclipse.rdf4j.repository.Repository;
 import org.eclipse.rdf4j.repository.RepositoryConnection;
 import org.eclipse.rdf4j.repository.manager.RemoteRepositoryManager;
@@ -70,17 +71,34 @@ public class RunSPARQLQueryService {
         final List successCodesCount = Arrays.asList(new Integer(0));
         final List counter = Arrays.asList(new Integer(0));
         recordIds.forEach(recordId -> {
-            log.debug(recordId+" >");
+            int waitAmount = 1;
             int operationResult = executeQueryGraph(selectQueryTemplate, recordId, isCollection);
-            log.debug(" "+operationResult);
             if (operationResult!=200) {
                 log.error(recordId + " error_code: "+ operationResult);
-                if (isCollection) {
-                    throw new RuntimeException("Collection indexing problem - errcode: "+operationResult);
+                if (operationResult==-5) {
+                    do {
+                        // wait and retry, in case free heap space becomes available
+                        try {
+                            log.warn("Waiting " + waitAmount + " seconds for more free heap space ...");
+                            Thread.sleep(waitAmount * 1000L);
+                            waitAmount = waitAmount*2;
+                        } catch (InterruptedException ie) {
+                            log.error(ie);
+                        }
+                        operationResult = executeQueryGraph(selectQueryTemplate, recordId, isCollection);
+                        log.debug("retry result: " + operationResult);
+                    } while (operationResult!=200);
+                }
+                if (operationResult!=200) {
+                    int currentErrorsCount = errorCodesCount.get(0).intValue();
+                    currentErrorsCount += 1;
+                    errorCodesCount.set(0, new Integer(currentErrorsCount));
+                }
+                else {
+                    int currentSuccessCount = successCodesCount.get(0).intValue();
+                    currentSuccessCount+=1;
+                    successCodesCount.set(0, new Integer(currentSuccessCount));
                 }
-                int currentErrorsCount = errorCodesCount.get(0).intValue();
-                currentErrorsCount+=1;
-                errorCodesCount.set(0, new Integer(currentErrorsCount));
             } else {
                 int currentSuccessCount = successCodesCount.get(0).intValue();
                 currentSuccessCount+=1;
@@ -140,7 +158,10 @@ public class RunSPARQLQueryService {
             }
             resourceManager.manage(parser);
             return bulkUpload.index(resourceManager, isCollection);
-        } catch(Exception e){
+        } catch (HTTPQueryEvaluationException qe) {
+            log.error(qe);
+            return -5;
+        } catch(Exception e){
             log.error(e);
             return -1;
         } finally{
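
For context, the retry loop introduced above is a plain exponential-backoff pattern around the -5 heap-exhaustion code mapped from HTTPQueryEvaluationException. Below is a minimal standalone sketch of the same idea, not part of the patch: the class and method names (BackoffRetry, retryWithBackoff, maxWaitSeconds) are illustrative, and unlike the patch it caps the wait and keeps retrying only while -5 is returned.

    import java.util.function.IntSupplier;

    public class BackoffRetry {

        private static final int OK = 200;            // success code returned by the indexing call
        private static final int HEAP_EXHAUSTED = -5; // code mapped from HTTPQueryEvaluationException

        // Repeat the call while it reports heap exhaustion, doubling the wait
        // between attempts up to maxWaitSeconds.
        public static int retryWithBackoff(IntSupplier call, int maxWaitSeconds) throws InterruptedException {
            int waitSeconds = 1;
            int result = call.getAsInt();
            while (result == HEAP_EXHAUSTED) {
                Thread.sleep(waitSeconds * 1000L);
                if (waitSeconds < maxWaitSeconds) {
                    waitSeconds *= 2;
                }
                result = call.getAsInt();
            }
            return result;
        }

        public static void main(String[] args) throws InterruptedException {
            // Toy call that fails twice with -5 and then succeeds.
            int[] outcomes = {HEAP_EXHAUSTED, HEAP_EXHAUSTED, OK};
            int[] attempt = {0};
            System.out.println(retryWithBackoff(() -> outcomes[attempt[0]++], 8));
        }
    }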