- Tighten the thread-safety protection on the "BulkImportReport.getJsonReport()" method.

- Update dependencies.
- Code polishing.
Lampros Smyrnaios 2024-05-27 10:40:05 +03:00
parent b6ad2af48b
commit d7697ef3f8
4 changed files with 16 additions and 8 deletions

build.gradle

@@ -49,7 +49,7 @@ dependencies {
 	implementation group: 'org.apache.commons', name: 'commons-lang3', version: '3.14.0'
 	// https://mvnrepository.com/artifact/org.apache.commons/commons-compress
-	implementation("org.apache.commons:commons-compress:1.26.1") {
+	implementation("org.apache.commons:commons-compress:1.26.2") {
 		exclude group: 'com.github.luben', module: 'zstd-jni'
 	}
 	implementation 'com.github.luben:zstd-jni:1.5.6-3' // Even though this is part of the above dependency, the Apache commons rarely updates it, while the zstd team makes improvements very often.
@@ -116,7 +116,7 @@ dependencies {
 	implementation 'org.json:json:20240303' // This is used only in "ParquetFileUtils.createRemoteParquetDirectories()". TODO - Replace it with "gson".
 	// https://mvnrepository.com/artifact/com.google.code.gson/gson
-	implementation 'com.google.code.gson:gson:2.10.1'
+	implementation 'com.google.code.gson:gson:2.11.0'
 	// https://mvnrepository.com/artifact/io.micrometer/micrometer-registry-prometheus
 	runtimeOnly 'io.micrometer:micrometer-registry-prometheus:1.13.0'

BulkImportReport.java

@@ -9,7 +9,10 @@ import com.google.gson.Gson;
 import eu.openaire.urls_controller.util.GenericUtils;
 import java.util.Collection;
+import java.util.HashMap;
 import java.util.Map;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;

 @JsonInclude(JsonInclude.Include.NON_NULL)
@@ -34,6 +37,8 @@ public class BulkImportReport {
 	@JsonProperty
 	private Map<String, Collection<String>> eventsMap;

+	transient private final Lock reportLock = new ReentrantLock(true);
+
 	public BulkImportReport(String provenance, String reportLocation, String reportID) {
 		this.provenance = provenance;
@@ -47,13 +52,16 @@ public class BulkImportReport {
 	}

 	/**
-	 * Synchronize it to avoid concurrency issues when concurrent calls are made to the same bulkImport-Report object.
+	 * Synchronize it with a lock, to avoid concurrency issues when concurrent calls are made to the same bulkImport-Report object.
 	 * */
-	public synchronized String getJsonReport()
+	public String getJsonReport()
 	{
+		reportLock.lock();
 		//Convert the LinkedHashMultiMap<String, String> to Map<String, Collection<String>>, since Gson cannot serialize Multimaps.
-		eventsMap = eventsMultimap.asMap();
-		return gson.toJson(this, BulkImportReport.class);
+		eventsMap = new HashMap<>(eventsMultimap.asMap()); // Make sure we use a clone of the original data, in order to avoid any exception in the "gson.toJson()" method, when at the same time another thread modifies the "eventsMultimap".
+		String reportToReturn = gson.toJson(this, BulkImportReport.class);
+		reportLock.unlock();
+		return reportToReturn;
 	}

 	public String getProvenance() {
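
A note on the pattern above: if "gson.toJson()" throws, the "reportLock.unlock()" call is never reached and the lock stays held forever, blocking every later caller. Below is a minimal sketch of the same locking idea with a try/finally guard. The class name and simplified fields are hypothetical, not the project's code; "eventsMultimap" is assumed to be a Guava multimap, per the comment in the diff.

import com.google.common.collect.LinkedHashMultimap;
import com.google.common.collect.SetMultimap;
import com.google.gson.Gson;

import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class ReportSketch {

    private static final Gson gson = new Gson();

    // Fair lock, as in the commit ("new ReentrantLock(true)"); transient so Gson skips it.
    private final transient Lock reportLock = new ReentrantLock(true);

    // Transient as well: Gson cannot serialize Multimaps (per the comment in the diff).
    private final transient SetMultimap<String, String> eventsMultimap = LinkedHashMultimap.create();

    // Plain-Map snapshot of the multimap; this is what actually gets serialized.
    private Map<String, Collection<String>> eventsMap;

    public String getJsonReport() {
        reportLock.lock();
        try {
            // Shallow copy of the multimap's map view, as in the commit, so the map
            // itself cannot change under Gson while it is being serialized.
            eventsMap = new HashMap<>(eventsMultimap.asMap());
            return gson.toJson(this, ReportSketch.class);
        } finally {
            reportLock.unlock(); // Released even if gson.toJson() throws.
        }
    }
}

One caveat that applies to the committed version as well: "asMap()" returns live collection views as values, so the copy is shallow; a writer adding events under an existing key can still race with serialization.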

UrlsServiceImpl.java

@@ -564,7 +564,7 @@ public class UrlsServiceImpl implements UrlsService {
 			Throwable cause = e.getCause();
 			String exMsg;
 			if ( (cause != null) && ((exMsg = cause.getMessage()) != null) && exMsg.contains("Connection refused") ) {
-				logger.error(errorMsg + " | The worker has probably crashed, since we received a \"Connection refused\"!");
+				logger.error(errorMsg + " | The worker has probably crashed, since we received a \"Connection refused\" message!");
 				workerInfo.setHasShutdown(true); // Avoid sending possible shutdown-Requests later on. Also show a specific message if this Worker requests new assignments in the future.
 			} else
 				logger.error(errorMsg, e);
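
The check above only inspects the immediate cause. When the "Connection refused" is wrapped more than one level deep (e.g. a ResourceAccessException around a ConnectException), walking the whole cause chain is more robust. A hedged sketch, with a hypothetical helper name that is not in the project:

public final class CauseChains {

    // Hypothetical helper: true if any cause in the chain carries the given
    // message fragment. Depth-capped as a guard against pathological cause cycles.
    public static boolean causeChainContains(Throwable e, String fragment) {
        int depth = 0;
        for ( Throwable t = e.getCause(); (t != null) && (depth < 10); t = t.getCause(), depth++ ) {
            String msg = t.getMessage();
            if ( (msg != null) && msg.contains(fragment) )
                return true;
        }
        return false;
    }
}

// Usage, mirroring the branch in the diff:
//   if ( CauseChains.causeChainContains(e, "Connection refused") ) { ... }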

FileUtils.java

@@ -462,7 +462,7 @@ public class FileUtils {
 		if ( shouldLockThreads ) // In case multiple threads write to the same file, for ex. during the bulk-import procedure.
 			fileAccessLock.lock();

-		// TODO - Make this method to be synchronized be specific file, not in general.
+		// TODO - Make this method to be synchronized for the specific file, not in general.
 		// TODO - NOW: Multiple bulkImport procedures (with diff DIRs), are blocked while writing to DIFFERENT files..
 		try ( BufferedWriter bufferedWriter = new BufferedWriter(Files.newBufferedWriter(Paths.get(fileFullPath)), halfMb) )
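
For the first TODO above, one common way to lock per file instead of globally is a ConcurrentHashMap of locks keyed by path, where "computeIfAbsent" creates each lock atomically on first use. A minimal sketch under that assumption (class and method names are hypothetical, not the project's code):

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;

public class PerFileLocks {

    // One fair lock per file path, created atomically on first use.
    private static final ConcurrentHashMap<String, Lock> fileLocks = new ConcurrentHashMap<>();

    public static void withFileLock(String fileFullPath, Runnable writeAction) {
        Lock lock = fileLocks.computeIfAbsent(fileFullPath, path -> new ReentrantLock(true));
        lock.lock();
        try {
            writeAction.run(); // e.g. the BufferedWriter block from the diff above.
        } finally {
            lock.unlock();
        }
    }
}

This removes the cross-directory blocking named in the second TODO, at the cost of a map that grows with the number of distinct paths; Guava's Striped locks are a bounded alternative.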