forked from lsmyrnaios/UrlsController
Check and show warning/error message for failed payloads.
This commit is contained in:
parent
068b97dd60
commit
0f5d4dac78
|
@ -207,6 +207,7 @@ public class FileUtils {
|
|||
workerIp = workerInfo.getWorkerIP(); // This won't be null.
|
||||
|
||||
// Get the file-locations.
|
||||
final AtomicInteger numPayloadsToBeHandled = new AtomicInteger();
|
||||
final AtomicInteger numFullTextsFound = new AtomicInteger();
|
||||
final AtomicInteger numFilesFoundFromPreviousAssignmentsBatches = new AtomicInteger();
|
||||
|
||||
|
@ -228,6 +229,8 @@ public class FileUtils {
|
|||
if ( fileLocation == null )
|
||||
return null; // The full-text was not retrieved, go to the next UrlReport.
|
||||
|
||||
numPayloadsToBeHandled.incrementAndGet();
|
||||
|
||||
// Query the payload-table FOR EACH RECORD to get the fileLocation of A PREVIOUS RECORD WITH THE SAME FILE-HASH.
|
||||
// If no result is returned, then this record is not previously found, so go ahead and add it in the list of files to request from the worker.
|
||||
// If a file-location IS returned (for this hash), then this file is already uploaded to the S3. Update the record to point to that file-location and do not request that file from the Worker.
|
||||
|
@ -364,14 +367,28 @@ public class FileUtils {
|
|||
failedBatches ++;
|
||||
} // End of batches.
|
||||
|
||||
if ( failedBatches == numOfBatches )
|
||||
logger.error("None of the " + numOfBatches + " batches could be handled for assignments_" + assignmentsBatchCounter + ", for worker: " + workerId);
|
||||
|
||||
updateUrlReportsToHaveNoFullTextFiles(urlReports, true); // Make sure all records without an S3-Url have < null > file-data (some batches or uploads might have failed).
|
||||
deleteDirectory(new File(curAssignmentsBaseLocation));
|
||||
|
||||
if ( failedBatches == numOfBatches ) {
|
||||
logger.error("None of the " + numOfBatches + " batches could be handled for assignments_" + assignmentsBatchCounter + ", for worker: " + workerId);
|
||||
// Check and warn about the number of failed payloads.
|
||||
// Possible reasons: failed to check their hash in the DB, the file was not found inside the worker, the whole batch failed to be delivered from the worker, or files failed to be uploaded to S3.
|
||||
long finalPayloadsCounter = urlReports.parallelStream().filter(urlReport -> {
|
||||
Payload payload = urlReport.getPayload();
|
||||
return ((payload != null) && (payload.getLocation() != null));
|
||||
}).count();
|
||||
int numInitialPayloads = numPayloadsToBeHandled.get();
|
||||
long numFailedPayloads = (numInitialPayloads - finalPayloadsCounter);
|
||||
if ( numFailedPayloads == numInitialPayloads ) {
|
||||
// This will also be the case if there was no DB failure, but all the batches have failed.
|
||||
logger.error("None of the " + numInitialPayloads + " payloads could be handled for assignments_" + assignmentsBatchCounter + ", for worker: " + workerId);
|
||||
return UploadFullTextsResponse.unsuccessful;
|
||||
} else
|
||||
return UploadFullTextsResponse.successful;
|
||||
} else if ( numFailedPayloads > 0 )
|
||||
logger.warn(numFailedPayloads + " payloads (out of " + numInitialPayloads + ") failed to be processed for assignments_" + assignmentsBatchCounter + ", for worker: " + workerId);
|
||||
|
||||
return UploadFullTextsResponse.successful;
|
||||
}
|
||||
|
||||
|
||||
|
|
Loading…
Reference in New Issue