forked from lsmyrnaios/UrlsController
- Make sure the temp table "current_assignment" from a cancelled previous execution, is dropped and purged on startup.
- Improve logging. - Code cleanup.
This commit is contained in:
parent
83f40a23d9
commit
ab99bc6168
|
@ -139,6 +139,8 @@ public final class ImpalaConnector {
|
|||
statement.execute("CREATE TABLE IF NOT EXISTS " + databaseName + ".assignment (id string, original_url string, workerid string, `date` timestamp) stored as parquet");
|
||||
statement.execute("COMPUTE STATS " + databaseName + ".assignment");
|
||||
|
||||
statement.execute("DROP TABLE IF EXISTS " + ImpalaConnector.databaseName + ".current_assignment PURGE");
|
||||
|
||||
statement.execute("CREATE TABLE IF NOT EXISTS " + databaseName + ".attempt (id string, original_url string, `date` timestamp, status string, error_class string, error_message string) stored as parquet");
|
||||
statement.execute("COMPUTE STATS " + databaseName + ".attempt");
|
||||
|
||||
|
|
|
@ -82,7 +82,6 @@ public class UrlController {
|
|||
List<Assignment> assignments = new ArrayList<>(assignmentsLimit);
|
||||
|
||||
ImpalaConnector.databaseLock.lock();
|
||||
|
||||
Connection con = ImpalaConnector.getInstance().getConnection();
|
||||
if ( con == null ) { // This is already logged in "getConnection()".
|
||||
ImpalaConnector.databaseLock.unlock();
|
||||
|
@ -358,12 +357,12 @@ public class UrlController {
|
|||
preparedInsertPayloadStatement.setString(5, payload.getMime_type());
|
||||
|
||||
// The column "size" in the table is of type "String" so we cast the Long to String. The Parquet-format in the database does not work well with integers.
|
||||
String stringSize = null;
|
||||
String sizeStr = null;
|
||||
Long size = payload.getSize();
|
||||
if ( size != null )
|
||||
stringSize = String.valueOf(size);
|
||||
sizeStr = String.valueOf(size);
|
||||
|
||||
preparedInsertPayloadStatement.setString(6, stringSize);
|
||||
preparedInsertPayloadStatement.setString(6, sizeStr);
|
||||
preparedInsertPayloadStatement.setString(7, payload.getHash());
|
||||
preparedInsertPayloadStatement.setString(8, payload.getLocation());
|
||||
preparedInsertPayloadStatement.setString(9, payload.getProvenance());
|
||||
|
@ -383,7 +382,7 @@ public class UrlController {
|
|||
preparedInsertAttemptStatement.setString(2, payload.getOriginal_url());
|
||||
preparedInsertAttemptStatement.setTimestamp(3, payload.getTimestamp_acquired());
|
||||
preparedInsertAttemptStatement.setString(4, urlReport.getStatus().toString());
|
||||
preparedInsertAttemptStatement.setString(5, String.valueOf(error.getType())); // This covers the case of "null".
|
||||
preparedInsertAttemptStatement.setString(5, String.valueOf(error.getType())); // This covers the case of "null" too.
|
||||
preparedInsertAttemptStatement.setString(6, error.getMessage());
|
||||
preparedInsertAttemptStatement.executeUpdate();
|
||||
} catch (SQLException sqle) {
|
||||
|
@ -403,6 +402,9 @@ public class UrlController {
|
|||
closeStatements(preparedInsertPayloadStatement, preparedInsertAttemptStatement, null); // Do not close the connection here, as we might move forward.
|
||||
}
|
||||
|
||||
if ( payloadErrorMsg != null )
|
||||
logger.debug("Finished inserting the payloads and the attempts into the \"payload\" and \"attempt\" tables, although " + payloadErrorMsg + " Going to merge the parquet files for those tables.");
|
||||
else
|
||||
logger.debug("Finished inserting the payloads and the attempts into the \"payload\" and \"attempt\" tables. Going to merge the parquet files for those tables.");
|
||||
|
||||
String mergeErrorMsg = FileUtils.mergeParquetFiles("payload", con, "", null);
|
||||
|
@ -442,6 +444,7 @@ public class UrlController {
|
|||
ImpalaConnector.closeConnection(con);
|
||||
}
|
||||
|
||||
logger.debug("Finished merging the database tables.");
|
||||
return ResponseEntity.status(HttpStatus.OK).body(payloadErrorMsg);
|
||||
}
|
||||
|
||||
|
|
|
@ -157,8 +157,9 @@ public class FileUtils {
|
|||
numFullTextUrlsFound ++;
|
||||
|
||||
Payload payload = urlReport.getPayload();
|
||||
if ( payload != null )
|
||||
{
|
||||
if ( payload == null )
|
||||
continue;
|
||||
|
||||
String fileLocation = null;
|
||||
|
||||
// Query the payload-table FOR EACH RECORD to get the fileLocation of A PREVIOUS RECORD WITH THE SAME FILE-HASH.
|
||||
|
@ -174,22 +175,21 @@ public class FileUtils {
|
|||
}
|
||||
|
||||
try ( ResultSet resultSet = getFileLocationForHashPreparedStatement.executeQuery() ) {
|
||||
if ( resultSet.next() ) { // Move the "cursor" to the first row. If there is any data..
|
||||
if ( resultSet.next() ) { // Move the "cursor" to the first row. If there is any data, then take the first result (there should not be more, but we still want the first anyway).
|
||||
fileLocation = resultSet.getString(1);
|
||||
if ( fileLocation != null ) { // If the full-text of this record is already-found and uploaded.
|
||||
payload.setLocation(fileLocation); // Set the location to the older identical file, which was uploaded to S3.
|
||||
payload.setLocation(fileLocation); // Set the location to the older identical file, which was uploaded to S3. The other file-data is identical.
|
||||
//logger.debug("The record with ID \"" + payload.getId() + "\" has an \"alreadyRetrieved\" file, with hash \"" + fileHash + "\" and location \"" + fileLocation + "\"."); // DEBUG!
|
||||
numFilesFoundFromPreviousAssignmentsBatches ++;
|
||||
continue;
|
||||
continue; // Do not request the file from the worker, it's already uploaded. Move on.
|
||||
}
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.error("Error when executing or acquiring data from the the \"getFileLocationForHashQuery\"!\n" + e.getMessage());
|
||||
|
||||
// TODO - SHOULD WE RETURN A "UploadFullTextsResponse.databaseError" AND force the caller to not even insert the payloads to the database??
|
||||
// TODO - Since the database will have problems.. there is not point in trying to insert the payloads to Impala (handling it like we tried to insert and got an error).
|
||||
// TODO - In case we DO return, UNLOCK the database-lock and close the Prepared statement (it's not autoclosed here) and the Database connection.
|
||||
|
||||
// TODO - Since the database will have problems.. there is no point in trying to insert the payloads to Impala (we will handle it like: we tried to insert and got an error).
|
||||
// TODO - In case we DO return, UNLOCK the database-lock and close the Prepared statement (it's not auto-closed here) and the Database connection.
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -205,8 +205,7 @@ public class FileUtils {
|
|||
continue;
|
||||
}
|
||||
allFileNamesWithIDsHashMap.put(fileNameWithExtension, payload.getId()); // The keys and the values are not duplicate. Task with ID-1 might have an "ID-1.pdf" file.
|
||||
// While a task with ID-2 can also have an "ID-1.pdf" file, as the pdf-url-2 might be the same with pdf-url-1.
|
||||
}
|
||||
// While a task with ID-2 can also have an "ID-1.pdf" file, as the pdf-url-2 might be the same with pdf-url-1, thus, the ID-2 file was not downloaded again.
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -299,7 +298,7 @@ public class FileUtils {
|
|||
if ( fileFullPath.equals(zipFileFullPath) ) // Exclude the zip-file from uploading.
|
||||
continue;
|
||||
|
||||
// Check if this stored file is related to one or more IDs from the Set.
|
||||
// Check if this stored file is related to one or more IDs from the Set. Defend against malicious file injection. It does not add more overhead, since we already need the "fileRelatedIDs".
|
||||
Set<String> fileRelatedIDs = allFileNamesWithIDsHashMap.get(fileName);
|
||||
if ( fileRelatedIDs.isEmpty() ) { // In case the "fileName" is not inside the "allFileNamesWithIDsHashMap" HashMultimap.
|
||||
logger.error("The stored file \"" + fileName + "\" is not related to any ID which had a file requested from the Worker!");
|
||||
|
@ -314,8 +313,8 @@ public class FileUtils {
|
|||
|
||||
String s3Url = S3ObjectStoreMinIO.uploadToS3(fileName, fileFullPath);
|
||||
if ( s3Url != null ) {
|
||||
setFullTextForMultipleIDs(payloadsHashMultimap, fileRelatedIDs, s3Url); // It checks whether (s3Url != null) and acts accordingly.
|
||||
numUploadedFiles++;
|
||||
setFullTextForMultipleIDs(fileRelatedIDs, payloadsHashMultimap, s3Url); // It checks whether (s3Url != null) and acts accordingly.
|
||||
numUploadedFiles ++;
|
||||
}
|
||||
// Else, the record will have its file-data set to "null", in the end of this method.
|
||||
}
|
||||
|
@ -557,11 +556,11 @@ public class FileUtils {
|
|||
|
||||
/**
|
||||
* Set the fileLocation for all those IDs related to the File. The IDs may have one or more payloads.
|
||||
* @param payloadsHashMultimap
|
||||
* @param fileIDs
|
||||
* @param payloadsHashMultimap
|
||||
* @param s3Url
|
||||
*/
|
||||
public static void setFullTextForMultipleIDs(HashMultimap<String, Payload> payloadsHashMultimap, Set<String> fileIDs, String s3Url)
|
||||
public static void setFullTextForMultipleIDs(Set<String> fileIDs, HashMultimap<String, Payload> payloadsHashMultimap, String s3Url)
|
||||
{
|
||||
for ( String id : fileIDs ) {
|
||||
Set<Payload> payloads = payloadsHashMultimap.get(id);
|
||||
|
|
Loading…
Reference in New Issue