- Increase the waiting time before checking the Docker containers' status, in order to catch configuration crashes.

- Code polishing.
Lampros Smyrnaios 2023-04-10 22:28:53 +03:00
parent c39fef2654
commit 4dc34429f8
4 changed files with 8 additions and 8 deletions

View File

@@ -52,9 +52,9 @@ if [[ justInstall -eq 0 ]]; then
sudo docker --version || handle_error "Docker was not found!" 3
(sudo mkdir -p "$HOME"/tmp/config && sudo cp ./src/main/resources/application.yml "$HOME"/tmp/config) || true # This also replaces an existing "application.yml".
sudo mkdir -p "$HOME"/logs || true
-(sudo docker compose up --build -d && echo -e "The Urls_Controller, Prometheus and Grafana docker-containers started running.\nWaiting 30 seconds before getting their status.") || handle_error "Could not list docker containers!" 4
+(sudo docker compose up --build -d && echo -e "The Urls_Controller, Prometheus and Grafana docker-containers started running.\nWaiting 65 seconds before getting their status.") || handle_error "Could not list docker containers!" 4
# Run in "detached mode" -d (in the background).
-sleep 30
+sleep 65
sudo docker ps -a || handle_error "Could not get the status of docker-containers!" 5
# Using -a to get the status of failed containers as well.
fi

View File

@@ -39,8 +39,8 @@ public class ImpalaConnector {
@PostConstruct
public void init() {
-int mb = 1048576;
-logger.info("Max available memory to the Controller: " + (Runtime.getRuntime().maxMemory() / mb) + " Mb.");
+int OneMb = 1048576;
+logger.info("Max available memory to the Controller: " + (Runtime.getRuntime().maxMemory() / OneMb) + " Mb.");
try {
boolean supportsBatchUpdates = jdbcTemplate.getDataSource().getConnection().getMetaData().supportsBatchUpdates();
logger.info("The database " + (supportsBatchUpdates ? "supports" : "does not support") + " \"BatchUpdates\"!");

View File

@@ -211,7 +211,6 @@ public class UrlsServiceImpl implements UrlsService {
public ResponseEntity<?> addWorkerReport(String curWorkerId, long curReportAssignments, List<UrlReport> urlReports, int sizeOfUrlReports, HttpServletRequest request)
{
// Before continuing with inserts, take and upload the fullTexts from the Worker. Also, update the file-"location".
FileUtils.UploadFullTextsResponse uploadFullTextsResponse = fileUtils.getAndUploadFullTexts(urlReports, request, curReportAssignments, curWorkerId);
if ( uploadFullTextsResponse == FileUtils.UploadFullTextsResponse.databaseError ) {
@@ -246,7 +245,7 @@ public class UrlsServiceImpl implements UrlsService {
boolean hasAttemptParquetFileProblem = false;
boolean hasPayloadParquetFileProblem = false;
-try { // Invoke all the tasks and wait for them to finish before moving to the next batch.
+try { // Invoke all the tasks and wait for them to finish.
List<Future<ParquetReport>> futures = insertsExecutor.invokeAll(callableTasks);
SumParquetSuccess sumParquetSuccess = parquetFileUtils.checkParquetFilesSuccess(futures);
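The invokeAll call above blocks until every callable in the batch has completed. The following is a self-contained sketch of that "invoke all, then check every result" pattern; plain Boolean tasks stand in for the project's ParquetReport tasks, and the pool size is arbitrary.

// Minimal, self-contained sketch of the invokeAll pattern used above (stand-in Boolean tasks).
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class InvokeAllSketch {
    public static void main(String[] args) throws InterruptedException {
        ExecutorService insertsExecutor = Executors.newFixedThreadPool(4);
        List<Callable<Boolean>> callableTasks = new ArrayList<>();
        for ( int i = 0; i < 10; i++ )
            callableTasks.add(() -> Boolean.TRUE);  // each task would create and upload one parquet file

        int failures = 0;
        try {  // Invoke all the tasks and wait for them to finish.
            List<Future<Boolean>> futures = insertsExecutor.invokeAll(callableTasks);
            for ( Future<Boolean> future : futures ) {
                try {
                    if ( !future.get() )  // "get()" does not block here; "invokeAll" has already waited.
                        failures++;
                } catch (ExecutionException ee) {
                    failures++;  // the task itself threw an exception
                }
            }
        } finally {
            insertsExecutor.shutdown();
        }
        System.out.println("Failed tasks: " + failures);
    }
}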

View File

@@ -112,12 +112,13 @@ public class ParquetFileUtils {
else
this.parquetBaseLocalDirectoryPath = parquetBaseDirectoryPath;
// Create the local parquet file base directory, if it does not exist.
java.nio.file.Path parquetDirPath = Paths.get(this.parquetBaseLocalDirectoryPath);
if ( !Files.isDirectory(parquetDirPath) )
Files.createDirectories(parquetDirPath);
// Create the remote directories for uploading the parquet-files, if those directories do not exist.
-// The limited-permissions user in use, does not have permission to acces other users' created directories, so we have to make sure it creates its own.
+// The limited-permissions user in use, does not have permission to access other users' created directories, so we have to make sure it creates its own.
if ( !hdfsParquetBaseDir.endsWith("/") )
hdfsParquetBaseDir += "/";
@@ -215,7 +216,7 @@ public class ParquetFileUtils {
try {
record = new GenericData.Record(attemptsSchema);
record.put("id",payload.getId());
record.put("id", payload.getId());
record.put("original_url", payload.getOriginal_url());
Timestamp timestamp = payload.getTimestamp_acquired();
record.put("date", (timestamp != null) ? timestamp.getTime() : System.currentTimeMillis());