- Improve the "shutdownController.sh" script.

- Set names for the Prometheus and Grafana containers.
- Code polishing.
Lampros Smyrnaios 2023-07-27 18:27:48 +03:00
parent 0699acc999
commit 860c73ea91
5 changed files with 19 additions and 14 deletions

prometheus/docker-compose-prometheus.yml

@@ -4,6 +4,7 @@ services:
prometheus:
image: 'prom/prometheus:latest'
+container_name: prometheus
ports:
- '9090:9090'
command: '--config.file=/etc/prometheus/config.yml'
@@ -12,6 +13,7 @@ services:
grafana:
image: 'grafana/grafana:latest'
+container_name: grafana
ports:
- '3000:3000'
depends_on:

shutdownController.sh

@@ -1,7 +1,10 @@
-echo "Running compose down.."
-sudo docker compose -f ./prometheus/docker-compose-prometheus.yml down
-sudo docker compose -f docker-compose.yml -f ./prometheus/docker-compose-prometheus.yml down
+# This script shuts down (ONLY!) the Controller, by stopping and killing the related containers.
+# It is used during testing.
+# It does not shut down the whole service! The workers will keep running and their work will be lost.
+echo "Running compose down.."
+sudo docker compose -f docker-compose.yml down
+sudo docker compose -f ./prometheus/docker-compose-prometheus.yml down
# In case we need to hard-remove the containers, use the following commands:
#sudo docker stop $(sudo docker ps -aqf "name=^(?:urlscontroller-urls_controller|prometheus-(?:prometheus|grafana))-1$") || true # There may be no active containers

StatsService.java

@@ -4,8 +4,8 @@ import org.springframework.http.ResponseEntity;
public interface StatsService {
-ResponseEntity<?> getNumberOfPayloads(String getPayloadsNumberQuery, String extraMsg, int retryNum);
+ResponseEntity<?> getNumberOfPayloads(String getPayloadsNumberQuery, String extraMsg, int retryCount);
-ResponseEntity<?> getNumberOfRecordsInspectedByServiceThroughCrawling(int retryNum);
+ResponseEntity<?> getNumberOfRecordsInspectedByServiceThroughCrawling(int retryCount);
}
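
For orientation, a minimal sketch of how a caller might invoke this interface. The StatsController class, the request mappings and the query string below are hypothetical (they are not part of this commit); the point is that a caller passes a starting retryCount (presumably 0) and the implementation increments it on each internal retry.

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

@RestController
@RequestMapping("stats")
public class StatsController {    // Hypothetical caller, for illustration only.

    @Autowired
    private StatsService statsService;

    @GetMapping("numberOfPayloads")
    public ResponseEntity<?> getNumberOfPayloads() {
        // The query and the extra message are illustrative; the initial retryCount starts at 0.
        return statsService.getNumberOfPayloads("select count(id) from payload", "payloads", 0);
    }

    @GetMapping("numberOfRecordsInspectedByServiceThroughCrawling")
    public ResponseEntity<?> getNumberOfRecordsInspectedByServiceThroughCrawling() {
        return statsService.getNumberOfRecordsInspectedByServiceThroughCrawling(0);
    }
}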

StatsServiceImpl.java

@@ -24,10 +24,10 @@ public class StatsServiceImpl implements StatsService {
// Thus, we need to have an "error-detection-and-retry" mechanism, in order to avoid returning errors that we know will occur at certain times and that we can overcome.
// The final time-to-return of the results-retrieval methods may be somewhat long, but the alternatives of returning predictable errors, or of locking the DB and slowing down the aggregation system, are even worse.
-public ResponseEntity<?> getNumberOfPayloads(String getNumberQuery, String message, int retryNum)
+public ResponseEntity<?> getNumberOfPayloads(String getNumberQuery, String message, int retryCount)
{
-if ( retryNum > 10 ) {
-String errorMsg = "Could not find the requested payload-type table in an non-merging state, after " + retryNum + " retries!";
+if ( retryCount > 10 ) {
+String errorMsg = "Could not find the requested payload-type table in a non-merging state, after " + retryCount + " retries!";
logger.error(errorMsg);
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(errorMsg);
}
@@ -46,7 +46,7 @@ public class StatsServiceImpl implements StatsService {
String exMsg = e.getMessage();
if ( (exMsg != null) && (exMsg.contains("Could not resolve table reference") || exMsg.contains("Failed to open HDFS file")) ) {
sleep1min();
-return getNumberOfPayloads(getNumberQuery, message, (++retryNum));
+return getNumberOfPayloads(getNumberQuery, message, (++retryCount));
}
String errorMsg = "Problem when executing \"getNumberQuery\": " + getNumberQuery;
logger.error(errorMsg, e);
@@ -56,10 +56,10 @@ public class StatsServiceImpl implements StatsService {
}
-public ResponseEntity<?> getNumberOfRecordsInspectedByServiceThroughCrawling(int retryNum)
+public ResponseEntity<?> getNumberOfRecordsInspectedByServiceThroughCrawling(int retryCount)
{
-if ( retryNum > 10 ) {
-String errorMsg = "Could not find the requested attempt table in an non-merging state, after " + retryNum + " retries!";
+if ( retryCount > 10 ) {
+String errorMsg = "Could not find the requested attempt table in a non-merging state, after " + retryCount + " retries!";
logger.error(errorMsg);
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(errorMsg);
}
@@ -84,7 +84,7 @@ public class StatsServiceImpl implements StatsService {
String exMsg = e.getMessage();
if ( (exMsg != null) && (exMsg.contains("Could not resolve table reference") || exMsg.contains("Failed to open HDFS file")) ) {
sleep1min();
-return getNumberOfRecordsInspectedByServiceThroughCrawling(++retryNum);
+return getNumberOfRecordsInspectedByServiceThroughCrawling(++retryCount);
}
String errorMsg = "Problem when executing \"getInspectedRecordsNumberQuery\": " + getInspectedRecordsNumberQuery;
logger.error(errorMsg, e);
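
Both methods above follow the same error-detection-and-retry pattern described in the class comment: if the exception message indicates the queried table is temporarily unreachable (it is being merged), sleep for a minute and retry with an increased counter, giving up after 10 retries. A condensed sketch of that pattern, meant to live inside StatsServiceImpl; the getSingleCount name, the jdbcTemplate field and the count-returning query are assumptions, while logger, sleep1min() and the error messages are taken from the code above.

private ResponseEntity<?> getSingleCount(String countQuery, String tableDescription, int retryCount)
{
    if ( retryCount > 10 ) {    // Give up after 10 retries, since each retry waits one minute (sleep1min()).
        String errorMsg = "Could not find the requested " + tableDescription + " table in a non-merging state, after " + retryCount + " retries!";
        logger.error(errorMsg);
        return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(errorMsg);
    }
    try {
        Long count = jdbcTemplate.queryForObject(countQuery, Long.class);    // "jdbcTemplate" is an assumption.
        return ResponseEntity.ok(count);
    } catch (Exception e) {
        String exMsg = e.getMessage();
        // These messages indicate that the table is temporarily unavailable (e.g. it is being merged),
        // so wait a minute and retry with an increased counter, instead of returning a predictable error.
        if ( (exMsg != null) && (exMsg.contains("Could not resolve table reference") || exMsg.contains("Failed to open HDFS file")) ) {
            sleep1min();
            return getSingleCount(countQuery, tableDescription, (++retryCount));
        }
        String errorMsg = "Problem when executing \"countQuery\": " + countQuery;
        logger.error(errorMsg, e);
        return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(errorMsg);
    }
}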

UrlsServiceImpl.java

@@ -170,7 +170,7 @@ public class UrlsServiceImpl implements UrlsService {
assignment.setAssignmentsBatchCounter(curAssignmentsBatchCounter);
assignment.setTimestamp(timestamp);
Datasource datasource = new Datasource();
-try { // For each of the 4 columns returned. The indexing starts from 1
+try { // For each of the 4 columns returned, do the following. The column-indexing starts from 1
assignment.setId(rs.getString(1));
assignment.setOriginalUrl(rs.getString(2));
datasource.setId(rs.getString(3));
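
As the polished comment notes, JDBC ResultSet getters use 1-based column indexes, matching the order of the selected columns. A condensed sketch of the row-mapping this hunk belongs to, for reference; the RowCallbackHandler shape, the getAssignmentsQuery name, the fourth-column setName mapping and the setDatasource/assignments.add calls are assumptions, since they are not visible in this hunk.

jdbcTemplate.query(getAssignmentsQuery, (RowCallbackHandler) rs -> {
    Assignment assignment = new Assignment();
    assignment.setAssignmentsBatchCounter(curAssignmentsBatchCounter);
    assignment.setTimestamp(timestamp);
    Datasource datasource = new Datasource();
    try { // For each of the 4 columns returned, do the following. The column-indexing starts from 1.
        assignment.setId(rs.getString(1));
        assignment.setOriginalUrl(rs.getString(2));
        datasource.setId(rs.getString(3));
        datasource.setName(rs.getString(4));    // Hypothetical: the fourth column is not shown in this hunk.
    } catch (SQLException sqle) {
        logger.error("Problem when extracting the assignment-data from the ResultSet!", sqle);
    }
    assignment.setDatasource(datasource);
    assignments.add(assignment);    // "assignments" is assumed to be a list declared earlier in the method.
});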