forked from lsmyrnaios/UrlsController
- Improve the "shutdownController.sh" script.
- Set names for the Prometheus and Grafana containers.
- Code polishing.
parent 0699acc999
commit 860c73ea91

prometheus/docker-compose-prometheus.yml

@@ -4,6 +4,7 @@ services:

   prometheus:
     image: 'prom/prometheus:latest'
+    container_name: prometheus
     ports:
       - '9090:9090'
     command: '--config.file=/etc/prometheus/config.yml'
@@ -12,6 +13,7 @@ services:

   grafana:
     image: 'grafana/grafana:latest'
+    container_name: grafana
     ports:
       - '3000:3000'
     depends_on:

shutdownController.sh

@@ -1,7 +1,10 @@
-echo "Running compose down.."
-sudo docker compose -f ./prometheus/docker-compose-prometheus.yml down
-sudo docker compose -f docker-compose.yml -f ./prometheus/docker-compose-prometheus.yml down
+# This script shuts down (ONLY!) the Controller, by stopping and killing the related containers.
+# It is used during testing.
+# It does not shuts down the whole service! The workers will keep running and their work will be lost.

+echo "Running compose down.."
+sudo docker compose -f docker-compose.yml down
+sudo docker compose -f ./prometheus/docker-compose-prometheus.yml down

 # In case we need to hard-remove the containers, use the following commands:
 #sudo docker stop $(sudo docker ps -aqf "name=^(?:urlscontroller-urls_controller|prometheus-(?:prometheus|grafana))-1$") || true # There may be no active containers

StatsService.java

@@ -4,8 +4,8 @@ import org.springframework.http.ResponseEntity;

 public interface StatsService {

-    ResponseEntity<?> getNumberOfPayloads(String getPayloadsNumberQuery, String extraMsg, int retryNum);
+    ResponseEntity<?> getNumberOfPayloads(String getPayloadsNumberQuery, String extraMsg, int retryCount);

-    ResponseEntity<?> getNumberOfRecordsInspectedByServiceThroughCrawling(int retryNum);
+    ResponseEntity<?> getNumberOfRecordsInspectedByServiceThroughCrawling(int retryCount);

 }
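The interface change above only renames the retry parameter. For orientation, a hypothetical caller would seed that counter with 0 and let the implementation increment it on retries; the sketch below assumes exactly that, and the query text, message and wiring are made-up placeholders, not the project's actual controller code.

import org.springframework.http.ResponseEntity;

// Hypothetical caller of the StatsService interface shown above.
// Assumes it sits in the same package as StatsService and that spring-web is on the classpath.
public class StatsCallerSketch {

    private final StatsService statsService;

    public StatsCallerSketch(StatsService statsService) {
        this.statsService = statsService;
    }

    public ResponseEntity<?> totalPayloads() {
        // Placeholder query and message; the initial retryCount of 0 is an assumption.
        return statsService.getNumberOfPayloads("select count(id) from payload", "total payloads", 0);
    }

    public ResponseEntity<?> recordsInspectedThroughCrawling() {
        return statsService.getNumberOfRecordsInspectedByServiceThroughCrawling(0);
    }
}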

StatsServiceImpl.java

@@ -24,10 +24,10 @@ public class StatsServiceImpl implements StatsService {
     // Thus, we need to have an "error-detection-and-retry" mechanism, in order to avoid returning error that we know will exist in certain times and we can overcome them.
     // The final time-to-return of the results-retrieval methods may be somewhat large, but the alternative of returning predictable errors or locking the DB and slowing down the aggregation system are even worse.

-    public ResponseEntity<?> getNumberOfPayloads(String getNumberQuery, String message, int retryNum)
+    public ResponseEntity<?> getNumberOfPayloads(String getNumberQuery, String message, int retryCount)
     {
-        if ( retryNum > 10 ) {
-            String errorMsg = "Could not find the requested payload-type table in an non-merging state, after " + retryNum + " retries!";
+        if ( retryCount > 10 ) {
+            String errorMsg = "Could not find the requested payload-type table in an non-merging state, after " + retryCount + " retries!";
             logger.error(errorMsg);
             return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(errorMsg);
         }
@@ -46,7 +46,7 @@ public class StatsServiceImpl implements StatsService {
             String exMsg = e.getMessage();
             if ( (exMsg != null) && (exMsg.contains("Could not resolve table reference") || exMsg.contains("Failed to open HDFS file")) ) {
                 sleep1min();
-                return getNumberOfPayloads(getNumberQuery, message, (++retryNum));
+                return getNumberOfPayloads(getNumberQuery, message, (++retryCount));
             }
             String errorMsg = "Problem when executing \"getNumberQuery\": " + getNumberQuery;
             logger.error(errorMsg, e);
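The hunks above are the error-detection-and-retry mechanism described in the class comments: when the exception message indicates the known transient table-merging/HDFS state, sleep for a minute and retry; otherwise, or after too many retries, report the error. Below is a minimal, generalized sketch of that pattern, written as a standalone iterative helper rather than the recursive form the actual code uses; the helper name and its shape are my own, only the two error-message checks and the retry cap come from the hunks.

import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;

public class TransientErrorRetrySketch {

    // Hypothetical, generalized form of the "error-detection-and-retry" idea:
    // retry only when the exception message points to the known transient DB state
    // ("Could not resolve table reference" / "Failed to open HDFS file"), waiting a
    // minute between attempts and giving up after a fixed number of retries.
    public static <T> T retryOnTransientError(Supplier<T> query, int maxRetries) throws InterruptedException {
        for ( int retryCount = 0; ; retryCount++ ) {
            try {
                return query.get();
            } catch (RuntimeException e) {
                String exMsg = e.getMessage();
                boolean isTransient = (exMsg != null)
                        && (exMsg.contains("Could not resolve table reference") || exMsg.contains("Failed to open HDFS file"));
                if ( !isTransient || (retryCount >= maxRetries) )
                    throw e;    // A non-transient or persistent error: propagate it to the caller.
                TimeUnit.MINUTES.sleep(1);  // Same idea as sleep1min() in the hunks above.
            }
        }
    }
}

Functionally this mirrors the recursive call with (++retryCount): both cap the number of attempts and only absorb the two known transient error messages.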
@@ -56,10 +56,10 @@ public class StatsServiceImpl implements StatsService {
     }


-    public ResponseEntity<?> getNumberOfRecordsInspectedByServiceThroughCrawling(int retryNum)
+    public ResponseEntity<?> getNumberOfRecordsInspectedByServiceThroughCrawling(int retryCount)
     {
-        if ( retryNum > 10 ) {
-            String errorMsg = "Could not find the requested attempt table in an non-merging state, after " + retryNum + " retries!";
+        if ( retryCount > 10 ) {
+            String errorMsg = "Could not find the requested attempt table in an non-merging state, after " + retryCount + " retries!";
             logger.error(errorMsg);
             return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(errorMsg);
         }
@@ -84,7 +84,7 @@ public class StatsServiceImpl implements StatsService {
             String exMsg = e.getMessage();
             if ( (exMsg != null) && (exMsg.contains("Could not resolve table reference") || exMsg.contains("Failed to open HDFS file")) ) {
                 sleep1min();
-                return getNumberOfRecordsInspectedByServiceThroughCrawling(++retryNum);
+                return getNumberOfRecordsInspectedByServiceThroughCrawling(++retryCount);
             }
             String errorMsg = "Problem when executing \"getInspectedRecordsNumberQuery\": " + getInspectedRecordsNumberQuery;
             logger.error(errorMsg, e);

UrlsServiceImpl.java

@@ -170,7 +170,7 @@ public class UrlsServiceImpl implements UrlsService {
             assignment.setAssignmentsBatchCounter(curAssignmentsBatchCounter);
             assignment.setTimestamp(timestamp);
             Datasource datasource = new Datasource();
-            try { // For each of the 4 columns returned. The indexing starts from 1
+            try { // For each of the 4 columns returned, do the following. The column-indexing starts from 1
                 assignment.setId(rs.getString(1));
                 assignment.setOriginalUrl(rs.getString(2));
                 datasource.setId(rs.getString(3));
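The reworded comment in this hunk refers to the fact that JDBC ResultSet columns are indexed starting at 1: the comment says four columns are returned, and the visible reads use getString(1) through getString(3) before the hunk is cut off. Below is a tiny self-contained demo of that 1-based indexing, assuming the H2 in-memory database driver is on the classpath; the table, columns and query are invented for the demo and are not the project's actual assignments query.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

// Demonstrates JDBC's 1-based column indexing, which the comment in the hunk above points out.
// Assumes the H2 in-memory database driver is on the classpath; schema and data are made up.
public class ResultSetIndexingDemo {

    public static void main(String[] args) throws Exception {
        try ( Connection con = DriverManager.getConnection("jdbc:h2:mem:demo");
              Statement st = con.createStatement() ) {
            st.execute("CREATE TABLE assignment (id VARCHAR(50), original_url VARCHAR(200), datasource_id VARCHAR(50), datasource_name VARCHAR(100))");
            st.execute("INSERT INTO assignment VALUES ('a1', 'http://example.org/record/1', 'ds1', 'Example Datasource')");
            try ( ResultSet rs = st.executeQuery("SELECT id, original_url, datasource_id, datasource_name FROM assignment") ) {
                while ( rs.next() ) {
                    String id = rs.getString(1);             // the first selected column is index 1, not 0
                    String originalUrl = rs.getString(2);
                    String datasourceId = rs.getString(3);
                    String datasourceName = rs.getString(4);
                    System.out.println(id + " | " + originalUrl + " | " + datasourceId + " | " + datasourceName);
                }
            }
        }
    }
}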