package org.gcube.accounting.aggregator.elaboration;

import java.io.File;
import java.text.DateFormat;
import java.util.Calendar;
import java.util.Date;

import org.gcube.accounting.aggregator.aggregation.AggregationInfo;
import org.gcube.accounting.aggregator.aggregation.AggregationType;
import org.gcube.accounting.aggregator.aggregation.Aggregator;
import org.gcube.accounting.aggregator.directory.FileSystemDirectoryStructure;
import org.gcube.accounting.aggregator.persist.Persist;
import org.gcube.accounting.aggregator.persistence.CouchBaseConnector;
import org.gcube.accounting.aggregator.persistence.CouchBaseConnector.SUFFIX;
import org.gcube.accounting.aggregator.plugin.AccountingAggregatorPlugin;
import org.gcube.accounting.aggregator.status.AggregationState;
import org.gcube.accounting.aggregator.status.AggregationStatus;
import org.gcube.accounting.aggregator.utility.Constant;
import org.gcube.accounting.aggregator.utility.Utility;
import org.gcube.documentstore.records.DSMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.couchbase.client.java.Bucket;
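
/*
 * Minimal usage sketch (assumption: the AggregationStatus instance is created
 * or recovered elsewhere, e.g. by the scheduling plugin):
 *
 *   Elaborator elaborator = new Elaborator(aggregationStatus, persistStartTime, persistEndTime);
 *   elaborator.elaborate(false, false, false); // no forcing flags
 */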
/**
 * @author Luca Frosini (ISTI - CNR)
 */
public class Elaborator {

	private static Logger logger = LoggerFactory.getLogger(Elaborator.class);

	public final static String ORIGINAL_SUFFIX = ".original.json";
	public final static String AGGREGATED_SUFFIX = ".aggregated.json";

	protected AggregationStatus aggregationStatus;
	protected final Date persistStartTime;
	protected final Date persistEndTime;

	public Elaborator(AggregationStatus aggregationStatus, Date persistStartTime, Date persistEndTime) throws Exception {
		this.aggregationStatus = aggregationStatus;
		this.persistStartTime = persistStartTime;
		this.persistEndTime = persistEndTime;
	}
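
	/**
	 * Checks whether this aggregation can already be started. The calendar is
	 * aligned to the beginning of the current period for the configured
	 * {@link AggregationType}, then moved back by
	 * {@link AggregationType#getNotAggregableBefore()} units: the aggregation is
	 * allowed only if its start date falls before that threshold.
	 *
	 * @return true if the aggregation is allowed, false otherwise
	 */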
	public boolean isAggregationAllowed() {
		AggregationInfo aggregationInfo = aggregationStatus.getAggregationInfo();
		Date aggregationStartDate = aggregationInfo.getAggregationStartDate();
		AggregationType aggregationType = aggregationInfo.getAggregationType();

		boolean allowed = false;

		// Align the calendar to the beginning of the current aggregation period
		Calendar calendar = Utility.getUTCCalendarInstance();
		switch (aggregationType) {
			case DAILY:
				// nothing to align: resetting the time below is enough
				break;

			case MONTHLY:
				calendar.set(Calendar.DAY_OF_MONTH, 1);
				break;

			case YEARLY:
				calendar.set(Calendar.DAY_OF_MONTH, 1);
				calendar.set(Calendar.MONTH, Calendar.JANUARY);
				break;

			default:
				break;
		}

		calendar.set(Calendar.HOUR_OF_DAY, 0);
		calendar.set(Calendar.MINUTE, 0);
		calendar.set(Calendar.SECOND, 0);
		calendar.set(Calendar.MILLISECOND, 0);

		// Move back by the grace interval: more recent periods must not be aggregated yet
		calendar.add(aggregationType.getCalendarField(), -aggregationType.getNotAggregableBefore());

		logger.trace("Checking if {} is before {}",
				aggregationType.getDateFormat().format(aggregationStartDate),
				aggregationType.getDateFormat().format(calendar.getTime()));

		if (aggregationStartDate.before(calendar.getTime())) {
			allowed = true;
		}

		return allowed;
	}
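
	/**
	 * Performs the whole elaboration: verifies that the aggregation is allowed
	 * and not already completed or currently running, updates the
	 * {@link AggregationState}, aggregates the records read from the source
	 * bucket into backup files and finally persists the result.
	 *
	 * @param forceEarlyAggregation aggregate even if the grace period has not elapsed yet
	 * @param forceRerun            rerun even if the aggregation is already COMPLETED
	 * @param forceRestart          restart even if the status has been modified recently
	 */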
	public void elaborate(boolean forceEarlyAggregation, boolean forceRerun, boolean forceRestart) throws Exception {
		Calendar startTime = Utility.getUTCCalendarInstance();

		final AggregationInfo aggregationInfo = aggregationStatus.getAggregationInfo();
		final Date aggregationStartDate = aggregationInfo.getAggregationStartDate();
		final AggregationType aggregationType = aggregationInfo.getAggregationType();

		if (!isAggregationAllowed()) {
			if (!forceEarlyAggregation) {
				logger.info("Too early to start aggregation {}. {} aggregation is not allowed for the last {} {}",
						DSMapper.getObjectMapper().writeValueAsString(aggregationStatus),
						aggregationType,
						aggregationType.getNotAggregableBefore(),
						aggregationType.name().toLowerCase().replace("ly", "s").replace("dais", "days"));
				return;
			} else {
				logger.info("The aggregation has been forced even though it is too early to start it {}. {} aggregation should not be made for the last {} {}",
						DSMapper.getObjectMapper().writeValueAsString(aggregationStatus),
						aggregationType,
						aggregationType.getNotAggregableBefore(),
						aggregationType.name().toLowerCase().replace("ly", "s").replace("dais", "days"));
			}
		}

		if (aggregationStatus.getAggregationState() == null) {
			aggregationStatus.setAggregationState(AggregationState.STARTED, startTime, true);
		} else {
			if (aggregationStatus.getAggregationState() == AggregationState.COMPLETED) {
				if (!forceRerun) {
					logger.info("{} is {}. Nothing to do :-).\nDetails {}",
							AggregationStatus.class.getSimpleName(),
							aggregationStatus.getAggregationState(),
							DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
					return;
				} else {
					logger.info("Last {} is {} and the aggregation should not be needed, but it has been forced. Details {}.",
							AggregationStatus.class.getSimpleName(),
							aggregationStatus.getAggregationState(),
							DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
					aggregationStatus = new AggregationStatus(aggregationStatus);
					aggregationStatus.setAggregationState(AggregationState.RESTARTED, startTime, false);
					aggregationStatus.setAggregationState(AggregationState.STARTED, startTime, true);

					/*
					 * The last update time has just been modified: the restart must be
					 * forced, otherwise the aggregation cannot continue.
					 */
					forceRestart = true;
				}
			}

			Calendar now = Utility.getUTCCalendarInstance();
			now.add(Constant.CALENDAR_FIELD_TO_SUBSTRACT_TO_CONSIDER_UNTERMINATED, -Constant.UNIT_TO_SUBSTRACT_TO_CONSIDER_UNTERMINATED);

			if (aggregationStatus.getLastUpdateTime().after(now)) {
				if (!forceRestart) {
					logger.info("Cannot elaborate {} because it has been modified in the last {} {}",
							DSMapper.getObjectMapper().writeValueAsString(aggregationStatus),
							Constant.UNIT_TO_SUBSTRACT_TO_CONSIDER_UNTERMINATED,
							Constant.CALENDAR_FIELD_TO_SUBSTRACT_TO_CONSIDER_UNTERMINATED == Calendar.HOUR_OF_DAY ? "hours" : "units");
					return;
				} else {
					logger.info("Normally {} could not be elaborated because it has been modified in the last {} {}, but the restart has been forced",
							DSMapper.getObjectMapper().writeValueAsString(aggregationStatus),
							Constant.UNIT_TO_SUBSTRACT_TO_CONSIDER_UNTERMINATED,
							Constant.CALENDAR_FIELD_TO_SUBSTRACT_TO_CONSIDER_UNTERMINATED == Calendar.HOUR_OF_DAY ? "hours" : "units");
				}
			}

			aggregationStatus.updateLastUpdateTime(true);
		}

		String recordType = aggregationInfo.getRecordType();

		FileSystemDirectoryStructure fileSystemDirectoryStructure = new FileSystemDirectoryStructure();
		File elaborationDirectory = fileSystemDirectoryStructure.getTargetFolder(aggregationType, aggregationStartDate);

		Bucket srcBucket = CouchBaseConnector.getInstance().getBucket(recordType, aggregationInfo.getAggregationType(), SUFFIX.src);
		// Bucket dstBucket = CouchBaseConnector.getInstance().getBucket(recordType, aggregationInfo.getAggregationType(), SUFFIX.dst);

		File originalRecordsbackupFile = getOriginalRecordsBackupFile(elaborationDirectory, recordType);
		File aggregateRecordsBackupFile = getAggregatedRecordsBackupFile(originalRecordsbackupFile);

		Aggregator aggregator = new Aggregator(aggregationStatus, srcBucket, originalRecordsbackupFile, aggregateRecordsBackupFile);
		aggregator.aggregate();

		Calendar now = Utility.getUTCCalendarInstance();
		/*
		 * now is passed as an argument to the isTimeElapsed function to avoid the
		 * (rare) situation in which both checks succeed because the first
		 * invocation happens before midnight and the second one after midnight
		 * (i.e. on the next day).
		 */
		// if (Utility.isTimeElapsed(now, persistStartTime) && !Utility.isTimeElapsed(now, persistEndTime)) {
		// Persist persist = new Persist(aggregationStatus, srcBucket, dstBucket, originalRecordsbackupFile, aggregateRecordsBackupFile, recordType);
		Persist persist = new Persist(aggregationStatus, srcBucket, originalRecordsbackupFile, aggregateRecordsBackupFile, recordType);
		persist.recover();

		/*
		} else {
			logger.info("Cannot delete/insert document before {} and after {}.",
					AccountingAggregatorPlugin.LOCAL_TIME_DATE_FORMAT.format(persistStartTime),
					AccountingAggregatorPlugin.LOCAL_TIME_DATE_FORMAT.format(persistEndTime));
		}
		*/
	}
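
	/**
	 * Builds the backup file for the original (non-aggregated) records inside
	 * the given elaboration directory. The file name is the last token of the
	 * formatted aggregation start date, followed by the record type name and
	 * {@link #ORIGINAL_SUFFIX}.
	 */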
	protected File getOriginalRecordsBackupFile(File elaborationDirectory, String name) throws Exception {
		AggregationInfo aggregationInfo = aggregationStatus.getAggregationInfo();
		Date aggregationStartDate = aggregationInfo.getAggregationStartDate();
		AggregationType aggregationType = aggregationInfo.getAggregationType();

		DateFormat dateFormat = aggregationType.getDateFormat();
		String dateString = dateFormat.format(aggregationStartDate);
		String[] splittedDate = dateString.split(AggregationType.DATE_SEPARATOR);

		String backupFileName = splittedDate[splittedDate.length - 1] + "-" + name;
		File originalRecordsbackupFile = new File(elaborationDirectory, backupFileName + ORIGINAL_SUFFIX);
		return originalRecordsbackupFile;
	}
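
	/**
	 * Derives the backup file for the aggregated records from the original one,
	 * replacing {@link #ORIGINAL_SUFFIX} with {@link #AGGREGATED_SUFFIX}.
	 */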
	protected File getAggregatedRecordsBackupFile(File originalRecordsbackupFile) throws Exception {
		File aggregateRecordsBackupFile = new File(originalRecordsbackupFile.getParentFile(),
				originalRecordsbackupFile.getName().replace(ORIGINAL_SUFFIX, AGGREGATED_SUFFIX));
		return aggregateRecordsBackupFile;
	}
}