package org.gcube.accounting.aggregator.persist;

import java.io.File;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;

import org.gcube.accounting.aggregator.directory.WorkSpaceDirectoryStructure;
import org.gcube.accounting.aggregator.elaboration.Elaborator;
import org.gcube.accounting.aggregator.status.AggregationState;
import org.gcube.accounting.aggregator.status.AggregationStatus;
import org.gcube.accounting.aggregator.utility.Utility;
import org.gcube.accounting.aggregator.workspace.WorkSpaceManagement;
import org.gcube.accounting.datamodel.usagerecords.ServiceUsageRecord;
import org.gcube.common.storagehub.client.dsl.FolderContainer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
|
|
* @author Luca Frosini (ISTI - CNR)
|
|
*/
|
|
public class Persist {
|
|
|
|
protected static Logger logger = LoggerFactory.getLogger(Persist.class);
|
|
|
|
protected final AggregationStatus aggregationStatus;
|
|
|
|
// protected final Bucket originalRecordBucket;
|
|
// protected final Bucket aggregatedRecordBucket;
|
|
|
|
protected final File originalRecordsbackupFile;
|
|
protected final File aggregateRecordsBackupFile;
|
|
|
|
protected final String recordType;
|
|
|
|
/*
|
|
public Persist(AggregationStatus aggregationStatus,
|
|
Bucket originalRecordBucket, Bucket aggregatedRecordBucket,
|
|
File originalRecordsbackupFile, File aggregateRecordsBackupFile, String recordType) {
|
|
*/
|
|
public Persist(AggregationStatus aggregationStatus, File originalRecordsbackupFile, File aggregateRecordsBackupFile, String recordType) {
|
|
super();
|
|
this.aggregationStatus = aggregationStatus;
|
|
|
|
// this.originalRecordBucket = originalRecordBucket;
|
|
// this.aggregatedRecordBucket = aggregatedRecordBucket;
|
|
|
|
this.originalRecordsbackupFile = originalRecordsbackupFile;
|
|
this.aggregateRecordsBackupFile = aggregateRecordsBackupFile;
|
|
|
|
this.recordType = recordType;
|
|
}
|
|
|
|
private void setAggregationStateToCompleted(Calendar now) throws Exception {
|
|
originalRecordsbackupFile.delete();
|
|
aggregateRecordsBackupFile.delete();
|
|
File malformedRecords = Utility.getMalformatedFile(aggregateRecordsBackupFile);
|
|
if(malformedRecords.exists()){
|
|
malformedRecords.delete();
|
|
}
|
|
aggregationStatus.setAggregationState(AggregationState.COMPLETED, now, true);
|
|
}
|
|
|
|
public void recover() throws Exception{
|
|
if(aggregationStatus.getAggregatedRecordsNumber()==aggregationStatus.getOriginalRecordsNumber()){
|
|
Calendar now = Utility.getUTCCalendarInstance();
|
|
|
|
if(aggregationStatus.getAggregatedRecordsNumber()==0) {
|
|
setAggregationStateToCompleted(now);
|
|
return;
|
|
}
|
|
|
|
// Giving the rewrite rules the number of records could be the same but the calledMAthods has been replaced
|
|
//
|
|
// if(aggregationStatus.getOriginalRecordsNumber()==aggregationStatus.getAggregatedRecordsNumber()){
|
|
//
|
|
// logger.info("{} - OriginalRecords are {}. AggregatedRecords are {} ({}=={}). All records were already aggregated. Setting {} to {}",
|
|
// aggregationStatus.getAggregationInfo(),
|
|
// aggregationStatus.getOriginalRecordsNumber(),
|
|
// aggregationStatus.getAggregatedRecordsNumber(),
|
|
// aggregationStatus.getOriginalRecordsNumber(),
|
|
// aggregationStatus.getAggregatedRecordsNumber(),
|
|
// AggregationState.class.getSimpleName(), AggregationState.COMPLETED);
|
|
// setAggregationStateToCompleted(now);
|
|
// return;
|
|
// }
|
|
//
|
|
|
|
}
|
|
|
|
if(AggregationState.canContinue(aggregationStatus.getAggregationState(),AggregationState.AGGREGATED)){
|
|
// For Each original row stored on file it remove them from Bucket.
|
|
// At the end of elaboration set AgrgegationStatus to DELETED
|
|
// Then save the file in Workspace and set AgrgegationStatus to COMPLETED
|
|
DeleteDocument deleteDocument = new DeleteDocument(aggregationStatus, originalRecordsbackupFile);
|
|
deleteDocument.elaborate();
|
|
}
|
|
|
|
InsertDocument insertDocument = new InsertDocument(aggregationStatus, aggregateRecordsBackupFile);
|
|
boolean serviceUsageRecordElaboration = recordType.compareTo(ServiceUsageRecord.class.newInstance().getRecordType())==0 ? true : false;
|
|
insertDocument.setServiceUsageRecordElaboration(serviceUsageRecordElaboration);
|
|
if(AggregationState.canContinue(aggregationStatus.getAggregationState(),AggregationState.DELETED)){
|
|
// For Each aggregated row stored on file it add them to Bucket. At the end of elaboration set AggregationStatus to ADDED
|
|
insertDocument.elaborate();
|
|
}
|
|
|
|
if(AggregationState.canContinue(aggregationStatus.getAggregationState(),AggregationState.ADDED)){
|
|
Calendar now = Utility.getUTCCalendarInstance();
|
|
WorkSpaceDirectoryStructure workspaceDirectoryStructure = new WorkSpaceDirectoryStructure();
|
|
FolderContainer targetFolder = workspaceDirectoryStructure.getTargetFolder(aggregationStatus.getAggregationInfo().getAggregationType(), aggregationStatus.getAggregationInfo().getAggregationStartDate());
|
|
|
|
List<File> files = new ArrayList<>();
|
|
files.add(originalRecordsbackupFile);
|
|
files.add(aggregateRecordsBackupFile);
|
|
|
|
String zipFilename = originalRecordsbackupFile.getName().replace(Elaborator.ORIGINAL_SUFFIX, "");
|
|
|
|
File malformedRecords = Utility.getMalformatedFile(aggregateRecordsBackupFile);
|
|
if(malformedRecords.exists()){
|
|
files.add(malformedRecords);
|
|
zipFilename = originalRecordsbackupFile.getName().replace(Elaborator.ORIGINAL_SUFFIX, "-with-malformed");
|
|
}
|
|
|
|
if(serviceUsageRecordElaboration) {
|
|
files.add(insertDocument.getCalledMethodCSVFile());
|
|
}
|
|
|
|
WorkSpaceManagement.getInstance().zipAndBackupFiles(targetFolder, zipFilename, files);
|
|
|
|
|
|
setAggregationStateToCompleted(now);
|
|
}
|
|
|
|
}
|
|
|
|
}
|