Merged from private branch. Refactored accounting-aggregator-plugin. Fixes #9419

git-svn-id: https://svn.d4science.research-infrastructures.eu/gcube/trunk/accounting/accounting-aggregator-se-plugin@152682 82a268e6-3cf1-43bd-a215-b396298e98cf
Luca Frosini 2017-09-05 15:13:50 +00:00
parent dfa53c5f2e
commit d3c44831b7
48 changed files with 3129 additions and 1926 deletions

pom.xml

@@ -16,8 +16,6 @@
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<distroDirectory>distro</distroDirectory>
<serviceClass>Accounting</serviceClass>
<!-- <maven.compiler.source>1.7</maven.compiler.source> -->
</properties>
<scm>
@@ -29,11 +27,11 @@
<dependencies>
<dependency>
<groupId>org.gcube.distribution</groupId>
<artifactId>maven-smartgears-bom</artifactId>
<artifactId>gcube-bom</artifactId>
<version>LATEST</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
@@ -44,82 +42,44 @@
<version>[1.0.0-SNAPSHOT, 2.0.0-SNAPSHOT)</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.gcube.common</groupId>
<artifactId>common-authorization</artifactId>
</dependency>
<dependency>
<groupId>org.gcube.common</groupId>
<artifactId>authorization-client</artifactId>
</dependency>
<dependency>
<groupId>org.gcube.core</groupId>
<artifactId>common-scope</artifactId>
<scope>provided</scope>
</dependency>
<!-- Home Library -->
<dependency>
<groupId>org.gcube.common</groupId>
<artifactId>home-library-jcr</artifactId>
<version>[2.0.0-SNAPSHOT,3.0.0-SNAPSHOT)</version>
</dependency>
<dependency>
<groupId>org.gcube.common</groupId>
<artifactId>home-library</artifactId>
<version>[2.0.0-SNAPSHOT,3.0.0-SNAPSHOT)</version>
<scope>compile</scope>
</dependency>
<!-- END Home Library -->
<dependency>
<groupId>org.gcube.common</groupId>
<artifactId>common-authorization</artifactId>
</dependency>
<dependency>
<groupId>org.gcube.common</groupId>
<artifactId>authorization-client</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<groupId>org.gcube.common</groupId>
<artifactId>common-authorization</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.gcube.resources.discovery</groupId>
<artifactId>ic-client</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.gcube.core</groupId>
<artifactId>common-encryption</artifactId>
<scope>provided</scope>
</dependency>
<!-- CouchBase libraries -->
<dependency>
<groupId>com.couchbase.client</groupId>
<artifactId>java-client</artifactId>
<version>2.2.3</version>
</dependency>
<dependency>
<groupId>com.couchbase.client</groupId>
<artifactId>core-io</artifactId>
<version>[1.2.3,2.0.0)</version>
<scope>compile</scope>
</dependency>
<!-- END CouchBase libraries -->
<!-- Document Store and accounting lib -->
<dependency>
<groupId>org.gcube.data.publishing</groupId>
<artifactId>document-store-lib-couchbase</artifactId>
<version>[1.0.1-SNAPSHOT, 2.0.0-SNAPSHOT)</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.gcube.data.publishing</groupId>
<artifactId>document-store-lib</artifactId>
<scope>provided</scope>
</dependency>
<!-- Document Store and Accounting libraries -->
<dependency>
<groupId>org.gcube.accounting</groupId>
<artifactId>accounting-lib</artifactId>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>com.google.code.gson</groupId>
<artifactId>gson</artifactId>
<version>2.3.1</version>
<groupId>com.couchbase.client</groupId>
<artifactId>java-client</artifactId>
<version>2.2.7</version>
<scope>provided</scope>
</dependency>
<!-- END Document Store and Accounting libraries -->
<!-- Test libs -->
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
@@ -132,6 +92,7 @@
<version>1.0.13</version>
<scope>test</scope>
</dependency>
<!-- END Test libs -->
</dependencies>
<build>
<plugins>


@@ -0,0 +1,61 @@
package org.gcube.accounting.aggregator.aggregation;
import java.util.Date;
import org.gcube.accounting.aggregator.utility.Constant;
import com.fasterxml.jackson.annotation.JsonFormat;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public class AggregationInfo {
protected String recordType;
protected AggregationType aggregationType;
@JsonFormat(shape=JsonFormat.Shape.STRING, pattern=Constant.DATETIME_PATTERN)
protected Date aggregationStartDate;
@JsonFormat(shape=JsonFormat.Shape.STRING, pattern=Constant.DATETIME_PATTERN)
protected Date aggregationEndDate;
// Needed for Jackson unmarshalling
@SuppressWarnings("unused")
private AggregationInfo(){}
public AggregationInfo(String recordType, AggregationType aggregationType, Date aggregationStartDate,
Date aggregationEndDate) {
super();
this.recordType = recordType;
this.aggregationType = aggregationType;
this.aggregationStartDate = aggregationStartDate;
this.aggregationEndDate = aggregationEndDate;
}
public Date getAggregationStartDate() {
return aggregationStartDate;
}
public Date getAggregationEndDate() {
return aggregationEndDate;
}
public AggregationType getAggregationType() {
return aggregationType;
}
public String getRecordType() {
return recordType;
}
@Override
public String toString(){
return String.format("[%s %s %s -> %s]",
recordType, aggregationType,
aggregationType.getDateFormat().format(aggregationStartDate),
aggregationType.getDateFormat().format(aggregationEndDate));
}
}
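A minimal usage sketch (not part of this commit; startDate and endDate are hypothetical java.util.Date values) showing how the @JsonFormat annotations drive the marshalling performed via DSMapper elsewhere in this codebase:

AggregationInfo info = new AggregationInfo("ServiceUsageRecord", AggregationType.DAILY, startDate, endDate);
String json = DSMapper.getObjectMapper().writeValueAsString(info);
AggregationInfo back = DSMapper.getObjectMapper().readValue(json, AggregationInfo.class);
// Both dates travel as strings formatted with Constant.DATETIME_PATTERN;
// the private no-arg constructor lets Jackson rebuild the instance.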


@@ -0,0 +1,52 @@
package org.gcube.accounting.aggregator.aggregation;
import java.text.DateFormat;
import java.util.Calendar;
import org.gcube.accounting.aggregator.utility.Utility;
/**
* @author Alessandro Pieve (ISTI - CNR)
* @author Luca Frosini (ISTI - CNR)
*/
public enum AggregationType {
DAILY(Calendar.DAY_OF_MONTH, "yyyy/MM/dd", 7),
MONTHLY(Calendar.MONTH, "yyyy/MM", 3),
YEARLY(Calendar.YEAR, "yyyy", 3);
public static final String DATE_SEPARATOR = "/";
private final int calendarField;
private final String dateFormatPattern;
private final DateFormat dateFormat;
private final int notAggregableBefore;
private AggregationType(int calendarField, String dateFormatPattern, int notAggregableBefore) {
this.calendarField = calendarField;
this.dateFormatPattern=dateFormatPattern;
this.dateFormat = Utility.getUTCDateFormat(dateFormatPattern);
this.notAggregableBefore = notAggregableBefore;
}
public int getCalendarField() {
return calendarField;
}
public String getDateFormatPattern() {
return dateFormatPattern;
}
public DateFormat getDateFormat() {
return dateFormat;
}
public int getNotAggregableBefore(){
return notAggregableBefore;
}
}
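For illustration (a sketch; the actual output depends on the Date value), the three granularities format aggregation keys as follows:

Date date = new Date();
AggregationType.DAILY.getDateFormat().format(date); // e.g. "2017/09/05"
AggregationType.MONTHLY.getDateFormat().format(date); // e.g. "2017/09"
AggregationType.YEARLY.getDateFormat().format(date); // e.g. "2017"
// notAggregableBefore: DAILY data become aggregable after 7 days, MONTHLY
// after 3 months, YEARLY after 3 years (see Elaborator.isAggregationAllowed()).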


@@ -0,0 +1,178 @@
package org.gcube.accounting.aggregator.aggregation;
import java.io.File;
import java.text.DateFormat;
import java.util.Calendar;
import java.util.List;
import java.util.UUID;
import org.gcube.accounting.aggregator.status.AggregationState;
import org.gcube.accounting.aggregator.status.AggregationStatus;
import org.gcube.accounting.aggregator.utility.Constant;
import org.gcube.accounting.aggregator.utility.Utility;
import org.gcube.accounting.datamodel.AggregatedUsageRecord;
import org.gcube.documentstore.records.AggregatedRecord;
import org.gcube.documentstore.records.DSMapper;
import org.gcube.documentstore.records.RecordUtility;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.document.JsonDocument;
import com.couchbase.client.java.document.json.JsonArray;
import com.couchbase.client.java.document.json.JsonObject;
import com.couchbase.client.java.view.ViewQuery;
import com.couchbase.client.java.view.ViewResult;
import com.couchbase.client.java.view.ViewRow;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public class Aggregator {
private static Logger logger = LoggerFactory.getLogger(Aggregator.class);
private static final String TMP_SUFFIX = ".tmp";
protected final AggregationStatus aggregationStatus;
protected final Bucket bucket;
protected final File originalRecordsbackupFile;
protected final File aggregateRecordsBackupFile;
protected Calendar startTime;
public Aggregator(AggregationStatus aggregationStatus, Bucket bucket, File originalRecordsbackupFile, File aggregateRecordsBackupFile) {
this.aggregationStatus = aggregationStatus;
this.bucket = bucket;
this.originalRecordsbackupFile = originalRecordsbackupFile;
this.aggregateRecordsBackupFile = aggregateRecordsBackupFile;
}
public void aggregate() throws Exception {
if(AggregationState.canContinue(aggregationStatus.getAggregationState(),AggregationState.STARTED)) {
startTime = Utility.getUTCCalendarInstance();
ViewResult viewResult = getViewResult();
retrieveAndAggregate(viewResult);
}
}
/**
* Generates the composite JsonArray key used to query the map-reduce view.
* @param key a '/'-separated date string
* @return the corresponding JsonArray key
*/
protected JsonArray generateKey(String key){
JsonArray arrayKey = JsonArray.create();
for (String value : key.split("/")){
if (!value.isEmpty()){
arrayKey.add(Integer.parseInt(value));
}
}
return arrayKey;
}
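// Worked example: for a DAILY aggregation of 2017/09/05, the formatted date
// "2017/09/05" becomes the JsonArray [2017, 9, 5] (Integer.parseInt drops the
// leading zero of "05"), matching the [year, month, day] composite keys
// emitted by the map function of the Couchbase view.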
protected ViewResult getViewResult() throws Exception {
DateFormat dateFormat = aggregationStatus.getAggregationInfo().getAggregationType().getDateFormat();
String dateStartKey = dateFormat.format(aggregationStatus.getAggregationInfo().getAggregationStartDate());
String dateEndKey = dateFormat.format(aggregationStatus.getAggregationInfo().getAggregationEndDate());
JsonArray startKey = generateKey(dateStartKey);
JsonArray endKey = generateKey(dateEndKey);
DesignID designid = DesignID.valueOf(bucket.name());
String designDocId = designid.getDesignName();
String viewName = designid.getViewName();
ViewQuery query = ViewQuery.from(designDocId, viewName);
query.startKey(startKey);
query.endKey(endKey);
query.reduce(false);
query.inclusiveEnd(false);
logger.debug("View Query: designDocId:{} - viewName:{}, startKey:{} - endKey:{} ",
designDocId, viewName, startKey, endKey);
try {
return bucket.query(query);
} catch (Exception e) {
logger.error("Exception error VIEW", e.getLocalizedMessage(), e);
throw e;
}
}
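// Note: since the aggregation end date is one period after the start date and
// inclusiveEnd is false, a DAILY aggregation of 2017/09/05 queries the view
// with startKey [2017, 9, 5] and endKey [2017, 9, 6], selecting exactly the
// records of that single day.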
protected void retrieveAndAggregate(ViewResult viewResult) throws Exception {
AggregatorBuffer aggregatorBuffer = new AggregatorBuffer();
Calendar start = Utility.getUTCCalendarInstance();
logger.debug("Elaboration of Records started at {}", Constant.DEFAULT_DATE_FORMAT.format(start.getTime()));
originalRecordsbackupFile.delete();
aggregateRecordsBackupFile.delete();
int originalRecordsCounter = 0;
for (ViewRow row : viewResult) {
String record = row.document().content().toString();
// Back up the record to a local file
Utility.printLine(originalRecordsbackupFile, record);
// Aggregate the Record
aggregateRow(aggregatorBuffer, record);
++originalRecordsCounter;
if(originalRecordsCounter%1000==0){
int aggregatedRecordsNumber = aggregatorBuffer.getAggregatedRecords().size();
int diff = originalRecordsCounter - aggregatedRecordsNumber;
float percentage = (100f * diff) / originalRecordsCounter;
logger.info("{} Elaborated {} original records so far. They aggregate into {} records. Difference {}. We are saving {}% of documents",
aggregationStatus.getAggregationInfo(), originalRecordsCounter, aggregatedRecordsNumber, diff, percentage);
}
}
Calendar end = Utility.getUTCCalendarInstance();
long duration = end.getTimeInMillis() - start.getTimeInMillis();
String durationForHuman = Utility.getHumanReadableDuration(duration);
logger.debug("{} Elaboration of Records terminated at {}. Duration {}",
aggregationStatus.getAggregationInfo(), Constant.DEFAULT_DATE_FORMAT.format(end.getTime()), durationForHuman);
File aggregateRecordsBackupFileTmp = new File(aggregateRecordsBackupFile.getParent(),
aggregateRecordsBackupFile.getName() + TMP_SUFFIX);
aggregateRecordsBackupFileTmp.delete();
// Save the aggregated records to the local file
logger.debug("Going to save {} to file {}", AggregatedUsageRecord.class.getSimpleName(),
aggregateRecordsBackupFile);
List<AggregatedRecord<?, ?>> aggregatedRecords = aggregatorBuffer.getAggregatedRecords();
for (AggregatedRecord<?, ?> aggregatedRecord : aggregatedRecords) {
String marshalled = DSMapper.marshal(aggregatedRecord);
JsonObject jsonObject = JsonObject.fromJson(marshalled);
Utility.printLine(aggregateRecordsBackupFileTmp, jsonObject.toString());
}
aggregateRecordsBackupFileTmp.renameTo(aggregateRecordsBackupFile);
aggregationStatus.setRecordNumbers(originalRecordsCounter, aggregatedRecords.size());
aggregationStatus.setState(AggregationState.AGGREGATED, startTime, true);
}
protected void aggregateRow(AggregatorBuffer aggregatorBuffer, String json) throws Exception {
@SuppressWarnings("rawtypes")
AggregatedRecord record = (AggregatedRecord) RecordUtility.getRecord(json);
record.setId(UUID.randomUUID().toString());
aggregatorBuffer.aggregate(record);
}
protected JsonDocument getJsonDocument(ViewRow row) {
String identifier = (String) row.document().content().get("id");
JsonDocument jsonDocument = JsonDocument.create(identifier, row.document().content());
logger.trace("{}", jsonDocument.toString());
return jsonDocument;
}
}


@@ -0,0 +1,95 @@
package org.gcube.accounting.aggregator.aggregation;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.List;
import org.gcube.documentstore.exception.InvalidValueException;
import org.gcube.documentstore.exception.NotAggregatableRecordsExceptions;
import org.gcube.documentstore.records.AggregatedRecord;
import org.gcube.documentstore.records.Record;
import org.gcube.documentstore.records.RecordUtility;
import org.gcube.documentstore.records.aggregation.AggregationUtility;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author Alessandro Pieve (ISTI - CNR)
* @author Luca Frosini (ISTI - CNR)
*/
public class AggregatorBuffer {
public static Logger logger = LoggerFactory.getLogger(AggregatorBuffer.class);
protected List<AggregatedRecord<?, ?>> aggregatedRecords;
public AggregatorBuffer() {
aggregatedRecords = new ArrayList<AggregatedRecord<?, ?>>();
}
@SuppressWarnings("rawtypes")
protected static AggregatedRecord instantiateAggregatedRecord(Record record) throws Exception {
String recordType = record.getRecordType();
Class<? extends AggregatedRecord> clz = RecordUtility.getAggregatedRecordClass(recordType);
Class[] argTypes = { record.getClass() };
Constructor<? extends AggregatedRecord> constructor = clz.getDeclaredConstructor(argTypes);
Object[] arguments = { record };
return constructor.newInstance(arguments);
}
@SuppressWarnings("rawtypes")
public static AggregatedRecord getAggregatedRecord(Record record) throws Exception {
AggregatedRecord aggregatedRecord;
if (record instanceof AggregatedRecord) {
// the record is already an aggregated version
aggregatedRecord = (AggregatedRecord) record;
} else {
aggregatedRecord = instantiateAggregatedRecord(record);
}
return aggregatedRecord;
}
@SuppressWarnings({ "rawtypes", "unchecked" })
protected void madeAggregation(AggregatedRecord<?, ?> record) throws InvalidValueException {
boolean found = false;
for (AggregatedRecord aggregatedRecord : aggregatedRecords) {
if (!(aggregatedRecord instanceof AggregatedRecord)) {
continue;
}
AggregationUtility aggregationUtility = new AggregationUtility(aggregatedRecord);
// verify a record is aggregable
if (aggregationUtility.isAggregable(record)) {
try {
Calendar aggregatedRecordCreationTime = aggregatedRecord.getCreationTime();
Calendar recordCreationTime = record.getCreationTime();
Calendar creationtime = aggregatedRecordCreationTime.before(recordCreationTime) ? aggregatedRecordCreationTime : recordCreationTime;
aggregatedRecord.aggregate((AggregatedRecord) record);
// Patch to keep the earliest creation time
aggregatedRecord.setCreationTime(creationtime);
found = true;
break;
} catch (NotAggregatableRecordsExceptions e) {
logger.debug("{} is not usable for aggregation", aggregatedRecord);
}
}
}
if (!found) {
aggregatedRecords.add(record);
return;
}
}
public List<AggregatedRecord<?, ?>> getAggregatedRecords() {
return aggregatedRecords;
}
public void aggregate(AggregatedRecord<?, ?> record) throws Exception {
if (record != null) {
madeAggregation(record);
}
}
}


@@ -0,0 +1,31 @@
package org.gcube.accounting.aggregator.aggregation;
/**
* @author Alessandro Pieve (ISTI - CNR)
* @author Luca Frosini (ISTI - CNR)
*/
public enum DesignID {
accounting_storage("StorageUsageRecordAggregated","all"),
accounting_service("ServiceUsageRecordAggregated","all"),
accounting_portlet("PortletUsageRecordAggregated","all"),
accounting_job("JobUsageRecordAggregated","all"),
accounting_task("TaskUsageRecordAggregated","all");
private String designName;
private String viewName;
private DesignID(String designName, String viewName) {
this.designName = designName;
this.viewName = viewName;
}
public String getDesignName() {
return designName;
}
public String getViewName() {
return viewName;
}
}
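Usage sketch (mirroring Aggregator.getViewResult() above): the design document and view are resolved from the bucket name, e.g.:

DesignID designid = DesignID.valueOf("accounting_service");
String designDocId = designid.getDesignName(); // "ServiceUsageRecordAggregated"
String viewName = designid.getViewName(); // "all"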


@@ -1,27 +0,0 @@
package org.gcube.accounting.aggregator.configuration;
/**
* @author Alessandro Pieve (ISTI - CNR)
*
*/
public class ConfigurationServiceEndpoint {
// Static keys for the configuration read from the ServiceEndpoint
public static final String URL_PROPERTY_KEY = "URL";
public static final String PASSWORD_PROPERTY_KEY = "password";
public static final String BUCKET_NAME_PROPERTY_KEY = "bucketName";
public static final String BUCKET_STORAGE_NAME_PROPERTY_KEY="AggregatedStorageUsageRecord";
public static final String BUCKET_SERVICE_NAME_PROPERTY_KEY="AggregatedServiceUsageRecord";
public static final String BUCKET_PORTLET_NAME_PROPERTY_KEY="AggregatedPortletUsageRecord";
public static final String BUCKET_JOB_NAME_PROPERTY_KEY="AggregatedJobUsageRecord";
public static final String BUCKET_TASK_NAME_PROPERTY_KEY="AggregatedTaskUsageRecord";
public static final String BUCKET_STORAGE_TYPE="StorageUsageRecord";
public static final String BUCKET_SERVICE_TYPE="ServiceUsageRecord";
public static final String BUCKET_PORTLET_TYPE="PortletUsageRecord";
public static final String BUCKET_JOB_TYPE="JobUsageRecord";
public static final String BUCKET_TASK_TYPE="TaskUsageRecord";
}


@@ -1,33 +0,0 @@
package org.gcube.accounting.aggregator.configuration;
/**
* @author Alessandro Pieve (ISTI - CNR)
*
*/
public class Constant {
// Constants for file generation and backup
public static String user=null;
public static String NAME_DIR_BACKUP=".aggregatorPlugin";
public static String PATH_DIR_BACKUP="backup";
public final static String HOME_SYSTEM_PROPERTY = "user.home";
public static String PATH_DIR_BACKUP_INSERT="backup/insert";
public static String PATH_DIR_BACKUP_DELETE="backup/delete";
// file with the records to delete before inserting the new aggregated ones
public static final String FILE_RECORD_NO_AGGREGATE="no_aggregated";
// temporary file for the new aggregated records to insert
public static final String FILE_RECORD_AGGREGATE="aggregated";
public static final Integer CONNECTION_TIMEOUT=15;
public static final Integer NUM_RETRY=6;
public static final Integer CONNECTION_TIMEOUT_BUCKET=15;
public static final Integer VIEW_TIMEOUT_BUCKET=120;
public static final Integer MAX_REQUEST_LIFE_TIME=120;
}


@@ -1,109 +0,0 @@
package org.gcube.accounting.aggregator.configuration;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.couchbase.client.java.document.JsonDocument;
/**
* @author Alessandro Pieve (ISTI - CNR)
*
*/
public class ManagementFileBackup {
private static Logger logger = LoggerFactory.getLogger(ManagementFileBackup.class);
/**
* The singleton instance of ManagementFileBackup.
*/
private static ManagementFileBackup instance;
/**
* Constructs a new ManagementFileBackup
*/
private ManagementFileBackup(){
File DirIns = new File(Constant.PATH_DIR_BACKUP_INSERT);
if (!DirIns.exists()) {
DirIns.mkdir();
}
File DirDel = new File(Constant.PATH_DIR_BACKUP_DELETE);
if (!DirDel.exists()) {
DirDel.mkdir();
}
}
public static ManagementFileBackup getInstance() {
if (instance == null) {
instance = new ManagementFileBackup();
}
return instance;
}
/**
* Writes the given documents to a file, for later recovery operations.
* @param listJson the documents to write
* @param nameFile the target file name
* @param type true to write into the insert-recovery directory, false for the delete-recovery one
* @return true on success, false otherwise
*/
public boolean onCreateStringToFile(List<JsonDocument> listJson,String nameFile, Boolean type){
try {
File file;
if (type)
file = new File(Constant.PATH_DIR_BACKUP_INSERT+"/"+nameFile.replace(",", "_"));
else
file =new File(Constant.PATH_DIR_BACKUP_DELETE+"/"+nameFile.replace(",", "_"));
BufferedWriter writer = new BufferedWriter(new FileWriter(file,true));
writer.write("{\"new_edits\":false,\"docs\":[");
writer.newLine();
int count=1;
for (JsonDocument row:listJson) {
if (count==listJson.size())
writer.write(row.content().toString());
else
writer.write(row.content().toString()+",");
writer.newLine();
count++;
}
writer.write("]}");
writer.newLine();
writer.close();
} catch (Exception e) {
logger.error(e.getLocalizedMessage());
return false;
}
return true;
}
/**
* Deletes a temporary recovery file.
* @param nameFile the file name
* @param type true for the insert-recovery directory, false for the delete-recovery one
* @return true on success, false otherwise
*/
public boolean onDeleteFile(String nameFile, Boolean type){
try {
File file;
if (type)
file = new File(Constant.PATH_DIR_BACKUP_INSERT+"/"+nameFile.replace(",", "_"));
else
file = new File(Constant.PATH_DIR_BACKUP_DELETE+"/"+nameFile.replace(",", "_"));
file.delete();
} catch (Exception e) {
logger.error(e.getLocalizedMessage());
return false;
}
return true;
}
}


@@ -0,0 +1,33 @@
package org.gcube.accounting.aggregator.directory;
import java.text.DateFormat;
import java.util.Date;
import org.gcube.accounting.aggregator.aggregation.AggregationType;
import org.gcube.accounting.aggregator.plugin.AccountingAggregatorPluginDeclaration;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public abstract class DirectoryStructure<D> {
public D getTargetFolder(AggregationType aggregationType, Date aggregationStartDate) throws Exception {
D root = getRoot();
D aggregatorPluginDirectory = createDirectory(root, AccountingAggregatorPluginDeclaration.NAME);
D aggregationTypeDirectory = createDirectory(aggregatorPluginDirectory, aggregationType.name());
DateFormat dateFormat = aggregationType.getDateFormat();
String dateString = dateFormat.format(aggregationStartDate);
String[] splittedDate = dateString.split(AggregationType.DATE_SEPARATOR);
D d = aggregationTypeDirectory;
// length-1 because the last part is used as the file name
for(int i=0; i<(splittedDate.length-1); i++){
d = createDirectory(d, splittedDate[i]);
}
return d;
}
protected abstract D getRoot() throws Exception;
protected abstract D createDirectory(D parent, String name) throws Exception;
}
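A worked example (assuming AccountingAggregatorPluginDeclaration.NAME is the plugin name): for a DAILY aggregation starting on 2017/09/05, getTargetFolder() creates and returns <root>/<plugin-name>/DAILY/2017/09; the last date component, 05, is left to the caller as the file name prefix (see Elaborator.getOriginalRecordsBackupFile()).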


@@ -0,0 +1,24 @@
package org.gcube.accounting.aggregator.directory;
import java.io.File;
import org.gcube.accounting.aggregator.utility.Constant;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public class FileSystemDirectoryStructure extends DirectoryStructure<File> {
@Override
protected File getRoot() throws Exception {
return Constant.ROOT_DIRECTORY;
}
@Override
protected File createDirectory(File parent, String name) throws Exception {
File directory = new File(parent, name);
directory.mkdirs();
return directory;
}
}


@@ -0,0 +1,23 @@
package org.gcube.accounting.aggregator.directory;
import org.gcube.accounting.aggregator.workspace.WorkSpaceManagement;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public class WorkSpaceDirectoryStructure extends DirectoryStructure<String>{
private static final String BACKUP_FOLDER_DESCRIPTION = "Accounting Aggregator Plugin Backup Folder";
@Override
protected String getRoot() throws Exception {
return WorkSpaceManagement.getHome();
}
@Override
protected String createDirectory(String parent, String name) throws Exception {
return WorkSpaceManagement.createFolder(parent, name, BACKUP_FOLDER_DESCRIPTION);
}
}


@@ -0,0 +1,96 @@
package org.gcube.accounting.aggregator.elaboration;
import java.util.Date;
import org.gcube.accounting.aggregator.aggregation.AggregationInfo;
import org.gcube.accounting.aggregator.aggregation.AggregationType;
import org.gcube.accounting.aggregator.persistence.CouchBaseConnector;
import org.gcube.accounting.aggregator.status.AggregationStatus;
import org.gcube.accounting.aggregator.utility.Constant;
import org.gcube.accounting.aggregator.utility.Utility;
import org.gcube.accounting.datamodel.UsageRecord;
import org.gcube.documentstore.records.DSMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class AggregatorManager {
private static Logger logger = LoggerFactory.getLogger(AggregatorManager.class);
public final static String ACCOUNTING_MANAGER_BUCKET_NAME = "AccountingManager";
protected AggregationType aggregationType;
protected Date aggregationStartDate;
protected boolean restartFromLastAggregationDate;
public AggregatorManager(AggregationType aggregationType, boolean restartFromLastAggregationDate,
Date aggregationStartDate) throws Exception {
this.aggregationType = aggregationType;
if (aggregationStartDate != null) {
this.aggregationStartDate = aggregationStartDate;
}
this.restartFromLastAggregationDate = restartFromLastAggregationDate;
}
protected Date getEndDateFromStartDate() {
return Utility.getEndDateFromStartDate(aggregationType, aggregationStartDate, 1);
}
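// Assumed semantics of Utility.getEndDateFromStartDate (not shown in this
// diff): it adds one unit of the aggregation granularity, e.g. for
// AggregationType.DAILY and start date 2017-09-05 it returns 2017-09-06,
// so each elaboration covers exactly one aggregation period.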
protected AggregationStatus createAggregationStatus(String recordType) throws Exception {
Date aggregationEndDate = getEndDateFromStartDate();
AggregationInfo aggregationInfo = new AggregationInfo(recordType, aggregationType, aggregationStartDate,
aggregationEndDate);
AggregationStatus aggregationStatus = new AggregationStatus(aggregationInfo);
return aggregationStatus;
}
public void elaborate(Date persistStartTime, Date persistEndTime,
Class<? extends UsageRecord> usageRecordClass) throws Exception {
CouchBaseConnector couchBaseConnector = CouchBaseConnector.getInstance();
for (String recordType : couchBaseConnector.getRecordTypes()) {
if (usageRecordClass != null && usageRecordClass.newInstance().getRecordType().compareTo(recordType) != 0) {
continue;
}
if (recordType.compareTo(ACCOUNTING_MANAGER_BUCKET_NAME) == 0) {
continue;
}
AggregationStatus aggregationStatus = null;
if (restartFromLastAggregationDate) {
AggregationStatus lastAggregationStatus = AggregationStatus.getLast(recordType, aggregationType);
// We don't check whether this aggregation is COMPLETED because that
// is the responsibility of the Recovery Process
if(lastAggregationStatus!=null){
this.aggregationStartDate = lastAggregationStatus.getAggregationInfo().getAggregationEndDate();
logger.info("Last got AggregationStatus is {}. Restarting from {}",
DSMapper.getObjectMapper().writeValueAsString(lastAggregationStatus),
Constant.DEFAULT_DATE_FORMAT.format(aggregationStartDate));
}
} else {
aggregationStatus = AggregationStatus.getAggregationStatus(recordType, aggregationType,
aggregationStartDate);
}
if (aggregationStatus == null) {
aggregationStatus = createAggregationStatus(recordType);
}
Elaborator elaborator = new Elaborator(aggregationStatus, persistStartTime, persistEndTime);
elaborator.elaborate();
}
}
}


@@ -0,0 +1,166 @@
package org.gcube.accounting.aggregator.elaboration;
import java.io.File;
import java.text.DateFormat;
import java.util.Calendar;
import java.util.Date;
import org.gcube.accounting.aggregator.aggregation.AggregationInfo;
import org.gcube.accounting.aggregator.aggregation.AggregationType;
import org.gcube.accounting.aggregator.aggregation.Aggregator;
import org.gcube.accounting.aggregator.directory.FileSystemDirectoryStructure;
import org.gcube.accounting.aggregator.persist.Persist;
import org.gcube.accounting.aggregator.persistence.CouchBaseConnector;
import org.gcube.accounting.aggregator.persistence.CouchBaseConnector.SUFFIX;
import org.gcube.accounting.aggregator.plugin.AccountingAggregatorPlugin;
import org.gcube.accounting.aggregator.status.AggregationState;
import org.gcube.accounting.aggregator.status.AggregationStatus;
import org.gcube.accounting.aggregator.utility.Utility;
import org.gcube.documentstore.records.DSMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.couchbase.client.java.Bucket;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public class Elaborator {
private static Logger logger = LoggerFactory.getLogger(Elaborator.class);
public final static String ORIGINAL_SUFFIX = ".original.json";
public final static String AGGREGATED_SUFFIX = ".aggregated.json";
protected final AggregationStatus aggregationStatus;
protected final Date persistStartTime;
protected final Date persistEndTime;
public Elaborator(AggregationStatus aggregationStatus, Date persistStartTime, Date persistEndTime) throws Exception {
this.aggregationStatus = aggregationStatus;
this.persistStartTime = persistStartTime;
this.persistEndTime = persistEndTime;
}
public boolean isAggregationAllowed(){
AggregationInfo aggregationInfo = aggregationStatus.getAggregationInfo();
Date aggregationStartDate = aggregationInfo.getAggregationStartDate();
AggregationType aggregationType = aggregationInfo.getAggregationType();
boolean allowed = false;
Calendar calendar = Utility.getUTCCalendarInstance();
switch (aggregationType) {
case DAILY:
break;
case MONTHLY:
calendar.set(Calendar.DAY_OF_MONTH, 1);
break;
case YEARLY:
calendar.set(Calendar.DAY_OF_MONTH, 1);
calendar.set(Calendar.MONTH, Calendar.JANUARY);
break;
default:
break;
}
calendar.set(Calendar.HOUR_OF_DAY, 0);
calendar.set(Calendar.MINUTE, 0);
calendar.set(Calendar.SECOND, 0);
calendar.set(Calendar.MILLISECOND, 0);
calendar.add(aggregationType.getCalendarField(), -aggregationType.getNotAggregableBefore());
logger.trace("Checking if {} is before {}",
aggregationType.getDateFormat().format(aggregationStartDate),
aggregationType.getDateFormat().format(calendar.getTime()));
if(aggregationStartDate.before(calendar.getTime())){
allowed = true;
}
return allowed;
}
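// Worked example: for DAILY (notAggregableBefore = 7) executed on 2017-09-05,
// the calendar is truncated to midnight UTC and moved back 7 days, giving
// 2017-08-29; a start date of 2017-08-28 is aggregable, 2017-08-30 is not.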
public void elaborate() throws Exception {
Calendar startTime = Utility.getUTCCalendarInstance();
AggregationInfo aggregationInfo = aggregationStatus.getAggregationInfo();
Date aggregationStartDate = aggregationInfo.getAggregationStartDate();
AggregationType aggregationType = aggregationInfo.getAggregationType();
if(!isAggregationAllowed()){
logger.info("Too early to start aggregation {}. {} Aggregation is not allowed for the last {} {}",
DSMapper.getObjectMapper().writeValueAsString(aggregationStatus),
aggregationType,
aggregationType.getNotAggregableBefore(),
aggregationType.name().toLowerCase().replace("ly", "s").replaceAll("dais", "days"));
return;
}
if(aggregationStatus.getAggregationState()==null){
aggregationStatus.setState(AggregationState.STARTED, startTime, true);
}
if(aggregationStatus.getAggregationState()==AggregationState.COMPLETED){
logger.info("{} is {}. Nothing to do :-). \n Details {}",
AggregationStatus.class.getSimpleName(),
aggregationStatus.getAggregationState(),
DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
return;
}
String recordType = aggregationInfo.getRecordType();
FileSystemDirectoryStructure fileSystemDirectoryStructure = new FileSystemDirectoryStructure();
File elaborationDirectory = fileSystemDirectoryStructure.getTargetFolder(aggregationType, aggregationStartDate);
Bucket srcBucket = CouchBaseConnector.getInstance().getBucket(recordType, aggregationInfo.getAggregationType(), SUFFIX.src);
Bucket dstBucket = CouchBaseConnector.getInstance().getBucket(recordType, aggregationInfo.getAggregationType(), SUFFIX.dst);
File originalRecordsbackupFile = getOriginalRecordsBackupFile(elaborationDirectory, recordType);
File aggregateRecordsBackupFile = getAggregatedRecordsBackupFile(originalRecordsbackupFile);
Aggregator aggregator = new Aggregator(aggregationStatus, srcBucket, originalRecordsbackupFile,
aggregateRecordsBackupFile);
aggregator.aggregate();
Calendar now = Utility.getUTCCalendarInstance();
/*
* 'now' is passed as argument to the isTimeElapsed function to avoid the
* (rare) situation where both checks succeed because the first invocation
* happens before midnight and the second one after midnight (i.e. on the next day).
*/
if (Utility.isTimeElapsed(now, persistStartTime) && !Utility.isTimeElapsed(now, persistEndTime)) {
Persist persist = new Persist(aggregationStatus, srcBucket, dstBucket, originalRecordsbackupFile, aggregateRecordsBackupFile);
persist.recover();
}else{
logger.info("Cannot delete/insert document before {} and after {}.", AccountingAggregatorPlugin.LOCAL_TIME_DATE_FORMAT.format(persistStartTime), AccountingAggregatorPlugin.LOCAL_TIME_DATE_FORMAT.format(persistEndTime));
}
}
protected File getOriginalRecordsBackupFile(File elaborationDirectory, String name) throws Exception {
AggregationInfo aggregationInfo = aggregationStatus.getAggregationInfo();
Date aggregationStartDate = aggregationInfo.getAggregationStartDate();
AggregationType aggregationType = aggregationInfo.getAggregationType();
DateFormat dateFormat = aggregationType.getDateFormat();
String dateString = dateFormat.format(aggregationStartDate);
String[] splittedDate = dateString.split(AggregationType.DATE_SEPARATOR);
String backupFileName = splittedDate[splittedDate.length-1] + "-" +name;
File originalRecordsbackupFile = new File(elaborationDirectory, backupFileName + ORIGINAL_SUFFIX);
return originalRecordsbackupFile;
}
protected File getAggregatedRecordsBackupFile(File originalRecordsbackupFile) throws Exception {
File aggregateRecordsBackupFile = new File(originalRecordsbackupFile.getParentFile(),
originalRecordsbackupFile.getName().replace(ORIGINAL_SUFFIX, AGGREGATED_SUFFIX));
return aggregateRecordsBackupFile;
}
}


@@ -0,0 +1,38 @@
package org.gcube.accounting.aggregator.elaboration;
import java.util.Date;
import java.util.List;
import org.gcube.accounting.aggregator.persistence.CouchBaseConnector;
import org.gcube.accounting.aggregator.status.AggregationStatus;
import org.gcube.documentstore.records.DSMapper;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class RecoveryManager {
private static Logger logger = LoggerFactory.getLogger(RecoveryManager.class);
protected final Date persistStartTime;
protected final Date persistEndTime;
public RecoveryManager(Date persistStartTime, Date persistEndTime){
super();
this.persistStartTime = persistStartTime;
this.persistEndTime = persistEndTime;
}
public void recovery() throws Exception {
List<AggregationStatus> aggregationStatusList = CouchBaseConnector.getUnterminated();
if(aggregationStatusList.size()==0){
logger.info("Nothing to recover :)");
}
for(AggregationStatus aggregationStatus : aggregationStatusList){
logger.info("Going to Recover unterminated elaboration {}", DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
Elaborator elaborator = new Elaborator(aggregationStatus, persistStartTime, persistEndTime);
elaborator.elaborate();
}
}
}


@@ -1,167 +0,0 @@
package org.gcube.accounting.aggregator.madeaggregation;
import java.lang.reflect.Constructor;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.gcube.accounting.aggregator.plugin.Utility;
import org.gcube.documentstore.exception.InvalidValueException;
import org.gcube.documentstore.exception.NotAggregatableRecordsExceptions;
import org.gcube.documentstore.records.AggregatedRecord;
import org.gcube.documentstore.records.Record;
import org.gcube.documentstore.records.RecordUtility;
import org.gcube.documentstore.records.aggregation.AggregationUtility;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.couchbase.client.java.document.JsonDocument;
import com.couchbase.client.java.document.json.JsonObject;
/**
* @author Alessandro Pieve (ISTI - CNR)
*
*/
public class Aggregation {
public static Logger logger = LoggerFactory.getLogger(Aggregation.class);
//count buffer records
protected int totalBufferedRecords;
//list Aggregate record
protected Map<String, List<AggregatedRecord<?,?>>> bufferedRecords = new HashMap<String, List<AggregatedRecord<?,?>>>();
public Aggregation() {
super();
}
@SuppressWarnings("rawtypes")
protected static AggregatedRecord instantiateAggregatedRecord(Record record) throws Exception{
String recordType = record.getRecordType();
Class<? extends AggregatedRecord> clz = RecordUtility.getAggregatedRecordClass(recordType);
Class[] argTypes = { record.getClass() };
Constructor<? extends AggregatedRecord> constructor = clz.getDeclaredConstructor(argTypes);
Object[] arguments = {record};
return constructor.newInstance(arguments);
}
@SuppressWarnings("rawtypes")
public static AggregatedRecord getAggregatedRecord(Record record) throws Exception {
AggregatedRecord aggregatedRecord;
if(record instanceof AggregatedRecord){
// the record is already an aggregated version
aggregatedRecord = (AggregatedRecord) record;
}else{
aggregatedRecord = instantiateAggregatedRecord(record);
}
return aggregatedRecord;
}
@SuppressWarnings({ "rawtypes", "unchecked" })
protected void madeAggregation(AggregatedRecord<?,?> record) throws InvalidValueException{
String recordType = record.getRecordType();
List<AggregatedRecord<?,?>> records;
if(this.bufferedRecords.containsKey(recordType)){
records = this.bufferedRecords.get(recordType);
boolean found = false;
for(AggregatedRecord bufferedRecord : records){
if(!(bufferedRecord instanceof AggregatedRecord)){
continue;
}
AggregationUtility util = new AggregationUtility(bufferedRecord);
//verify a record is aggregable
if (util.isAggregable(record)){
try {
AggregatedRecord bufferedAggregatedRecord = (AggregatedRecord) bufferedRecord;
//logger.debug("if -- madeAggregation aggregate");
bufferedAggregatedRecord.aggregate((AggregatedRecord) record);
//patch to update the creation time of the buffered record
bufferedAggregatedRecord.setCreationTime(record.getCreationTime());
found = true;
break;
} catch(NotAggregatableRecordsExceptions e) {
logger.debug("{} is not usable for aggregation", bufferedRecord);
}
}
}
if(!found){
records.add(record);
totalBufferedRecords++;
return;
}
}else{
records = new ArrayList<AggregatedRecord<?,?>>();
try {
records.add(getAggregatedRecord(record));
} catch (Exception e) {
logger.debug("pre Exception but records");
records.add(record);
logger.debug("Exception but records Add e:{}",e);
}
totalBufferedRecords++;
this.bufferedRecords.put(recordType, records);
}
}
/**
* Resets the records buffer
*/
protected void clear(){
totalBufferedRecords=0;
bufferedRecords.clear();
}
/**
* Flushes the buffered records, converting them into JsonDocuments ready to be persisted.
* @return the list of JsonDocuments to persist, or null if the buffer is empty
* @throws Exception if the conversion fails
*/
public List<JsonDocument> reallyFlush() throws Exception{
if(totalBufferedRecords==0){
return null;
}
List<JsonDocument> listDocumentToPersist = new ArrayList<JsonDocument>();
Collection<List<AggregatedRecord<?,?>>> collectionValuesRecord = bufferedRecords.values();
for(List<AggregatedRecord<?,?>> records : collectionValuesRecord){
for(Record thisRecord: records){
String id=thisRecord.getId();
JsonObject accounting = JsonObject.empty();
for (String key : thisRecord.getResourceProperties().keySet()){
Object value=thisRecord.getResourceProperty(key);
if (!Utility.checkType(value))
value = value.toString();
accounting.put(key, value);
}
JsonDocument document = JsonDocument.create(id, accounting);
listDocumentToPersist.add(document);
}
}
clear();
return listDocumentToPersist;
}
/**
* Gets a usage record and tries to aggregate it with the other
* buffered usage records.
* @param record the usage record to buffer
* @throws Exception if the aggregation fails
*/
public void aggregate(AggregatedRecord<?,?> record) throws Exception {
if(record!=null){
//logger.debug("aggregate:{}",record.toString());
madeAggregation(record);
}
}
}


@@ -1,40 +0,0 @@
package org.gcube.accounting.aggregator.madeaggregation;
import java.util.Calendar;
/**
* @author Alessandro Pieve (ISTI - CNR)
*
*/
public enum AggregationType {
HOURLY(Calendar.MINUTE, 60, "yyyy,M,d,H,m"),
DAILY(Calendar.HOUR, 24,"yyyy,M,d,H"),
MONTHLY(Calendar.DAY_OF_MONTH,31, "yyyy,M,d"),
YEARLY(Calendar.MONTH,12,"yyyy,M");
private int calendarField;
private int multiplierFactor;
private String dateformat;
private AggregationType(int calendarField, int multiplierFactor, String dateFormat) {
this.calendarField = calendarField;
this.multiplierFactor = multiplierFactor;
this.dateformat=dateFormat;
}
public int getCalendarField() {
return calendarField;
}
public int getMultiplierFactor() {
return multiplierFactor;
}
public String getDateformat() {
return dateformat;
}
}


@@ -1,160 +0,0 @@
/**
*
*/
package org.gcube.accounting.aggregator.madeaggregation;
import java.io.Serializable;
import java.util.Calendar;
import java.util.HashSet;
import java.util.Set;
import org.gcube.documentstore.exception.NotAggregatableRecordsExceptions;
import org.gcube.documentstore.records.AggregatedRecord;
import org.gcube.documentstore.records.Record;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author Alessandro Pieve (ISTI - CNR)
*
*/
public class AggregationUtility<T extends AggregatedRecord<T,?>> {
private static final Logger logger = LoggerFactory.getLogger(AggregationUtility.class);
protected T t;
protected Set<String> aggregationFields;
protected Set<String> neededFields;
protected void setDefaultAggregationFields(){
this.aggregationFields = new HashSet<String>(t.getRequiredFields());
this.aggregationFields.removeAll(t.getAggregatedFields());
this.aggregationFields.remove(Record.ID);
this.aggregationFields.remove(Record.CREATION_TIME);
this.aggregationFields.remove(AggregatedRecord.OPERATION_COUNT);
this.aggregationFields.remove(AggregatedRecord.AGGREGATED);
this.aggregationFields.remove(AggregatedRecord.START_TIME);
this.aggregationFields.remove(AggregatedRecord.END_TIME);
}
protected void setDefaultNeededFields(){
this.neededFields = new HashSet<String>(t.getRequiredFields());
this.neededFields.addAll(t.getAggregatedFields());
this.neededFields.add(AggregatedRecord.OPERATION_COUNT);
this.neededFields.add(AggregatedRecord.AGGREGATED);
this.neededFields.add(AggregatedRecord.START_TIME);
this.neededFields.add(AggregatedRecord.END_TIME);
}
public AggregationUtility(T t){
this.t = t;
setDefaultAggregationFields();
setDefaultNeededFields();
}
/**
* This function is used to set the Set of Aggregation Fields.
* By default this Set is composed of the Required Fields for lossless
* aggregation. If you want to perform lossy aggregation, set this Set
* consistently with NeededFields using {@link #setNeededFields}
* @param aggregationFields
*/
public void setAggregationFields(Set<String> aggregationFields){
this.aggregationFields = aggregationFields;
}
/**
* This function is used to set the Set of Needed Fields to keep after
* aggregation. All other fields are removed.
* By default this Set is composed of the Required Fields and the AggregationFields
* for lossless aggregation. If you want to perform lossy aggregation, set
* this Set consistently with AggregationFields using
* {@link #setAggregationFields}
* @param neededFields
*/
public void setNeededFields(Set<String> neededFields){
this.neededFields = neededFields;
}
/**
* Check if the record provided as argument is aggregable with the one
* provided to the Constructor.
* This is done comparing the value of each AggregationFields
* @param record to check
* @return true if the record provided as argument is aggregable with the
* one provided to the Constructor. False otherwise.
*/
@SuppressWarnings("unchecked")
public boolean isAggregable(T record) {
for(String field : aggregationFields){
Serializable recordValue = record.getResourceProperty(field);
Serializable thisValue = t.getResourceProperty(field);
if(recordValue instanceof Comparable && thisValue instanceof Comparable){
@SuppressWarnings("rawtypes")
Comparable recordValueComparable = (Comparable) recordValue;
@SuppressWarnings("rawtypes")
Comparable thisValueComparable = (Comparable) thisValue;
if(recordValueComparable.compareTo(thisValueComparable)!=0){
logger.trace("isAggregable {} != {}", recordValueComparable, thisValueComparable);
return false;
}
}else{
if(recordValue.hashCode()!=thisValue.hashCode()){
return false;
}
}
}
return true;
}
/**
* Remove all fields which are not in AggregationFields nor in
* AggregatedFields Sets
*/
protected void cleanExtraFields(){
Set<String> propertyKeys = t.getResourceProperties().keySet();
for(String propertyName : propertyKeys){
if(!neededFields.contains(propertyName)){
t.getResourceProperties().remove(propertyName);
}
}
}
public synchronized T aggregate(T record) throws NotAggregatableRecordsExceptions {
try{
if(!isAggregable(record)){
throw new NotAggregatableRecordsExceptions("The Record provided as argument has different values for field wich must be common to be aggregatable");
}
Calendar recordStartTime = record.getStartTime();
Calendar actualStartTime = t.getStartTime();
if(recordStartTime.before(actualStartTime)){
t.setStartTime(recordStartTime);
}
Calendar recordEndTime = record.getEndTime();
Calendar actualEndTime = t.getEndTime();
if(recordEndTime.after(actualEndTime)){
t.setEndTime(recordEndTime);
}
Calendar newCreationTime = Calendar.getInstance();
t.setCreationTime(newCreationTime);
t.setOperationCount(t.getOperationCount() + record.getOperationCount());
cleanExtraFields();
return t;
}catch(NotAggregatableRecordsExceptions e){
throw e;
}catch(Exception ex){
throw new NotAggregatableRecordsExceptions(ex.getCause());
}
}
}


@@ -0,0 +1,40 @@
package org.gcube.accounting.aggregator.persist;
import java.io.File;
import java.util.concurrent.TimeUnit;
import org.gcube.accounting.aggregator.persistence.CouchBaseConnector;
import org.gcube.accounting.aggregator.status.AggregationState;
import org.gcube.accounting.aggregator.status.AggregationStatus;
import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.PersistTo;
import com.couchbase.client.java.document.json.JsonObject;
import com.couchbase.client.java.error.DocumentDoesNotExistException;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public class DeleteDocument extends DocumentElaboration {
public DeleteDocument(AggregationStatus aggregationStatus, File file, Bucket bucket){
super(aggregationStatus, AggregationState.DELETED, file, bucket, aggregationStatus.getOriginalRecordsNumber());
}
@Override
protected void elaborateLine(String line) throws Exception {
JsonObject jsonObject = JsonObject.fromJson(line);
String id = jsonObject.getString(ID);
try {
bucket.remove(id, PersistTo.MASTER, CouchBaseConnector.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
}catch (DocumentDoesNotExistException e) {
// OK: this can happen when a previous delete procedure was started but then interrupted
}
}
@Override
protected void afterElaboration() {
// Nothing to do
}
}

View File

@@ -0,0 +1,96 @@
package org.gcube.accounting.aggregator.persist;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStreamReader;
import java.util.Calendar;
import org.gcube.accounting.aggregator.status.AggregationState;
import org.gcube.accounting.aggregator.status.AggregationStatus;
import org.gcube.accounting.aggregator.utility.Utility;
import org.gcube.documentstore.records.Record;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.couchbase.client.java.Bucket;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public abstract class DocumentElaboration {
protected Logger logger = LoggerFactory.getLogger(this.getClass());
protected static final String ID = Record.ID;
protected final AggregationStatus aggregationStatus;
protected final File file;
protected final Bucket bucket;
protected final AggregationState finalAggregationState;
protected final int rowToBeElaborated;
protected Calendar startTime;
protected DocumentElaboration(AggregationStatus statusManager, AggregationState finalAggregationState, File file, Bucket bucket, int rowToBeElaborated){
this.aggregationStatus = statusManager;
this.finalAggregationState = finalAggregationState;
this.file = file;
this.bucket = bucket;
this.rowToBeElaborated = rowToBeElaborated;
}
protected void readFile() throws Exception {
try {
// Open the file to elaborate
FileInputStream fstream = new FileInputStream(file);
// Wrap the stream to read it line by line
DataInputStream in = new DataInputStream(fstream);
BufferedReader br = new BufferedReader(new InputStreamReader(in));
logger.info("{} - Going to elaborate {} rows", aggregationStatus.getAggregationInfo(), rowToBeElaborated);
int tenPercentOfNumberOfRows = (rowToBeElaborated/10)+1;
int elaborated = 0;
String line;
// Read File Line By Line
while ((line = br.readLine()) != null) {
elaborateLine(line);
++elaborated;
if(elaborated % tenPercentOfNumberOfRows == 0){
int elaboratedPercentage = elaborated*100/rowToBeElaborated;
logger.info("{} - Elaborated {} rows of {} (about {}%)", aggregationStatus.getAggregationInfo(), elaborated, rowToBeElaborated, elaboratedPercentage);
}
}
logger.info("{} - Elaborated {} rows of {} ({}%)", aggregationStatus.getAggregationInfo(), elaborated, rowToBeElaborated, 100);
br.close();
in.close();
fstream.close();
} catch (Exception e) {
logger.error("Error while elaborating file {}", file.getAbsolutePath(), e);
throw e;
}
}
public void elaborate() throws Exception{
startTime = Utility.getUTCCalendarInstance();
readFile();
aggregationStatus.setState(finalAggregationState, startTime, true);
afterElaboration();
}
protected abstract void elaborateLine(String line) throws Exception;
/**
* Perform actions at the end of line by line elaboration
* @throws Exception
*/
protected abstract void afterElaboration() throws Exception;
}


@@ -0,0 +1,37 @@
package org.gcube.accounting.aggregator.persist;
import java.io.File;
import java.util.concurrent.TimeUnit;
import org.gcube.accounting.aggregator.persistence.CouchBaseConnector;
import org.gcube.accounting.aggregator.status.AggregationState;
import org.gcube.accounting.aggregator.status.AggregationStatus;
import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.PersistTo;
import com.couchbase.client.java.document.JsonDocument;
import com.couchbase.client.java.document.json.JsonObject;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public class InsertDocument extends DocumentElaboration {
public InsertDocument(AggregationStatus aggregationStatus, File file, Bucket bucket){
super(aggregationStatus, AggregationState.ADDED, file, bucket, aggregationStatus.getAggregatedRecordsNumber());
}
@Override
protected void elaborateLine(String line) throws Exception {
JsonObject jsonObject = JsonObject.fromJson(line);
String id = jsonObject.getString(ID);
JsonDocument jsonDocument = JsonDocument.create(id, jsonObject);
bucket.upsert(jsonDocument, PersistTo.MASTER, CouchBaseConnector.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
}
@Override
protected void afterElaboration() {
// Nothing to do
}
}


@@ -0,0 +1,98 @@
package org.gcube.accounting.aggregator.persist;
import java.io.File;
import java.util.Calendar;
import org.gcube.accounting.aggregator.directory.WorkSpaceDirectoryStructure;
import org.gcube.accounting.aggregator.elaboration.Elaborator;
import org.gcube.accounting.aggregator.status.AggregationState;
import org.gcube.accounting.aggregator.status.AggregationStatus;
import org.gcube.accounting.aggregator.utility.Utility;
import org.gcube.accounting.aggregator.workspace.WorkSpaceManagement;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.couchbase.client.java.Bucket;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public class Persist {
private static Logger logger = LoggerFactory.getLogger(Persist.class);
protected final AggregationStatus aggregationStatus;
protected final Bucket originalRecordBucket;
protected final Bucket aggregatedRecordBucket;
protected final File originalRecordsbackupFile;
protected final File aggregateRecordsBackupFile;
public Persist(AggregationStatus aggregationStatus,
Bucket originalRecordBucket, Bucket aggregatedRecordBucket,
File originalRecordsbackupFile, File aggregateRecordsBackupFile) {
super();
this.aggregationStatus = aggregationStatus;
this.originalRecordBucket = originalRecordBucket;
this.aggregatedRecordBucket = aggregatedRecordBucket;
this.originalRecordsbackupFile = originalRecordsbackupFile;
this.aggregateRecordsBackupFile = aggregateRecordsBackupFile;
}
private void setAggregationStateToCompleted(Calendar now) throws Exception {
originalRecordsbackupFile.delete();
aggregateRecordsBackupFile.delete();
aggregationStatus.setState(AggregationState.COMPLETED, now, true);
}
public void recover() throws Exception{
if(aggregationStatus.getAggregatedRecordsNumber()==aggregationStatus.getOriginalRecordsNumber()){
if(originalRecordBucket.name().compareTo(aggregatedRecordBucket.name())==0 || aggregationStatus.getAggregatedRecordsNumber()==0){
Calendar now = Utility.getUTCCalendarInstance();
logger.info("{} - OriginalRecords are {}. AggregatedRecords are {} ({}=={}). All records were already aggregated. The aggregation didn't had any effects and the Source and Destination Bucket are the same ({}) or the record number is 0. Setting {} to {}",
aggregationStatus.getAggregationInfo(),
aggregationStatus.getOriginalRecordsNumber(),
aggregationStatus.getAggregatedRecordsNumber(),
aggregationStatus.getOriginalRecordsNumber(),
aggregationStatus.getAggregatedRecordsNumber(),
originalRecordBucket.name(),
AggregationState.class.getSimpleName(), AggregationState.COMPLETED);
setAggregationStateToCompleted(now);
return;
}
}
if(AggregationState.canContinue(aggregationStatus.getAggregationState(),AggregationState.AGGREGATED)){
// For each original row stored in the backup file, remove the corresponding document from the Bucket.
// At the end of the elaboration set AggregationStatus to DELETED.
// Then save the backup files in the Workspace and set AggregationStatus to COMPLETED.
DeleteDocument deleteDocument = new DeleteDocument(aggregationStatus, originalRecordsbackupFile, originalRecordBucket);
deleteDocument.elaborate();
}
if(AggregationState.canContinue(aggregationStatus.getAggregationState(),AggregationState.DELETED)){
// For each aggregated row stored in the backup file, add the corresponding document to the Bucket. At the end of the elaboration set AggregationStatus to ADDED
InsertDocument insertDocument = new InsertDocument(aggregationStatus, aggregateRecordsBackupFile, aggregatedRecordBucket);
insertDocument.elaborate();
}
if(AggregationState.canContinue(aggregationStatus.getAggregationState(),AggregationState.ADDED)){
Calendar now = Utility.getUTCCalendarInstance();
WorkSpaceDirectoryStructure workspaceDirectoryStructure = new WorkSpaceDirectoryStructure();
String targetFolder = workspaceDirectoryStructure.getTargetFolder(aggregationStatus.getAggregationInfo().getAggregationType(), aggregationStatus.getAggregationInfo().getAggregationStartDate());
WorkSpaceManagement.zipAndBackupFiles(targetFolder,
originalRecordsbackupFile.getName().replace(Elaborator.ORIGINAL_SUFFIX, ""), originalRecordsbackupFile, aggregateRecordsBackupFile);
setAggregationStateToCompleted(now);
}
}
}


@@ -0,0 +1,12 @@
package org.gcube.accounting.aggregator.persistence;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public interface AggregatorPersistence {
public static final int KEY_VALUES_LIMIT = 25;
public void prepareConnection(AggregatorPersitenceConfiguration configuration) throws Exception;
}


@@ -1,16 +0,0 @@
package org.gcube.accounting.aggregator.persistence;
/**
* @author Alessandro Pieve (ISTI - CNR)
*
*/
public interface AggregatorPersistenceBackendQuery {
public static final int KEY_VALUES_LIMIT = 25;
public void prepareConnection(
AggregatorPersistenceBackendQueryConfiguration configuration)
throws Exception;
}


@@ -1,30 +0,0 @@
package org.gcube.accounting.aggregator.persistence;
import org.gcube.accounting.persistence.AccountingPersistenceConfiguration;
/**
* @author Alessandro Pieve (ISTI - CNR)
*
*/
public class AggregatorPersistenceBackendQueryConfiguration extends AccountingPersistenceConfiguration {
/**
* Default Constructor
*/
public AggregatorPersistenceBackendQueryConfiguration(){
super();
}
/**
* @param persistence the class of the persistence to instantiate
* @throws Exception if fails
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public AggregatorPersistenceBackendQueryConfiguration(Class<?> persistence) throws Exception{
super((Class) persistence);
}
}


@@ -0,0 +1,28 @@
package org.gcube.accounting.aggregator.persistence;
import org.gcube.accounting.persistence.AccountingPersistenceConfiguration;
/**
* @author Alessandro Pieve (ISTI - CNR)
*/
public class AggregatorPersitenceConfiguration extends AccountingPersistenceConfiguration {
/**
* Default Constructor
*/
public AggregatorPersitenceConfiguration() {
super();
}
/**
* @param persistence
* The class of the persistence to instantiate
* @throws Exception
* if fails
*/
@SuppressWarnings({ "unchecked", "rawtypes" })
public AggregatorPersitenceConfiguration(Class<?> persistence) throws Exception {
super((Class) persistence);
}
}


@@ -0,0 +1,343 @@
package org.gcube.accounting.aggregator.persistence;
import static com.couchbase.client.java.query.Select.select;
import static com.couchbase.client.java.query.dsl.Expression.s;
import static com.couchbase.client.java.query.dsl.Expression.x;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import org.gcube.accounting.aggregator.aggregation.AggregationType;
import org.gcube.accounting.aggregator.status.AggregationState;
import org.gcube.accounting.aggregator.status.AggregationStatus;
import org.gcube.accounting.aggregator.utility.Constant;
import org.gcube.accounting.aggregator.utility.Utility;
import org.gcube.accounting.datamodel.AggregatedUsageRecord;
import org.gcube.accounting.datamodel.UsageRecord;
import org.gcube.documentstore.records.DSMapper;
import org.gcube.documentstore.records.Record;
import org.gcube.documentstore.records.RecordUtility;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.CouchbaseCluster;
import com.couchbase.client.java.PersistTo;
import com.couchbase.client.java.document.JsonDocument;
import com.couchbase.client.java.document.json.JsonObject;
import com.couchbase.client.java.env.CouchbaseEnvironment;
import com.couchbase.client.java.env.DefaultCouchbaseEnvironment;
import com.couchbase.client.java.error.DocumentAlreadyExistsException;
import com.couchbase.client.java.query.N1qlQueryResult;
import com.couchbase.client.java.query.N1qlQueryRow;
import com.couchbase.client.java.query.Statement;
import com.couchbase.client.java.query.dsl.Expression;
import com.couchbase.client.java.query.dsl.Sort;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public class CouchBaseConnector {
private static Logger logger = LoggerFactory.getLogger(CouchBaseConnector.class);
public static final long MAX_REQUEST_LIFE_TIME = TimeUnit.SECONDS.toMillis(120);
public static final long KEEP_ALIVE_INTERVAL = TimeUnit.HOURS.toMillis(1);
public static final long AUTO_RELEASE_AFTER = TimeUnit.HOURS.toMillis(1);
public static final long VIEW_TIMEOUT_BUCKET = TimeUnit.SECONDS.toMillis(120);
public static final long CONNECTION_TIMEOUT_BUCKET = TimeUnit.SECONDS.toMillis(15);
public static final long CONNECTION_TIMEOUT = TimeUnit.SECONDS.toMillis(15);
private static final String URL_PROPERTY_KEY = "URL";
private static final String PASSWORD_PROPERTY_KEY = "password";
public final static String ACCOUNTING_MANAGER_BUCKET_NAME = "AccountingManager";
/* The environment configuration */
protected static final CouchbaseEnvironment ENV;
protected static final PersistTo PERSIST_TO;
static {
ENV = DefaultCouchbaseEnvironment.builder()
.connectTimeout(CouchBaseConnector.CONNECTION_TIMEOUT)
.maxRequestLifetime(CouchBaseConnector.MAX_REQUEST_LIFE_TIME)
.queryTimeout(CouchBaseConnector.CONNECTION_TIMEOUT)
.viewTimeout(CouchBaseConnector.VIEW_TIMEOUT_BUCKET)
.keepAliveInterval(CouchBaseConnector.KEEP_ALIVE_INTERVAL)
.kvTimeout(5000)
.autoreleaseAfter(CouchBaseConnector.AUTO_RELEASE_AFTER).build();
PERSIST_TO = PersistTo.MASTER;
}
protected static CouchBaseConnector couchBaseConnector;
	protected AggregatorPersistenceConfiguration configuration;
protected Cluster cluster;
protected Map<String,Bucket> connectionMap;
protected Map<String, Class<? extends Record>> recordTypeMap;
public static CouchBaseConnector getInstance() throws Exception{
if(couchBaseConnector==null){
couchBaseConnector = new CouchBaseConnector();
}
return couchBaseConnector;
}
protected CouchBaseConnector() throws Exception {
		this.configuration = new AggregatorPersistenceConfiguration(AggregatorPersistence.class);
this.cluster = getCluster();
createConnectionMap();
}
	private Cluster getCluster() throws Exception {
		String url = configuration.getProperty(URL_PROPERTY_KEY);
		return CouchbaseCluster.create(ENV, url);
	}
public static enum SUFFIX {
src, dst
};
private static String getBucketKey(String recordType, AggregationType aggregationType, SUFFIX suffix){
return recordType + "-" + aggregationType.name() + "-" + suffix.name();
}
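	// For example (illustrative values): getBucketKey("ServiceUsageRecord", AggregationType.DAILY, SUFFIX.src)
	// resolves the configuration key "ServiceUsageRecord-DAILY-src"; its value, read below in
	// createConnectionMap, is the name of the bucket to open (src = source records, dst = aggregated records).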
private Map<String,Bucket> createConnectionMap() throws Exception {
connectionMap = new HashMap<>();
recordTypeMap = new HashMap<>();
try {
Bucket b = cluster.openBucket(
ACCOUNTING_MANAGER_BUCKET_NAME,
configuration.getProperty(PASSWORD_PROPERTY_KEY));
connectionMap.put(ACCOUNTING_MANAGER_BUCKET_NAME, b);
}catch (Exception e) {
logger.error("Unable to open Bucket used for Accounting Aggregation Management", e);
throw e;
}
Map<String, Class<? extends Record>> recordClasses = RecordUtility.getRecordClassesFound();
for (Class<? extends Record> recordClass : recordClasses.values()) {
Record recordInstance = recordClass.newInstance();
if (recordInstance instanceof UsageRecord && !(recordInstance instanceof AggregatedUsageRecord<?,?>)) {
String recordType = recordInstance.getRecordType();
recordTypeMap.put(recordType, recordClass);
for(AggregationType aggregationType : AggregationType.values()){
for(SUFFIX suffix : SUFFIX.values()){
try {
logger.debug("Trying to get the Bucket for {} {} {}", suffix, recordType, aggregationType);
String bucketKey = getBucketKey(recordType, aggregationType, suffix);
String bucketName = configuration.getProperty(bucketKey);
logger.debug("Bucket for {} {} {} is {}. Going to open it.", suffix, recordType, aggregationType, bucketName);
Bucket bucket = cluster.openBucket(bucketName, configuration.getProperty(PASSWORD_PROPERTY_KEY));
connectionMap.put(bucketKey, bucket);
}catch (Exception e) {
logger.info("Unable to open Bucket for type {}. This normally mean that is configured.", recordClass);
}
}
}
}
}
return connectionMap;
}
public Set<String> getConnectionMapKeys(){
return connectionMap.keySet();
}
public Set<String> getRecordTypes(){
return recordTypeMap.keySet();
}
public Bucket getBucket(String recordType, AggregationType aggregationType, SUFFIX suffix){
return connectionMap.get(getBucketKey(recordType, aggregationType, suffix));
}
public static AggregationStatus getLast(String recordType, AggregationType aggregationType) throws Exception{
Bucket bucket = CouchBaseConnector.getInstance().connectionMap.get(CouchBaseConnector.ACCOUNTING_MANAGER_BUCKET_NAME);
/*
* SELECT *
* FROM AccountingManager
* WHERE
* `aggregationInfo`.`recordType` = "ServiceUsageRecord" AND
* `aggregationInfo`.`aggregationType` = "DAILY"
* ORDER BY `aggregationInfo`.`aggregationStartDate` DESC LIMIT 1
*/
Expression expression = x("`aggregationInfo`.`recordType`").eq(s(recordType));
expression = expression.and(x("`aggregationInfo`.`aggregationType`").eq(s(aggregationType.name())));
Sort sort = Sort.desc("`aggregationInfo`.`aggregationStartDate`");
Statement statement = select("*").from(bucket.name()).where(expression).orderBy(sort).limit(1);
logger.trace("Going to query : {}", statement.toString());
N1qlQueryResult result = bucket.query(statement);
if (!result.finalSuccess()) {
logger.debug("{} failed : {}", N1qlQueryResult.class.getSimpleName(), result.errors());
return null;
}
List<N1qlQueryRow> rows = result.allRows();
if(rows.size()>1){
			String error = String.format("More than one Document found for query %s. This is really strange and should not occur. Please contact the Administrator.", statement.toString());
logger.error(error);
throw new Exception(error);
}
if(rows.size()==1){
N1qlQueryRow row = rows.get(0);
try {
JsonObject jsonObject = row.value().getObject(bucket.name());
logger.trace("JsonObject : {}", jsonObject.toString());
return DSMapper.getObjectMapper().readValue(jsonObject.toString(), AggregationStatus.class);
} catch (Exception e) {
logger.warn("Unable to elaborate result for {}", row.toString());
}
}
return null;
}
public static List<AggregationStatus> getUnterminated() throws Exception{
return getUnterminated(null, null);
}
public static List<AggregationStatus> getUnterminated(String recordType, AggregationType aggregationType) throws Exception{
Bucket bucket = CouchBaseConnector.getInstance().connectionMap.get(CouchBaseConnector.ACCOUNTING_MANAGER_BUCKET_NAME);
/*
* SELECT *
* FROM AccountingManager
* WHERE
* `aggregationState` != "COMPLETED" AND
* `lastUpdateTime` < "2017-07-31 09:31:10.984 +0000" AND
* `aggregationInfo`.`recordType` = "ServiceUsageRecord" AND
	 * 		`aggregationInfo`.`aggregationType` = "DAILY"
	 * ORDER BY `aggregationInfo`.`aggregationStartDate` ASC
*/
Calendar now = Utility.getUTCCalendarInstance();
now.add(Constant.CALENDAR_FIELD_TO_SUBSTRACT_TO_CONSIDER_UNTERMINATED, -Constant.UNIT_TO_SUBSTRACT_TO_CONSIDER_UNTERMINATED);
Expression expression = x("`aggregationState`").ne(s(AggregationState.COMPLETED.name()));
expression = expression.and(x("`lastUpdateTime`").lt(s(Constant.DEFAULT_DATE_FORMAT.format(now.getTime()))));
if(recordType!=null){
expression = expression.and(x("`aggregationInfo`.`recordType`").eq(s(recordType)));
}
if(aggregationType!=null){
expression = expression.and(x("`aggregationInfo`.`aggregationType`").eq(s(aggregationType.name())));
}
Sort sort = Sort.asc("`aggregationInfo`.`aggregationStartDate`");
Statement statement = select("*").from(bucket.name()).where(expression).orderBy(sort);
logger.trace("Going to query : {}", statement.toString());
N1qlQueryResult result = bucket.query(statement);
if (!result.finalSuccess()) {
logger.debug("{} failed : {}", N1qlQueryResult.class.getSimpleName(), result.errors());
return null;
}
List<N1qlQueryRow> rows = result.allRows();
List<AggregationStatus> aggregationStatuses = new ArrayList<>(rows.size());
for(N1qlQueryRow row: rows){
try {
JsonObject jsonObject = row.value().getObject(bucket.name());
logger.trace("JsonObject : {}", jsonObject.toString());
AggregationStatus aggregationStatus = DSMapper.getObjectMapper().readValue(jsonObject.toString(), AggregationStatus.class);
aggregationStatuses.add(aggregationStatus);
} catch (Exception e) {
logger.warn("Unable to elaborate result for {}", row.toString());
}
}
return aggregationStatuses;
}
public static AggregationStatus getAggregationStatus(String recordType, AggregationType aggregationType, Date aggregationStartDate) throws Exception{
Bucket bucket = CouchBaseConnector.getInstance().connectionMap.get(CouchBaseConnector.ACCOUNTING_MANAGER_BUCKET_NAME);
/*
* SELECT *
* FROM AccountingManager
* WHERE
* `aggregationInfo`.`recordType` = "ServiceUsageRecord" AND
* `aggregationInfo`.`aggregationType` = "DAILY" AND
* `aggregationInfo`.`aggregationStartDate` = "2017-06-24 00:00:00.000 +0000"
*/
Expression expression = x("`aggregationInfo`.`recordType`").eq(s(recordType));
expression = expression.and(x("`aggregationInfo`.`aggregationType`").eq(s(aggregationType.name())));
expression = expression.and(x("`aggregationInfo`.`aggregationStartDate`").eq(s(Constant.DEFAULT_DATE_FORMAT.format(aggregationStartDate))));
Statement statement = select("*").from(bucket.name()).where(expression);
logger.trace("Going to query : {}", statement.toString());
N1qlQueryResult result = bucket.query(statement);
if (!result.finalSuccess()) {
logger.debug("{} failed : {}", N1qlQueryResult.class.getSimpleName(), result.errors());
return null;
}
List<N1qlQueryRow> rows = result.allRows();
if(rows.size()>1){
			String error = String.format("More than one Document found for query %s. This is really strange and should not occur. Please contact the Administrator.", statement.toString());
logger.error(error);
throw new Exception(error);
}
if(rows.size()==1){
N1qlQueryRow row = rows.get(0);
try {
JsonObject jsonObject = row.value().getObject(bucket.name());
logger.trace("JsonObject : {}", jsonObject.toString());
return DSMapper.getObjectMapper().readValue(jsonObject.toString(), AggregationStatus.class);
} catch (Exception e) {
logger.warn("Unable to elaborate result for {}", row.toString());
}
}
return null;
}
public static void upsertAggregationStatus(AggregationStatus aggregationStatus) throws Exception{
Bucket bucket = CouchBaseConnector.getInstance().connectionMap.get(CouchBaseConnector.ACCOUNTING_MANAGER_BUCKET_NAME);
JsonObject jsonObject = JsonObject.fromJson(DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
JsonDocument jsonDocument = JsonDocument.create(aggregationStatus.getUUID().toString(), jsonObject);
try{
			bucket.upsert(jsonDocument, PERSIST_TO, CouchBaseConnector.CONNECTION_TIMEOUT_BUCKET, TimeUnit.MILLISECONDS);
		}catch (DocumentAlreadyExistsException e) {
			// OK, this can happen when the insert procedure was started but was interrupted
}
}
}
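A minimal usage sketch of the static API defined above (illustrative: the record type and aggregation type are the same examples used in the query comments, and exception handling is omitted):

AggregationStatus last = CouchBaseConnector.getLast("ServiceUsageRecord", AggregationType.DAILY);
if (last != null && last.getAggregationState() != AggregationState.COMPLETED) {
	// resume the aggregation from its recorded state, then persist any update
	CouchBaseConnector.upsertAggregationStatus(last);
}
List<AggregationStatus> unterminated = CouchBaseConnector.getUnterminated();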

View File

@ -1,663 +1,186 @@
package org.gcube.accounting.aggregator.plugin;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FileReader;
import java.io.PrintStream;
import java.io.Serializable;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
import org.gcube.accounting.aggregator.configuration.ConfigurationServiceEndpoint;
import org.gcube.accounting.aggregator.configuration.Constant;
import org.gcube.accounting.aggregator.configuration.ManagementFileBackup;
import org.gcube.accounting.aggregator.madeaggregation.Aggregation;
import org.gcube.accounting.aggregator.madeaggregation.AggregationType;
import org.gcube.accounting.aggregator.persistence.AggregatorPersistenceBackendQueryConfiguration;
import org.gcube.accounting.aggregator.recovery.RecoveryRecord;
import org.gcube.accounting.datamodel.aggregation.AggregatedJobUsageRecord;
import org.gcube.accounting.datamodel.aggregation.AggregatedPortletUsageRecord;
import org.gcube.accounting.aggregator.aggregation.AggregationType;
import org.gcube.accounting.aggregator.elaboration.AggregatorManager;
import org.gcube.accounting.aggregator.elaboration.RecoveryManager;
import org.gcube.accounting.aggregator.utility.Utility;
import org.gcube.accounting.datamodel.UsageRecord;
import org.gcube.accounting.datamodel.aggregation.AggregatedServiceUsageRecord;
import org.gcube.accounting.datamodel.aggregation.AggregatedStorageUsageRecord;
import org.gcube.accounting.datamodel.aggregation.AggregatedTaskUsageRecord;
import org.gcube.accounting.datamodel.usagerecords.JobUsageRecord;
import org.gcube.accounting.datamodel.usagerecords.PortletUsageRecord;
import org.gcube.accounting.datamodel.usagerecords.ServiceUsageRecord;
import org.gcube.accounting.datamodel.usagerecords.StorageUsageRecord;
import org.gcube.accounting.datamodel.usagerecords.TaskUsageRecord;
import org.gcube.common.scope.api.ScopeProvider;
import org.gcube.documentstore.exception.InvalidValueException;
import org.gcube.documentstore.persistence.PersistenceCouchBase;
import org.gcube.documentstore.records.AggregatedRecord;
import org.gcube.documentstore.records.Record;
import org.gcube.documentstore.records.RecordUtility;
import org.gcube.vremanagement.executor.plugin.Plugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.CouchbaseCluster;
import com.couchbase.client.java.PersistTo;
import com.couchbase.client.java.document.JsonDocument;
import com.couchbase.client.java.document.json.JsonArray;
import com.couchbase.client.java.env.CouchbaseEnvironment;
import com.couchbase.client.java.env.DefaultCouchbaseEnvironment;
import com.couchbase.client.java.view.ViewQuery;
import com.couchbase.client.java.view.ViewResult;
import com.couchbase.client.java.view.ViewRow;
/**
 * @author Alessandro Pieve (ISTI - CNR)
 * @author Luca Frosini (ISTI - CNR)
*/
public class AccountingAggregatorPlugin extends Plugin<AccountingAggregatorPluginDeclaration> {
private static Logger logger = LoggerFactory.getLogger(AccountingAggregatorPlugin.class);
public Bucket accountingBucket;
protected Cluster cluster;
static {
		// One Record per package is enough
RecordUtility.addRecordPackage(ServiceUsageRecord.class.getPackage());
RecordUtility.addRecordPackage(AggregatedServiceUsageRecord.class.getPackage());
}
/**
* Key to indicate {@link AggregationType}
*/
public static final String AGGREGATION_TYPE_INPUT_PARAMETER = "aggregationType";
/**
	 * Day is ignored for MONTHLY aggregation.
	 * Month and Day are ignored for YEARLY aggregation.
*/
public static final String AGGREGATION_START_DATE_INPUT_PARAMETER = "aggregationStartDate";
public static final String RESTART_FROM_LAST_AGGREGATION_DATE_INPUT_PARAMETER = "restartFromLastAggregationDate";
public static final String AGGREGATION_START_DATE_DATE_FORMAT_PATTERN = "yyyy/MM/dd";
public static final DateFormat AGGREGATION_START_DATE_DATE_FORMAT;
private static final String AGGREGATION_START_DATE_UTC_DATE_FORMAT_PATTERN = "yyyy/MM/dd Z";
private static final DateFormat AGGREGATION_START_DATE_UTC_DATE_FORMAT;
private static final String UTC = "+0000";
public enum ElaborationType {
AGGREGATE, // Aggregate
RECOVERY // Recover unterminated executions
}
/**
	 * Indicates which type of elaboration the plugin must perform
*/
public static final String ELABORATION_TYPE_INPUT_PARAMETER = "elaborationType";
/**
	 * Start day time in UTC from which the plugin is allowed to write into the buckets
*/
public static final String PERSIST_START_TIME_INPUT_PARAMETER = "persistStartTime";
public static final String PERSIST_END_TIME_INPUT_PARAMETER = "persistEndTime";
public static final String PERSIST_TIME_DATE_FORMAT_PATTERN = "HH:mm";
public static final DateFormat PERSIST_TIME_DATE_FORMAT;
public static final String LOCAL_TIME_DATE_FORMAT_PATTERN = "HH:mm Z";
public static final DateFormat LOCAL_TIME_DATE_FORMAT;
public static final String RECORD_TYPE_INPUT_PARAMETER = Record.RECORD_TYPE;
static {
AGGREGATION_START_DATE_DATE_FORMAT = Utility.getUTCDateFormat(AGGREGATION_START_DATE_DATE_FORMAT_PATTERN);
AGGREGATION_START_DATE_UTC_DATE_FORMAT = Utility.getUTCDateFormat(AGGREGATION_START_DATE_UTC_DATE_FORMAT_PATTERN);
PERSIST_TIME_DATE_FORMAT = new SimpleDateFormat(PERSIST_TIME_DATE_FORMAT_PATTERN);
LOCAL_TIME_DATE_FORMAT = new SimpleDateFormat(LOCAL_TIME_DATE_FORMAT_PATTERN);
}
public Aggregation aggregate;
public static final String AGGREGATED = "aggregated";
	private final static String LINE_PREFIX = "{";
private final static String LINE_SUFFIX = "}";
private final static String KEY_VALUE_PAIR_SEPARATOR = ",";
private final static String KEY_VALUE_LINKER = "=";
public static Integer countInsert=0;
public static Integer countDelete=0;
public static Integer recoveryMode=0;
public Boolean backup=true;
	// value: 0 = PersistTo.MASTER, 1 = PersistTo.ONE
public static Integer typePersisted=0;
protected PersistTo persisted ;
/**
	 * @param pluginDeclaration the declaration of this plugin
*/
public AccountingAggregatorPlugin(AccountingAggregatorPluginDeclaration pluginDeclaration) {
		super(pluginDeclaration);
}
/* The environment configuration */
protected static final CouchbaseEnvironment ENV =
DefaultCouchbaseEnvironment.builder()
.connectTimeout(Constant.CONNECTION_TIMEOUT * 1000)
.maxRequestLifetime(Constant.MAX_REQUEST_LIFE_TIME * 1000)
.queryTimeout(Constant.CONNECTION_TIMEOUT * 1000) //15 Seconds in milliseconds
.viewTimeout(Constant.VIEW_TIMEOUT_BUCKET * 1000)//120 Seconds in milliseconds
.keepAliveInterval(3600 * 1000) // 3600 Seconds in milliseconds
.kvTimeout(5000) //in ms
.build();
	/**
	 * Parses the given persist time input parameter (HH:mm) into a Date.
	 */
private Date getPersistTime(Map<String, Object> inputs, String parameterName) throws ParseException{
Date persistTime = null;
if (inputs.containsKey(parameterName)) {
String persistTimeString = (String) inputs.get(parameterName);
persistTime = Utility.getPersistTimeDate(persistTimeString);
}
if(persistTime==null){
throw new IllegalArgumentException("Please set a valid '" + parameterName +"' by using " + PERSIST_TIME_DATE_FORMAT_PATTERN + " format.");
}
return persistTime;
}
/** {@inheritDoc} */
@SuppressWarnings("unchecked")
@Override
public void launch(Map<String, Object> inputs) throws Exception {
countInsert=0;
countDelete=0;
if(inputs == null || inputs.isEmpty()){
logger.debug("{} inputs {}", this.getClass().getSimpleName(), inputs);
throw new Exception("Inputs null");
}
		//Type: HOURLY, DAILY, MONTHLY, YEARLY
		//Interval: number of hours, days, months or years
if (!inputs.containsKey("type") || !inputs.containsKey("interval"))
throw new IllegalArgumentException("Interval and type must be defined");
AggregationType aggType =AggregationType.valueOf((String)inputs.get("type"));
Integer intervaTot=(Integer)inputs.get("interval");
Integer interval=intervaTot* aggType.getMultiplierFactor();
		//new feature to elaborate a set of small intervals instead of the full range
if (inputs.containsKey("intervalStep"))
interval=(Integer)inputs.get("intervalStep");
Integer inputStartTime=null;
String pathFile = null;
if (inputs.containsKey("startTime"))
inputStartTime=(Integer)inputs.get("startTime");
else{
//get start time with file
logger.debug("Attention get start Time from file");
if (inputs.containsKey("pathFile")){
//get start time from file
pathFile=(String) inputs.get("pathFile");
logger.trace("open file:{}",pathFile);
BufferedReader reader = new BufferedReader(new FileReader(pathFile));
String line;
while ((line = reader.readLine()) != null)
{
line=line.trim();
String strDate = line;
SimpleDateFormat fmt = new SimpleDateFormat("yyyy/MM/dd");
Date d1 = fmt.parse(strDate);
Date now = new Date();
long millisDiff = now.getTime() - d1.getTime();
inputStartTime= (int) (millisDiff / 86400000);
logger.debug("Read Start Time:{}",d1.toString());
logger.debug("Start Time:{}",inputStartTime);
}
reader.close();
}
}
Boolean currentScope =false;
String scope=null;
if (inputs.containsKey("currentScope"))
currentScope=(Boolean)inputs.get("currentScope");
if (currentScope)
scope=ScopeProvider.instance.get();
if (inputs.containsKey("user"))
Constant.user=(String)inputs.get("user");
else
Constant.user="service.aggregatorAccounting";
if (inputs.containsKey("recovery"))
recoveryMode=(Integer)inputs.get("recovery");
if (inputs.containsKey("backup"))
backup=(Boolean)inputs.get("backup");
if (inputs.containsKey("typePersisted"))
typePersisted=(Integer)inputs.get("typePersisted");
switch(typePersisted) {
case 0:
persisted=PersistTo.MASTER;
break;
case 1:
persisted=PersistTo.ONE;
break;
default:
persisted=PersistTo.MASTER;
}
logger.debug("-Launch with Type:{}, Interval:{}, startTime:{}, Scope:{}, Recovery:{}",aggType.toString(),interval,inputStartTime,scope,recoveryMode);
logger.debug("persist:{} backup:{}",persisted.toString(),backup);
if(!backup){
logger.warn("Attention backup disabled");
Thread.sleep(20000);
}
if (inputs.containsKey("intervalStep")){
logger.debug("Interval is not considered, aggregate only :{} step",interval);
}
//Get Configuration from service end point
String url=null;
String password =null;
List<String> listBucket=new ArrayList<String>();
AggregatorPersistenceBackendQueryConfiguration configuration;
try{
configuration = new AggregatorPersistenceBackendQueryConfiguration(PersistenceCouchBase.class);
url = configuration.getProperty(ConfigurationServiceEndpoint.URL_PROPERTY_KEY);
password = configuration.getProperty(ConfigurationServiceEndpoint.PASSWORD_PROPERTY_KEY);
if (inputs.containsKey("bucket"))
listBucket.add(inputs.get("bucket").toString());
else{
listBucket.add(configuration.getProperty(ConfigurationServiceEndpoint.BUCKET_STORAGE_NAME_PROPERTY_KEY));
listBucket.add(configuration.getProperty(ConfigurationServiceEndpoint.BUCKET_SERVICE_NAME_PROPERTY_KEY));
listBucket.add(configuration.getProperty(ConfigurationServiceEndpoint.BUCKET_JOB_NAME_PROPERTY_KEY));
listBucket.add(configuration.getProperty(ConfigurationServiceEndpoint.BUCKET_PORTLET_NAME_PROPERTY_KEY));
listBucket.add(configuration.getProperty(ConfigurationServiceEndpoint.BUCKET_TASK_NAME_PROPERTY_KEY));
}
}
catch (Exception e) {
logger.error("launch",e.getLocalizedMessage());
throw e;
}
Cluster cluster = CouchbaseCluster.create(ENV, url);
//Define a type for aggregate
RecordUtility.addRecordPackage(PortletUsageRecord.class.getPackage());
RecordUtility.addRecordPackage(AggregatedPortletUsageRecord.class.getPackage());
RecordUtility.addRecordPackage(JobUsageRecord.class.getPackage());
RecordUtility.addRecordPackage(AggregatedJobUsageRecord.class.getPackage());
RecordUtility.addRecordPackage(TaskUsageRecord.class.getPackage());
RecordUtility.addRecordPackage(AggregatedTaskUsageRecord.class.getPackage());
RecordUtility.addRecordPackage(StorageUsageRecord.class.getPackage());
RecordUtility.addRecordPackage(AggregatedStorageUsageRecord.class.getPackage());
AggregationType aggregationType = null;
Date aggregationStartDate = null;
boolean restartFromLastAggregationDate = false;
ElaborationType elaborationType = ElaborationType.AGGREGATE;
RecordUtility.addRecordPackage(ServiceUsageRecord.class.getPackage());
RecordUtility.addRecordPackage(AggregatedServiceUsageRecord.class.getPackage());
//end define
Date persistStartTime = null;
Date persistEndTime = null;
Date today = new Date();
Date endScriptTime = new Date();
if (inputs.containsKey("endScriptTime")){
DateFormat df = new SimpleDateFormat ("MM/dd/yyyy HH:mm");
endScriptTime = df.parse ((today.getMonth()+1)+"/"+today.getDate()+"/"+(today.getYear()+1900) +" "+(String)inputs.get("endScriptTime"));
logger.debug("Script Run until :{}",endScriptTime);
Class<? extends UsageRecord> usageRecordClass = null;
if (inputs == null || inputs.isEmpty()) {
throw new IllegalArgumentException("The can only be launched providing valid input parameters");
}
do {
logger.debug("--Start Time Loop:{}"+inputStartTime);
initFolder();
if ((recoveryMode==2)||(recoveryMode==0)){
logger.debug("Recovery mode enabled");
RecoveryRecord.searchFile(cluster,configuration);
}
if (recoveryMode!=2){
if (inputs.containsKey(ELABORATION_TYPE_INPUT_PARAMETER)) {
elaborationType = ElaborationType.valueOf((String) inputs.get(ELABORATION_TYPE_INPUT_PARAMETER));
}
persistStartTime = getPersistTime(inputs, PERSIST_START_TIME_INPUT_PARAMETER);
persistEndTime = getPersistTime(inputs, PERSIST_END_TIME_INPUT_PARAMETER);
switch (elaborationType) {
case AGGREGATE:
if (!inputs.containsKey(AGGREGATION_TYPE_INPUT_PARAMETER)) {
throw new IllegalArgumentException("Please set required parameter '" + AGGREGATION_TYPE_INPUT_PARAMETER +"'");
}
aggregationType = AggregationType.valueOf((String) inputs.get(AGGREGATION_TYPE_INPUT_PARAMETER));
for (String bucket:listBucket){
logger.trace("OpenBucket:{}",bucket);
accountingBucket = cluster.openBucket(bucket,password);
//elaborate bucket, with scope, type aggregation and interval
elaborateBucket(bucket,scope, inputStartTime, interval, aggType);
if (inputs.containsKey(AGGREGATION_START_DATE_INPUT_PARAMETER)) {
String aggregationStartDateString = (String) inputs.get(AGGREGATION_START_DATE_INPUT_PARAMETER);
aggregationStartDate = AGGREGATION_START_DATE_UTC_DATE_FORMAT.parse(aggregationStartDateString + " " + UTC);
}
if (inputs.containsKey("pathFile")){
//update a file for new start time
FileOutputStream file = new FileOutputStream(pathFile);
PrintStream output = new PrintStream(file);
logger.debug("Update pathfile:{} with new start time:{}",pathFile,inputStartTime-intervaTot);
Date dateNow = new Date();
Calendar data = Calendar.getInstance();
data.setTime(dateNow);
data.add(Calendar.DATE,-(inputStartTime-intervaTot));
SimpleDateFormat format1 = new SimpleDateFormat("yyyy/MM/dd");
String formatted = format1.format(data.getTime());
output.println(formatted);
inputStartTime=inputStartTime-intervaTot;
today = new Date();
if(inputs.containsKey(RESTART_FROM_LAST_AGGREGATION_DATE_INPUT_PARAMETER)){
restartFromLastAggregationDate = (boolean) inputs.get(RESTART_FROM_LAST_AGGREGATION_DATE_INPUT_PARAMETER);
}
logger.debug("Complete countInsert{}, countDelete{}",countInsert,countDelete);
}
if(restartFromLastAggregationDate==false && aggregationStartDate==null){
throw new IllegalArgumentException("Aggregation Start Date cannot be found. Please provide it as parameter or set '" + RESTART_FROM_LAST_AGGREGATION_DATE_INPUT_PARAMETER + "' input parameter to 'true'.");
}
if (inputs.containsKey(RECORD_TYPE_INPUT_PARAMETER)) {
usageRecordClass = (Class<? extends UsageRecord>) RecordUtility.getRecordClass((String) inputs.get(RECORD_TYPE_INPUT_PARAMETER));
}
AggregatorManager aggregatorManager = new AggregatorManager(aggregationType, restartFromLastAggregationDate, aggregationStartDate);
aggregatorManager.elaborate(persistStartTime, persistEndTime, usageRecordClass);
break;
} while(today.compareTo(endScriptTime)<0);
logger.debug("Plugin Terminated");
case RECOVERY:
RecoveryManager recoveryManager = new RecoveryManager(persistStartTime, persistEndTime);
recoveryManager.recovery();
break;
default:
throw new IllegalArgumentException("No ElaborationType provided. You should not be here. Please Contact the administrator");
}
}
	/** {@inheritDoc} */
@Override
protected void onStop() throws Exception {
logger.trace("{} onStop() function", this.getClass().getSimpleName());
logger.trace("Stopping execution of {}, UUID : {}", AccountingAggregatorPluginDeclaration.NAME, this.uuid);
Thread.currentThread().interrupt();
}
/**
* Init folder for backup file
*/
public void initFolder(){
Constant.PATH_DIR_BACKUP=System.getProperty(Constant.HOME_SYSTEM_PROPERTY)+"/"+Constant.NAME_DIR_BACKUP;
Constant.PATH_DIR_BACKUP_INSERT=Constant.PATH_DIR_BACKUP+"/insert";
Constant.PATH_DIR_BACKUP_DELETE=Constant.PATH_DIR_BACKUP+"/delete";
File DirRoot = new File(Constant.PATH_DIR_BACKUP);
if (!DirRoot.exists()) {
DirRoot.mkdir();
}
logger.debug("init folder:{}",Constant.PATH_DIR_BACKUP);
}
/**
	 * Elaborate a Bucket from startTime to interval
	 * @param bucket the name of the bucket to elaborate
	 * @param scope the scope to filter on, or null for all scopes
	 * @param inputStartTime how far back in time to start, in units of the aggregation type
	 * @param interval the number of aggregation periods to elaborate
	 * @param aggType the aggregation granularity (HOURLY, DAILY, MONTHLY, YEARLY)
	 * @return true when the elaboration completes
	 * @throws Exception if the elaboration fails
*/
protected boolean elaborateBucket(String bucket,String scope ,Integer inputStartTime,Integer interval,AggregationType aggType) throws Exception{
SimpleDateFormat format = new SimpleDateFormat(aggType.getDateformat());
//calculate a start time and end time for map reduce key
Calendar now, nowTemp;
if (inputStartTime==null){
now= Calendar.getInstance();
nowTemp= Calendar.getInstance();
}else{
now=Calendar.getInstance();
nowTemp= Calendar.getInstance();
switch (aggType.name()) {
case "YEARLY":
now.add( Calendar.YEAR, -inputStartTime );
nowTemp.add( Calendar.YEAR, -inputStartTime );
break;
case "MONTHLY":
now.add( Calendar.MONTH, -inputStartTime );
nowTemp.add( Calendar.MONTH, -inputStartTime );
break;
case "DAILY":
now.add( Calendar.DATE, -inputStartTime );
nowTemp.add( Calendar.DATE, -inputStartTime );
break;
case "HOURLY":
now.add( Calendar.HOUR, -inputStartTime );
nowTemp.add( Calendar.HOUR, -inputStartTime );
break;
}
}
String endAllKeyString = format.format(now.getTime());
String endKeyString = format.format(now.getTime());
//save a record modified into a file and save into a workspace
nowTemp.add(aggType.getCalendarField(), -1*interval);
String startAllKeyString = format.format(nowTemp.getTime());
if (backup){
logger.debug("Start Backup");
WorkSpaceManagement.onSaveBackupFile(accountingBucket,bucket,scope,startAllKeyString, endAllKeyString,aggType);
}
else
logger.debug("No Backup required");
List<JsonDocument> documentElaborate=new ArrayList<JsonDocument>();
for (int i=0; i<interval; i++){
now.add(aggType.getCalendarField(), -1);
String startKeyString = format.format(now.getTime());
//init a json start,end key
JsonArray startKey = Utility.generateKey(scope,startKeyString);
JsonArray endKey = Utility.generateKey(scope,endKeyString);
DesignID designid=DesignID.valueOf(bucket);
String designDocId=designid.getNameDesign();
String viewName="";
if (scope!=null)
viewName=designid.getNameViewScope();
else
viewName=designid.getNameView();
ViewQuery query = ViewQuery.from(designDocId, viewName);
query.startKey(startKey);
query.endKey(endKey);
query.reduce(false);
query.inclusiveEnd(false);
logger.debug("--{}/{} View Query: startKey:{} - endKey:{} designDocId:{} - viewName:{}",i,interval,startKey, endKey,designDocId,viewName);
ViewResult viewResult = null;
try {
viewResult = accountingBucket.query(query);
} catch (Exception e) {
logger.error("Exception error VIEW",e.getLocalizedMessage(),e);
}
// Iterate through the returned ViewRows
aggregate = new Aggregation();
documentElaborate.clear();
logger.debug("Start elaborate row");
Boolean resultElaborate=false;
for (ViewRow row : viewResult)
resultElaborate=elaborateRow(row,documentElaborate);
logger.debug("End elaborate row");
//Backup File saved
String nameFileBackup="";
if (scope!=null)
nameFileBackup=scope.replace("/", "")+"-"+startKeyString+"-"+endKeyString;
else
nameFileBackup=startKeyString+"-"+endKeyString;
reallyFlush(aggregate,documentElaborate,nameFileBackup);
endKeyString = startKeyString;
}
return true;
}
/**
	 * Elaborate a single view row for aggregation (elaborateRow)
	 * @param row the view row to elaborate
	 * @param documentElaborate the list collecting the elaborated documents
	 * @return true if the row was aggregated, false otherwise
	 * @throws Exception if the elaboration fails
*/
protected Boolean elaborateRow(ViewRow row ,List<JsonDocument> documentElaborate) throws Exception{
int i=0;
JsonDocument documentJson = null;
try {
//patch for field of long type
String document=row.value().toString().replace("\":", "=").replace("\"", "");
i=1;//1
Map<String,? extends Serializable> map = getMapFromString(document);
i=2;//2
//prepare a document for elaborate
String identifier=(String) row.document().content().get("id");
i=3;//3
documentJson = JsonDocument.create(identifier, row.document().content());
i=4;//4
@SuppressWarnings("rawtypes")
AggregatedRecord record = (AggregatedRecord)RecordUtility.getRecord(map);
i=5;//5
aggregate.aggregate(record);
i=6;//6
//insert an elaborate row into list JsonDocument for memory document elaborate
documentElaborate.add(documentJson);
i=7;//7
return true;
}
catch(InvalidValueException ex){
logger.warn("InvalidValueException - Record is not valid. Anyway, it will be persisted i:{}",i);
logger.warn("Runtime Exception ex",ex);
if ((i==5)&&(documentJson!=null)){
documentElaborate.add(documentJson);
}
return false;
}
catch(RuntimeException exr){
logger.warn("Runtime Exception -Record is not valid. Anyway, it will be persisted i:{}",i);
logger.warn("Runtime Exception exr",exr);
if ((i==5)&&(documentJson!=null)){
documentElaborate.add(documentJson);
logger.debug("Record is elaborate");
}
return false;
}
catch (Exception e) {
logger.error("record is not elaborated:"+row.toString()+" but it will be persisted");
logger.error("error elaborateRow", e);
logger.error("i:{}",i);
if ((i==5)&&(documentJson!=null)){
documentElaborate.add(documentJson);
logger.debug("Record is elaborate");
}
return false;
}
}
/**
	 * getMapFromString
	 * @param serializedMap the serialized map line to parse
	 * @return the parsed map, or null if the line is not well formed
*/
protected static Map<String, ? extends Serializable> getMapFromString(String serializedMap){
/* Checking line sanity */
		if(!serializedMap.startsWith(LINE_PREFIX) || !serializedMap.endsWith(LINE_SUFFIX)){
return null;
}
/* Cleaning prefix and suffix to parse line */
		serializedMap = serializedMap.replace(LINE_PREFIX, "");
serializedMap = serializedMap.replace(LINE_SUFFIX, "");
Map<String, Serializable> map = new HashMap<String,Serializable>();
String[] pairs = serializedMap.split(KEY_VALUE_PAIR_SEPARATOR);
for (int i=0;i<pairs.length;i++) {
			String pair = pairs[i].trim();
String[] keyValue = pair.split(KEY_VALUE_LINKER);
String key = keyValue[0].trim();
Serializable value = keyValue[1].trim();
map.put(key, value);
}
return map;
}
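	// Example (illustrative): a serialized view row value such as
	//   {recordType=ServiceUsageRecord,scope=/gcube,operationResult=SUCCESS}
	// is parsed into a map with keys recordType, scope and operationResult;
	// the field names here are hypothetical, real lines come from the Couchbase view rows.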
/**
	 * Delete the non-aggregated records and insert the new aggregated records.
	 * If a problem occurs while deleting a record, the new records are not inserted and a backup file is kept.
	 * reallyFlush
	 * @param aggregate the aggregation holding the aggregated records to insert
	 * @param docs the original documents to delete
	 * @param nameFile the base name of the backup file
	 * @return true when the flush completes
	 * @throws Exception if the flush fails
*/
protected boolean reallyFlush(Aggregation aggregate, List<JsonDocument> docs,String nameFile) throws Exception{
if (docs.size()!=0){
Integer index=0;
boolean succesfulDelete=false;
logger.trace("Start a delete document:{}",docs.size());
//before elaborate a record, create a backup file
List<JsonDocument> notDeleted = docs;
List<JsonDocument> notInserted = aggregate.reallyFlush();
nameFile =nameFile+"-"+UUID.randomUUID();
ManagementFileBackup.getInstance().onCreateStringToFile(notDeleted,Constant.FILE_RECORD_NO_AGGREGATE+"_"+nameFile,false);
ManagementFileBackup.getInstance().onCreateStringToFile(notInserted,Constant.FILE_RECORD_AGGREGATE+"_"+nameFile,true);
List<JsonDocument> notDeletedTemp = null;
while ((index < Constant.NUM_RETRY) && !succesfulDelete){
notDeletedTemp = new ArrayList<JsonDocument>();
for (JsonDocument doc: notDeleted){
if (index>0){
logger.trace("delete Start {} pass",index);
}
countDelete ++;
try{
accountingBucket.remove(doc.id(),persisted,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
}
catch(Exception e){
logger.warn("doc:{} not deleted retry:{} for error:{}",doc.id(),index,e);
Thread.sleep(1500);
try{
if (accountingBucket.exists(doc.id()))
notDeletedTemp.add(doc);
}
catch(Exception ext){
logger.warn("doc:{} not verify for delete because timeout, retry:{}",doc.id(),index,ext);
Thread.sleep(3000);
try{
if (accountingBucket.exists(doc.id()))
notDeletedTemp.add(doc);
}
catch(Exception ex) {
logger.error("doc:{} not delete ({}), problem with exist bucket",doc.id(),doc.toString(),ex);
logger.error("force insert into list for delete");
notDeletedTemp.add(doc);
}
}
}
}
if (notDeletedTemp.isEmpty()){
succesfulDelete=true;
}
else {
index++;
notDeleted = new ArrayList<JsonDocument>(notDeletedTemp);
Thread.sleep(1000);
logger.trace("First pass no delete all succesfulDelete:{} index:{}",succesfulDelete,index);
}
}
if (!succesfulDelete){
logger.error("Error Delete record");
}
logger.debug("Delete complete:{}, Start a insert aggregated document:{}",countDelete,notInserted.size());
// delete all record and ready for insert a new aggregated record
if (succesfulDelete){
//if successful record delete, delete backup file
ManagementFileBackup.getInstance().onDeleteFile(Constant.FILE_RECORD_NO_AGGREGATE+"_"+nameFile,false);
index=0;
boolean succesfulInsert=false;
while ((index < Constant.NUM_RETRY) && !succesfulInsert){
List<JsonDocument> notInsertedTemp = new ArrayList<JsonDocument>();
for (JsonDocument document: notInserted){
if (index>0){
logger.trace("insert Start {} pass for document:{}",index,document.toString());
}
countInsert ++;
try{
//JsonDocument response = accountingBucket.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
JsonDocument response = accountingBucket.upsert(document,persisted,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
}
catch(Exception e){
logger.warn("record:{} not insert retry:{} for error:{}",document.id(),index,e);
Thread.sleep(1500);
try{
if (!accountingBucket.exists(document.id()))
notInsertedTemp.add(document);
}
catch(Exception ext){
logger.warn("doc:{} not verify for insert because timeout, retry",document.id(),ext);
Thread.sleep(3000);
try{
if (!accountingBucket.exists(document.id()))
notInsertedTemp.add(document);
}
catch(Exception ex) {
logger.error("doc:{} not insert ({}), problem with exist bucket",document.id(),document.toString(),ex);
logger.error("force insert into list for insert");
notInsertedTemp.add(document);
}
}
}
}
if (notInsertedTemp.isEmpty()){
succesfulInsert=true;
}
else {
index++;
notInserted = new ArrayList<JsonDocument>(notInsertedTemp);
Thread.sleep(1000);
logger.trace("First pass no insert all succesfulInsert:{} index:{}",succesfulInsert,index);
}
}
if (!succesfulInsert){
//do something clever with the exception
logger.error("Error Insert record{}");
} else{
logger.debug("elaborate record aggregate:{} and record not aggregate:{}",countInsert, countDelete);
ManagementFileBackup.getInstance().onDeleteFile(Constant.FILE_RECORD_AGGREGATE+"_"+nameFile,true);
}
}
logger.trace("Insert complete");
}
return true;
}
}
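For reference, a minimal sketch of the inputs map accepted by the refactored launch method (the keys are the constants defined above; the values are illustrative, and exception handling is omitted):

Map<String, Object> inputs = new HashMap<>();
inputs.put(AccountingAggregatorPlugin.ELABORATION_TYPE_INPUT_PARAMETER, "AGGREGATE");
inputs.put(AccountingAggregatorPlugin.AGGREGATION_TYPE_INPUT_PARAMETER, "DAILY");
inputs.put(AccountingAggregatorPlugin.AGGREGATION_START_DATE_INPUT_PARAMETER, "2017/06/24"); // yyyy/MM/dd
inputs.put(AccountingAggregatorPlugin.PERSIST_START_TIME_INPUT_PARAMETER, "20:00"); // HH:mm
inputs.put(AccountingAggregatorPlugin.PERSIST_END_TIME_INPUT_PARAMETER, "06:00");   // HH:mm
inputs.put(AccountingAggregatorPlugin.RECORD_TYPE_INPUT_PARAMETER, "ServiceUsageRecord");
new AccountingAggregatorPlugin(new AccountingAggregatorPluginDeclaration()).launch(inputs);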

View File

@ -13,7 +13,7 @@ import org.slf4j.LoggerFactory;
/**
* @author Alessandro Pieve (ISTI - CNR)
*
* @author Luca Frosini (ISTI - CNR)
*/
public class AccountingAggregatorPluginDeclaration implements PluginDeclaration {
/**
@ -24,9 +24,9 @@ public class AccountingAggregatorPluginDeclaration implements PluginDeclaration
/**
* Plugin name used by the Executor to retrieve this class
*/
public static final String NAME = "Accouting-Aggregator-Plugin";
public static final String NAME = "Accounting-Aggregator-Plugin";
public static final String DESCRIPTION = "This plugin is used to aggregate accounting record";
public static final String DESCRIPTION = "This plugin is used to aggregate accounting records";
public static final String VERSION = "1.0.0";

View File

@ -1,42 +0,0 @@
package org.gcube.accounting.aggregator.plugin;
/**
* @author Alessandro Pieve (ISTI - CNR)
*
*/
public enum DesignID {
accounting_storage("accounting_storage","StorageUsageRecordAggregated","all","scope"),
accounting_service("accounting_service","ServiceUsageRecordAggregated","all","scope"),
accounting_portlet("accounting_portlet","PortletUsageRecordAggregated","all","scope"),
accounting_job("accounting_job","JobUsageRecordAggregated","all","scope"),
accounting_task("accounting_task","TaskUsageRecordAggregated","all","scope");
private String nameBucket;
private String nameDesign;
private String nameView;
private String nameViewScope;
private DesignID(String nameBucket, String nameDesign, String nameView,String nameViewScope) {
this.nameBucket = nameBucket;
this.nameDesign = nameDesign;
this.nameView=nameView;
this.nameViewScope=nameViewScope;
}
public String getNameBucket() {
return nameBucket;
}
public String getNameDesign() {
return nameDesign;
}
public String getNameView() {
return nameView;
}
public String getNameViewScope(){
return nameViewScope;
}
}

View File

@ -1,44 +0,0 @@
package org.gcube.accounting.aggregator.plugin;
import com.couchbase.client.java.document.json.JsonArray;
import com.couchbase.client.java.document.json.JsonObject;
/**
* @author Alessandro Pieve (ISTI - CNR)
*
*/
public class Utility {
/**
* Generate a key for map-reduce
	 * @param scope the scope to prepend, or null
	 * @param key the comma-separated key components
	 * @return the generated JsonArray key
*/
protected static JsonArray generateKey(String scope,String key){
JsonArray generateKey = JsonArray.create();
if (scope!=null){
generateKey.add(scope);
}
for (String value: key.split(",")){
if (!value.toString().isEmpty())
generateKey.add(Integer.parseInt(value));
}
return generateKey;
}
/**
	 * Verify that a value has a type supported for insertion into a bucket
	 * @param item the value to check
	 * @return true if the type is supported
*/
public static boolean checkType(Object item) {
return item == null
|| item instanceof String
|| item instanceof Integer
|| item instanceof Long
|| item instanceof Double
|| item instanceof Boolean
|| item instanceof JsonObject
|| item instanceof JsonArray;
}
}

View File

@ -1,245 +0,0 @@
package org.gcube.accounting.aggregator.plugin;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.FileWriter;
import java.io.InputStream;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import org.gcube.accounting.aggregator.configuration.Constant;
import org.gcube.accounting.aggregator.madeaggregation.Aggregation;
import org.gcube.accounting.aggregator.madeaggregation.AggregationType;
import org.gcube.common.homelibrary.home.Home;
import org.gcube.common.homelibrary.home.HomeLibrary;
import org.gcube.common.homelibrary.home.HomeManager;
import org.gcube.common.homelibrary.home.HomeManagerFactory;
import org.gcube.common.homelibrary.home.User;
import org.gcube.common.homelibrary.home.workspace.Workspace;
import org.gcube.common.homelibrary.home.workspace.WorkspaceFolder;
import org.gcube.common.homelibrary.home.workspace.WorkspaceItem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.document.json.JsonArray;
import com.couchbase.client.java.view.ViewQuery;
import com.couchbase.client.java.view.ViewResult;
import com.couchbase.client.java.view.ViewRow;
/**
* @author Alessandro Pieve (ISTI - CNR)
*
*/
public class WorkSpaceManagement {
public static Logger logger = LoggerFactory.getLogger(Aggregation.class);
/**
	 * Save a compressed backup file into the workspace
	 * @param accountingBucket the bucket to read the records from
	 * @param bucket the bucket name
	 * @param scope the scope to filter on, or null
	 * @param startKeyString the start key of the backup range
	 * @param endKeyString the end key of the backup range
	 * @param aggType the aggregation granularity
	 * @return true when the backup has been saved
	 * @throws Exception if the backup fails
*/
protected static boolean onSaveBackupFile(Bucket accountingBucket,String bucket,String scope,String startKeyString,String endKeyString,
AggregationType aggType) throws Exception{
String nameFile="complete.json";
String nameFileZip="complete.zip";
String namePathFile=Constant.PATH_DIR_BACKUP+"/"+nameFile;
String namePathFileZip=Constant.PATH_DIR_BACKUP+"/"+nameFileZip;
String subFolderName="";
if (scope==null)
subFolderName=endKeyString.replace(",","-")+"_"+startKeyString.replace(",","-");
else
subFolderName=scope.replace("/", "")+"_"+endKeyString.replace(",","-")+"_"+startKeyString.replace(",","-");
try {
WorkspaceFolder wsRootDir=init(Constant.user);
//bucket folder for backup
WorkspaceFolder folderBucketName=createFolder(Constant.user, wsRootDir.getId(), bucket, "Backup Folder");
//type folder for backup
WorkspaceFolder folderTypeName=createFolder(Constant.user, folderBucketName.getId(), aggType.name(), "Backup Folder");
//type folder for backup
WorkspaceFolder folderStartTimeName=createFolder(Constant.user, folderTypeName.getId(), subFolderName, "Backup Folder");
DesignID designid=DesignID.valueOf(bucket);
String designDocId=designid.getNameDesign();
String viewName="";
if (scope!=null)
viewName=designid.getNameViewScope();
else
viewName=designid.getNameView();
JsonArray startKey = Utility.generateKey(scope,startKeyString);
JsonArray endKey = Utility.generateKey(scope,endKeyString);
ViewQuery query = ViewQuery.from(designDocId, viewName);
query.startKey(startKey);
query.endKey(endKey);
query.reduce(false);
query.inclusiveEnd(false);
ViewResult viewResult;
try {
viewResult = accountingBucket.query(query);
} catch (Exception e) {
logger.error(e.getLocalizedMessage());
throw e;
}
BufferedWriter filebackup =null;
File logFile = new File(namePathFile);
logFile.delete();
Thread.sleep(500);
filebackup = new BufferedWriter(new FileWriter(logFile));
int count = 0;
int maxTries = 3;
boolean exitRetry=false;
for (ViewRow row : viewResult){
while(!exitRetry) {
try {
if (row.document()!=null){
if (!row.document().content().toString().isEmpty()){
filebackup.write(row.document().content().toString());
filebackup.newLine();
}
}
exitRetry=true;
} catch (Exception e) {
logger.error("retry:{}",count);
logger.error(e.getMessage());
if (++count == maxTries){
filebackup.close();
throw e;
}
}
}
}
filebackup.close();
//create a zip file
byte[] buffer = new byte[1024];
FileOutputStream fos = new FileOutputStream(namePathFileZip);
ZipOutputStream zos = new ZipOutputStream(fos);
ZipEntry ze= new ZipEntry(nameFile);
zos.putNextEntry(ze);
FileInputStream in = new FileInputStream(namePathFile);
int len;
while ((len = in.read(buffer)) > 0) {
zos.write(buffer, 0, len);
}
in.close();
zos.closeEntry();
zos.close();
InputStream fileZipStream = new FileInputStream(namePathFileZip);
WorkSpaceManagement.saveItemOnWorkspace(Constant.user,fileZipStream,"complete.zip", "Description", folderStartTimeName.getId());
logger.trace("Save a backup file into workspace; bucket{},scope:{}, startkey:{},endkey:{}, aggregation type:{}",bucket,scope,startKeyString,endKeyString ,aggType.toString());
logFile.delete();
File logFileZip = new File(namePathFileZip);
logFileZip.delete();
return true;
}
catch (Exception e) {
logger.error(e.getLocalizedMessage());
logger.error(e.getMessage());
logger.error("onSaveBackupFile excepiton:{}",e);
throw e;
}
}
/**
	 * Init: return the root workspace folder of the given user
	 * @param user the workspace user
	 * @return the root workspace folder
* @throws Exception
*/
protected static WorkspaceFolder init(String user) throws Exception{
try {
HomeManagerFactory factory = HomeLibrary.getHomeManagerFactory();
HomeManager manager = factory.getHomeManager();
User userWS = manager.createUser(user);
Home home = manager.getHome(userWS);
Workspace ws = home.getWorkspace();
WorkspaceFolder root = ws.getRoot();
return root;
} catch (Exception e){
logger.error("init excepiton:{}",e);
throw e;
}
}
/**
* Create Folder into workspace
* @param user
* @param parentId folder parent
* @param folderName
* @param folderDescription
* @return
* @throws Exception
*/
protected static WorkspaceFolder createFolder(String user, String parentId, String folderName, String folderDescription) throws Exception
{
Workspace ws;
try {
ws = HomeLibrary.getUserWorkspace(user);
WorkspaceFolder projectTargetFolder;
if (!ws.exists(folderName, parentId))
projectTargetFolder = ws.createFolder(folderName, folderDescription, parentId);
else
projectTargetFolder = (WorkspaceFolder) ws.find(folderName, parentId);
return projectTargetFolder;
} catch (Exception e){
logger.error("createFolder:{}",e);
throw e;
}
}
/**
* Save a Item on workspace
* @param user of workspace
* @param inputStream
* @param name
* @param description
* @param folderId
* @throws Exception
*/
protected static void saveItemOnWorkspace(String user, InputStream inputStream,String name, String description,String folderId) throws Exception
{
Workspace ws;
try {
ws = HomeLibrary.getUserWorkspace(user);
WorkspaceItem workSpaceItem = ws.getItem(folderId);
if (!workSpaceItem.isFolder()) {
throw new Exception(
"Destination is not a folder!");
}
WorkspaceItem projectItem = ws.find(name, folderId);
logger.trace("Save Item on WorkSpace Folder:{}, name:{},description:{}, folderID:{}",projectItem,name,description,folderId);
if (projectItem == null) {
ws.createExternalFile(name, description, null, inputStream, folderId);
}
else{
ws.remove(name, folderId);
Thread.sleep(2000);
ws.createExternalFile(name, description, null, inputStream, folderId);
}
return;
} catch (Exception e) {
logger.error("saveItemOnWorkspace:{}",e);
throw e;
}
}
}

View File

@ -1,241 +0,0 @@
package org.gcube.accounting.aggregator.recovery;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.gcube.accounting.aggregator.configuration.ConfigurationServiceEndpoint;
import org.gcube.accounting.aggregator.configuration.Constant;
import org.gcube.accounting.aggregator.persistence.AggregatorPersistenceBackendQueryConfiguration;
import org.gcube.accounting.aggregator.plugin.AccountingAggregatorPlugin;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.couchbase.client.java.Bucket;
import com.couchbase.client.java.Cluster;
import com.couchbase.client.java.PersistTo;
import com.couchbase.client.java.document.JsonDocument;
import com.couchbase.client.java.document.json.JsonObject;
import com.couchbase.client.java.error.DocumentDoesNotExistException;
import com.google.gson.Gson;
import com.google.gson.internal.LinkedTreeMap;
/**
* @author Alessandro Pieve (ISTI - CNR)
*
*/
public class RecoveryRecord {
private static Logger logger = LoggerFactory.getLogger(AccountingAggregatorPlugin.class);
protected static Cluster cluster = null;
/* One Bucket for type*/
protected static Bucket bucketStorage;
protected static String bucketNameStorage;
protected static Bucket bucketService;
protected static String bucketNameService;
protected static Bucket bucketPortlet;
protected static String bucketNamePortlet;
protected static Bucket bucketJob;
protected static String bucketNameJob;
protected static Bucket bucketTask;
protected static String bucketNameTask;
private static Map <String, Bucket> connectionMap;
/**
* {@inheritDoc}
*/
protected static void prepareConnection(Cluster cluster,AggregatorPersistenceBackendQueryConfiguration configuration) throws Exception {
String password = configuration.getProperty(ConfigurationServiceEndpoint.PASSWORD_PROPERTY_KEY);
try {
bucketNameStorage = configuration.getProperty(ConfigurationServiceEndpoint.BUCKET_STORAGE_NAME_PROPERTY_KEY);
bucketNameService = configuration.getProperty(ConfigurationServiceEndpoint.BUCKET_SERVICE_NAME_PROPERTY_KEY);
bucketNameJob = configuration.getProperty(ConfigurationServiceEndpoint.BUCKET_JOB_NAME_PROPERTY_KEY);
bucketNamePortlet = configuration.getProperty(ConfigurationServiceEndpoint.BUCKET_PORTLET_NAME_PROPERTY_KEY);
bucketNameTask = configuration.getProperty(ConfigurationServiceEndpoint.BUCKET_TASK_NAME_PROPERTY_KEY);
connectionMap = new HashMap<String, Bucket>();
bucketStorage = cluster.openBucket( bucketNameStorage,password);
connectionMap.put(ConfigurationServiceEndpoint.BUCKET_STORAGE_TYPE, bucketStorage);
bucketService = cluster.openBucket( bucketNameService,password);
connectionMap.put(ConfigurationServiceEndpoint.BUCKET_SERVICE_TYPE, bucketService);
bucketJob = cluster.openBucket( bucketNameJob,password);
connectionMap.put(ConfigurationServiceEndpoint.BUCKET_JOB_TYPE, bucketJob);
bucketPortlet = cluster.openBucket( bucketNamePortlet,password);
connectionMap.put(ConfigurationServiceEndpoint.BUCKET_PORTLET_TYPE, bucketPortlet);
bucketTask = cluster.openBucket( bucketNameTask,password);
connectionMap.put(ConfigurationServiceEndpoint.BUCKET_TASK_TYPE, bucketTask);
} catch(Exception e) {
logger.error("Bucket connection error");
throw e;
}
}
public static void searchFile(Cluster cluster,AggregatorPersistenceBackendQueryConfiguration configuration) throws Exception{
try{
prepareConnection(cluster,configuration);
File folderDelete = new File(Constant.PATH_DIR_BACKUP_DELETE);
if (folderDelete.exists() && folderDelete.isDirectory()) {
logger.trace("Start Recovery delete");
File[] listOfFilesDelete = folderDelete.listFiles();
for (int i = 0; i < listOfFilesDelete.length; i++) {
if (listOfFilesDelete[i].isFile()){
Boolean result=ElaborateDeleteFile(Constant.PATH_DIR_BACKUP_DELETE+"/"+listOfFilesDelete[i].getName());
if (result){
logger.trace("Recovery delete complete.. Delete a file");
File file = new File(Constant.PATH_DIR_BACKUP_DELETE+"/"+listOfFilesDelete[i].getName());
file.delete();
}
}
}
}
else
logger.trace("not found files delete");
//search for insert file
File folderInsert= new File(Constant.PATH_DIR_BACKUP_INSERT);
if (folderInsert.exists() && folderInsert.isDirectory()) {
logger.trace("Start Recovery insert");
File[] listOfFilesInsert = folderInsert.listFiles();
for (int i = 0; i < listOfFilesInsert.length; i++) {
if (listOfFilesInsert[i].isFile()) {
Boolean result=ElaborateInsertFile(Constant.PATH_DIR_BACKUP_INSERT+"/"+listOfFilesInsert[i].getName());
if (result){
logger.trace("Recovery insert complete.. Delete a file");
File file= new File(Constant.PATH_DIR_BACKUP_INSERT+"/"+listOfFilesInsert[i].getName());
file.delete();
}
}
}
}
else
logger.trace("not found files insert");
}
catch(Exception e){
logger.error("Error for list file:{}",e);
}
}
public static boolean ElaborateDeleteFile(String nameFile) throws IOException{
HashMap<String, Object> mapper = new Gson().fromJson(new FileReader(new File(nameFile)), HashMap.class);
List<LinkedTreeMap<String, Object>> docs = (List<LinkedTreeMap<String, Object>>) mapper.get("docs");
String recordType="";
String usageRecordType="";
for (LinkedTreeMap<String, Object> doc: docs){
String identifier=(String) doc.get("id");
try{
JsonObject accounting = JsonObject.empty();
for (String key : doc.keySet()){
accounting.put(key, doc.get(key));
}
if (accounting.containsKey("usageRecordType"))
usageRecordType=(String) doc.get("usageRecordType");
else
usageRecordType="";
if (accounting.containsKey("recordType"))
recordType=(String) doc.get("recordType");
else
recordType="";
if ((recordType.equals("ServiceUsageRecord")) || (usageRecordType.equals("ServiceUsageRecord")))
bucketService.remove(identifier,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
if ((recordType.equals("StorageUsageRecord")) || (usageRecordType.equals("StorageUsageRecord")))
bucketStorage.remove(identifier,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
if ((recordType.equals("JobUsageRecord")) || (usageRecordType.equals("JobUsageRecord")))
bucketJob.remove(identifier,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
if ((recordType.equals("TaskUsageRecord")) || (usageRecordType.equals("TaskUsageRecord")))
bucketTask.remove(identifier,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
if ((recordType.equals("PortletUsageRecord")) || (usageRecordType.equals("PortletUsageRecord")))
bucketPortlet.remove(identifier,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
}catch(DocumentDoesNotExistException d){
logger.trace("Document id:{} not Exist",identifier);
}
catch(Exception e){
logger.error("Problem with recovery file and delete record excepiton:{}",e.getLocalizedMessage());
throw e;
}
}
return true;
}
public static boolean ElaborateInsertFile(String nameFile)throws IOException{
HashMap<String, Object> mapper = new Gson().fromJson(new FileReader(new File(nameFile)), HashMap.class);
List<LinkedTreeMap<String, Object>> docs = (List<LinkedTreeMap<String, Object>>) mapper.get("docs");
String recordType="";
String usageRecordType="";
for (LinkedTreeMap<String, Object> doc: docs){
String identifier=(String) doc.get("id");
try{
JsonObject accounting = JsonObject.empty();
for (String key : doc.keySet()){
accounting.put(key, doc.get(key));
}
if (accounting.containsKey("usageRecordType"))
usageRecordType=(String) doc.get("usageRecordType");
else
usageRecordType="";
if (accounting.containsKey("recordType"))
recordType=(String) doc.get("recordType");
else
recordType="";
if (usageRecordType==null)
usageRecordType="";
if (recordType==null)
recordType="";
JsonDocument response = null;
if ((recordType.equals("ServiceUsageRecord")) || (usageRecordType.equals("ServiceUsageRecord"))){
JsonDocument document = JsonDocument.create(identifier, accounting);
response = bucketService.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
}
if ((recordType.equals("StorageUsageRecord")) || (usageRecordType.equals("StorageUsageRecord"))){
JsonDocument document = JsonDocument.create(identifier, accounting);
response = bucketStorage.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
}
if ((recordType.equals("JobUsageRecord")) || (usageRecordType.equals("JobUsageRecord"))){
JsonDocument document = JsonDocument.create(identifier, accounting);
response = bucketJob.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
}
if ((recordType.equals("TaskUsageRecord")) || (usageRecordType.equals("TaskUsageRecord"))){
JsonDocument document = JsonDocument.create(identifier, accounting);
response = bucketTask.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
}
if ((recordType.equals("PortletUsageRecord")) || (usageRecordType.equals("PortletUsageRecord"))){
JsonDocument document = JsonDocument.create(identifier, accounting);
response = bucketPortlet.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
}
logger.trace("Elaborate Insert fileJsondocument response:{}",response);
}catch(Exception e){
logger.error("Problem with recovery file and insert record excepiton:{}",e.getLocalizedMessage());
throw e;
}
}
return true;
}
}
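For clarity, the recovery methods above expect each backup file to be a JSON object with a "docs" array, where every entry carries an "id" plus a "recordType" (or legacy "usageRecordType") field used to route it to the right bucket. Below is a minimal sketch of writing such a file with Gson; the class, file name and field values are illustrative, not taken from the plugin:

import java.io.FileWriter;
import java.io.IOException;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;

import com.google.gson.Gson;

// Hypothetical helper: produces a file in the shape consumed by
// ElaborateInsertFile/ElaborateDeleteFile ({"docs":[{...}, ...]}).
public class BackupFileSketch {
    public static void main(String[] args) throws IOException {
        Map<String, Object> doc = new HashMap<>();
        doc.put("id", "example-record-id");           // document identifier (illustrative)
        doc.put("recordType", "ServiceUsageRecord");  // routes the record to bucketService
        doc.put("scope", "/gcube/devNext");           // any other fields are copied verbatim

        Map<String, Object> backup = new HashMap<>();
        backup.put("docs", Arrays.asList(doc));

        try (FileWriter writer = new FileWriter("insert-backup-example.json")) {
            new Gson().toJson(backup, writer);
        }
    }
}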

View File

@ -0,0 +1,50 @@
package org.gcube.accounting.aggregator.status;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public enum AggregationState {
/**
* The Aggregation has been started
*/
STARTED,
/**
* Original Records have been aggregated.
* Original Records and Aggregated ones have been saved to local files.
*/
AGGREGATED,
/**
* Original Records have been deleted from the DB.
*/
DELETED,
/**
* Aggregated Records have been saved to the DB and the backup file has been deleted.
*/
ADDED,
/**
* The backup file of Original Records has been saved to the Workspace and the local file has been deleted.
*/
COMPLETED;
private static Logger logger = LoggerFactory.getLogger(AggregationState.class);
public static boolean canContinue(AggregationState effective, AggregationState desired) throws Exception{
if(effective == desired){
return true;
}else{
if(effective.ordinal() > desired.ordinal()){
logger.debug("{} is {}. The already reached value to continue is {}. The next step has been already done. It can be skipped.",
AggregationState.class.getSimpleName(), effective.name(), desired.name());
return false;
}else{
String error = String.format("%s is %s which is lower than the required value to continue (%s). This is really strange and should not occur. Please contact the administrator.",
AggregationState.class.getSimpleName(), effective.name(), desired.name());
throw new Exception(error);
}
}
}
}
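A hedged usage sketch for canContinue: given an AggregationStatus instance (see the class added later in this commit), a pipeline step checks the current state against the checkpoint it requires and either runs, skips, or aborts. The variable aggregationStatus and the chosen step are illustrative:

AggregationState effective = aggregationStatus.getAggregationState();
try {
    if (AggregationState.canContinue(effective, AggregationState.AGGREGATED)) {
        // effective == AGGREGATED: the ADD step still has to be performed
    } else {
        // effective is past AGGREGATED (e.g. ADDED): step already done, skip it
    }
} catch (Exception e) {
    // effective is before AGGREGATED (e.g. STARTED): inconsistent status, abort
}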

View File

@ -0,0 +1,49 @@
package org.gcube.accounting.aggregator.status;
import java.util.Calendar;
import org.gcube.accounting.aggregator.utility.Constant;
import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public class AggregationStateEvent {
@JsonProperty
@JsonFormat(shape= JsonFormat.Shape.STRING)
protected AggregationState aggregationState;
@JsonProperty
@JsonFormat(shape= JsonFormat.Shape.STRING, pattern = Constant.DATETIME_PATTERN)
protected Calendar startTime;
@JsonProperty
@JsonFormat(shape= JsonFormat.Shape.STRING, pattern = Constant.DATETIME_PATTERN)
protected Calendar endTime;
// Needed for Jackson unmarshalling
@SuppressWarnings("unused")
private AggregationStateEvent(){}
public AggregationStateEvent(AggregationState aggregationState, Calendar startTime, Calendar endTime) {
super();
this.aggregationState = aggregationState;
this.startTime = startTime;
this.endTime = endTime;
}
public AggregationState getAggregationState() {
return aggregationState;
}
public Calendar getStartTime() {
return startTime;
}
public Calendar getEndTime() {
return endTime;
}
}

View File

@ -0,0 +1,148 @@
package org.gcube.accounting.aggregator.status;
import java.util.ArrayList;
import java.util.Calendar;
import java.util.Date;
import java.util.List;
import java.util.UUID;
import org.gcube.accounting.aggregator.aggregation.AggregationInfo;
import org.gcube.accounting.aggregator.aggregation.AggregationType;
import org.gcube.accounting.aggregator.persistence.CouchBaseConnector;
import org.gcube.accounting.aggregator.utility.Constant;
import org.gcube.accounting.aggregator.utility.Utility;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.fasterxml.jackson.annotation.JsonFormat;
import com.fasterxml.jackson.annotation.JsonProperty;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public class AggregationStatus {
private static Logger logger = LoggerFactory.getLogger(AggregationStatus.class);
protected AggregationInfo aggregationInfo;
@JsonProperty
protected UUID uuid;
@JsonProperty
protected int originalRecordsNumber;
@JsonProperty
protected int aggregatedRecordsNumber;
@JsonProperty
protected int recoveredRecordNumber;
@JsonProperty
protected float percentage;
@JsonProperty(required=false)
protected String context;
// Last observed status
@JsonFormat(shape= JsonFormat.Shape.STRING)
@JsonProperty
protected AggregationState aggregationState;
@JsonProperty
@JsonFormat(shape= JsonFormat.Shape.STRING, pattern = Constant.DATETIME_PATTERN)
protected Calendar lastUpdateTime;
// List of Status Event Changes
@JsonProperty
protected List<AggregationStateEvent> aggregationStateEvents;
// Needed for Jackson unmarshalling
@SuppressWarnings("unused")
private AggregationStatus(){}
public static AggregationStatus getLast(String recordType, AggregationType aggregationType) throws Exception{
return CouchBaseConnector.getLast(recordType, aggregationType);
}
public static List<AggregationStatus> getUnterminated(String recordType, AggregationType aggregationType) throws Exception{
return CouchBaseConnector.getUnterminated(recordType, aggregationType);
}
public static AggregationStatus getAggregationStatus(String recordType, AggregationType aggregationType, Date aggregationStartDate) throws Exception{
return CouchBaseConnector.getAggregationStatus(recordType, aggregationType, aggregationStartDate);
}
public AggregationStatus(AggregationInfo aggregationInfo) throws Exception {
this.aggregationInfo = aggregationInfo;
this.aggregationStateEvents = new ArrayList<>();
this.uuid = UUID.randomUUID();
}
public AggregationInfo getAggregationInfo() {
return aggregationInfo;
}
public synchronized void setState(AggregationState aggregationState, Calendar startTime, boolean sync) throws Exception {
Calendar endTime = Utility.getUTCCalendarInstance();
logger.info("Going to Set {} for {} to {}. StartTime {}, EndTime {} [Duration : {}]",
AggregationState.class.getSimpleName(),
aggregationInfo, aggregationState.name(),
Constant.DEFAULT_DATE_FORMAT.format(startTime.getTime()),
Constant.DEFAULT_DATE_FORMAT.format(endTime.getTime()),
Utility.getHumanReadableDuration(endTime.getTimeInMillis() - startTime.getTimeInMillis()));
this.aggregationState = aggregationState;
this.lastUpdateTime = endTime;
AggregationStateEvent aggregationStatusEvent = new AggregationStateEvent(aggregationState, startTime, endTime);
aggregationStateEvents.add(aggregationStatusEvent);
if(sync){
CouchBaseConnector.upsertAggregationStatus(this);
}
}
public void setRecordNumbers(int originalRecordsNumber, int aggregatedRecordsNumber) {
this.recoveredRecordNumber = originalRecordsNumber - aggregatedRecordsNumber;
this.percentage = originalRecordsNumber != 0 ? (100f * recoveredRecordNumber) / originalRecordsNumber : 0;
logger.info("Original records are {}. Aggregated records are {}. Difference {}. We recovered {}% of documents",
originalRecordsNumber, aggregatedRecordsNumber, recoveredRecordNumber, percentage);
this.originalRecordsNumber = originalRecordsNumber;
this.aggregatedRecordsNumber = aggregatedRecordsNumber;
}
public UUID getUUID() {
return uuid;
}
public void setAggregation(AggregationInfo aggregation) {
this.aggregationInfo = aggregation;
}
public int getOriginalRecordsNumber() {
return originalRecordsNumber;
}
public int getAggregatedRecordsNumber() {
return aggregatedRecordsNumber;
}
public AggregationState getAggregationState() {
return aggregationState;
}
public List<AggregationStateEvent> getAggregationStateEvents() {
return aggregationStateEvents;
}
public String getContext() {
return context;
}
public void setContext(String context) {
this.context = context;
}
}

View File

@ -0,0 +1,35 @@
package org.gcube.accounting.aggregator.utility;
import java.io.File;
import java.text.DateFormat;
import java.util.Calendar;
/**
* @author Alessandro Pieve (ISTI - CNR)
* @author Luca Frosini (ISTI - CNR)
*/
public class Constant {
public static final int NUM_RETRY = 6;
public static final String HOME_SYSTEM_PROPERTY = "user.home";
public static final File ROOT_DIRECTORY;
public static final String DATETIME_PATTERN = "yyyy-MM-dd HH:mm:ss.SSS Z";
public static final DateFormat DEFAULT_DATE_FORMAT;
public static final int CALENDAR_FIELD_TO_SUBSTRACT_TO_CONSIDER_UNTERMINATED;
public static final int UNIT_TO_SUBSTRACT_TO_CONSIDER_UNTERMINATED;
static {
String rootDirectoryPath = System.getProperty(Constant.HOME_SYSTEM_PROPERTY);
ROOT_DIRECTORY = new File(rootDirectoryPath);
DEFAULT_DATE_FORMAT = Utility.getUTCDateFormat(DATETIME_PATTERN);
CALENDAR_FIELD_TO_SUBSTRACT_TO_CONSIDER_UNTERMINATED = Calendar.HOUR_OF_DAY;
UNIT_TO_SUBSTRACT_TO_CONSIDER_UNTERMINATED = 8;
}
}
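The two *_TO_CONSIDER_UNTERMINATED constants express a sliding window of 8 hours. A minimal sketch of how the cut-off could be computed, assuming this is how CouchBaseConnector.getUnterminated consumes them:

// Sketch: an aggregation whose lastUpdateTime precedes this threshold and
// whose state is not COMPLETED would be considered unterminated.
Calendar threshold = Utility.getUTCCalendarInstance();
threshold.add(Constant.CALENDAR_FIELD_TO_SUBSTRACT_TO_CONSIDER_UNTERMINATED,
        -Constant.UNIT_TO_SUBSTRACT_TO_CONSIDER_UNTERMINATED);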

View File

@ -0,0 +1,168 @@
package org.gcube.accounting.aggregator.utility;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.text.DateFormat;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
import java.util.TimeZone;
import org.gcube.accounting.aggregator.aggregation.AggregationType;
import org.gcube.accounting.aggregator.plugin.AccountingAggregatorPlugin;
import org.gcube.common.authorization.client.Constants;
import org.gcube.common.authorization.library.AuthorizationEntry;
import org.gcube.common.authorization.library.provider.AuthorizationProvider;
import org.gcube.common.authorization.library.provider.ClientInfo;
import org.gcube.common.authorization.library.provider.SecurityTokenProvider;
import org.gcube.common.authorization.library.utils.Caller;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author Luca Frosini (ISTI - CNR)
*/
public class Utility {
private static Logger logger = LoggerFactory.getLogger(Utility.class);
public static String getCurrentContext() throws Exception {
String token = SecurityTokenProvider.instance.get();
return Constants.authorizationService().get(token).getContext();
}
public static String getHumanReadableDuration(long duration){
return String.format("%d hours %02d minutes %02d seconds %03d milliseconds",
duration/(1000*60*60),
(duration/(1000*60))%60,
(duration/1000)%60,
(duration%1000));
}
public static void printLine(File file, String line) throws Exception {
synchronized (file) {
try (FileWriter fw = new FileWriter(file, true);
BufferedWriter bw = new BufferedWriter(fw);
PrintWriter out = new PrintWriter(bw)) {
out.println(line);
out.flush();
}
}
}
public static TimeZone UTC_TIMEZONE = TimeZone.getTimeZone("UTC");
public static DateFormat getUTCDateFormat(String pattern){
DateFormat dateFormat = new SimpleDateFormat(pattern);
dateFormat.setTimeZone(UTC_TIMEZONE);
return dateFormat;
}
public static Calendar getUTCCalendarInstance(){
return Calendar.getInstance(UTC_TIMEZONE);
}
private static final String LOCALE_FORMAT_PATTERN = "Z";
private static final DateFormat LOCALE_DATE_FORMAT;
static {
LOCALE_DATE_FORMAT = new SimpleDateFormat(LOCALE_FORMAT_PATTERN);
}
public static String getPersistTimeParameter(int hour, int minute) {
// Used from Clients. Not in UTC but in locale
Calendar persistEndTime = Calendar.getInstance();
persistEndTime.set(Calendar.HOUR_OF_DAY, hour);
persistEndTime.set(Calendar.MINUTE, minute);
String persistEndTimeParameter = AccountingAggregatorPlugin.PERSIST_TIME_DATE_FORMAT
.format(persistEndTime.getTime());
return persistEndTimeParameter;
}
public static Date getPersistTimeDate(String persistTimeString) throws ParseException{
Date date = new Date();
persistTimeString = AccountingAggregatorPlugin.AGGREGATION_START_DATE_DATE_FORMAT.format(
date) + " " + persistTimeString + " " + LOCALE_DATE_FORMAT.format(date);
// Local Date Format (not UTC)
DateFormat dateFormat = new SimpleDateFormat(
AccountingAggregatorPlugin.AGGREGATION_START_DATE_DATE_FORMAT_PATTERN + " "
+ AccountingAggregatorPlugin.LOCAL_TIME_DATE_FORMAT_PATTERN);
Date persistTime = dateFormat.parse(persistTimeString);
return persistTime;
}
public static boolean isTimeElapsed(Calendar now, Date date) throws ParseException {
try {
boolean elapsed = now.getTime().after(date);
logger.info("{} is {}elapsed.",
AccountingAggregatorPlugin.LOCAL_TIME_DATE_FORMAT.format(date),elapsed? "" : "NOT ");
return elapsed;
}catch (Exception e) {
logger.error("Unable to check if " + date.toString() + " is elapsed", e);
throw e;
}
}
public static Calendar getAggregationStartCalendar(int year, int month, int day){
Calendar aggregationStartCalendar = getUTCCalendarInstance();
aggregationStartCalendar.set(Calendar.YEAR, year);
aggregationStartCalendar.set(Calendar.MONTH, month);
aggregationStartCalendar.set(Calendar.DAY_OF_MONTH, day);
aggregationStartCalendar.set(Calendar.HOUR_OF_DAY, 0);
aggregationStartCalendar.set(Calendar.MINUTE, 0);
aggregationStartCalendar.set(Calendar.SECOND, 0);
aggregationStartCalendar.set(Calendar.MILLISECOND, 0);
logger.debug("{}", Constant.DEFAULT_DATE_FORMAT.format(aggregationStartCalendar.getTime()));
return aggregationStartCalendar;
}
public static Date getEndDateFromStartDate(AggregationType aggregationType, Date aggregationStartDate, int offset) {
Calendar aggregationEndDate = getUTCCalendarInstance();
aggregationEndDate.setTimeInMillis(aggregationStartDate.getTime());
aggregationEndDate.add(aggregationType.getCalendarField(), offset);
return aggregationEndDate.getTime();
}
protected static ClientInfo getClientInfo() throws Exception {
Caller caller = AuthorizationProvider.instance.get();
if(caller!=null){
return caller.getClient();
}else{
String token = SecurityTokenProvider.instance.get();
AuthorizationEntry authorizationEntry = Constants.authorizationService().get(token);
return authorizationEntry.getClientInfo();
}
}
public static String getUsername() throws Exception{
try {
ClientInfo clientInfo = getClientInfo();
String clientId = clientInfo.getId();
if (clientId != null && clientId.compareTo("") != 0) {
return clientId;
}
throw new Exception("Username null or empty");
} catch (Exception e) {
logger.error("Unable to retrieve user.");
throw new Exception("Unable to retrieve user.", e);
}
}
}

View File

@ -0,0 +1,337 @@
package org.gcube.accounting.aggregator.workspace;
import java.io.BufferedReader;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StringWriter;
import java.io.UnsupportedEncodingException;
import java.net.HttpURLConnection;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLEncoder;
import java.util.Map;
import org.gcube.common.authorization.library.provider.SecurityTokenProvider;
import org.gcube.common.scope.api.ScopeProvider;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class HTTPCall {
private static final Logger logger = LoggerFactory
.getLogger(HTTPCall.class);
public static final String CONTENT_TYPE_APPLICATION_JSON_CHARSET_UTF_8 = "application/json;charset=UTF-8";
public static final String CONTENT_TYPE_APPLICATION_XML_CHARSET_UTF_8 = "application/xml;charset=UTF-8";
public static final String CONTENT_TYPE_TEXT_PLAIN = "text/plain;charset=UTF-8";
public enum HTTPMETHOD {
HEAD, GET, POST, PUT, DELETE;
@Override
public String toString() {
return this.name();
}
}
public static final String PATH_SEPARATOR = "/";
public static final String PARAM_STARTER = "?";
public static final String PARAM_EQUALS = "=";
public static final String PARAM_SEPARATOR = "&";
public static final String UTF8 = "UTF-8";
protected final String address;
protected final String userAgent;
public HTTPCall(String address, String userAgent) {
this.address = address;
this.userAgent = userAgent;
}
protected String getParametersDataString(
Map<String, String> parameters)
throws UnsupportedEncodingException {
if (parameters == null) {
return null;
}
StringBuilder result = new StringBuilder();
boolean first = true;
for (String key : parameters.keySet()) {
if (first) {
first = false;
} else {
result.append(PARAM_SEPARATOR);
}
result.append(URLEncoder.encode(key, UTF8));
result.append(PARAM_EQUALS);
result.append(URLEncoder.encode(parameters.get(key), UTF8));
}
return result.toString();
}
protected URL getURL(String address, String path, String urlParameters) throws MalformedURLException {
StringWriter stringWriter = new StringWriter();
stringWriter.append(address);
if(address.endsWith(PATH_SEPARATOR)){
if(path.startsWith(PATH_SEPARATOR)){
path = path.substring(1);
}
}else{
if(path.compareTo("")!=0 && !path.startsWith(PATH_SEPARATOR)){
stringWriter.append(PATH_SEPARATOR);
}
}
stringWriter.append(path);
if(urlParameters!=null){
stringWriter.append(PARAM_STARTER);
stringWriter.append(urlParameters);
}
return getURL(stringWriter.toString());
}
protected URL getURL(String urlString) throws MalformedURLException{
URL url = new URL(urlString);
if(url.getProtocol().compareTo("https")==0){
url = new URL(url.getProtocol(), url.getHost(), url.getDefaultPort(), url.getFile());
}
return url;
}
protected HttpURLConnection getConnection(String path, String urlParameters, HTTPMETHOD method, String body, String contentType)
throws Exception {
URL url = getURL(address, path, urlParameters);
return getConnection(url, method, body, contentType);
}
protected HttpURLConnection getConnection(String path, String urlParameters, HTTPMETHOD method, InputStream inputStream, String contentType)
throws Exception {
URL url = getURL(address, path, urlParameters);
return getConnection(url, method, inputStream, contentType);
}
protected HttpURLConnection getConnection(URL url, HTTPMETHOD method, InputStream inputStream, String contentType) throws Exception {
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
if (SecurityTokenProvider.instance.get() == null) {
if (ScopeProvider.instance.get() == null) {
throw new RuntimeException(
"Null Token and Scope. Please set your token first.");
}
connection.setRequestProperty("gcube-scope",
ScopeProvider.instance.get());
} else {
connection.setRequestProperty(org.gcube.common.authorization.client.Constants.TOKEN_HEADER_ENTRY,
SecurityTokenProvider.instance.get());
}
connection.setDoOutput(true);
connection.setRequestProperty("Content-type", contentType);
connection.setRequestProperty("User-Agent", userAgent);
connection.setRequestMethod(method.toString());
if (inputStream != null
&& (method == HTTPMETHOD.POST || method == HTTPMETHOD.PUT)) {
DataOutputStream wr = new DataOutputStream(
connection.getOutputStream());
byte[] buffer = new byte[1024];
int len;
while ((len = inputStream.read(buffer)) > 0) {
wr.write(buffer, 0, len);
}
wr.flush();
wr.close();
}
int responseCode = connection.getResponseCode();
String responseMessage = connection.getResponseMessage();
logger.trace("{} {} : {} - {}",
method, connection.getURL(), responseCode, responseMessage);
if (responseCode == HttpURLConnection.HTTP_MOVED_TEMP ||
responseCode == HttpURLConnection.HTTP_MOVED_PERM ||
responseCode == HttpURLConnection.HTTP_SEE_OTHER) {
URL redirectURL = getURL(connection.getHeaderField("Location"));
logger.trace("{} is going to be redirect to {}", url.toString(), redirectURL.toString());
connection = getConnection(redirectURL, method, inputStream, contentType);
}
return connection;
}
protected HttpURLConnection getConnection(URL url, HTTPMETHOD method, String body, String contentType) throws Exception {
HttpURLConnection connection = (HttpURLConnection) url.openConnection();
if (SecurityTokenProvider.instance.get() == null) {
if (ScopeProvider.instance.get() == null) {
throw new RuntimeException(
"Null Token and Scope. Please set your token first.");
}
connection.setRequestProperty("gcube-scope",
ScopeProvider.instance.get());
} else {
connection.setRequestProperty(org.gcube.common.authorization.client.Constants.TOKEN_HEADER_ENTRY,
SecurityTokenProvider.instance.get());
}
connection.setDoOutput(true);
connection.setRequestProperty("Content-type", contentType);
connection.setRequestProperty("User-Agent", userAgent);
connection.setRequestMethod(method.toString());
if (body != null
&& (method == HTTPMETHOD.POST || method == HTTPMETHOD.PUT)) {
DataOutputStream wr = new DataOutputStream(
connection.getOutputStream());
wr.writeBytes(body);
wr.flush();
wr.close();
}
int responseCode = connection.getResponseCode();
String responseMessage = connection.getResponseMessage();
logger.trace("{} {} : {} - {}",
method, connection.getURL(), responseCode, responseMessage);
if (responseCode == HttpURLConnection.HTTP_MOVED_TEMP ||
responseCode == HttpURLConnection.HTTP_MOVED_PERM ||
responseCode == HttpURLConnection.HTTP_SEE_OTHER) {
URL redirectURL = getURL(connection.getHeaderField("Location"));
logger.trace("{} is going to be redirect to {}", url.toString(), redirectURL.toString());
connection = getConnection(redirectURL, method, body, contentType);
}
return connection;
}
protected StringBuilder getStringBuilder(InputStream inputStream) throws IOException{
StringBuilder result = new StringBuilder();
try (BufferedReader reader = new BufferedReader(
new InputStreamReader(inputStream))) {
String line;
while ((line = reader.readLine()) != null) {
result.append(line);
}
}
return result;
}
public void call(String path, HTTPMETHOD method, Map<String, String> parameters, String contentType) throws Exception {
call(path, method, parameters, null, contentType);
}
public void call(String path, HTTPMETHOD method, String body, String contentType) throws Exception {
call(path, method, null, body, contentType);
}
protected void call(String path, HTTPMETHOD method, Map<String, String> parameters, String body, String contentType) throws Exception {
String urlParameters = getParametersDataString(parameters);
HttpURLConnection connection = getConnection(path, urlParameters, method, body, contentType);
int responseCode = connection.getResponseCode();
String responseMessage = connection.getResponseMessage();
logger.info("{} {} : {} - {}",
method, connection.getURL(), responseCode, responseMessage);
if(method == HTTPMETHOD.HEAD){
if(responseCode == HttpURLConnection.HTTP_NO_CONTENT){
throw new Exception(responseMessage);
}
if(responseCode == HttpURLConnection.HTTP_NOT_FOUND){
throw new Exception(responseMessage);
}
if(responseCode == HttpURLConnection.HTTP_FORBIDDEN){
throw new Exception(responseMessage);
}
}
if (responseCode >= HttpURLConnection.HTTP_BAD_REQUEST) {
InputStream inputStream = connection.getErrorStream();
StringBuilder result = getStringBuilder(inputStream);
String res = result.toString();
throw new Exception(res);
}
StringBuilder result = getStringBuilder(connection.getInputStream());
String res = result.toString();
logger.trace("Server returned content : {}", res);
connection.disconnect();
}
public void call(String path, HTTPMETHOD method, InputStream inputStream, Map<String, String> parameters, String contentType) throws Exception {
String urlParameters = getParametersDataString(parameters);
HttpURLConnection connection = getConnection(path, urlParameters, method, inputStream, contentType);
int responseCode = connection.getResponseCode();
String responseMessage = connection.getResponseMessage();
logger.info("{} {} : {} - {}",
method, connection.getURL(), responseCode, responseMessage);
if(method == HTTPMETHOD.HEAD){
if(responseCode == HttpURLConnection.HTTP_NO_CONTENT){
throw new Exception(responseMessage);
}
if(responseCode == HttpURLConnection.HTTP_NOT_FOUND){
throw new Exception(responseMessage);
}
if(responseCode == HttpURLConnection.HTTP_FORBIDDEN){
throw new Exception(responseMessage);
}
}
if (responseCode >= HttpURLConnection.HTTP_BAD_REQUEST) {
InputStream errorStream = connection.getErrorStream();
StringBuilder result = getStringBuilder(errorStream);
String res = result.toString();
throw new Exception(res);
}
StringBuilder result = getStringBuilder(connection.getInputStream());
String res = result.toString();
logger.trace("Server returned content : {}", res);
connection.disconnect();
}
}
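A usage sketch for HTTPCall: a POST with query-string parameters against a workspace-style REST endpoint. The address, path and parameters are placeholders, and a valid gCube token (or scope) must already be set in the corresponding provider:

HTTPCall httpCall = new HTTPCall("https://workspace.example.org/rest/CreateFolder",
        "accounting-aggregator-se-plugin");
Map<String, String> parameters = new HashMap<>();
parameters.put("name", "ExampleFolder");
parameters.put("parentPath", "/Home/example.user/Workspace");
// Throws an Exception on HTTP status >= 400; logs the response body at trace level.
httpCall.call("", HTTPCall.HTTPMETHOD.POST, parameters, HTTPCall.CONTENT_TYPE_TEXT_PLAIN);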

View File

@ -0,0 +1,192 @@
package org.gcube.accounting.aggregator.workspace;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.InputStream;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import org.gcube.accounting.aggregator.plugin.AccountingAggregatorPluginDeclaration;
import org.gcube.accounting.aggregator.utility.Utility;
import org.gcube.accounting.aggregator.workspace.HTTPCall.HTTPMETHOD;
import org.gcube.common.resources.gcore.GCoreEndpoint;
import org.gcube.common.resources.gcore.GCoreEndpoint.Profile.Endpoint;
import org.gcube.common.resources.gcore.utils.Group;
import org.gcube.resources.discovery.client.api.DiscoveryClient;
import org.gcube.resources.discovery.client.queries.api.SimpleQuery;
import org.gcube.resources.discovery.icclient.ICFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author Alessandro Pieve (ISTI - CNR)
* @author Luca Frosini (ISTI - CNR)
*/
public class WorkSpaceManagement {
public static Logger logger = LoggerFactory.getLogger(WorkSpaceManagement.class);
private static final String ZIP_SUFFIX = ".zip";
private static final String ZIP_FILE_DESCRIPTION = "Backup of original records deleted and aggregated records inserted.";
private static final String ZIP_MIMETYPE = "application/zip, application/octet-stream";
protected static final GCoreEndpoint gCoreEndpoint;
protected static final Map<String, String> restEndpointMap;
protected static final String CLASS_FORMAT = "$resource/Profile/ServiceClass/text() eq '%1s'";
protected static final String NAME_FORMAT = "$resource/Profile/ServiceName/text() eq '%1s'";
protected static final String STATUS_FORMAT = "$resource/Profile/DeploymentData/Status/text() eq 'ready'";
protected static final String SERVICE_CLASS = "DataAccess";
protected static final String SERVICE_NAME = "HomeLibraryWebapp";
public static final String USER_AGENT = AccountingAggregatorPluginDeclaration.NAME;
protected static SimpleQuery queryForHomeLibraryGCoreEndpoint(){
return ICFactory.queryFor(GCoreEndpoint.class)
.addCondition(String.format(CLASS_FORMAT, SERVICE_CLASS))
.addCondition(String.format(NAME_FORMAT, SERVICE_NAME))
.addCondition(String.format(STATUS_FORMAT))
.setResult("$resource");
}
protected static GCoreEndpoint getHomeLibraryGCoreEndpoint(){
SimpleQuery query = queryForHomeLibraryGCoreEndpoint();
DiscoveryClient<GCoreEndpoint> client = ICFactory.clientFor(GCoreEndpoint.class);
List<GCoreEndpoint> gCoreEndpoints = client.submit(query);
return gCoreEndpoints.get(0);
}
static {
gCoreEndpoint = getHomeLibraryGCoreEndpoint();
Group<Endpoint> endpoints = gCoreEndpoint.profile().endpoints();
restEndpointMap = new HashMap<>();
for(Endpoint endpoint : endpoints){
String endpointName = endpoint.name();
String endpointURI = endpoint.uri().toString();
if(endpointURI.contains("rest")){
restEndpointMap.put(endpointName, endpointURI);
}
}
}
public static void addToZipFile(ZipOutputStream zos, File file) throws Exception {
byte[] buffer = new byte[1024];
try (FileInputStream in = new FileInputStream(file)) {
ZipEntry ze = new ZipEntry(file.getName());
zos.putNextEntry(ze);
int len;
while ((len = in.read(buffer)) > 0) {
zos.write(buffer, 0, len);
}
zos.closeEntry();
}
}
private static String getZipFileName(String name) throws Exception {
String zipFileName = String.format("%s%s", name, ZIP_SUFFIX);
return zipFileName;
}
public static boolean zipAndBackupFiles(String targetFolder, String name, File... files) throws Exception {
try {
String zipFileName = getZipFileName(name);
File zipFile = new File(files[0].getParentFile(), zipFileName);
zipFile.delete();
logger.trace("Going to save {} into workspace", zipFile.getAbsolutePath());
try (FileOutputStream fos = new FileOutputStream(zipFile);
ZipOutputStream zos = new ZipOutputStream(fos)) {
for(File file : files){
addToZipFile(zos, file);
}
}
try (FileInputStream zipFileStream = new FileInputStream(zipFile)) {
WorkSpaceManagement.uploadFile(zipFileStream, zipFileName, ZIP_FILE_DESCRIPTION,
ZIP_MIMETYPE, targetFolder);
}
zipFile.delete();
return true;
} catch (Exception e) {
logger.error("Error while trying to save a backup file containg aggregated records", e);
throw e;
}
}
public static String getHome() throws Exception {
String username = Utility.getUsername();
return String.format("/Home/%s/Workspace", username);
}
/**
* Create a folder named folderName in the given parent folder of the workspace.
* If the folder already exists there is no need to recreate it, so its path is simply returned.
*
* @param parentPath
* @param folderName
* @param folderDescription
* @return the path of the (possibly pre-existing) folder
* @throws Exception
*/
public static String createFolder(String parentPath, String folderName, String folderDescription)
throws Exception {
try {
HTTPCall httpCall = new HTTPCall(restEndpointMap.get("CreateFolder"), USER_AGENT);
Map<String, String> parameters = new HashMap<>();
parameters.put("name", folderName);
parameters.put("description", folderDescription);
parameters.put("parentPath", parentPath);
httpCall.call("", HTTPMETHOD.POST, parameters, null, HTTPCall.CONTENT_TYPE_TEXT_PLAIN);
return parentPath + "/" + folderName;
} catch (Exception e) {
logger.error("Error while creating folder ", e);
throw e;
}
}
/**
* Save an item on the workspace
*
* @param inputStream content of the item
* @param name
* @param description
* @param mimeType
* @param parentPath destination folder
* @throws Exception
*/
public static void uploadFile(InputStream inputStream, String name, String description, String mimeType,
String parentPath) throws Exception {
try {
logger.trace("Going to upload file on WorkSpace name:{}, description:{}, mimetype:{}, parentPath:{}", name,
description, mimeType, parentPath);
HTTPCall httpCall = new HTTPCall(restEndpointMap.get("Upload"), USER_AGENT);
Map<String, String> parameters = new HashMap<>();
parameters.put("name", name);
parameters.put("description", description);
parameters.put("parentPath", parentPath);
httpCall.call("", HTTPMETHOD.POST, inputStream, parameters, HTTPCall.CONTENT_TYPE_TEXT_PLAIN);
} catch (Exception e) {
logger.error("Error while uploading file on WorkSpace", e);
throw e;
}
}
}
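Putting the two REST helpers together, a caller would create the target folder and then upload into it. A hedged sketch; folder names and content are illustrative:

// Sketch: create a backup folder under the user's Workspace home and upload
// a small text file into it.
String home = WorkSpaceManagement.getHome();          // e.g. /Home/<user>/Workspace
String folder = WorkSpaceManagement.createFolder(home,
        "AccountingBackup", "Folder created by the aggregator (illustrative)");
byte[] content = "example".getBytes();
WorkSpaceManagement.uploadFile(new java.io.ByteArrayInputStream(content),
        "example.txt", "illustrative upload", "text/plain", folder);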

View File

@ -1,19 +0,0 @@
<configuration>
<appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
<encoder>
<pattern>%d{HH:mm:ss.SSS} [%thread] %-5level %logger{0}: %msg%n</pattern>
</encoder>
</appender>
<logger name="org.gcube" level="DEBUG" />
<logger name="org.gcube.documentstore.records" level="WARN"/>
<logger name="org.gcube.aggregator.plugin" level="TRACE"/>
<root level="WARN">
<appender-ref ref="STDOUT" />
</root>
</configuration>

View File

@ -0,0 +1,34 @@
package org.gcube.accounting.aggregator.file;
import java.io.File;
import java.util.Calendar;
import java.util.Date;
import org.gcube.accounting.aggregator.aggregation.AggregationType;
import org.gcube.accounting.aggregator.directory.WorkSpaceDirectoryStructure;
import org.gcube.accounting.aggregator.plugin.ScopedTest;
import org.gcube.accounting.aggregator.utility.Constant;
import org.gcube.accounting.aggregator.utility.Utility;
import org.gcube.accounting.aggregator.workspace.WorkSpaceManagement;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class WorkSpaceDirectoryStructureTest extends ScopedTest {
public static Logger logger = LoggerFactory.getLogger(WorkSpaceDirectoryStructureTest.class);
@Test
public void test() throws Exception{
AggregationType aggregationType = AggregationType.YEARLY;
Date date = Utility.getAggregationStartCalendar(2015, Calendar.JANUARY, 1).getTime();
WorkSpaceDirectoryStructure workSpaceDirectoryStructure = new WorkSpaceDirectoryStructure();
String targetFolder = workSpaceDirectoryStructure.getTargetFolder(aggregationType, date);
File file = new File(Constant.ROOT_DIRECTORY, "aux.txt");
WorkSpaceManagement.zipAndBackupFiles(targetFolder, "Test", file);
}
}

View File

@ -0,0 +1,69 @@
package org.gcube.accounting.aggregator.plugin;
import java.util.Calendar;
import java.util.HashMap;
import java.util.Map;
import org.gcube.accounting.aggregator.aggregation.AggregationType;
import org.gcube.accounting.aggregator.plugin.AccountingAggregatorPlugin.ElaborationType;
import org.gcube.accounting.aggregator.utility.Utility;
import org.gcube.accounting.datamodel.usagerecords.ServiceUsageRecord;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class AccountingAggregatorPluginTest extends ScopedTest {
private static Logger logger = LoggerFactory.getLogger(AccountingAggregatorPluginTest.class);
@Test
public void testAggregation() throws Exception {
Map<String, Object> inputs = new HashMap<String, Object>();
//type aggregation
inputs.put(AccountingAggregatorPlugin.AGGREGATION_TYPE_INPUT_PARAMETER, AggregationType.DAILY.name());
inputs.put(AccountingAggregatorPlugin.ELABORATION_TYPE_INPUT_PARAMETER, ElaborationType.AGGREGATE.name());
inputs.put(AccountingAggregatorPlugin.PERSIST_START_TIME_INPUT_PARAMETER, Utility.getPersistTimeParameter(8, 0));
inputs.put(AccountingAggregatorPlugin.PERSIST_END_TIME_INPUT_PARAMETER, Utility.getPersistTimeParameter(18, 0));
inputs.put(AccountingAggregatorPlugin.RECORD_TYPE_INPUT_PARAMETER, ServiceUsageRecord.class.newInstance().getRecordType());
inputs.put(AccountingAggregatorPlugin.RESTART_FROM_LAST_AGGREGATION_DATE_INPUT_PARAMETER, false);
Calendar aggregationStartCalendar = Utility.getAggregationStartCalendar(2017, Calendar.MAY, 1);
String aggregationStartDate = AccountingAggregatorPlugin.AGGREGATION_START_DATE_DATE_FORMAT.format(aggregationStartCalendar.getTime());
logger.trace("{} : {}", AccountingAggregatorPlugin.AGGREGATION_START_DATE_INPUT_PARAMETER, aggregationStartDate);
inputs.put(AccountingAggregatorPlugin.AGGREGATION_START_DATE_INPUT_PARAMETER, aggregationStartDate);
AccountingAggregatorPlugin plugin = new AccountingAggregatorPlugin(null);
logger.debug("Going to launch {} with inputs {}", AccountingAggregatorPluginDeclaration.NAME, inputs);
plugin.launch(inputs);
}
@Test
public void testRecovery() throws Exception {
Map<String, Object> inputs = new HashMap<String, Object>();
inputs.put(AccountingAggregatorPlugin.ELABORATION_TYPE_INPUT_PARAMETER, ElaborationType.RECOVERY.name());
inputs.put(AccountingAggregatorPlugin.PERSIST_START_TIME_INPUT_PARAMETER, Utility.getPersistTimeParameter(8, 0));
inputs.put(AccountingAggregatorPlugin.PERSIST_END_TIME_INPUT_PARAMETER, Utility.getPersistTimeParameter(18, 0));
AccountingAggregatorPlugin plugin = new AccountingAggregatorPlugin(null);
logger.debug("Going to launch {} with inputs {}", AccountingAggregatorPluginDeclaration.NAME, inputs);
plugin.launch(inputs);
}
}

View File

@ -0,0 +1,128 @@
package org.gcube.accounting.aggregator.plugin;
import java.util.Calendar;
import java.util.Date;
import java.util.List;
import org.gcube.accounting.aggregator.aggregation.AggregationInfo;
import org.gcube.accounting.aggregator.aggregation.AggregationType;
import org.gcube.accounting.aggregator.persistence.CouchBaseConnector;
import org.gcube.accounting.aggregator.status.AggregationState;
import org.gcube.accounting.aggregator.status.AggregationStatus;
import org.gcube.accounting.aggregator.utility.Utility;
import org.gcube.accounting.datamodel.usagerecords.ServiceUsageRecord;
import org.gcube.documentstore.records.DSMapper;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class CouchBaseConnectorTest extends ScopedTest {
private static Logger logger = LoggerFactory.getLogger(CouchBaseConnectorTest.class);
@Test
public void getLastTest() throws Exception {
AggregationStatus aggregationStatus = CouchBaseConnector.getLast(ServiceUsageRecord.class.getSimpleName(), AggregationType.DAILY);
logger.debug("Last : {}", DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
}
@Test
public void getUnterminatedTest() throws Exception{
List<AggregationStatus> aggregationStatuses = CouchBaseConnector.getUnterminated(ServiceUsageRecord.class.getSimpleName(), AggregationType.DAILY);
for(AggregationStatus aggregationStatus : aggregationStatuses){
logger.debug("Unterminated : {}", DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
}
}
@Test
public void getAggregationStatusTest() throws Exception{
Calendar aggregationStartCalendar = Utility.getAggregationStartCalendar(2017, Calendar.JUNE, 15);
AggregationStatus aggregationStatus = CouchBaseConnector.getAggregationStatus(ServiceUsageRecord.class.getSimpleName(), AggregationType.DAILY, aggregationStartCalendar.getTime());
logger.debug("{}", DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
}
@Test
public void aggregationStatusTest() throws Exception {
int toRemove = -36;
Calendar today = Utility.getUTCCalendarInstance();
today.add(Calendar.DAY_OF_YEAR, toRemove);
String aggregationStartDateString = AccountingAggregatorPlugin.AGGREGATION_START_DATE_DATE_FORMAT.format(today.getTime());
Date aggregationStartDate = AccountingAggregatorPlugin.AGGREGATION_START_DATE_DATE_FORMAT.parse(aggregationStartDateString);
Calendar tomorrow = Utility.getUTCCalendarInstance();
tomorrow.add(Calendar.DAY_OF_YEAR, toRemove+1);
String aggregationEndDateString = AccountingAggregatorPlugin.AGGREGATION_START_DATE_DATE_FORMAT.format(tomorrow.getTime());
Date aggregationEndDate = AccountingAggregatorPlugin.AGGREGATION_START_DATE_DATE_FORMAT.parse(aggregationEndDateString);
AggregationInfo aggregation = new AggregationInfo(ServiceUsageRecord.class.newInstance().getRecordType(), AggregationType.DAILY, aggregationStartDate, aggregationEndDate);
String aggregationString = DSMapper.getObjectMapper().writeValueAsString(aggregation);
logger.debug("{} : {}", AggregationInfo.class.getSimpleName(), aggregationString);
AggregationStatus aggregationStatus = new AggregationStatus(aggregation);
aggregationStatus.setContext("TEST_CONTEXT");
logger.debug("{} : {}", AggregationStatus.class.getSimpleName(), DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
// Set to true just for one test and restore to false
boolean sync = true;
Calendar startedStart = Utility.getUTCCalendarInstance();
aggregationStatus.setState(AggregationState.STARTED, startedStart, sync);
logger.debug("{} : {}", AggregationStatus.class.getSimpleName(), DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
aggregationStatus.setRecordNumbers(100, 72);
logger.debug("{} : {}", AggregationStatus.class.getSimpleName(), DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
Calendar aggregatedStart = Utility.getUTCCalendarInstance();
aggregationStatus.setState(AggregationState.AGGREGATED, aggregatedStart, sync);
logger.debug("{} : {}", AggregationStatus.class.getSimpleName(), DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
Calendar addedStart = Utility.getUTCCalendarInstance();
aggregationStatus.setState(AggregationState.ADDED, addedStart, sync);
logger.debug("{} : {}", AggregationStatus.class.getSimpleName(), DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
Calendar deletedStart = Utility.getUTCCalendarInstance();
aggregationStatus.setState(AggregationState.DELETED, deletedStart, sync);
logger.debug("{} : {}", AggregationStatus.class.getSimpleName(), DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
Calendar completedStart = Utility.getUTCCalendarInstance();
aggregationStatus.setState(AggregationState.COMPLETED, completedStart, sync);
logger.debug("{} : {}", AggregationStatus.class.getSimpleName(), DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
}
@Test
public void createStartedElaboration() throws Exception {
Calendar start = Utility.getAggregationStartCalendar(2017, Calendar.JUNE, 15);
String aggregationStartDateString = AccountingAggregatorPlugin.AGGREGATION_START_DATE_DATE_FORMAT.format(start.getTime());
Date aggregationStartDate = AccountingAggregatorPlugin.AGGREGATION_START_DATE_DATE_FORMAT.parse(aggregationStartDateString);
Calendar end = Utility.getUTCCalendarInstance();
end.setTime(aggregationStartDate);
end.add(Calendar.DAY_OF_MONTH, 1);
String aggregationEndDateString = AccountingAggregatorPlugin.AGGREGATION_START_DATE_DATE_FORMAT.format(end.getTime());
Date aggregationEndDate = AccountingAggregatorPlugin.AGGREGATION_START_DATE_DATE_FORMAT.parse(aggregationEndDateString);
AggregationInfo aggregation = new AggregationInfo(ServiceUsageRecord.class.newInstance().getRecordType(), AggregationType.DAILY, aggregationStartDate, aggregationEndDate);
String aggregationString = DSMapper.getObjectMapper().writeValueAsString(aggregation);
logger.debug("{} : {}", AggregationInfo.class.getSimpleName(), aggregationString);
AggregationStatus aggregationStatus = new AggregationStatus(aggregation);
aggregationStatus.setContext("TEST_CONTEXT");
logger.debug("{} : {}", AggregationStatus.class.getSimpleName(), DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
// Set to true just for one test and restore to false
boolean sync = true;
Calendar startedStart = Utility.getUTCCalendarInstance();
aggregationStatus.setState(AggregationState.STARTED, startedStart, sync);
logger.debug("{} : {}", AggregationStatus.class.getSimpleName(), DSMapper.getObjectMapper().writeValueAsString(aggregationStatus));
}
}

View File

@ -0,0 +1,171 @@
package org.gcube.accounting.aggregator.plugin;
import java.text.ParseException;
import java.util.Calendar;
import java.util.Date;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.gcube.accounting.aggregator.aggregation.AggregationInfo;
import org.gcube.accounting.aggregator.aggregation.AggregationType;
import org.gcube.accounting.aggregator.elaboration.Elaborator;
import org.gcube.accounting.aggregator.status.AggregationStatus;
import org.gcube.accounting.aggregator.utility.Constant;
import org.gcube.accounting.aggregator.utility.Utility;
import org.gcube.accounting.datamodel.AggregatedUsageRecord;
import org.gcube.accounting.datamodel.UsageRecord;
import org.gcube.accounting.datamodel.aggregation.AggregatedServiceUsageRecord;
import org.gcube.accounting.datamodel.usagerecords.ServiceUsageRecord;
import org.gcube.documentstore.records.Record;
import org.gcube.documentstore.records.RecordUtility;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class MyTest {
private static Logger logger = LoggerFactory.getLogger(MyTest.class);
@Test
public void test() throws InterruptedException {
Calendar start = Utility.getUTCCalendarInstance();
logger.debug("Elaboration of Records started at {}", Constant.DEFAULT_DATE_FORMAT.format(start.getTime()));
// Thread.sleep(TimeUnit.MINUTES.toMillis(2) +
// TimeUnit.SECONDS.toMillis(2));
Thread.sleep(TimeUnit.SECONDS.toMillis(12));
Calendar end = Utility.getUTCCalendarInstance();
long duration = end.getTimeInMillis() - start.getTimeInMillis();
String durationForHuman = Utility.getHumanReadableDuration(duration);
logger.debug("Elaboration of Records ended at {}. Duration {}", Constant.DEFAULT_DATE_FORMAT.format(end.getTime()),
durationForHuman);
}
@Test
public void classesTest() throws InstantiationException, IllegalAccessException {
RecordUtility.addRecordPackage(ServiceUsageRecord.class.getPackage());
RecordUtility.addRecordPackage(AggregatedServiceUsageRecord.class.getPackage());
Map<String, Class<? extends Record>> recordClasses = RecordUtility.getRecordClassesFound();
for (String recordType : recordClasses.keySet()) {
Class<? extends Record> recordClass = recordClasses.get(recordType);
if (recordClass.newInstance() instanceof UsageRecord
&& !(recordClass.newInstance() instanceof AggregatedUsageRecord<?, ?>)) {
@SuppressWarnings("unchecked")
Class<? extends UsageRecord> usageRecordClazz = (Class<? extends UsageRecord>) recordClass;
logger.debug("Getting {} : {}", usageRecordClazz, recordType);
} else {
logger.debug("Discarding {} : {}", recordClass, recordType);
}
}
}
@Test
public void cycleWithPercentage() {
int rowToBeElaborated = 76543;
int tenPercentOfNumberOfRows = (rowToBeElaborated / 10) + 1;
int elaborated;
for (elaborated = 0; elaborated < rowToBeElaborated; elaborated++) {
if (elaborated % tenPercentOfNumberOfRows == 0) {
int elaboratedPercentage = elaborated * 100 / rowToBeElaborated;
logger.debug("Elaborated {} of {} (about {}%)", elaborated, rowToBeElaborated, elaboratedPercentage);
}
}
logger.debug("Elaborated {} of {} ({}%)", elaborated, rowToBeElaborated, 100);
}
private static final String ZIP_SUFFIX = ".zip";
@Test
public void testStringFormatter() {
String name = "filename";
int count = 1;
String formatted = String.format("%s-%02d%s", name, count, ZIP_SUFFIX);
logger.debug("{}", formatted);
}
@Test
public void testCalendarDisplayName() {
for (AggregationType aggregationType : AggregationType.values()) {
logger.info("{} Aggregation is not allowed for the last {} {}", aggregationType,
aggregationType.getNotAggregableBefore(),
aggregationType.name().toLowerCase().replace("ly", "s").replaceAll("dais", "days"));
}
}
@Test
public void elaboratorTest() throws Exception {
for (AggregationType aggregationType : AggregationType.values()) {
Calendar aggregationStartTime = Utility.getUTCCalendarInstance();
switch (aggregationType) {
case DAILY:
break;
case MONTHLY:
aggregationStartTime.set(Calendar.DAY_OF_MONTH, 1);
break;
case YEARLY:
aggregationStartTime.set(Calendar.DAY_OF_MONTH, 1);
aggregationStartTime.set(Calendar.MONTH, Calendar.JANUARY);
break;
default:
break;
}
aggregationStartTime.set(Calendar.HOUR_OF_DAY, 0);
aggregationStartTime.set(Calendar.MINUTE, 0);
aggregationStartTime.set(Calendar.SECOND, 0);
aggregationStartTime.set(Calendar.MILLISECOND, 0);
aggregationStartTime.add(aggregationType.getCalendarField(), -aggregationType.getNotAggregableBefore());
Date aggregationEndTime = Utility.getEndDateFromStartDate(aggregationType, aggregationStartTime.getTime(),
1);
AggregationInfo aggregationInfo = new AggregationInfo("ServiceUsageRecord", aggregationType,
aggregationStartTime.getTime(), aggregationEndTime);
AggregationStatus aggregationStatus = new AggregationStatus(aggregationInfo);
Elaborator elaborator = new Elaborator(aggregationStatus, Utility.getPersistTimeDate("8:00"), Utility.getPersistTimeDate("18:00"));
boolean allowed = elaborator.isAggregationAllowed();
if (!allowed) {
logger.info("AggregationStartTime {}. {} Aggregation is not allowed for the last {} {}",
aggregationType.getDateFormat().format(aggregationStartTime.getTime()), aggregationType,
aggregationType.getNotAggregableBefore(),
aggregationType.name().toLowerCase().replace("ly", "s").replaceAll("dais", "days"));
}
}
}
@Test
public void testEnd(){
Calendar aggregationStartCalendar = Utility.getAggregationStartCalendar(2017, Calendar.MARCH, 1);
Date aggregationStartDate = aggregationStartCalendar.getTime();
Date aggregationEndDate = Utility.getEndDateFromStartDate(AggregationType.MONTHLY, aggregationStartDate, 1);
logger.info("{} -> {}",
Constant.DEFAULT_DATE_FORMAT.format(aggregationStartDate),
Constant.DEFAULT_DATE_FORMAT.format(aggregationEndDate));
}
@Test
public void testUTCStartAndTime() throws ParseException{
String persistTimeString = Utility.getPersistTimeParameter(8, 00);
Date endTime = Utility.getPersistTimeDate(persistTimeString);
Calendar now = Calendar.getInstance();
Utility.isTimeElapsed(now, endTime);
}
}

View File

@ -0,0 +1,95 @@
/**
*
*/
package org.gcube.accounting.aggregator.plugin;
import java.io.IOException;
import java.io.InputStream;
import java.util.Properties;
import org.gcube.common.authorization.client.Constants;
import org.gcube.common.authorization.client.exceptions.ObjectNotFound;
import org.gcube.common.authorization.library.AuthorizationEntry;
import org.gcube.common.authorization.library.provider.SecurityTokenProvider;
import org.gcube.common.scope.api.ScopeProvider;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author Luca Frosini (ISTI - CNR)
*
*/
public class ScopedTest {
private static final Logger logger = LoggerFactory.getLogger(ScopedTest.class);
protected static final String PROPERTIES_FILENAME = "token.properties";
private static final String GCUBE_DEVNEXT_VARNAME = "GCUBE_DEVNEXT";
public static final String GCUBE_DEVNEXT;
private static final String GCUBE_DEVNEXT_NEXTNEXT_VARNAME = "GCUBE_DEVNEXT_NEXTNEXT";
public static final String GCUBE_DEVNEXT_NEXTNEXT;
public static final String GCUBE_DEVSEC_VARNAME = "GCUBE_DEVSEC";
public static final String GCUBE_DEVSEC;
public static final String GCUBE_DEVSEC_DEVVRE_VARNAME = "GCUBE_DEVSEC_DEVVRE";
public static final String GCUBE_DEVSEC_DEVVRE;
public static final String GCUBE_VARNAME = "GCUBE";
public static final String GCUBE;
public static final String DEFAULT_TEST_SCOPE;
public static final String ALTERNATIVE_TEST_SCOPE;
static {
Properties properties = new Properties();
InputStream input = ScopedTest.class.getClassLoader().getResourceAsStream(PROPERTIES_FILENAME);
if (input == null) {
throw new RuntimeException(PROPERTIES_FILENAME + " not found on the classpath");
}
try {
// load the properties file
properties.load(input);
} catch (IOException e) {
throw new RuntimeException(e);
}
GCUBE = properties.getProperty(GCUBE_VARNAME);
GCUBE_DEVNEXT = properties.getProperty(GCUBE_DEVNEXT_VARNAME);
GCUBE_DEVNEXT_NEXTNEXT = properties.getProperty(GCUBE_DEVNEXT_NEXTNEXT_VARNAME);
GCUBE_DEVSEC = properties.getProperty(GCUBE_DEVSEC_VARNAME);
GCUBE_DEVSEC_DEVVRE = properties.getProperty(GCUBE_DEVSEC_DEVVRE_VARNAME);
DEFAULT_TEST_SCOPE = GCUBE_DEVSEC;
ALTERNATIVE_TEST_SCOPE = GCUBE_DEVNEXT;
}
public static String getCurrentScope(String token) throws ObjectNotFound, Exception{
AuthorizationEntry authorizationEntry = Constants.authorizationService().get(token);
String context = authorizationEntry.getContext();
logger.info("Context of token {} is {}", token, context);
return context;
}
public static void setContext(String token) throws ObjectNotFound, Exception{
SecurityTokenProvider.instance.set(token);
ScopeProvider.instance.set(getCurrentScope(token));
}
@BeforeClass
public static void beforeClass() throws Exception{
setContext(DEFAULT_TEST_SCOPE);
}
@AfterClass
public static void afterClass() throws Exception{
SecurityTokenProvider.instance.reset();
ScopeProvider.instance.reset();
}
}

View File

@ -1,69 +0,0 @@
package org.gcube.accounting.aggregator.plugin;
import java.util.HashMap;
import java.util.Map;
import org.gcube.accounting.aggregator.madeaggregation.AggregationType;
import org.gcube.common.authorization.library.provider.SecurityTokenProvider;
import org.gcube.common.scope.api.ScopeProvider;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class Tests {
/**
* Logger
*/
private static Logger logger = LoggerFactory.getLogger(Tests.class);
@Before
public void beforeTest(){
SecurityTokenProvider.instance.set("36501a0d-a205-4bf1-87ad-4c7185faa0d6-98187548");
//FOR DEBUG
String scopeDebug="/gcube/devNext";
ScopeProvider.instance.set(scopeDebug);
// END FOR DEBUG
}
@Test
public void testLaunch() throws Exception {
Map<String, Object> inputs = new HashMap<String, Object>();
//type aggregation
inputs.put("type",AggregationType.DAILY.name());
//period to be processed
inputs.put("interval",1 );
/* OPTIONAL INPUT */
//change to time
//inputs.put("startTime", 6);
inputs.put("pathFile","/home/pieve/startTime");
//inputs.put("endScriptTime","16:00");
//specify bucket
inputs.put("bucket","accounting_service");
//current scope
inputs.put("currentScope",false);
//specify user for save to workspace
/*OPTIONAL INPUT for work a partial interval */
//inputs.put("intervalStep",6);
//specify a recovery 0 default recovery and aggregate, 1 only aggregate, 2 only recovery
inputs.put("recovery",0);
//user
inputs.put("user","alessandro.pieve");
AccountingAggregatorPlugin plugin = new AccountingAggregatorPlugin(null);
plugin.launch(inputs);
logger.debug("-------------- launch test finished");
}
@After
public void after(){
}
}

View File

@ -0,0 +1,34 @@
package org.gcube.accounting.aggregator.workspace;
import java.util.HashMap;
import java.util.Map;
import org.gcube.accounting.aggregator.plugin.ScopedTest;
import org.gcube.accounting.aggregator.workspace.WorkSpaceManagement;
import org.gcube.common.resources.gcore.GCoreEndpoint.Profile;
import org.gcube.common.resources.gcore.GCoreEndpoint.Profile.Endpoint;
import org.gcube.common.resources.gcore.utils.Group;
import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class WorkSpaceManagementTest extends ScopedTest {
public static Logger logger = LoggerFactory.getLogger(WorkSpaceManagementTest.class);
@Test
public void endpointTest(){
Profile profile = WorkSpaceManagement.gCoreEndpoint.profile();
Group<Endpoint> endpoints = profile.endpoints();
Map<String, String> restEndpointMap = new HashMap<>();
for(Endpoint endpoint : endpoints){
String endpointName = endpoint.name();
String endpointURI = endpoint.uri().toString();
if(endpointURI.contains("rest")){
restEndpointMap.put(endpointName, endpointURI);
}
}
logger.debug("{}", restEndpointMap);
}
}