Version 1.1.0

Add input endScriptTime: the plugin can now read its start time from a file (new pathFile input), step it back by one interval per pass, and keep aggregating in a loop until the given end-of-script clock time (interpreted as today) is reached.

git-svn-id: https://svn.d4science.research-infrastructures.eu/gcube/trunk/accounting/accounting-aggregator-se-plugin@148447 82a268e6-3cf1-43bd-a215-b396298e98cf
Alessandro Pieve 2017-05-10 13:01:46 +00:00
parent 00490b54ad
commit 2b16e5a527
11 changed files with 119 additions and 91 deletions

README

@@ -20,12 +20,12 @@ Please see the file named "changelog.xml" in this directory for the release note
 Authors
 --------------------------------------------------
-* Luca Frosini (luca.frosini-AT-isti.cnr.it), Istituto di Scienza e Tecnologie dell'Informazione "A. Faedo" - CNR, Pisa (Italy).
+* Alessandro Pieve (alessandro.pieve-AT-isti.cnr.it), Istituto di Scienza e Tecnologie dell'Informazione "A. Faedo" - CNR, Pisa (Italy).
 Maintainers
 -----------
-* Luca Frosini (luca.frosini-AT-isti.cnr.it), Istituto di Scienza e Tecnologie dell'Informazione "A. Faedo" - CNR, Pisa (Italy).
+* Alessandro Pieve (alessandro.pieve-AT-isti.cnr.it), Istituto di Scienza e Tecnologie dell'Informazione "A. Faedo" - CNR, Pisa (Italy).
 Download information

pom.xml

@@ -8,7 +8,7 @@
 	</parent>
 	<groupId>org.gcube.accounting</groupId>
 	<artifactId>accounting-aggregator-se-plugin</artifactId>
-	<version>1.0.1-SNAPSHOT</version>
+	<version>1.1.0-SNAPSHOT</version>
 	<name>Accounting Aggregator</name>
 	<description>Accounting Aggregator Smart Executor Plugin</description>
@@ -42,21 +42,14 @@
 		<version>[1.0.0-SNAPSHOT, 2.0.0-SNAPSHOT)</version>
 		<scope>provided</scope>
 	</dependency>
 	<dependency>
 		<groupId>org.gcube.common</groupId>
 		<artifactId>common-authorization</artifactId>
 	</dependency>
 	<dependency>
 		<groupId>org.gcube.common</groupId>
 		<artifactId>authorization-client</artifactId>
 	</dependency>
 	<dependency>
 		<groupId>org.gcube.core</groupId>
 		<artifactId>common-scope</artifactId>
@@ -68,7 +61,6 @@
 		<artifactId>home-library-jcr</artifactId>
 		<version>[2.0.0-SNAPSHOT,3.0.0-SNAPSHOT)</version>
 	</dependency>
 	<dependency>
 		<groupId>org.gcube.common</groupId>
 		<artifactId>home-library</artifactId>
@@ -76,18 +68,15 @@
 		<scope>compile</scope>
 	</dependency>
 	<!-- END Home Library -->
 	<dependency>
 		<groupId>org.gcube.common</groupId>
 		<artifactId>common-authorization</artifactId>
 	</dependency>
 	<dependency>
 		<groupId>org.gcube.common</groupId>
 		<artifactId>authorization-client</artifactId>
 		<scope>provided</scope>
 	</dependency>
 	<dependency>
 		<groupId>org.slf4j</groupId>
 		<artifactId>slf4j-api</artifactId>
@@ -114,15 +103,13 @@
 		<version>[1.0.1-SNAPSHOT, 2.0.0-SNAPSHOT)</version>
 	</dependency>
 	<dependency>
-		<artifactId>document-store-lib</artifactId>
 		<groupId>org.gcube.data.publishing</groupId>
+		<artifactId>document-store-lib</artifactId>
 		<scope>provided</scope>
 	</dependency>
 	<dependency>
 		<groupId>org.gcube.accounting</groupId>
 		<artifactId>accounting-lib</artifactId>
-		<version>[2.2.0-SNAPSHOT,3.0.0-SNAPSHOT)</version>
 		<scope>provided</scope>
 	</dependency>
 	<dependency>

Aggregation.java

@@ -35,9 +35,6 @@ public class Aggregation {
 	//list Aggregate record
 	protected Map<String, List<AggregatedRecord<?,?>>> bufferedRecords = new HashMap<String, List<AggregatedRecord<?,?>>>();
 	public Aggregation() {
 		super();
 	}
@@ -79,16 +76,13 @@ public class Aggregation {
 				continue;
 			}
 			AggregationUtility util = new AggregationUtility(bufferedRecord);
 			//verify a record is aggregable
-			//logger.debug("record: {}",record.toString());
 			if (util.isAggregable(record)){
 				try {
 					AggregatedRecord bufferedAggregatedRecord = (AggregatedRecord) bufferedRecord;
-					// TODO check compatibility using getAggregable
 					//logger.debug("if -- madeAggregation aggregate");
 					bufferedAggregatedRecord.aggregate((AggregatedRecord) record);
 					//patch for not changed a creation time
-					//bufferedAggregatedRecord.setCreationTime(bufferedAggregatedRecord.getStartTime());
 					bufferedAggregatedRecord.setCreationTime(record.getCreationTime());
 					found = true;
 					break;
@@ -97,20 +91,15 @@ public class Aggregation {
 				}
 			}
 		}
 		if(!found){
-			//logger.debug("Aggregated Record not found with execption");
-			//logger.debug("if -- madeAggregation not found with execption add");
 			records.add(record);
 			totalBufferedRecords++;
 			return;
 		}
 	}else{
-		//logger.debug("else if record contains "+recordType);
 		records = new ArrayList<AggregatedRecord<?,?>>();
 		try {
-			//logger.debug("else -- add getAggregatedRecord");
 			records.add(getAggregatedRecord(record));
 		} catch (Exception e) {
 			logger.debug("pre Exception but records");
@@ -122,8 +111,6 @@
 		}
 	}
 	/**
 	 * Reset buffer records
 	 */
@@ -132,7 +119,6 @@
 		bufferedRecords.clear();
 	}
 	/**
 	 * 
 	 * @return
@@ -177,12 +163,5 @@ public class Aggregation {
 			madeAggregation(record);
 		}
 	}
 }
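Note on the hunks above: madeAggregation() keeps one buffer of records per record type and folds each incoming record into the first buffered record that AggregationUtility.isAggregable() accepts, otherwise it appends the record to the buffer. A minimal, self-contained sketch of that buffer-and-merge pattern (Entry, mergeableWith and absorb are hypothetical stand-ins, not the accounting-lib API):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class AggregationSketch {
    // Hypothetical stand-in for AggregatedRecord: a record type, the fields
    // that must match for two records to merge, and a summed measure.
    static class Entry {
        final String type;
        final String key;
        long value;
        Entry(String type, String key, long value) { this.type = type; this.key = key; this.value = value; }
        boolean mergeableWith(Entry other) { return key.equals(other.key); } // plays the role of isAggregable()
        void absorb(Entry other) { this.value += other.value; }              // plays the role of aggregate()
    }

    // One buffer per record type, like the plugin's bufferedRecords map.
    private final Map<String, List<Entry>> buffered = new HashMap<>();

    void madeAggregation(Entry record) {
        List<Entry> records = buffered.computeIfAbsent(record.type, k -> new ArrayList<>());
        for (Entry candidate : records) {
            if (candidate.mergeableWith(record)) {
                candidate.absorb(record); // merge instead of buffering a duplicate
                return;
            }
        }
        records.add(record);              // nothing compatible buffered yet
    }

    public static void main(String[] args) {
        AggregationSketch a = new AggregationSketch();
        a.madeAggregation(new Entry("ServiceUsageRecord", "scopeA", 3));
        a.madeAggregation(new Entry("ServiceUsageRecord", "scopeA", 4)); // merged: value becomes 7
        a.madeAggregation(new Entry("ServiceUsageRecord", "scopeB", 1)); // different key: buffered separately
        System.out.println(a.buffered.get("ServiceUsageRecord").size()); // prints 2
    }
}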

AggregationUtility.java

@@ -91,7 +91,6 @@ public class AggregationUtility<T extends AggregatedRecord<T,?>> {
 		Serializable recordValue = record.getResourceProperty(field);
 		Serializable thisValue = t.getResourceProperty(field);
-		//logger.error("isAggregable-field:{} ,recordValue:{}, thisValue:{}",field,recordValue,thisValue);
 		if(recordValue instanceof Comparable && thisValue instanceof Comparable){
 			@SuppressWarnings("rawtypes")
@@ -103,14 +102,7 @@ public class AggregationUtility<T extends AggregatedRecord<T,?>> {
 				return false;
 			}
 		}else{
-			/*
-			if (recordValue==null){
-				//logger.trace("{} != {}", recordValue, thisValue);
-				return false;
-			}
-			*/
 			if(recordValue.hashCode()!=this.hashCode()){
-				//logger.trace("{} != {}", recordValue, thisValue);
 				return false;
 			}

AggregatorPersistenceBackendQuery.java

@@ -9,7 +9,6 @@ package org.gcube.accounting.aggregator.persistence;
 public interface AggregatorPersistenceBackendQuery {
 	public static final int KEY_VALUES_LIMIT = 25;
 	public void prepareConnection(
 			AggregatorPersistenceBackendQueryConfiguration configuration)
 			throws Exception;

AggregatorPersistenceBackendQueryConfiguration.java

@@ -17,7 +17,7 @@ public class AggregatorPersistenceBackendQueryConfiguration extends AccountingPe
 	}
 	/**
-	 * @param class1 The class of the persistence to instantiate
+	 * @param class The class of the persistence to instantiate
 	 * @throws Exception if fails
 	 */
 	@SuppressWarnings({ "unchecked", "rawtypes" })

AccountingAggregatorPlugin.java

@@ -1,10 +1,16 @@
 package org.gcube.accounting.aggregator.plugin;
+import java.io.BufferedReader;
 import java.io.File;
+import java.io.FileOutputStream;
+import java.io.FileReader;
+import java.io.PrintStream;
 import java.io.Serializable;
+import java.text.DateFormat;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
 import java.util.Calendar;
+import java.util.Date;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -18,13 +24,20 @@ import org.gcube.accounting.aggregator.madeaggregation.Aggregation;
 import org.gcube.accounting.aggregator.madeaggregation.AggregationType;
 import org.gcube.accounting.aggregator.persistence.AggregatorPersistenceBackendQueryConfiguration;
 import org.gcube.accounting.aggregator.recovery.RecoveryRecord;
+import org.gcube.accounting.datamodel.aggregation.AggregatedJobUsageRecord;
+import org.gcube.accounting.datamodel.aggregation.AggregatedPortletUsageRecord;
 import org.gcube.accounting.datamodel.aggregation.AggregatedServiceUsageRecord;
+import org.gcube.accounting.datamodel.aggregation.AggregatedStorageUsageRecord;
+import org.gcube.accounting.datamodel.aggregation.AggregatedTaskUsageRecord;
+import org.gcube.accounting.datamodel.usagerecords.JobUsageRecord;
+import org.gcube.accounting.datamodel.usagerecords.PortletUsageRecord;
 import org.gcube.accounting.datamodel.usagerecords.ServiceUsageRecord;
+import org.gcube.accounting.datamodel.usagerecords.StorageUsageRecord;
+import org.gcube.accounting.datamodel.usagerecords.TaskUsageRecord;
 import org.gcube.common.scope.api.ScopeProvider;
 import org.gcube.documentstore.exception.InvalidValueException;
 import org.gcube.documentstore.persistence.PersistenceCouchBase;
 import org.gcube.documentstore.records.AggregatedRecord;
+import org.gcube.documentstore.records.Record;
 import org.gcube.documentstore.records.RecordUtility;
 import org.gcube.vremanagement.executor.plugin.Plugin;
 import org.slf4j.Logger;
@@ -105,15 +118,37 @@ public class AccountingAggregatorPlugin extends Plugin<AccountingAggregatorPlugi
 			throw new IllegalArgumentException("Interval and type must be defined");
 		AggregationType aggType =AggregationType.valueOf((String)inputs.get("type"));
-		Integer interval=(Integer)inputs.get("interval")* aggType.getMultiplierFactor();
+		Integer intervaTot=(Integer)inputs.get("interval");
+		Integer interval=intervaTot* aggType.getMultiplierFactor();
 		//new feature for not elaborate the full range but a set of small intervals
 		if (inputs.containsKey("intervalStep"))
 			interval=(Integer)inputs.get("intervalStep");
 		Integer inputStartTime=null;
+		String pathFile = null;
 		if (inputs.containsKey("startTime"))
 			inputStartTime=(Integer)inputs.get("startTime");
+		else{
+			//get start time with file
+			logger.debug("Attention get start Time from file");
+			if (inputs.containsKey("pathFile")){
+				//get start time from file
+				pathFile=(String) inputs.get("pathFile");
+				logger.error("open file:{}",pathFile);
+				BufferedReader reader = new BufferedReader(new FileReader(pathFile));
+				String line;
+				while ((line = reader.readLine()) != null)
+				{
+					line=line.trim();
+					inputStartTime=Integer.valueOf(line);
+					logger.debug("Start Time:{}",inputStartTime);
+				}
+				reader.close();
+			}
+		}
 		Boolean currentScope =false;
 		String scope=null;
@@ -183,28 +218,60 @@
 		Cluster cluster = CouchbaseCluster.create(ENV, url);
 		//Define a type for aggregate
+		RecordUtility.addRecordPackage(PortletUsageRecord.class.getPackage());
+		RecordUtility.addRecordPackage(AggregatedPortletUsageRecord.class.getPackage());
+		RecordUtility.addRecordPackage(JobUsageRecord.class.getPackage());
+		RecordUtility.addRecordPackage(AggregatedJobUsageRecord.class.getPackage());
+		RecordUtility.addRecordPackage(TaskUsageRecord.class.getPackage());
+		RecordUtility.addRecordPackage(AggregatedTaskUsageRecord.class.getPackage());
+		RecordUtility.addRecordPackage(StorageUsageRecord.class.getPackage());
+		RecordUtility.addRecordPackage(AggregatedStorageUsageRecord.class.getPackage());
 		RecordUtility.addRecordPackage(ServiceUsageRecord.class.getPackage());
 		RecordUtility.addRecordPackage(AggregatedServiceUsageRecord.class.getPackage());
-		initFolder();
-		if ((recoveryMode==2)||(recoveryMode==0)){
-			logger.debug("Recovery mode enabled");
-			RecoveryRecord.searchFile(cluster,configuration);
-		}
-		if (recoveryMode!=2){
-			for (String bucket:listBucket){
-				logger.trace("OpenBucket:{}",bucket);
-				accountingBucket = cluster.openBucket(bucket,password);
-				//elaborate bucket, with scope, type aggregation and interval
-				elaborateBucket(bucket,scope, inputStartTime, interval, aggType);
-			}
-			logger.debug("Complete countInsert{}, countDelete{}",countInsert,countDelete);
-		}
+		//end define
+		Date today = new Date();
+		Date endScriptTime = new Date();
+		if (inputs.containsKey("endScriptTime")){
+			DateFormat df = new SimpleDateFormat ("MM/dd/yyyy HH:mm");
+			endScriptTime = df.parse ((today.getMonth()+1)+"/"+today.getDate()+"/"+(today.getYear()+1900) +" "+(String)inputs.get("endScriptTime"));
+			logger.debug("Script Run until :{}"+endScriptTime);
+		}
+		do {
+			logger.debug("--Start Time Loop:{}"+inputStartTime);
+			initFolder();
+			if ((recoveryMode==2)||(recoveryMode==0)){
+				logger.debug("Recovery mode enabled");
+				RecoveryRecord.searchFile(cluster,configuration);
+			}
+			if (recoveryMode!=2){
+				for (String bucket:listBucket){
+					logger.trace("OpenBucket:{}",bucket);
+					accountingBucket = cluster.openBucket(bucket,password);
+					//elaborate bucket, with scope, type aggregation and interval
+					elaborateBucket(bucket,scope, inputStartTime, interval, aggType);
+				}
+				if (inputs.containsKey("pathFile")){
+					//update a file for new start time
+					FileOutputStream file = new FileOutputStream(pathFile);
+					PrintStream output = new PrintStream(file);
+					logger.debug("Update pathfile:{} with new start time:{}",pathFile,inputStartTime-intervaTot);
+					output.println(inputStartTime-intervaTot);
+					inputStartTime=inputStartTime-intervaTot;
+					today = new Date();
+				}
+				logger.debug("Complete countInsert{}, countDelete{}",countInsert,countDelete);
+			}
+		} while(today.compareTo(endScriptTime)<0);
+		logger.debug("Plugin Terminated");
 	}
@@ -280,8 +347,7 @@ public class AccountingAggregatorPlugin extends Plugin<AccountingAggregatorPlugi
 		String startAllKeyString = format.format(nowTemp.getTime());
 		if (backup){
 			logger.debug("Start Backup");
 			WorkSpaceManagement.onSaveBackupFile(accountingBucket,bucket,scope,startAllKeyString, endAllKeyString,aggType);
-			//logger.debug("Backup complete startKeyString{}, endKeyString{}",startAllKeyString,endAllKeyString);
 		}
 		else
 			logger.debug("No Backup required");
@@ -317,9 +383,8 @@ public class AccountingAggregatorPlugin extends Plugin<AccountingAggregatorPlugi
 		} catch (Exception e) {
 			logger.error("Exception error VIEW",e.getLocalizedMessage(),e);
-			//throw e;
 		}
 		// Iterate through the returned ViewRows
 		aggregate = new Aggregation();
 		documentElaborate.clear();
@@ -328,13 +393,15 @@ public class AccountingAggregatorPlugin extends Plugin<AccountingAggregatorPlugi
 		for (ViewRow row : viewResult)
 			resultElaborate=elaborateRow(row,documentElaborate);
 		logger.debug("End elaborate row");
-		//File backup have a name with scope e
+		//Backup File saved
 		String nameFileBackup="";
 		if (scope!=null)
 			nameFileBackup=scope.replace("/", "")+"-"+startKeyString+"-"+endKeyString;
 		else
 			nameFileBackup=startKeyString+"-"+endKeyString;
+		//save into db (delete no aggregate record and insert a record aggregate)
 		reallyFlush(aggregate,documentElaborate,nameFileBackup);
 		endKeyString = startKeyString;
@@ -450,6 +517,7 @@ public class AccountingAggregatorPlugin extends Plugin<AccountingAggregatorPlugi
 		Integer index=0;
 		boolean succesfulDelete=false;
 		logger.trace("Start a delete document:{}",docs.size());
 		//before elaborate a record, create a backup file
 		List<JsonDocument> notDeleted = docs;
 		List<JsonDocument> notInserted = aggregate.reallyFlush();
@@ -506,6 +574,7 @@ public class AccountingAggregatorPlugin extends Plugin<AccountingAggregatorPlugi
 		logger.debug("notDeletedTemp size:{} notDeleted:{}",notDeletedTemp.size(),notDeleted.size());
 		logger.debug("Delete complete:{}, Start a insert aggregated document:{}",countDelete,notInserted.size());
 		// delete all record and ready for insert a new aggregated record
 		if (succesfulDelete){
 			//if successful record delete, delete backup file
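The new control flow above in a nutshell: read the persisted start time from pathFile, aggregate one interval, step the start time back by the original interval, persist it, and repeat until the wall clock passes endScriptTime (a clock time such as "16:00", anchored to today's date via the deprecated java.util.Date getters). A minimal sketch of that loop under the same assumptions; readStartTime, writeStartTime and elaborate are hypothetical stand-ins for the plugin's inline BufferedReader/PrintStream code and elaborateBucket():

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.LocalTime;
import java.util.List;

class EndScriptTimeLoopSketch {
    static int readStartTime(Path pathFile) throws IOException {
        // the plugin reads the last line of pathFile as the start time
        return Integer.parseInt(Files.readAllLines(pathFile).get(0).trim());
    }

    static void writeStartTime(Path pathFile, int startTime) throws IOException {
        // persisted so a restart resumes from where the last pass stopped
        Files.write(pathFile, List.of(Integer.toString(startTime)));
    }

    static void elaborate(int startTime, int interval) {
        System.out.printf("aggregating interval [%d, %d)%n", startTime, startTime + interval);
    }

    public static void main(String[] args) throws IOException {
        Path pathFile = Path.of("startTime");   // same role as the plugin's pathFile input
        Files.write(pathFile, List.of("6"));
        int intervalTot = 1;                    // the plugin's "interval" input
        // "endScriptTime" is a clock time interpreted as today; the plugin
        // builds the same instant with SimpleDateFormat("MM/dd/yyyy HH:mm").
        LocalDateTime endScriptTime = LocalDateTime.of(LocalDate.now(), LocalTime.parse("16:00"));
        int startTime = readStartTime(pathFile);
        do {
            elaborate(startTime, intervalTot);
            startTime -= intervalTot;           // step one interval further into the past
            writeStartTime(pathFile, startTime);
        } while (LocalDateTime.now().isBefore(endScriptTime));
    }
}

Like the plugin's do/while, this runs at least one pass even when endScriptTime has already gone by.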

DesignID.java

@@ -12,8 +12,6 @@ public enum DesignID {
 	accounting_job("accounting_job","JobUsageRecordAggregated","all","scope"),
 	accounting_task("accounting_task","TaskUsageRecordAggregated","all","scope");
 	private String nameBucket;
 	private String nameDesign;
 	private String nameView;

WorkSpaceManagement.java

@@ -38,7 +38,7 @@ public class WorkSpaceManagement {
 	public static Logger logger = LoggerFactory.getLogger(Aggregation.class);
 	/**
-	 * Save a backup file compressed into workspace
+	 * Save a compressed backup file into workspace
 	 * @param bucket
 	 * @param startKeyString
 	 * @param endKeyString
@@ -90,7 +90,7 @@ public class WorkSpaceManagement {
 			throw e;
 		}
-		//manage error
 		BufferedWriter filebackup =null;
 		File logFile = new File(namePathFile);
 		logFile.delete();
@@ -168,7 +168,7 @@ public class WorkSpaceManagement {
 		try {
 			HomeManagerFactory factory = HomeLibrary.getHomeManagerFactory();
 			HomeManager manager = factory.getHomeManager();
 			User userWS = manager.createUser(user);
 			Home home = manager.getHome(userWS);
 			Workspace ws = home.getWorkspace();
 			WorkspaceFolder root = ws.getRoot();
@@ -234,8 +234,7 @@ public class WorkSpaceManagement {
 		else{
 			ws.remove(name, folderId);
 			Thread.sleep(2000);
 			ws.createExternalFile(name, description, null, inputStream, folderId);
-			//ws.updateItem(projectItem.getId(), inputStream);
 		}
 		return;
 	} catch (Exception e) {

RecoveryRecord.java

@@ -55,7 +55,6 @@ public class RecoveryRecord {
 	 */
 	protected static void prepareConnection(Cluster cluster,AggregatorPersistenceBackendQueryConfiguration configuration) throws Exception {
-		String url = configuration.getProperty(ConfigurationServiceEndpoint.URL_PROPERTY_KEY);
 		String password = configuration.getProperty(ConfigurationServiceEndpoint.PASSWORD_PROPERTY_KEY);
 		try {
@@ -89,7 +88,7 @@ public class RecoveryRecord {
 	}
-	@SuppressWarnings("null")
 	public static void searchFile(Cluster cluster,AggregatorPersistenceBackendQueryConfiguration configuration) throws Exception{
 		try{
@@ -135,8 +134,7 @@ public class RecoveryRecord {
 		catch(Exception e){
 			logger.error("Error for list file:{}",e);
 		}
-		//cluster.disconnect();
 	}
 	public static boolean ElaborateDeleteFile(String nameFile) throws IOException{
 		HashMap<String, Object> mapper = new Gson().fromJson(new FileReader(new File(nameFile)), HashMap.class);
@@ -208,27 +206,28 @@ public class RecoveryRecord {
 			usageRecordType="";
 		if (recordType==null)
 			recordType="";
+		JsonDocument response = null;
 		if ((recordType.equals("ServiceUsageRecord")) || (usageRecordType.equals("ServiceUsageRecord"))){
 			JsonDocument document = JsonDocument.create(identifier, accounting);
-			JsonDocument response = bucketService.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
+			response = bucketService.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
 		}
 		if ((recordType.equals("StorageUsageRecord")) || (usageRecordType.equals("StorageUsageRecord"))){
 			JsonDocument document = JsonDocument.create(identifier, accounting);
-			JsonDocument response = bucketStorage.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
+			response = bucketStorage.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
 		}
 		if ((recordType.equals("JobUsageRecord")) || (usageRecordType.equals("JobUsageRecord"))){
 			JsonDocument document = JsonDocument.create(identifier, accounting);
-			JsonDocument response = bucketJob.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
+			response = bucketJob.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
 		}
 		if ((recordType.equals("TaskUsageRecord")) || (usageRecordType.equals("TaskUsageRecord"))){
 			JsonDocument document = JsonDocument.create(identifier, accounting);
-			JsonDocument response = bucketTask.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
+			response = bucketTask.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
 		}
 		if ((recordType.equals("PortletUsageRecord")) || (usageRecordType.equals("PortletUsageRecord"))){
 			JsonDocument document = JsonDocument.create(identifier, accounting);
-			JsonDocument response = bucketPortlet.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
+			response = bucketPortlet.upsert(document,PersistTo.MASTER,Constant.CONNECTION_TIMEOUT_BUCKET, TimeUnit.SECONDS);
 		}
+		logger.trace("Elaborate Insert fileJsondocument response:{}",response);
 	}catch(Exception e){
 		logger.error("Problem with recovery file and insert record excepiton:{}",e.getLocalizedMessage());
 		throw e;
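The hunk above hoists response out of the five per-type branches so a single trace line can report the upsert result of whichever bucket handled the record. A possible further tightening, not part of this commit, is a map from record type to destination bucket; sketched here with a hypothetical BucketLike stand-in rather than the Couchbase Bucket API:

import java.util.HashMap;
import java.util.Map;

class RecordDispatchSketch {
    // Hypothetical stand-in for com.couchbase.client.java.Bucket.upsert(...).
    interface BucketLike { String upsert(String id, String content); }

    public static void main(String[] args) {
        Map<String, BucketLike> bucketByRecordType = new HashMap<>();
        bucketByRecordType.put("ServiceUsageRecord", (id, c) -> "stored in accounting_service:" + id);
        bucketByRecordType.put("StorageUsageRecord", (id, c) -> "stored in accounting_storage:" + id);
        // ... one entry per record type, mirroring the DesignID enum

        String recordType = "ServiceUsageRecord"; // would come from the recovered JSON
        String response = null;
        BucketLike bucket = bucketByRecordType.get(recordType);
        if (bucket != null)
            response = bucket.upsert("doc-1", "{...}");
        // one trace at the end, which the hoisted 'response' variable enables
        System.out.println("Elaborate Insert fileJsondocument response: " + response);
    }
}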

Tests.java

@@ -37,7 +37,10 @@ public class Tests {
 		inputs.put("interval",1 );
 		/* OPTIONAL INPUT */
 		//change to time
-		inputs.put("startTime", 6);
+		//inputs.put("startTime", 6);
+		//inputs.put("pathFile","/home/pieve/startTime");
+		//inputs.put("endScriptTime","16:00");
 		//specify bucket
 		inputs.put("bucket","accounting_service");
@@ -45,6 +48,9 @@ public class Tests {
 		//current scope
 		inputs.put("currentScope",false);
 		//specify user for save to workspace
+		/*OPTIONAL INPUT for work a partial interval */
+		//inputs.put("intervalStep",6);
 		//specify a recovery 0 default recovery and aggregate, 1 only aggregate, 2 only recovery
 		inputs.put("recovery",0);
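Put together, a test configuration exercising the new inputs would look like the sketch below, assembled from the options shown above; the "type" value is an assumed AggregationType name and all values are illustrative:

import java.util.HashMap;
import java.util.Map;

class AggregatorInputsExample {
    public static void main(String[] args) {
        Map<String, Object> inputs = new HashMap<String, Object>();
        inputs.put("type", "DAILY");                     // AggregationType name; "DAILY" is an assumed value
        inputs.put("interval", 1);
        inputs.put("bucket", "accounting_service");
        inputs.put("currentScope", false);
        inputs.put("recovery", 0);                       // 0 recovery+aggregate, 1 only aggregate, 2 only recovery
        // instead of a fixed startTime: resume from the value persisted in this file...
        inputs.put("pathFile", "/home/pieve/startTime");
        // ...and keep looping until 16:00 today
        inputs.put("endScriptTime", "16:00");
        System.out.println(inputs);
    }
}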