Gianpaolo Coro 2016-03-17 17:21:47 +00:00
parent b3ffd0f40b
commit d873449db9
54 changed files with 1139 additions and 106 deletions

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="DBSCAN", abstrakt="A clustering algorithm for real valued vectors that relies on the density-based spatial clustering of applications with noise (DBSCAN) algorithm. A maximum of 4000 points is allowed.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.clusterers.DBSCAN", version = "1.1.0")
@Algorithm(statusSupported=true, title="DBSCAN", abstrakt="A clustering algorithm for real valued vectors that relies on the density-based spatial clustering of applications with noise (DBSCAN) algorithm. A maximum of 4000 points is allowed.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.clusterers.DBSCAN", version = "1.1.0")
public class DBSCAN extends AbstractEcologicalEngineMapper implements IClusterer{
@ComplexDataInput(abstrakt="Name of the parameter: OccurrencePointsTable. Occurrence Points Table. Max 4000 points [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="Occurrence Points Table. Max 4000 points [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "OccurrencePointsTable", binding = GenericFileDataBinding.class) public void setOccurrencePointsTable(GenericFileData file) {inputs.put("OccurrencePointsTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: FeaturesColumnNames. column Names for the features [a sequence of names of columns from OccurrencePointsTable separated by | ]", defaultValue="", title="column Names for the features [a sequence of names of columns from OccurrencePointsTable separated by | ]", identifier = "FeaturesColumnNames", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setFeaturesColumnNames(String data) {inputs.put("FeaturesColumnNames",data);}
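
For reference, the mapper classes touched in this commit all follow the 52°North WPS annotated-algorithm pattern, in which @Algorithm(statusSupported=...) controls whether the generated WPS process description advertises status reporting, while inputs and outputs are bound through @LiteralDataInput, @ComplexDataInput and the corresponding output annotations. The sketch below is illustrative only and is not code from these files; the class name, parameter names, the annotation package org.n52.wps.algorithm.annotation, and the base class org.n52.wps.server.AbstractAnnotatedAlgorithm are assumptions.

import org.n52.wps.algorithm.annotation.*;   // @Algorithm, @LiteralDataInput, @LiteralDataOutput, @Execute (assumed package)
import org.n52.wps.io.data.binding.literal.LiteralStringBinding;
import org.n52.wps.server.AbstractAnnotatedAlgorithm;

// Hypothetical sketch: statusSupported=true only changes the capability advertised
// in the ProcessDescription; the execution logic below is unaffected by the flag.
@Algorithm(statusSupported = true, title = "EXAMPLE_ECHO", abstrakt = "Echoes its input string.", version = "1.1.0")
public class EXAMPLE_ECHO extends AbstractAnnotatedAlgorithm {

    private String text;
    private String echoed;

    @LiteralDataInput(identifier = "Text", title = "Text to echo", maxOccurs = 1, minOccurs = 1, binding = LiteralStringBinding.class)
    public void setText(String data) { this.text = data; }

    @LiteralDataOutput(identifier = "Echo", title = "Echoed text", binding = LiteralStringBinding.class)
    public String getEcho() { return echoed; }

    @Execute
    public void run() { echoed = text; }
}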

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="KMEANS", abstrakt="A clustering algorithm for real valued vectors that relies on the k-means algorithm, i.e. a method aiming to partition n observations into k clusters in which each observation belongs to the cluster with the nearest mean, serving as a prototype of the cluster. A Maximum of 4000 points is allowed.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.clusterers.KMEANS", version = "1.1.0")
@Algorithm(statusSupported=true, title="KMEANS", abstrakt="A clustering algorithm for real valued vectors that relies on the k-means algorithm, i.e. a method aiming to partition n observations into k clusters in which each observation belongs to the cluster with the nearest mean, serving as a prototype of the cluster. A Maximum of 4000 points is allowed.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.clusterers.KMEANS", version = "1.1.0")
public class KMEANS extends AbstractEcologicalEngineMapper implements IClusterer{
@ComplexDataInput(abstrakt="Name of the parameter: OccurrencePointsTable. Occurrence Points Table. Max 4000 points [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="Occurrence Points Table. Max 4000 points [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "OccurrencePointsTable", binding = GenericFileDataBinding.class) public void setOccurrencePointsTable(GenericFileData file) {inputs.put("OccurrencePointsTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: FeaturesColumnNames. column Names for the features [a sequence of names of columns from OccurrencePointsTable separated by | ]", defaultValue="", title="column Names for the features [a sequence of names of columns from OccurrencePointsTable separated by | ]", identifier = "FeaturesColumnNames", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setFeaturesColumnNames(String data) {inputs.put("FeaturesColumnNames",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="LOF", abstrakt="Local Outlier Factor (LOF). A clustering algorithm for real valued vectors that relies on Local Outlier Factor algorithm, i.e. an algorithm for finding anomalous data points by measuring the local deviation of a given data point with respect to its neighbours. A Maximum of 4000 points is allowed.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.clusterers.LOF", version = "1.1.0")
@Algorithm(statusSupported=true, title="LOF", abstrakt="Local Outlier Factor (LOF). A clustering algorithm for real valued vectors that relies on Local Outlier Factor algorithm, i.e. an algorithm for finding anomalous data points by measuring the local deviation of a given data point with respect to its neighbours. A Maximum of 4000 points is allowed.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.clusterers.LOF", version = "1.1.0")
public class LOF extends AbstractEcologicalEngineMapper implements IClusterer{
@ComplexDataInput(abstrakt="Name of the parameter: PointsTable. Table containing points or observations. Max 4000 points [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="Table containing points or observations. Max 4000 points [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "PointsTable", binding = GenericFileDataBinding.class) public void setPointsTable(GenericFileData file) {inputs.put("PointsTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: FeaturesColumnNames. column Names for the features [a sequence of names of columns from PointsTable separated by | ]", defaultValue="", title="column Names for the features [a sequence of names of columns from PointsTable separated by | ]", identifier = "FeaturesColumnNames", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setFeaturesColumnNames(String data) {inputs.put("FeaturesColumnNames",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="XMEANS", abstrakt="A clustering algorithm for occurrence points that relies on the X-Means algorithm, i.e. an extended version of the K-Means algorithm improved by an Improve-Structure part. A Maximum of 4000 points is allowed.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.clusterers.XMEANS", version = "1.1.0")
@Algorithm(statusSupported=true, title="XMEANS", abstrakt="A clustering algorithm for occurrence points that relies on the X-Means algorithm, i.e. an extended version of the K-Means algorithm improved by an Improve-Structure part. A Maximum of 4000 points is allowed.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.clusterers.XMEANS", version = "1.1.0")
public class XMEANS extends AbstractEcologicalEngineMapper implements IClusterer{
@ComplexDataInput(abstrakt="Name of the parameter: OccurrencePointsTable. Occurrence Points Table. Max 4000 points [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="Occurrence Points Table. Max 4000 points [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "OccurrencePointsTable", binding = GenericFileDataBinding.class) public void setOccurrencePointsTable(GenericFileData file) {inputs.put("OccurrencePointsTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: FeaturesColumnNames. column Names for the features [a sequence of names of columns from OccurrencePointsTable separated by | ]", defaultValue="", title="column Names for the features [a sequence of names of columns from OccurrencePointsTable separated by | ]", identifier = "FeaturesColumnNames", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setFeaturesColumnNames(String data) {inputs.put("FeaturesColumnNames",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="DISCREPANCY_ANALYSIS", abstrakt="An evaluator algorithm that compares two tables containing real valued vectors. It drives the comparison by relying on a geographical distance threshold and a threshold for K-Statistic.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.evaluators.DISCREPANCY_ANALYSIS", version = "1.1.0")
@Algorithm(statusSupported=true, title="DISCREPANCY_ANALYSIS", abstrakt="An evaluator algorithm that compares two tables containing real valued vectors. It drives the comparison by relying on a geographical distance threshold and a threshold for K-Statistic.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.evaluators.DISCREPANCY_ANALYSIS", version = "1.1.0")
public class DISCREPANCY_ANALYSIS extends AbstractEcologicalEngineMapper implements IEvaluator{
@ComplexDataInput(abstrakt="Name of the parameter: FirstTable. First Table [a http link to a table in UTF-8 encoding following this template: (HSPEC) http://goo.gl/OvKa1h]", title="First Table [a http link to a table in UTF-8 encoding following this template: (HSPEC) http://goo.gl/OvKa1h]", maxOccurs=1, minOccurs=1, identifier = "FirstTable", binding = GenericFileDataBinding.class) public void setFirstTable(GenericFileData file) {inputs.put("FirstTable",file);}
@ComplexDataInput(abstrakt="Name of the parameter: SecondTable. Second Table [a http link to a table in UTF-8 encoding following this template: (HSPEC) http://goo.gl/OvKa1h]", title="Second Table [a http link to a table in UTF-8 encoding following this template: (HSPEC) http://goo.gl/OvKa1h]", maxOccurs=1, minOccurs=1, identifier = "SecondTable", binding = GenericFileDataBinding.class) public void setSecondTable(GenericFileData file) {inputs.put("SecondTable",file);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="HRS", abstrakt="An evaluator algorithm that calculates the Habitat Representativeness Score, i.e. an indicator of the assessment of whether a specific survey coverage or another environmental features dataset, contains data that are representative of all available habitat variable combinations in an area.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.evaluators.HRS", version = "1.1.0")
@Algorithm(statusSupported=true, title="HRS", abstrakt="An evaluator algorithm that calculates the Habitat Representativeness Score, i.e. an indicator of the assessment of whether a specific survey coverage or another environmental features dataset, contains data that are representative of all available habitat variable combinations in an area.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.evaluators.HRS", version = "1.1.0")
public class HRS extends AbstractEcologicalEngineMapper implements IEvaluator{
@ComplexDataInput(abstrakt="Name of the parameter: ProjectingAreaTable. A Table containing projecting area information [a http link to a table in UTF-8 encoding following this template: (HCAF) http://goo.gl/SZG9uM]", title="A Table containing projecting area information [a http link to a table in UTF-8 encoding following this template: (HCAF) http://goo.gl/SZG9uM]", maxOccurs=1, minOccurs=1, identifier = "ProjectingAreaTable", binding = GenericFileDataBinding.class) public void setProjectingAreaTable(GenericFileData file) {inputs.put("ProjectingAreaTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: OptionalCondition. optional filter for taking area rows", defaultValue="where oceanarea>0", title="optional filter for taking area rows", identifier = "OptionalCondition", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setOptionalCondition(String data) {inputs.put("OptionalCondition",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="MAPS_COMPARISON", abstrakt="An algorithm for comparing two OGC/NetCDF maps in seamless way to the user. The algorithm assesses the similarities between two geospatial maps by comparing them in a point-to-point fashion. It accepts as input the two geospatial maps (via their UUIDs in the infrastructure spatial data repository - recoverable through the Geoexplorer portlet) and some parameters affecting the comparison such as the z-index, the time index, the comparison threshold. Note: in the case of WFS layers it makes comparisons on the last feature column.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.evaluators.MAPS_COMPARISON", version = "1.1.0")
@Algorithm(statusSupported=true, title="MAPS_COMPARISON", abstrakt="An algorithm for comparing two OGC/NetCDF maps in seamless way to the user. The algorithm assesses the similarities between two geospatial maps by comparing them in a point-to-point fashion. It accepts as input the two geospatial maps (via their UUIDs in the infrastructure spatial data repository - recoverable through the Geoexplorer portlet) and some parameters affecting the comparison such as the z-index, the time index, the comparison threshold. Note: in the case of WFS layers it makes comparisons on the last feature column.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.evaluators.MAPS_COMPARISON", version = "1.1.0")
public class MAPS_COMPARISON extends AbstractEcologicalEngineMapper implements IEvaluator{
@LiteralDataInput(abstrakt="Name of the parameter: Layer_1. First Layer Title or UUID: The title or the UUID (preferred) of a layer indexed in the e-Infrastructure on GeoNetwork - You can retrieve it from GeoExplorer", defaultValue="", title="First Layer Title or UUID: The title or the UUID (preferred) of a layer indexed in the e-Infrastructure on GeoNetwork - You can retrieve it from GeoExplorer", identifier = "Layer_1", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setLayer_1(String data) {inputs.put("Layer_1",data);}
@LiteralDataInput(abstrakt="Name of the parameter: Layer_2. Second Layer Title or UUID: The title or the UUID (preferred) of a second layer indexed in the e-Infrastructure on GeoNetwork - You can retrieve it from GeoExplorer", defaultValue="", title="Second Layer Title or UUID: The title or the UUID (preferred) of a second layer indexed in the e-Infrastructure on GeoNetwork - You can retrieve it from GeoExplorer", identifier = "Layer_2", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setLayer_2(String data) {inputs.put("Layer_2",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="QUALITY_ANALYSIS", abstrakt="An evaluator algorithm that assesses the effectiveness of a distribution model by computing the Receiver Operating Characteristics (ROC), the Area Under Curve (AUC) and the Accuracy of a model", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.evaluators.QUALITY_ANALYSIS", version = "1.1.0")
@Algorithm(statusSupported=true, title="QUALITY_ANALYSIS", abstrakt="An evaluator algorithm that assesses the effectiveness of a distribution model by computing the Receiver Operating Characteristics (ROC), the Area Under Curve (AUC) and the Accuracy of a model", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.evaluators.QUALITY_ANALYSIS", version = "1.1.0")
public class QUALITY_ANALYSIS extends AbstractEcologicalEngineMapper implements IEvaluator{
@ComplexDataInput(abstrakt="Name of the parameter: PositiveCasesTable. A Table containing positive cases [a http link to a table in UTF-8 encoding following this template: (HCAF) http://goo.gl/SZG9uM]", title="A Table containing positive cases [a http link to a table in UTF-8 encoding following this template: (HCAF) http://goo.gl/SZG9uM]", maxOccurs=1, minOccurs=1, identifier = "PositiveCasesTable", binding = GenericFileDataBinding.class) public void setPositiveCasesTable(GenericFileData file) {inputs.put("PositiveCasesTable",file);}
@ComplexDataInput(abstrakt="Name of the parameter: NegativeCasesTable. A Table containing negative cases [a http link to a table in UTF-8 encoding following this template: (HCAF) http://goo.gl/SZG9uM]", title="A Table containing negative cases [a http link to a table in UTF-8 encoding following this template: (HCAF) http://goo.gl/SZG9uM]", maxOccurs=1, minOccurs=1, identifier = "NegativeCasesTable", binding = GenericFileDataBinding.class) public void setNegativeCasesTable(GenericFileData file) {inputs.put("NegativeCasesTable",file);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="BIONYM", abstrakt="An algorithm implementing BiOnym, a flexible workflow approach to taxon name matching. The workflow allows to activate several taxa names matching algorithms and to get the list of possible transcriptions for a list of input raw species names with possible authorship indication.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.BIONYM", version = "1.1.0")
@Algorithm(statusSupported=true, title="BIONYM", abstrakt="An algorithm implementing BiOnym, a flexible workflow approach to taxon name matching. The workflow allows to activate several taxa names matching algorithms and to get the list of possible transcriptions for a list of input raw species names with possible authorship indication.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.BIONYM", version = "1.1.0")
public class BIONYM extends AbstractEcologicalEngineMapper implements IGenerator{
@ComplexDataInput(abstrakt="Name of the parameter: RawTaxaNamesTable. Input table containing raw taxa names that you want to match [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="Input table containing raw taxa names that you want to match [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "RawTaxaNamesTable", binding = GenericFileDataBinding.class) public void setRawTaxaNamesTable(GenericFileData file) {inputs.put("RawTaxaNamesTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: RawNamesColumn. The column containing the raw taxa names with or without authoship information [the name of a column from RawTaxaNamesTable]", defaultValue="rawnames", title="The column containing the raw taxa names with or without authoship information [the name of a column from RawTaxaNamesTable]", identifier = "RawNamesColumn", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setRawNamesColumn(String data) {inputs.put("RawNamesColumn",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="CMSY", abstrakt="An algorithm to estimate the Maximum Sustainable Yield from a catch statistic. If also a Biomass trend is provided, MSY estimation is provided also with higher precision. The method has been developed by R. Froese, G. Coro, N. Demirel and K. Kleisner.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.CMSY", version = "1.1.0")
@Algorithm(statusSupported=true, title="CMSY", abstrakt="An algorithm to estimate the Maximum Sustainable Yield from a catch statistic. If also a Biomass trend is provided, MSY estimation is provided also with higher precision. The method has been developed by R. Froese, G. Coro, N. Demirel and K. Kleisner.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.CMSY", version = "1.1.0")
public class CMSY extends AbstractEcologicalEngineMapper implements IGenerator{
@LiteralDataInput(abstrakt="Name of the parameter: IDsFile. Http link to a file containing prior information about the stocks, in WKLife IV format. Example: http://goo.gl/9rg3qK", defaultValue="", title="Http link to a file containing prior information about the stocks, in WKLife IV format. Example: http://goo.gl/9rg3qK", identifier = "IDsFile", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setIDsFile(String data) {inputs.put("IDsFile",data);}
@LiteralDataInput(abstrakt="Name of the parameter: StocksFile. Http link to a file containing catch and biomass (or CPUE) trends , in WKLife IV format. Example: http://goo.gl/Mp2ZLY", defaultValue="", title="Http link to a file containing catch and biomass (or CPUE) trends , in WKLife IV format. Example: http://goo.gl/Mp2ZLY", identifier = "StocksFile", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setStocksFile(String data) {inputs.put("StocksFile",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="ICCAT_VPA", abstrakt="An algorithm for stock assessment of catch statistics published by the International Commission for the Conservation of Atlantic Tunas (ICCAT). Produces summary statistics about a stock, involving assessment of fishing mortality, abundance, catch trend, fecundity and recruitment. Developed by IFREMER and IRD. Contact persons: Sylvain Bonhommeau sylvain.bonhommeau@ifremer.fr, Julien Barde julien.barde@ird.fr.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.ICCAT_VPA", version = "1.1.0")
@Algorithm(statusSupported=true, title="ICCAT_VPA", abstrakt="An algorithm for stock assessment of catch statistics published by the International Commission for the Conservation of Atlantic Tunas (ICCAT). Produces summary statistics about a stock, involving assessment of fishing mortality, abundance, catch trend, fecundity and recruitment. Developed by IFREMER and IRD. Contact persons: Sylvain Bonhommeau sylvain.bonhommeau@ifremer.fr, Julien Barde julien.barde@ird.fr.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.generators.ICCAT_VPA", version = "1.1.0")
public class ICCAT_VPA extends AbstractEcologicalEngineMapper implements IGenerator{
@LiteralDataInput(abstrakt="Name of the parameter: StartYear. First year of the dataset temporal extent", defaultValue="1950", title="First year of the dataset temporal extent", identifier = "StartYear", maxOccurs=1, minOccurs=1, binding = LiteralIntBinding.class) public void setStartYear(Integer data) {inputs.put("StartYear",""+data);}
@LiteralDataInput(abstrakt="Name of the parameter: EndYear. Last year of the dataset temporal extent", defaultValue="2013", title="Last year of the dataset temporal extent", identifier = "EndYear", maxOccurs=1, minOccurs=1, binding = LiteralIntBinding.class) public void setEndYear(Integer data) {inputs.put("EndYear",""+data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="ABSENCE_CELLS_FROM_AQUAMAPS", abstrakt="An algorithm producing cells and features (HCAF) for a species containing absense points taken by an Aquamaps Distribution", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ABSENCE_CELLS_FROM_AQUAMAPS", version = "1.1.0")
@Algorithm(statusSupported=true, title="ABSENCE_CELLS_FROM_AQUAMAPS", abstrakt="An algorithm producing cells and features (HCAF) for a species containing absense points taken by an Aquamaps Distribution", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ABSENCE_CELLS_FROM_AQUAMAPS", version = "1.1.0")
public class ABSENCE_CELLS_FROM_AQUAMAPS extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: Table_Label. the name of the Filtered Hcaf", defaultValue="AbsenceCells_", title="the name of the Filtered Hcaf", identifier = "Table_Label", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setTable_Label(String data) {inputs.put("Table_Label",data);}
@ComplexDataInput(abstrakt="Name of the parameter: Aquamaps_HSPEC. an Aquamaps table from which to produce the absence points [a http link to a table in UTF-8 encoding following this template: (HSPEC) http://goo.gl/OvKa1h]", title="an Aquamaps table from which to produce the absence points [a http link to a table in UTF-8 encoding following this template: (HSPEC) http://goo.gl/OvKa1h]", maxOccurs=1, minOccurs=1, identifier = "Aquamaps_HSPEC", binding = GenericFileDataBinding.class) public void setAquamaps_HSPEC(GenericFileData file) {inputs.put("Aquamaps_HSPEC",file);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="BIONYM_LOCAL", abstrakt="A fast version of the algorithm implementing BiOnym, a flexible workflow approach to taxon name matching. The workflow allows to activate several taxa names matching algorithms and to get the list of possible transcriptions for a list of input raw species names with possible authorship indication.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.BIONYM_LOCAL", version = "1.1.0")
@Algorithm(statusSupported=true, title="BIONYM_LOCAL", abstrakt="A fast version of the algorithm implementing BiOnym, a flexible workflow approach to taxon name matching. The workflow allows to activate several taxa names matching algorithms and to get the list of possible transcriptions for a list of input raw species names with possible authorship indication.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.BIONYM_LOCAL", version = "1.1.0")
public class BIONYM_LOCAL extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: SpeciesAuthorName. The scientific name of the species, possibly with authorship", defaultValue="Gadus morhua (Linnaeus, 1758)", title="The scientific name of the species, possibly with authorship", identifier = "SpeciesAuthorName", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setSpeciesAuthorName(String data) {inputs.put("SpeciesAuthorName",data);}
@LiteralDataInput(abstrakt="Name of the parameter: Taxa_Authority_File. The reference dataset to use", allowedValues= {"ASFIS","FISHBASE","OBIS","OBIS_ANIMALIA","OBIS_CNIDARIA","OBIS_ECHINODERMATA","OBIS_PLATYHELMINTHES","COL_FULL","COL_CHORDATA","COL_MAMMALIA","IRMNG_ACTINOPTERYGII","WORMS_ANIMALIA","WORMS_PISCES"}, defaultValue="FISHBASE", title="The reference dataset to use", identifier = "Taxa_Authority_File", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setTaxa_Authority_File(String data) {inputs.put("Taxa_Authority_File",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="CSQUARE_COLUMN_CREATOR", abstrakt="An algorithm that adds a column containing the CSquare codes associated to longitude and latitude columns.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CSQUARE_COLUMN_CREATOR", version = "1.1.0")
@Algorithm(statusSupported=true, title="CSQUARE_COLUMN_CREATOR", abstrakt="An algorithm that adds a column containing the CSquare codes associated to longitude and latitude columns.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.CSQUARE_COLUMN_CREATOR", version = "1.1.0")
public class CSQUARE_COLUMN_CREATOR extends AbstractEcologicalEngineMapper implements ITransducer{
@ComplexDataInput(abstrakt="Name of the parameter: InputTable. The table to which the algorithm adds the csquare column [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="The table to which the algorithm adds the csquare column [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "InputTable", binding = GenericFileDataBinding.class) public void setInputTable(GenericFileData file) {inputs.put("InputTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: Longitude_Column. The column containing Longitude information [the name of a column from InputTable]", defaultValue="x", title="The column containing Longitude information [the name of a column from InputTable]", identifier = "Longitude_Column", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setLongitude_Column(String data) {inputs.put("Longitude_Column",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="ESRI_GRID_EXTRACTION", abstrakt="An algorithm to extract values associated to an environmental feature repository (e.g. NETCDF, ASC, GeoTiff files etc. ). A grid of points at a certain resolution is specified by the user and values are associated to the points from the environmental repository. It accepts as one geospatial repository ID (via their UUIDs in the infrastructure spatial data repository - recoverable through the Geoexplorer portlet) or a direct link to a file and the specification about time and space. The algorithm produces one ESRI GRID ASCII file containing the values associated to the selected bounding box.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ESRI_GRID_EXTRACTION", version = "1.1.0")
@Algorithm(statusSupported=true, title="ESRI_GRID_EXTRACTION", abstrakt="An algorithm to extract values associated to an environmental feature repository (e.g. NETCDF, ASC, GeoTiff files etc. ). A grid of points at a certain resolution is specified by the user and values are associated to the points from the environmental repository. It accepts as one geospatial repository ID (via their UUIDs in the infrastructure spatial data repository - recoverable through the Geoexplorer portlet) or a direct link to a file and the specification about time and space. The algorithm produces one ESRI GRID ASCII file containing the values associated to the selected bounding box.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ESRI_GRID_EXTRACTION", version = "1.1.0")
public class ESRI_GRID_EXTRACTION extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: Layer. Layer Title or UUID or HTTP link. E.g. the title or the UUID (preferred) of a layer indexed in the e-Infrastructure on GeoNetwork - You can retrieve it from GeoExplorer. Otherwise you can supply the direct HTTP link of the layer. The format will be guessed from the link. The default is GeoTiff. Supports several standards (NETCDF, WFS, WCS ASC, GeoTiff )", defaultValue="", title="Layer Title or UUID or HTTP link. E.g. the title or the UUID (preferred) of a layer indexed in the e-Infrastructure on GeoNetwork - You can retrieve it from GeoExplorer. Otherwise you can supply the direct HTTP link of the layer. The format will be guessed from the link. The default is GeoTiff. Supports several standards (NETCDF, WFS, WCS ASC, GeoTiff )", identifier = "Layer", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setLayer(String data) {inputs.put("Layer",data);}
@LiteralDataInput(abstrakt="Name of the parameter: BBox_LowerLeftLat. Lower Left Latitute of the Bounding Box", defaultValue="-60", title="Lower Left Latitute of the Bounding Box", identifier = "BBox_LowerLeftLat", maxOccurs=1, minOccurs=1, binding = LiteralDoubleBinding.class) public void setBBox_LowerLeftLat(Double data) {inputs.put("BBox_LowerLeftLat",""+data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="FAO_OCEAN_AREA_COLUMN_CREATOR", abstrakt="An algorithm that adds a column containing the FAO Ocean Area codes associated to longitude and latitude columns.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.FAO_OCEAN_AREA_COLUMN_CREATOR", version = "1.1.0")
@Algorithm(statusSupported=true, title="FAO_OCEAN_AREA_COLUMN_CREATOR", abstrakt="An algorithm that adds a column containing the FAO Ocean Area codes associated to longitude and latitude columns.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.FAO_OCEAN_AREA_COLUMN_CREATOR", version = "1.1.0")
public class FAO_OCEAN_AREA_COLUMN_CREATOR extends AbstractEcologicalEngineMapper implements ITransducer{
@ComplexDataInput(abstrakt="Name of the parameter: InputTable. The table to which the algorithm adds the csquare column [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="The table to which the algorithm adds the csquare column [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "InputTable", binding = GenericFileDataBinding.class) public void setInputTable(GenericFileData file) {inputs.put("InputTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: Longitude_Column. The column containing Longitude information [the name of a column from InputTable]", defaultValue="x", title="The column containing Longitude information [the name of a column from InputTable]", identifier = "Longitude_Column", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setLongitude_Column(String data) {inputs.put("Longitude_Column",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="FAO_OCEAN_AREA_COLUMN_CREATOR_FROM_QUADRANT", abstrakt="An algorithm that adds a column containing the FAO Ocean Area codes associated to longitude, latitude and quadrant columns.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.FAO_OCEAN_AREA_COLUMN_CREATOR_FROM_QUADRANT", version = "1.1.0")
@Algorithm(statusSupported=true, title="FAO_OCEAN_AREA_COLUMN_CREATOR_FROM_QUADRANT", abstrakt="An algorithm that adds a column containing the FAO Ocean Area codes associated to longitude, latitude and quadrant columns.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.FAO_OCEAN_AREA_COLUMN_CREATOR_FROM_QUADRANT", version = "1.1.0")
public class FAO_OCEAN_AREA_COLUMN_CREATOR_FROM_QUADRANT extends AbstractEcologicalEngineMapper implements ITransducer{
@ComplexDataInput(abstrakt="Name of the parameter: InputTable. The table to which the algorithm adds the csquare column [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="The table to which the algorithm adds the csquare column [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "InputTable", binding = GenericFileDataBinding.class) public void setInputTable(GenericFileData file) {inputs.put("InputTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: Longitude_Column. The column containing Longitude information [the name of a column from InputTable]", defaultValue="x", title="The column containing Longitude information [the name of a column from InputTable]", identifier = "Longitude_Column", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setLongitude_Column(String data) {inputs.put("Longitude_Column",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="GENERIC_CHARTS", abstrakt="An algorithm producing generic charts of attributes vs. quantities. Charts are displayed per quantity column. Histograms, Scattering and Radar charts are produced for the top ten quantities. A gaussian distribution reports overall statistics for the quantities.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.GENERIC_CHARTS", version = "1.1.0")
@Algorithm(statusSupported=true, title="GENERIC_CHARTS", abstrakt="An algorithm producing generic charts of attributes vs. quantities. Charts are displayed per quantity column. Histograms, Scattering and Radar charts are produced for the top ten quantities. A gaussian distribution reports overall statistics for the quantities.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.GENERIC_CHARTS", version = "1.1.0")
public class GENERIC_CHARTS extends AbstractEcologicalEngineMapper implements ITransducer{
@ComplexDataInput(abstrakt="Name of the parameter: InputTable. The input table [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="The input table [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "InputTable", binding = GenericFileDataBinding.class) public void setInputTable(GenericFileData file) {inputs.put("InputTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: TopElementsNumber. Max number of elements, with highest values, to visualize", defaultValue="10", title="Max number of elements, with highest values, to visualize", identifier = "TopElementsNumber", maxOccurs=1, minOccurs=1, binding = LiteralIntBinding.class) public void setTopElementsNumber(Integer data) {inputs.put("TopElementsNumber",""+data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="GEO_CHART", abstrakt="An algorithm producing a charts that displays quantities as colors of countries. The color indicates the sum of the values recorded in a country.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.GEO_CHART", version = "1.1.0")
@Algorithm(statusSupported=true, title="GEO_CHART", abstrakt="An algorithm producing a charts that displays quantities as colors of countries. The color indicates the sum of the values recorded in a country.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.GEO_CHART", version = "1.1.0")
public class GEO_CHART extends AbstractEcologicalEngineMapper implements ITransducer{
@ComplexDataInput(abstrakt="Name of the parameter: InputTable. The input table [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="The input table [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "InputTable", binding = GenericFileDataBinding.class) public void setInputTable(GenericFileData file) {inputs.put("InputTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: Longitude. The column containing longitude decimal values [the name of a column from InputTable]", defaultValue="long", title="The column containing longitude decimal values [the name of a column from InputTable]", identifier = "Longitude", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setLongitude(String data) {inputs.put("Longitude",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="HCAF_FILTER", abstrakt="An algorithm producing a HCAF table on a selected Bounding Box (default identifies Indonesia)", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.HCAF_FILTER", version = "1.1.0")
@Algorithm(statusSupported=true, title="HCAF_FILTER", abstrakt="An algorithm producing a HCAF table on a selected Bounding Box (default identifies Indonesia)", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.HCAF_FILTER", version = "1.1.0")
public class HCAF_FILTER extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: Table_Label. the name of the Filtered Hcaf", defaultValue="hcaf_filtered", title="the name of the Filtered Hcaf", identifier = "Table_Label", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setTable_Label(String data) {inputs.put("Table_Label",data);}
@LiteralDataInput(abstrakt="Name of the parameter: B_Box_Left_Lower_Lat. the left lower latitude of the bounding box (range [-90,+90])", defaultValue="-17.098", title="the left lower latitude of the bounding box (range [-90,+90])", identifier = "B_Box_Left_Lower_Lat", maxOccurs=1, minOccurs=1, binding = LiteralDoubleBinding.class) public void setB_Box_Left_Lower_Lat(Double data) {inputs.put("B_Box_Left_Lower_Lat",""+data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="MAX_ENT_NICHE_MODELLING", abstrakt="A Maximum-Entropy model for species habitat modeling, based on the implementation by Shapire et al. v 3.3.3k, Princeton University, http://www.cs.princeton.edu/schapire/maxent/. In this adaptation for the D4Science infrastructure, the software accepts a table produced by the Species Product Discovery service and a set of environmental layers in various formats (NetCDF, WFS, WCS, ASC, GeoTiff) via direct links or GeoExplorer UUIDs. The user can also establish the bounding box and the spatial resolution (in decimal deg.) of the training and the projection. The application will adapt the layers to that resolution if this is higher than the native one.The output contains: a thumbnail map of the projected model, the ROC curve, the Omission/Commission chart, a table containing the raw assigned values, a threshold to transform the table into a 0-1 probability distribution, a report of the importance of the used layers in the model, ASCII representations of the input layers to check their alignment.Other processes can be later applied to the raw values to produce a GIS map (e.g. the Statistical Manager Points-to-Map process) and results can be shared. Demo video: http://goo.gl/TYYnTO and instructions http://wiki.i-marine.eu/index.php/MaxEnt", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.MAX_ENT_NICHE_MODELLING", version = "1.1.0")
@Algorithm(statusSupported=true, title="MAX_ENT_NICHE_MODELLING", abstrakt="A Maximum-Entropy model for species habitat modeling, based on the implementation by Shapire et al. v 3.3.3k, Princeton University, http://www.cs.princeton.edu/schapire/maxent/. In this adaptation for the D4Science infrastructure, the software accepts a table produced by the Species Product Discovery service and a set of environmental layers in various formats (NetCDF, WFS, WCS, ASC, GeoTiff) via direct links or GeoExplorer UUIDs. The user can also establish the bounding box and the spatial resolution (in decimal deg.) of the training and the projection. The application will adapt the layers to that resolution if this is higher than the native one.The output contains: a thumbnail map of the projected model, the ROC curve, the Omission/Commission chart, a table containing the raw assigned values, a threshold to transform the table into a 0-1 probability distribution, a report of the importance of the used layers in the model, ASCII representations of the input layers to check their alignment.Other processes can be later applied to the raw values to produce a GIS map (e.g. the Statistical Manager Points-to-Map process) and results can be shared. Demo video: http://goo.gl/TYYnTO and instructions http://wiki.i-marine.eu/index.php/MaxEnt", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.MAX_ENT_NICHE_MODELLING", version = "1.1.0")
public class MAX_ENT_NICHE_MODELLING extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: OutputTableLabel. The name of the table to produce", defaultValue="maxent_", title="The name of the table to produce", identifier = "OutputTableLabel", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setOutputTableLabel(String data) {inputs.put("OutputTableLabel",data);}
@LiteralDataInput(abstrakt="Name of the parameter: SpeciesName. The name of the species to model and the occurrence records refer to", defaultValue="generic_species", title="The name of the species to model and the occurrence records refer to", identifier = "SpeciesName", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setSpeciesName(String data) {inputs.put("SpeciesName",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="OBIS_MOST_OBSERVED_SPECIES", abstrakt="An algorithm producing a bar chart for the most observed species in a certain years range (with respect to the OBIS database)", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OBIS_MOST_OBSERVED_SPECIES", version = "1.1.0")
@Algorithm(statusSupported=true, title="OBIS_MOST_OBSERVED_SPECIES", abstrakt="An algorithm producing a bar chart for the most observed species in a certain years range (with respect to the OBIS database)", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OBIS_MOST_OBSERVED_SPECIES", version = "1.1.0")
public class OBIS_MOST_OBSERVED_SPECIES extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: Species_number. Number of species to report (max 17 will be visualized on the chart)", defaultValue="10", title="Number of species to report (max 17 will be visualized on the chart)", identifier = "Species_number", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setSpecies_number(String data) {inputs.put("Species_number",data);}
@LiteralDataInput(abstrakt="Name of the parameter: Start_year. Starting year of the analysis", defaultValue="1800", title="Starting year of the analysis", identifier = "Start_year", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setStart_year(String data) {inputs.put("Start_year",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="OBIS_MOST_OBSERVED_TAXA", abstrakt="An algorithm producing a bar chart for the most observed taxa in a certain years range (with respect to the OBIS database)", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OBIS_MOST_OBSERVED_TAXA", version = "1.1.0")
@Algorithm(statusSupported=true, title="OBIS_MOST_OBSERVED_TAXA", abstrakt="An algorithm producing a bar chart for the most observed taxa in a certain years range (with respect to the OBIS database)", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OBIS_MOST_OBSERVED_TAXA", version = "1.1.0")
public class OBIS_MOST_OBSERVED_TAXA extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: Taxa_number. Number of taxa to report", defaultValue="10", title="Number of taxa to report", identifier = "Taxa_number", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setTaxa_number(String data) {inputs.put("Taxa_number",data);}
@LiteralDataInput(abstrakt="Name of the parameter: Level. Choose the taxonomy level", allowedValues= {"GENUS","FAMILY","ORDER","CLASS"}, defaultValue="GENUS", title="Choose the taxonomy level", identifier = "Level", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setLevel(String data) {inputs.put("Level",data);}

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="OBIS_SINGLE_SPECIES_DISTRIBUTION_PER_AREA", abstrakt="An algorithm producing a bar chart for the distribution of a species along a certain type of marine area (e.g. LME or MEOW)", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OBIS_SINGLE_SPECIES_DISTRIBUTION_PER_AREA", version = "1.1.0")
@Algorithm(statusSupported=true, title="OBIS_SINGLE_SPECIES_DISTRIBUTION_PER_AREA", abstrakt="An algorithm producing a bar chart for the distribution of a species along a certain type of marine area (e.g. LME or MEOW)", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OBIS_SINGLE_SPECIES_DISTRIBUTION_PER_AREA", version = "1.1.0")
public class OBIS_SINGLE_SPECIES_DISTRIBUTION_PER_AREA extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: Species. The species to analyze", defaultValue="", title="The species to analyze", identifier = "Species", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setSpecies(String data) {inputs.put("Species",data);}
@LiteralDataInput(abstrakt="Name of the parameter: Area. Choose the area type", allowedValues= {"LME","MEOW"}, defaultValue="LME", title="Choose the area type", identifier = "Area", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setArea(String data) {inputs.put("Area",data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="OBIS_SPECIES_OBSERVATIONS_PER_LME_AREA", abstrakt="Algorithm returning most observed species in a specific years range (data collected from OBIS database).", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OBIS_SPECIES_OBSERVATIONS_PER_LME_AREA", version = "1.1.0")
@Algorithm(statusSupported=true, title="OBIS_SPECIES_OBSERVATIONS_PER_LME_AREA", abstrakt="Algorithm returning most observed species in a specific years range (data collected from OBIS database).", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OBIS_SPECIES_OBSERVATIONS_PER_LME_AREA", version = "1.1.0")
public class OBIS_SPECIES_OBSERVATIONS_PER_LME_AREA extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: Area_type. Choose the area name", allowedValues= {"AGULHAS CURRENT","ANTARCTICA","ARABIAN SEA","BALTIC SEA","BARENTS SEA","BAY OF BENGAL","BEAUFORT SEA","BENGUELA CURRENT","BLACK SEA","CALIFORNIA CURRENT","CANARY CURRENT","CARIBBEAN SEA","CELTIC-BISCAY SHELF","CHUKCHI SEA","EAST BERING SEA","EAST BRAZIL SHELF","EAST CENTRAL AUSTRALIAN SHELF","EAST CHINA SEA","EAST GREENLAND SHELF","EAST SIBERIAN SEA","FAROE PLATEAU","GUINEA CURRENT","GULF OF ALASKA","GULF OF CALIFORNIA","GULF OF MEXICO","GULF OF THAILAND","HUDSON BAY","HUMBOLDT CURRENT","IBERIAN COASTAL","ICELAND SHELF","INDONESIAN SEA","INSULAR PACIFIC-HAWAIIAN","KARA SEA","KUROSHIO CURRENT","LAPTEV SEA","MEDITERRANEAN SEA","NEWFOUNDLAND-LABRADOR SHELF","NEW ZEALAND SHELF","NORTH AUSTRALIAN SHELF","NORTH BRAZIL SHELF","NORTHEAST AUSTRALIAN SHELF","NORTHEAST U.S. CONTINENTAL SHELF","NORTH SEA","NORTHWEST AUSTRALIAN SHELF","NORWEGIAN SEA","OYASHIO CURRENT","PACIFIC CENTRAL-AMERICAN COASTAL","PATAGONIAN SHELF","RED SEA","SCOTIAN SHELF","SEA OF JAPAN","SEA OF OKHOTSK","SOMALI COASTAL CURRENT","SOUTH BRAZIL SHELF","SOUTH CHINA SEA","SOUTHEAST AUSTRALIAN SHELF","SOUTHEAST U.S. CONTINENTAL SHELF","SOUTHWEST AUSTRALIAN SHELF","SULU-CELEBES SEA","WEST BERING SEA","WEST CENTRAL AUSTRALIAN SHELF","WEST GREENLAND SHELF","YELLOW SEA"}, defaultValue="AGULHAS CURRENT", title="Choose the area name", identifier = "Area_type", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setArea_type(String data) {inputs.put("Area_type",data);}
@LiteralDataInput(abstrakt="Name of the parameter: Start_year. Starting year of the analysis", defaultValue="1800", title="Starting year of the analysis", identifier = "Start_year", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setStart_year(String data) {inputs.put("Start_year",data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="OBIS_SPECIES_OBSERVATIONS_PER_YEAR", abstrakt="An algorithm producing the trend of the observations for a certain species in a certain years range.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OBIS_SPECIES_OBSERVATIONS_PER_YEAR", version = "1.1.0")
@Algorithm(statusSupported=true, title="OBIS_SPECIES_OBSERVATIONS_PER_YEAR", abstrakt="An algorithm producing the trend of the observations for a certain species in a certain years range.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OBIS_SPECIES_OBSERVATIONS_PER_YEAR", version = "1.1.0")
public class OBIS_SPECIES_OBSERVATIONS_PER_YEAR extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: Start_year. Starting year of the analysis", defaultValue="1800", title="Starting year of the analysis", identifier = "Start_year", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setStart_year(String data) {inputs.put("Start_year",data);}
@LiteralDataInput(abstrakt="Name of the parameter: End_year. Ending year of the analysis", defaultValue="2020", title="Ending year of the analysis", identifier = "End_year", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setEnd_year(String data) {inputs.put("End_year",data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="OBIS_TAXA_OBSERVATIONS_PER_YEAR", abstrakt="Algorithm returning most observations taxonomy trend in a specific years range (with respect to the OBIS database)", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OBIS_TAXA_OBSERVATIONS_PER_YEAR", version = "1.1.0")
@Algorithm(statusSupported=true, title="OBIS_TAXA_OBSERVATIONS_PER_YEAR", abstrakt="Algorithm returning most observations taxonomy trend in a specific years range (with respect to the OBIS database)", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OBIS_TAXA_OBSERVATIONS_PER_YEAR", version = "1.1.0")
public class OBIS_TAXA_OBSERVATIONS_PER_YEAR extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: Level. Choose the taxonomy level", allowedValues= {"GENUS","FAMILY","ORDER","CLASS"}, defaultValue="GENUS", title="Choose the taxonomy level", identifier = "Level", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setLevel(String data) {inputs.put("Level",data);}
@LiteralDataInput(abstrakt="Name of the parameter: Start_year. Starting year of the analysis", defaultValue="1800", title="Starting year of the analysis", identifier = "Start_year", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setStart_year(String data) {inputs.put("Start_year",data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="OCCURRENCE_ENRICHMENT", abstrakt="An algorithm performing occurrences enrichment. Takes as input one table containing occurrence points for a set of species and a list of environmental layer, taken either from the e-infrastructure GeoNetwork (through the GeoExplorer application) or from direct HTTP links. Produces one table reporting the set of environmental values associated to the occurrence points.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OCCURRENCE_ENRICHMENT", version = "1.1.0")
@Algorithm(statusSupported=true, title="OCCURRENCE_ENRICHMENT", abstrakt="An algorithm performing occurrences enrichment. Takes as input one table containing occurrence points for a set of species and a list of environmental layer, taken either from the e-infrastructure GeoNetwork (through the GeoExplorer application) or from direct HTTP links. Produces one table reporting the set of environmental values associated to the occurrence points.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.OCCURRENCE_ENRICHMENT", version = "1.1.0")
public class OCCURRENCE_ENRICHMENT extends AbstractEcologicalEngineMapper implements ITransducer{
@ComplexDataInput(abstrakt="Name of the parameter: OccurrenceTable. A geospatial table containing occurrence records, following the template of the Species Products Discovery datasets [a http link to a table in UTF-8 encoding following this template: (OCCURRENCE_SPECIES) http://goo.gl/4ExuR5]", title="A geospatial table containing occurrence records, following the template of the Species Products Discovery datasets [a http link to a table in UTF-8 encoding following this template: (OCCURRENCE_SPECIES) http://goo.gl/4ExuR5]", maxOccurs=1, minOccurs=1, identifier = "OccurrenceTable", binding = GenericFileDataBinding.class) public void setOccurrenceTable(GenericFileData file) {inputs.put("OccurrenceTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: LongitudeColumn. The column containing longitude values [the name of a column from OccurrenceTable]", defaultValue="decimallongitude", title="The column containing longitude values [the name of a column from OccurrenceTable]", identifier = "LongitudeColumn", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setLongitudeColumn(String data) {inputs.put("LongitudeColumn",data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="PRESENCE_CELLS_GENERATION", abstrakt="An algorithm producing cells and features (HCAF) for a species containing presence points", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.PRESENCE_CELLS_GENERATION", version = "1.1.0")
@Algorithm(statusSupported=true, title="PRESENCE_CELLS_GENERATION", abstrakt="An algorithm producing cells and features (HCAF) for a species containing presence points", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.PRESENCE_CELLS_GENERATION", version = "1.1.0")
public class PRESENCE_CELLS_GENERATION extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: Table_Label. the name of the Filtered Hcaf", defaultValue="PresenceCells_", title="the name of the Filtered Hcaf", identifier = "Table_Label", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setTable_Label(String data) {inputs.put("Table_Label",data);}
@LiteralDataInput(abstrakt="Name of the parameter: Number_of_Points. Maximum number of points to take (-1 to take all)", defaultValue="-1", title="Maximum number of points to take (-1 to take all)", identifier = "Number_of_Points", maxOccurs=1, minOccurs=1, binding = LiteralIntBinding.class) public void setNumber_of_Points(Integer data) {inputs.put("Number_of_Points",""+data);}

View File

@@ -1,28 +0,0 @@
package org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers;
import java.io.File;
import java.net.URL;
import java.io.IOException;
import java.io.InputStream;
import java.util.LinkedHashMap;
import java.io.StringWriter;
import org.apache.commons.io.IOUtils;
import org.apache.xmlbeans.XmlObject;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.bindings.*;
import org.n52.wps.algorithm.annotation.*;
import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="RASTER_DATA_PUBLISHER", abstrakt="This algorithm publishes a raster file as a maps or datasets in the e-Infrastructure. NetCDF-CF files are encouraged, as WMS and WCS maps will be produced using this format. For other types of files (GeoTiffs, ASC etc.) only the raw datasets will be published. The resulting map or dataset will be accessible via the VRE GeoExplorer by the VRE participants.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.RASTER_DATA_PUBLISHER", version = "1.1.0")
public class RASTER_DATA_PUBLISHER extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: DatasetTitle. Title of the geospatial dataset to be shown on GeoExplorer", defaultValue="Generic Raster Layer", title="Title of the geospatial dataset to be shown on GeoExplorer", identifier = "DatasetTitle", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setDatasetTitle(String data) {inputs.put("DatasetTitle",data);}
@LiteralDataInput(abstrakt="Name of the parameter: DatasetAbstract. Abstract defining the content, the references and usage policies", defaultValue="Abstract", title="Abstract defining the content, the references and usage policies", identifier = "DatasetAbstract", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setDatasetAbstract(String data) {inputs.put("DatasetAbstract",data);}
@LiteralDataInput(abstrakt="Name of the parameter: InnerLayerName. Name of the inner layer or band to be published as a Map (ignored for non-NetCDF files)", defaultValue="band_1", title="Name of the inner layer or band to be published as a Map (ignored for non-NetCDF files)", identifier = "InnerLayerName", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setInnerLayerName(String data) {inputs.put("InnerLayerName",data);}
@LiteralDataInput(abstrakt="Name of the parameter: FileNameOnInfra. Name of the file that will be created in the infrastructures", defaultValue="raster-1452703434486.nc", title="Name of the file that will be created in the infrastructures", identifier = "FileNameOnInfra", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setFileNameOnInfra(String data) {inputs.put("FileNameOnInfra",data);}
@ComplexDataInput(abstrakt="Name of the parameter: RasterFile. Raster dataset to process", title="Raster dataset to process", maxOccurs=1, minOccurs=1, identifier = "RasterFile", binding = D4ScienceDataInputBinding.class) public void setRasterFile(GenericFileData file) {inputs.put("RasterFile",file);}
@LiteralDataInput(abstrakt="Name of the parameter: Topics. Topics to be attached to the published dataset. E.g. Biodiversity, D4Science, Environment, Weather [a sequence of values separated by | ] (format: String)", defaultValue="", title="Topics to be attached to the published dataset. E.g. Biodiversity, D4Science, Environment, Weather [a sequence of values separated by | ] (format: String)", identifier = "Topics", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setTopics(String data) {inputs.put("Topics",data);}
@LiteralDataInput(abstrakt="Name of the parameter: SpatialResolution. The resolution of the layer. For NetCDF file this is automatically estimated by data (leave -1)", defaultValue="-1d", title="The resolution of the layer. For NetCDF file this is automatically estimated by data (leave -1)", identifier = "SpatialResolution", maxOccurs=1, minOccurs=1, binding = LiteralDoubleBinding.class) public void setSpatialResolution(Double data) {inputs.put("SpatialResolution",""+data);}
@ComplexDataOutput(abstrakt="Output that is not predetermined", title="NonDeterministicOutput", identifier = "non_deterministic_output", binding = GenericXMLDataBinding.class)
public XmlObject getNon_deterministic_output() {return (XmlObject) outputs.get("non_deterministic_output");}
@Execute public void run() throws Exception { super.run(); } }

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="SGVM_INTERPOLATION", abstrakt="An interpolation method relying on the implementation by the Study Group on VMS (SGVMS). The method uses two interpolation approached to simulate vessels points at a certain temporal resolution. The input is a file in TACSAT format uploaded on the Statistical Manager. The output is another TACSAT file containing interpolated points.The underlying R code has been extracted from the SGVM VMSTools framework. This algorithm comes after a feasibility study (http://goo.gl/risQre) which clarifies the features an e-Infrastructure adds to the original scripts. Limitation: the input will be processed up to 10000 vessels trajectory points. Credits: Hintzen, N. T., Bastardie, F., Beare, D., Piet, G. J., Ulrich, C., Deporte, N., Egekvist, J., et al. 2012. VMStools: Open-source software for the processing, analysis and visualisation of fisheries logbook and VMS data. Fisheries Research, 115-116: 31-43. Hintzen, N. T., Piet, G. J., and Brunel, T. 2010. Improved estimation of trawling tracks using cubic Hermite spline interpolation of position registration data. Fisheries Research, 101: 108-115. VMStools, available as an add-on package for R. Documentation available at https://code.google.com/p/vmstools/. Build versions of VMStools for Window, Mac, Linux available at https://docs.google.com/. Authors: Niels T. Hintzen, Doug Beare", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SGVM_INTERPOLATION", version = "1.1.0")
@Algorithm(statusSupported=true, title="SGVM_INTERPOLATION", abstrakt="An interpolation method relying on the implementation by the Study Group on VMS (SGVMS). The method uses two interpolation approached to simulate vessels points at a certain temporal resolution. The input is a file in TACSAT format uploaded on the Statistical Manager. The output is another TACSAT file containing interpolated points.The underlying R code has been extracted from the SGVM VMSTools framework. This algorithm comes after a feasibility study (http://goo.gl/risQre) which clarifies the features an e-Infrastructure adds to the original scripts. Limitation: the input will be processed up to 10000 vessels trajectory points. Credits: Hintzen, N. T., Bastardie, F., Beare, D., Piet, G. J., Ulrich, C., Deporte, N., Egekvist, J., et al. 2012. VMStools: Open-source software for the processing, analysis and visualisation of fisheries logbook and VMS data. Fisheries Research, 115-116: 31-43. Hintzen, N. T., Piet, G. J., and Brunel, T. 2010. Improved estimation of trawling tracks using cubic Hermite spline interpolation of position registration data. Fisheries Research, 101: 108-115. VMStools, available as an add-on package for R. Documentation available at https://code.google.com/p/vmstools/. Build versions of VMStools for Window, Mac, Linux available at https://docs.google.com/. Authors: Niels T. Hintzen, Doug Beare", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SGVM_INTERPOLATION", version = "1.1.0")
public class SGVM_INTERPOLATION extends AbstractEcologicalEngineMapper implements ITransducer{
@ComplexDataInput(abstrakt="Name of the parameter: InputFile. Input file in TACSAT format. E.g. http://goo.gl/i16kPw", title="Input file in TACSAT format. E.g. http://goo.gl/i16kPw", maxOccurs=1, minOccurs=1, identifier = "InputFile", binding = D4ScienceDataInputBinding.class) public void setInputFile(GenericFileData file) {inputs.put("InputFile",file);}
@LiteralDataInput(abstrakt="Name of the parameter: npoints. The number of pings or positions required between each real or actual vessel position or ping", defaultValue="10", title="The number of pings or positions required between each real or actual vessel position or ping", identifier = "npoints", maxOccurs=1, minOccurs=1, binding = LiteralIntBinding.class) public void setnpoints(Integer data) {inputs.put("npoints",""+data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="SUBMITQUERY", abstrakt="Algorithm that allows to submit a query", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SUBMITQUERY", version = "1.1.0")
@Algorithm(statusSupported=true, title="SUBMITQUERY", abstrakt="Algorithm that allows to submit a query", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.SUBMITQUERY", version = "1.1.0")
public class SUBMITQUERY extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: ResourceName. The name of the resource", defaultValue="", title="The name of the resource", identifier = "ResourceName", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setResourceName(String data) {inputs.put("ResourceName",data);}
@LiteralDataInput(abstrakt="Name of the parameter: DatabaseName. The name of the database", defaultValue="", title="The name of the database", identifier = "DatabaseName", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setDatabaseName(String data) {inputs.put("DatabaseName",data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="TIMEEXTRACTION", abstrakt="An algorithm to extract a time series of values associated to a geospatial features repository (e.g. NETCDF, ASC, GeoTiff files etc. ). The algorithm analyses the time series and automatically searches for hidden periodicities. It produces one chart of the time series, one table containing the time series values and possibly the spectrogram.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIMEEXTRACTION", version = "1.1.0")
@Algorithm(statusSupported=true, title="TIMEEXTRACTION", abstrakt="An algorithm to extract a time series of values associated to a geospatial features repository (e.g. NETCDF, ASC, GeoTiff files etc. ). The algorithm analyses the time series and automatically searches for hidden periodicities. It produces one chart of the time series, one table containing the time series values and possibly the spectrogram.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIMEEXTRACTION", version = "1.1.0")
public class TIMEEXTRACTION extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: Layer. Layer Title or UUID or HTTP link. E.g. the title or the UUID (preferred) of a layer indexed in the e-Infrastructure on GeoNetwork - You can retrieve it from GeoExplorer. Otherwise you can supply the direct HTTP link of the layer. The format will be guessed from the link. The default is GeoTiff. Supports several standards (NETCDF, WFS, WCS ASC, GeoTiff )", defaultValue="", title="Layer Title or UUID or HTTP link. E.g. the title or the UUID (preferred) of a layer indexed in the e-Infrastructure on GeoNetwork - You can retrieve it from GeoExplorer. Otherwise you can supply the direct HTTP link of the layer. The format will be guessed from the link. The default is GeoTiff. Supports several standards (NETCDF, WFS, WCS ASC, GeoTiff )", identifier = "Layer", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setLayer(String data) {inputs.put("Layer",data);}
@LiteralDataInput(abstrakt="Name of the parameter: OutputTableLabel. The name of the table to produce", defaultValue="extr_", title="The name of the table to produce", identifier = "OutputTableLabel", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setOutputTableLabel(String data) {inputs.put("OutputTableLabel",data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="TIMEEXTRACTION_TABLE", abstrakt="An algorithm to extract a time series of values associated to a table containing geospatial information. The algorithm analyses the time series and automatically searches for hidden periodicities. It produces one chart of the time series, one table containing the time series values and possibly the spectrogram.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIMEEXTRACTION_TABLE", version = "1.1.0")
@Algorithm(statusSupported=true, title="TIMEEXTRACTION_TABLE", abstrakt="An algorithm to extract a time series of values associated to a table containing geospatial information. The algorithm analyses the time series and automatically searches for hidden periodicities. It produces one chart of the time series, one table containing the time series values and possibly the spectrogram.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIMEEXTRACTION_TABLE", version = "1.1.0")
public class TIMEEXTRACTION_TABLE extends AbstractEcologicalEngineMapper implements ITransducer{
@ComplexDataInput(abstrakt="Name of the parameter: geoReferencedTableName. A geospatial table containing at least x,y information [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="A geospatial table containing at least x,y information [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "geoReferencedTableName", binding = GenericFileDataBinding.class) public void setgeoReferencedTableName(GenericFileData file) {inputs.put("geoReferencedTableName",file);}
@LiteralDataInput(abstrakt="Name of the parameter: xColumn. The column containing x (longitude) information [the name of a column from geoReferencedTableName]", defaultValue="x", title="The column containing x (longitude) information [the name of a column from geoReferencedTableName]", identifier = "xColumn", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setxColumn(String data) {inputs.put("xColumn",data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="TIME_GEO_CHART", abstrakt="An algorithm producing an animated gif displaying quantities as colors in time. The color indicates the sum of the values recorded in a country.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIME_GEO_CHART", version = "1.1.0")
@Algorithm(statusSupported=true, title="TIME_GEO_CHART", abstrakt="An algorithm producing an animated gif displaying quantities as colors in time. The color indicates the sum of the values recorded in a country.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIME_GEO_CHART", version = "1.1.0")
public class TIME_GEO_CHART extends AbstractEcologicalEngineMapper implements ITransducer{
@ComplexDataInput(abstrakt="Name of the parameter: InputTable. The input table [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="The input table [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "InputTable", binding = GenericFileDataBinding.class) public void setInputTable(GenericFileData file) {inputs.put("InputTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: Longitude. The column containing longitude decimal values [the name of a column from InputTable]", defaultValue="long", title="The column containing longitude decimal values [the name of a column from InputTable]", identifier = "Longitude", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setLongitude(String data) {inputs.put("Longitude",data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="TIME_SERIES_ANALYSIS", abstrakt="An algorithms applying signal processing to a non uniform time series. A maximum of 10000 distinct points in time is allowed to be processed. The process uniformly samples the series, then extracts hidden periodicities and signal properties. The sampling period is the shortest time difference between two points. Finally, by using Caterpillar-SSA the algorithm forecasts the Time Series. The output shows the detected periodicity, the forecasted signal and the spectrogram.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIME_SERIES_ANALYSIS", version = "1.1.0")
@Algorithm(statusSupported=true, title="TIME_SERIES_ANALYSIS", abstrakt="An algorithms applying signal processing to a non uniform time series. A maximum of 10000 distinct points in time is allowed to be processed. The process uniformly samples the series, then extracts hidden periodicities and signal properties. The sampling period is the shortest time difference between two points. Finally, by using Caterpillar-SSA the algorithm forecasts the Time Series. The output shows the detected periodicity, the forecasted signal and the spectrogram.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIME_SERIES_ANALYSIS", version = "1.1.0")
public class TIME_SERIES_ANALYSIS extends AbstractEcologicalEngineMapper implements ITransducer{
@ComplexDataInput(abstrakt="Name of the parameter: TimeSeriesTable. The table containing the time series [a http link to a table in UTF-8 encoding following this template: (TIMESERIES) http://goo.gl/DoW6fg]", title="The table containing the time series [a http link to a table in UTF-8 encoding following this template: (TIMESERIES) http://goo.gl/DoW6fg]", maxOccurs=1, minOccurs=1, identifier = "TimeSeriesTable", binding = GenericFileDataBinding.class) public void setTimeSeriesTable(GenericFileData file) {inputs.put("TimeSeriesTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: ValueColum. The column containing the values of the time series [the name of a column from TimeSeriesTable]", defaultValue="values", title="The column containing the values of the time series [the name of a column from TimeSeriesTable]", identifier = "ValueColum", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setValueColum(String data) {inputs.put("ValueColum",data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="TIME_SERIES_CHARTS", abstrakt="An algorithm producing time series charts of attributes vs. quantities. Charts are displayed per quantity column and superposing quantities are summed.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIME_SERIES_CHARTS", version = "1.1.0")
@Algorithm(statusSupported=true, title="TIME_SERIES_CHARTS", abstrakt="An algorithm producing time series charts of attributes vs. quantities. Charts are displayed per quantity column and superposing quantities are summed.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.TIME_SERIES_CHARTS", version = "1.1.0")
public class TIME_SERIES_CHARTS extends AbstractEcologicalEngineMapper implements ITransducer{
@ComplexDataInput(abstrakt="Name of the parameter: InputTable. The input table [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="The input table [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "InputTable", binding = GenericFileDataBinding.class) public void setInputTable(GenericFileData file) {inputs.put("InputTable",file);}
@LiteralDataInput(abstrakt="Name of the parameter: Attributes. The dimensions to consider in the charts [a sequence of names of columns from InputTable separated by | ]", defaultValue="", title="The dimensions to consider in the charts [a sequence of names of columns from InputTable separated by | ]", identifier = "Attributes", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setAttributes(String data) {inputs.put("Attributes",data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="XYEXTRACTOR", abstrakt="An algorithm to extract values associated to an environmental feature repository (e.g. NETCDF, ASC, GeoTiff files etc. ). A grid of points at a certain resolution is specified by the user and values are associated to the points from the environmental repository. It accepts as one geospatial repository ID (via their UUIDs in the infrastructure spatial data repository - recoverable through the Geoexplorer portlet) or a direct link to a file and the specification about time and space. The algorithm produces one table containing the values associated to the selected bounding box.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.XYEXTRACTOR", version = "1.1.0")
@Algorithm(statusSupported=true, title="XYEXTRACTOR", abstrakt="An algorithm to extract values associated to an environmental feature repository (e.g. NETCDF, ASC, GeoTiff files etc. ). A grid of points at a certain resolution is specified by the user and values are associated to the points from the environmental repository. It accepts as one geospatial repository ID (via their UUIDs in the infrastructure spatial data repository - recoverable through the Geoexplorer portlet) or a direct link to a file and the specification about time and space. The algorithm produces one table containing the values associated to the selected bounding box.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.XYEXTRACTOR", version = "1.1.0")
public class XYEXTRACTOR extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: Layer. Layer Title or UUID or HTTP link. E.g. the title or the UUID (preferred) of a layer indexed in the e-Infrastructure on GeoNetwork - You can retrieve it from GeoExplorer. Otherwise you can supply the direct HTTP link of the layer. The format will be guessed from the link. The default is GeoTiff. Supports several standards (NETCDF, WFS, WCS ASC, GeoTiff )", defaultValue="", title="Layer Title or UUID or HTTP link. E.g. the title or the UUID (preferred) of a layer indexed in the e-Infrastructure on GeoNetwork - You can retrieve it from GeoExplorer. Otherwise you can supply the direct HTTP link of the layer. The format will be guessed from the link. The default is GeoTiff. Supports several standards (NETCDF, WFS, WCS ASC, GeoTiff )", identifier = "Layer", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setLayer(String data) {inputs.put("Layer",data);}
@LiteralDataInput(abstrakt="Name of the parameter: BBox_LowerLeftLat. Lower Left Latitute of the Bounding Box", defaultValue="-60", title="Lower Left Latitute of the Bounding Box", identifier = "BBox_LowerLeftLat", maxOccurs=1, minOccurs=1, binding = LiteralDoubleBinding.class) public void setBBox_LowerLeftLat(Double data) {inputs.put("BBox_LowerLeftLat",""+data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="XYEXTRACTOR_TABLE", abstrakt="An algorithm to extract values associated to a table containing geospatial features (e.g. Vessel Routes, Species distribution maps etc. ). A grid of points at a certain resolution is specified by the user and values are associated to the points from the environmental repository. It accepts as one geospatial table and the specification about time and space. The algorithm produces one table containing the values associated to the selected bounding box.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.XYEXTRACTOR_TABLE", version = "1.1.0")
@Algorithm(statusSupported=true, title="XYEXTRACTOR_TABLE", abstrakt="An algorithm to extract values associated to a table containing geospatial features (e.g. Vessel Routes, Species distribution maps etc. ). A grid of points at a certain resolution is specified by the user and values are associated to the points from the environmental repository. It accepts as one geospatial table and the specification about time and space. The algorithm produces one table containing the values associated to the selected bounding box.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.XYEXTRACTOR_TABLE", version = "1.1.0")
public class XYEXTRACTOR_TABLE extends AbstractEcologicalEngineMapper implements ITransducer{
@ComplexDataInput(abstrakt="Name of the parameter: geoReferencedTableName. A geospatial table containing at least x,y information [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="A geospatial table containing at least x,y information [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "geoReferencedTableName", binding = GenericFileDataBinding.class) public void setgeoReferencedTableName(GenericFileData file) {inputs.put("geoReferencedTableName",file);}
@LiteralDataInput(abstrakt="Name of the parameter: xColumn. The column containing x (longitude) information [the name of a column from geoReferencedTableName]", defaultValue="x", title="The column containing x (longitude) information [the name of a column from geoReferencedTableName]", identifier = "xColumn", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setxColumn(String data) {inputs.put("xColumn",data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="ZEXTRACTION", abstrakt="An algorithm to extract the Z values from a geospatial features repository (e.g. NETCDF, ASC, GeoTiff files etc. ). The algorithm analyses the repository and automatically extracts the Z values according to the resolution wanted by the user. It produces one chart of the Z values and one table containing the values.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ZEXTRACTION", version = "1.1.0")
@Algorithm(statusSupported=true, title="ZEXTRACTION", abstrakt="An algorithm to extract the Z values from a geospatial features repository (e.g. NETCDF, ASC, GeoTiff files etc. ). The algorithm analyses the repository and automatically extracts the Z values according to the resolution wanted by the user. It produces one chart of the Z values and one table containing the values.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ZEXTRACTION", version = "1.1.0")
public class ZEXTRACTION extends AbstractEcologicalEngineMapper implements ITransducer{
@LiteralDataInput(abstrakt="Name of the parameter: Layer. Layer Title or UUID or HTTP link. E.g. the title or the UUID (preferred) of a layer indexed in the e-Infrastructure on GeoNetwork - You can retrieve it from GeoExplorer. Otherwise you can supply the direct HTTP link of the layer. The format will be guessed from the link. The default is GeoTiff. Supports several standards (NETCDF, WFS, WCS ASC, GeoTiff )", defaultValue="", title="Layer Title or UUID or HTTP link. E.g. the title or the UUID (preferred) of a layer indexed in the e-Infrastructure on GeoNetwork - You can retrieve it from GeoExplorer. Otherwise you can supply the direct HTTP link of the layer. The format will be guessed from the link. The default is GeoTiff. Supports several standards (NETCDF, WFS, WCS ASC, GeoTiff )", identifier = "Layer", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setLayer(String data) {inputs.put("Layer",data);}
@LiteralDataInput(abstrakt="Name of the parameter: OutputTableLabel. The name of the table to produce", defaultValue="extr_", title="The name of the table to produce", identifier = "OutputTableLabel", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setOutputTableLabel(String data) {inputs.put("OutputTableLabel",data);}

View File

@@ -13,7 +13,7 @@ import org.n52.wps.io.data.*;
import org.n52.wps.io.data.binding.complex.*;
import org.n52.wps.io.data.binding.literal.*;
import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;import org.n52.wps.server.*;import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.*;
@Algorithm(statusSupported=false, title="ZEXTRACTION_TABLE", abstrakt="An algorithm to extract a time series of values associated to a table containing geospatial information. The algorithm analyses the time series and automatically searches for hidden periodicities. It produces one chart of the time series, one table containing the time series values and possibly the spectrogram.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ZEXTRACTION_TABLE", version = "1.1.0")
@Algorithm(statusSupported=true, title="ZEXTRACTION_TABLE", abstrakt="An algorithm to extract a time series of values associated to a table containing geospatial information. The algorithm analyses the time series and automatically searches for hidden periodicities. It produces one chart of the time series, one table containing the time series values and possibly the spectrogram.", identifier="org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.transducerers.ZEXTRACTION_TABLE", version = "1.1.0")
public class ZEXTRACTION_TABLE extends AbstractEcologicalEngineMapper implements ITransducer{
@ComplexDataInput(abstrakt="Name of the parameter: geoReferencedTableName. A geospatial table containing at least x,y information [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", title="A geospatial table containing at least x,y information [a http link to a table in UTF-8 encoding following this template: (GENERIC) A generic comma separated csv file in UTF-8 encoding]", maxOccurs=1, minOccurs=1, identifier = "geoReferencedTableName", binding = GenericFileDataBinding.class) public void setgeoReferencedTableName(GenericFileData file) {inputs.put("geoReferencedTableName",file);}
@LiteralDataInput(abstrakt="Name of the parameter: xColumn. The column containing x (longitude) information [the name of a column from geoReferencedTableName]", defaultValue="x", title="The column containing x (longitude) information [the name of a column from geoReferencedTableName]", identifier = "xColumn", maxOccurs=1, minOccurs=1, binding = LiteralStringBinding.class) public void setxColumn(String data) {inputs.put("xColumn",data);}

View File

@@ -27,6 +27,9 @@ import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.I
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.IGenerator;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.IModeller;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mappedclasses.ITransducer;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace.ComputationData;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace.DataspaceManager;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace.StoredData;
import org.hibernate.SessionFactory;
import org.n52.wps.algorithm.annotation.Execute;
import org.n52.wps.server.AbstractAnnotatedAlgorithm;
@@ -67,28 +70,32 @@ import org.slf4j.LoggerFactory;
// TODO include WS links in the output
// WONTFIX manage Gis Links
// WONTFIX Manage the order of the inputs in the WPS description (currently
// TODO manage status
// not
// supported by 52 N)
public class AbstractEcologicalEngineMapper extends AbstractAnnotatedAlgorithm {
/**
* Deploying procedure: 1 - modify configuration files 2 - modify resource
* file: resources/templates/setup.cfg 3 - generate classes with
* ClassGenerator 4 - add new classes in the wps_config.xml on the wps web
* app config folder 5 - produce the Jar file of this project 6 - copy the
* jar file in the lib folder of the wps web app
* change the server parameters in the wps_config.xml file
* Deploying procedure: 1 - modify configuration files 2 - modify resource file: resources/templates/setup.cfg 3 - generate classes with ClassGenerator 4 - add new classes in the wps_config.xml on the wps web app config folder 5 - produce the Jar file of this project 6 - copy the jar file in the lib folder of the wps web app change the server parameters in the wps_config.xml file
*/
// inputs and outputs
public LinkedHashMap<String, Object> inputs = new LinkedHashMap<String, Object>();
public LinkedHashMap<String, Object> outputs = new LinkedHashMap<String, Object>();
public LinkedHashMap<String, Long> times = new LinkedHashMap<String, Long>();
public String startTime;
public String endTime;
public static HashMap<String, DatabaseInfo> databaseParametersMemoryCache = new HashMap<String, DatabaseInfo>();
public static HashMap<String, String> runningcomputations = new HashMap<String, String>();
ComputationalAgent agent;
public String wpsExternalID = null;
ComputationData currentComputation;
public void setWpsExternalID(String wpsExternalID) {
this.wpsExternalID = wpsExternalID;
}
public static synchronized void addComputation(String session, String user) {
runningcomputations.put(session, user);
}
@@ -210,9 +217,8 @@ public class AbstractEcologicalEngineMapper extends AbstractAnnotatedAlgorithm {
} catch (Exception e) {
AnalysisLogger.getLogger().debug("Could not drop Temporary Table: " + table);
}
}
else
AnalysisLogger.getLogger().debug("Could not drop Temporary Table: " + table+" table is null");
} else
AnalysisLogger.getLogger().debug("Could not drop Temporary Table: " + table + " table is null");
}
} catch (Exception e) {
e.printStackTrace();
@@ -223,52 +229,99 @@ public class AbstractEcologicalEngineMapper extends AbstractAnnotatedAlgorithm {
}
}
public void deleteGeneratedFiles(List<File> generatedFiles) throws Exception {
public static void deleteGeneratedFiles(List<File> generatedFiles) throws Exception {
if (generatedFiles != null) {
for (File file : generatedFiles) {
if (file.exists()) {
AnalysisLogger.getLogger().debug("Deleting File " + file.getAbsolutePath());
AnalysisLogger.getLogger().debug("Deleting File Check " + file.delete());
}
else
} else
AnalysisLogger.getLogger().debug("File does not exist " + file.getAbsolutePath());
}
}
}
public void manageUserToken(){
public void manageUserToken() {
String scope = null;
String username = null;
//DONE get scope and username from SmartGears
//get scope from SmartGears
// DONE get scope and username from SmartGears
// get scope from SmartGears
TokenManager tokenm = new TokenManager();
tokenm.getCredentials();
scope = tokenm.getScope();
username = tokenm.getUserName();
//set parameters
// set parameters
inputs.put(ConfigurationManager.scopeParameter, scope);
inputs.put(ConfigurationManager.usernameParameter, username);
}
long statusInterrupt = 0;
float previousStatus = 0;
public void updateStatus(float status) {
if (agent != null) {
long stream = org.n52.wps.server.database.DatabaseFactory.getDatabase().getContentLengthForStoreResponse(wpsExternalID);
//AnalysisLogger.getLogger().debug("STATUS bytes " + stream + " interrupt bytes " + statusInterrupt);
if (statusInterrupt == 0 || statusInterrupt > stream - 3) {
statusInterrupt = stream;
} else {
AnalysisLogger.getLogger().debug("STATUS INTERRUPTED!");
agent.shutdown();
statusInterrupt = -1;
agent = null;
status = -1f;
System.gc();
}
if (status!=previousStatus){
AnalysisLogger.getLogger().debug("STATUS update to:" + status);
previousStatus=status;
super.update(new Integer((int) status));
updateComputationOnWS(status, null);
}
}
}
public void updateComputationOnWS(float status, String exception) {
if (currentComputation != null) {
currentComputation.setStatus(""+status);
if (exception!=null && exception.length()>0)
currentComputation.setException(exception);
DataspaceManager manager = new DataspaceManager(config, currentComputation, null, null, null);
try {
manager.writeRunningComputationData();
} catch (Exception ez) {
AnalysisLogger.getLogger().debug("Dataspace->Status updater->Impossible to write computation information on the Workspace");
AnalysisLogger.getLogger().debug(ez);
}
}
}
@Execute
public void run() throws Exception {
String algorithm = "";
List<String> generatedInputTables = null;
List<String> generatedOutputTables = null;
List<File> generatedFiles = null;
String computationSession = UUID.randomUUID().toString();
String date = new java.text.SimpleDateFormat("dd_MM_yyyy_HH:mm:ss").format(System.currentTimeMillis());
String computationSession = this.getAlgorithmClass().getSimpleName() + "_ID_" + UUID.randomUUID().toString();
if (wpsExternalID != null) {
AnalysisLogger.getLogger().info("Using wps External ID " + wpsExternalID);
computationSession = this.getAlgorithmClass().getSimpleName() + "_ID_" + wpsExternalID;
} else
AnalysisLogger.getLogger().info("Wps External ID not set");
try {
// wait for server resources to be available
startTime = new java.text.SimpleDateFormat("dd/MM/yyyy HH:mm:ss").format(System.currentTimeMillis());
time("WPS Algorithm objects Initialization: Session " + computationSession);
ch.qos.logback.classic.Logger root = (ch.qos.logback.classic.Logger) LoggerFactory.getLogger(ch.qos.logback.classic.Logger.ROOT_LOGGER_NAME);
root.setLevel(ch.qos.logback.classic.Level.OFF);
// set the configuration environment for this algorithm
ConfigurationManager configManager = new ConfigurationManager(); //initializes parameters from file
ConfigurationManager configManager = new ConfigurationManager(); // initializes parameters from file
manageUserToken();
configManager.configAlgorithmEnvironment(inputs);
configManager.setComputationId(computationSession);
config = configManager.getConfig();
AnalysisLogger.getLogger().info("Configured algorithm with session " + computationSession);
time("Configuration");
@@ -296,7 +349,7 @@ public class AbstractEcologicalEngineMapper extends AbstractAnnotatedAlgorithm {
} else
AnalysisLogger.getLogger().info("Using cached database information: " + supportDatabaseInfo);
AnalysisLogger.getLogger().info("Retrieved Central Database: " + supportDatabaseInfo);
InputsManager inputsManager = new InputsManager(inputs, config);
InputsManager inputsManager = new InputsManager(inputs, config, computationSession);
inputsManager.configSupportDatabaseParameters(supportDatabaseInfo);
time("Central database information retrieval");
// retrieve the algorithm to execute
@@ -313,13 +366,17 @@ public class AbstractEcologicalEngineMapper extends AbstractAnnotatedAlgorithm {
// merging wps with ecological engine parameters - modifies the
// config
AnalysisLogger.getLogger().info("6 - Translating WPS Inputs into Ecological Engine Inputs");
// build computation Data
currentComputation = new ComputationData(config.getTaskID(), config.getAgent(), "", "", startTime, "-", "0", config.getTaskID(), configManager.getUsername());
inputsManager.mergeWpsAndEcologicalInputs(supportDatabaseInfo);
generatedInputTables = inputsManager.getGeneratedTables();
generatedFiles = inputsManager.getGeneratedInputFiles();
time("Setup and download of input parameters with tables creation");
// retrieve the computational agent given the configuration
AnalysisLogger.getLogger().info("7 - Retrieving Ecological Engine algorithm");
ComputationalAgent agent = getComputationalAgent(algorithm);
agent = getComputationalAgent(algorithm);
currentComputation.setOperatorDescription(agent.getDescription());
currentComputation.setInfrastructure(agent.getInfrastructure().name());
AnalysisLogger.getLogger().debug("Found Ecological Engine Algorithm: " + agent);
time("Algorithm initialization");
// take the a priori declared wps output
@ -332,7 +389,8 @@ public class AbstractEcologicalEngineMapper extends AbstractAnnotatedAlgorithm {
}
time("A priori output retrieval");
// run the computation
AnalysisLogger.getLogger().info("9 - Running the computation");
AnalysisLogger.getLogger().info("9 - Running the computation and updater");
runStatusUpdater();
agent.init();
agent.compute();
AnalysisLogger.getLogger().info("The computation has finished. Retrieving output");
@ -344,7 +402,7 @@ public class AbstractEcologicalEngineMapper extends AbstractAnnotatedAlgorithm {
time("Output retrieval");
// merge the posterior and prior outputs
AnalysisLogger.getLogger().info("11 - Merging the a priori and a posteriori output");
OutputsManager outputmanager = new OutputsManager(config, computationSession);
outputs = outputmanager.createOutput(prioroutput, postoutput);
// in the case of storage usage, delete all local files
generatedOutputTables = outputmanager.getGeneratedTables();
@ -353,32 +411,78 @@ public class AbstractEcologicalEngineMapper extends AbstractAnnotatedAlgorithm {
time("Output preparation for WPS document (using storage)");
} else
time("Output preparation for WPS document (no storage manager)");
outputmanager.shutdown();
// delete all temporary tables
AnalysisLogger.getLogger().info("12 - Deleting possible generated temporary tables");
AnalysisLogger.getLogger().debug("Final Computation Output: " + outputs);
AnalysisLogger.getLogger().info("12 - Deleting possible generated temporary tables");
AnalysisLogger.getLogger().debug("All done");
endTime = new java.text.SimpleDateFormat("dd/MM/yyyy HH:mm:ss").format(System.currentTimeMillis());
saveComputationOnWS(inputsManager.getProvenanceData(), outputmanager.getProvenanceData(), agent, generatedFiles);
} catch (Exception e) {
AnalysisLogger.getLogger().debug("Error in Algorithm execution: " + algorithm);
AnalysisLogger.getLogger().debug(e);
e.printStackTrace();
updateComputationOnWS(-2,e.getMessage());
throw e;
} finally {
AnalysisLogger.getLogger().debug("Deleting Input Tables");
deleteTemporaryTables(generatedInputTables);
AnalysisLogger.getLogger().debug("Deleting Output Tables");
deleteTemporaryTables(generatedOutputTables);
AnalysisLogger.getLogger().debug("Deleting Files");
deleteGeneratedFiles(generatedFiles);
// AnalysisLogger.getLogger().debug("Deleting Files");
// deleteGeneratedFiles(generatedFiles);
// remove this computation from the list
removeComputation(computationSession);
// cleanResources();
time("Cleaning of resources");
displayTimes();
cleanResources();
}
}
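// Inner polling thread: checks the agent status every 10 seconds and propagates it to the WPS status document until the computation reaches 100%.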
public class StatusUpdater implements Runnable {
@Override
public void run() {
while (agent != null && agent.getStatus() < 100) {
try {
updateStatus(agent.getStatus());
Thread.sleep(10000);
} catch (InterruptedException e) {
// an interrupted sleep is only logged; the polling loop then continues
e.printStackTrace();
}
}
AnalysisLogger.getLogger().info("Status updater terminated");
}
}
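// Starts the StatusUpdater on a dedicated thread so progress updates run alongside the computation.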
private void runStatusUpdater() {
StatusUpdater updater = new StatusUpdater();
Thread t = new Thread(updater);
t.start();
AnalysisLogger.getLogger().debug("Provenance manager running");
}
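// Builds the final ComputationData record (status 100) and delegates provenance writing to a DataspaceManager running on a separate thread.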
private void saveComputationOnWS(List<StoredData> inputData, List<StoredData> outputData, ComputationalAgent agent, List<File> generatedFiles) {
AnalysisLogger.getLogger().debug("Provenance manager started");
ComputationData computation = new ComputationData(config.getTaskID(), config.getAgent(), agent.getDescription(), agent.getInfrastructure().name(), startTime, endTime, "100", config.getTaskID(),config.getParam(ConfigurationManager.serviceUserNameParameterVariable));
// post on WS
DataspaceManager manager = new DataspaceManager(config, computation, inputData, outputData, generatedFiles);
Thread t = new Thread(manager);
t.start();
AnalysisLogger.getLogger().debug("Provenance manager running");
}
private void time(String label) {
times.put(label, System.currentTimeMillis());
}
@ -403,6 +507,7 @@ public class AbstractEcologicalEngineMapper extends AbstractAnnotatedAlgorithm {
private void cleanResources() {
times = null;
agent = null;
System.gc();
}

View File

@ -74,6 +74,10 @@ public class ConfigurationManager {
return config;
}
public void setComputationId(String computationId){
config.setTaskID(computationId);
}
public void configAlgorithmEnvironment(LinkedHashMap<String, Object> inputs) throws Exception {
// set config container
config = new AlgorithmConfiguration();

View File

@ -20,8 +20,13 @@ import java.util.List;
import java.util.UUID;
import org.apache.commons.io.IOUtils;
import org.gcube.common.scope.api.ScopeProvider;
import org.gcube.contentmanagement.blobstorage.service.IClient;
import org.gcube.contentmanagement.lexicalmatcher.utils.AnalysisLogger;
import org.gcube.contentmanagement.lexicalmatcher.utils.FileTools;
import org.gcube.contentmanager.storageclient.wrapper.AccessType;
import org.gcube.contentmanager.storageclient.wrapper.MemoryType;
import org.gcube.contentmanager.storageclient.wrapper.StorageClient;
import org.gcube.dataanalysis.ecoengine.configuration.AlgorithmConfiguration;
import org.gcube.dataanalysis.ecoengine.datatypes.DatabaseType;
import org.gcube.dataanalysis.ecoengine.datatypes.InputTable;
@ -35,6 +40,9 @@ import org.gcube.dataanalysis.ecoengine.datatypes.enumtypes.ServiceParameters;
import org.gcube.dataanalysis.ecoengine.utils.DatabaseUtils;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.infrastructure.DatabaseInfo;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.infrastructure.InfrastructureDialoguer;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace.DataProvenance;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace.StoredData;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace.StoredType;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.utils.GML2CSV;
import org.hibernate.SessionFactory;
import org.n52.wps.io.data.GenericFileData;
@ -45,15 +53,24 @@ public class InputsManager {
List<File> generatedFiles;
HashMap<String, String> inputTableTemplates = new HashMap<String, String>();
AlgorithmConfiguration config;
String computationId;
List<StoredData> provenanceData = new ArrayList<StoredData>();
public List<StoredData> getProvenanceData() {
return provenanceData;
}
public static String inputsSeparator = "\\|";
public AlgorithmConfiguration getConfig() {
return config;
}
public InputsManager(LinkedHashMap<String, Object> inputs, AlgorithmConfiguration config) {
public InputsManager(LinkedHashMap<String, Object> inputs, AlgorithmConfiguration config, String computationId) {
this.inputs = inputs;
this.config = config;
this.computationId = computationId;
generatedTables = new ArrayList<String>();
generatedFiles = new ArrayList<File>();
}
@ -92,6 +109,8 @@ public class InputsManager {
String inputAlgo = ((String) input).trim().replaceAll(inputsSeparator, AlgorithmConfiguration.listSeparator);
AnalysisLogger.getLogger().debug("Simple Input Transformed: " + inputAlgo);
config.setParam(inputName, inputAlgo);
saveInputData(inputName,inputName,inputAlgo);
}
// case of Complex Input
else if (input instanceof GenericFileData) {
@ -121,6 +140,8 @@ public class InputsManager {
inputtables = inputtables + AlgorithmConfiguration.getListSeparator();
inputtables += tableName;
saveInputData(inputName, inputName, tableFile.getAbsolutePath());
}
// the only possible complex input is a table - check the WPS
// parsers
@ -379,4 +400,25 @@ public class InputsManager {
}
}
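// Records an input as provenance data: payloads pointing to an existing local file are typed as DATA, all other values as STRING.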
private void saveInputData(String name, String description, String payload){
String id = name;
DataProvenance provenance = DataProvenance.IMPORTED;
String creationDate = new java.text.SimpleDateFormat("dd/MM/yyyy HH:mm:ss").format(System.currentTimeMillis());
String operator = config.getAgent();
StoredType type = StoredType.STRING;
if (payload != null && (new File(payload).exists())) {
type = StoredType.DATA;
}
StoredData data = new StoredData(name, description, id, provenance, creationDate, operator, computationId, type, payload);
provenanceData.add(data);
}
}

View File

@ -2,9 +2,7 @@ package org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
@ -17,6 +15,10 @@ import org.gcube.contentmanager.storageclient.wrapper.MemoryType;
import org.gcube.contentmanager.storageclient.wrapper.StorageClient;
import org.gcube.dataanalysis.ecoengine.configuration.AlgorithmConfiguration;
import org.gcube.dataanalysis.ecoengine.datatypes.StatisticalType;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace.DataProvenance;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace.DataspaceManager;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace.StoredData;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace.StoredType;
public class OutputsManager {
@ -26,6 +28,15 @@ public class OutputsManager {
private List<String> generatedTables = new ArrayList<String>();
private IClient storageclient;
private String computationsession;
private List<StoredData> provenanceData = new ArrayList<StoredData>();
public List<StoredData> getProvenanceData() {
return provenanceData;
}
public List<File> getGeneratedData() {
return generatedFiles;
}
public List<File> getGeneratedFiles() {
return generatedFiles;
@ -113,6 +124,7 @@ public class OutputsManager {
AnalysisLogger.getLogger().debug("Output was not expected: " + okey);
ndoutput.put(okey, postInfo);
}
saveProvenanceData(postInfo);
}
System.gc();
@ -132,6 +144,25 @@ public class OutputsManager {
return outputs;
}
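// Records an output as provenance data: outputs with local machine content are typed as DATA, the rest as STRING, tagged with the computation session id.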
private void saveProvenanceData(IOWPSInformation info){
String name = info.getName();
String id = info.getName();
DataProvenance provenance = DataProvenance.COMPUTED;
String creationDate = new java.text.SimpleDateFormat("dd/MM/yyyy HH:mm:ss").format(System.currentTimeMillis());
String operator = config.getAgent();
String computationId = computationsession;
StoredType type = StoredType.STRING;
if (info.getLocalMachineContent() != null) {
type = StoredType.DATA;
}
String payload = info.getContent();
StoredData data = new StoredData(name, info.getAbstractStr(), id, provenance, creationDate, operator, computationId, type, payload);
provenanceData.add(data);
}
private void prepareForStoring() {
AnalysisLogger.getLogger().debug("Preparing storage client");
String scope = config.getGcubeScope();
@ -142,13 +173,16 @@ public class OutputsManager {
storageclient = new StorageClient(serviceClass, serviceName, owner, AccessType.SHARED, MemoryType.VOLATILE).getClient();
AnalysisLogger.getLogger().debug("Storage client ready");
}
private String uploadFileOnStorage(String localfile, String mimetype) throws Exception {
AnalysisLogger.getLogger().debug("Start uploading on storage the following file: " + localfile);
File localFile = new File(localfile);
String remotef = "/wps_synch_output/" +config.getAgent()+"/"+computationsession+"/"+ localFile.getName();
storageclient.put(true).LFile(localfile).RFile(remotef);
String url = storageclient.getHttpUrl().RFile(remotef);
/*
if (config.getGcubeScope().startsWith("/gcube"))
url = "http://data-d.d4science.org/uri-resolver/smp?smp-uri=" + url + "&fileName=" + localFile.getName() + "&contentType=" + mimetype;
@ -159,7 +193,7 @@ public class OutputsManager {
return url;
}
public String cleanTagString(String tag) {
return tag.replace(" ", "_").replaceAll("[\\]\\[!\"#$%&'()*+,\\./:;<=>?@\\^`{|}~-]", "");
}
@ -249,4 +283,12 @@ public class OutputsManager {
return xmlData;
}
public void shutdown(){
try{
storageclient.close();
}catch(Exception e){
}
}
}

View File

@ -0,0 +1,85 @@
package org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace;
public class ComputationData {
public ComputationData(String name, String operator, String operatorDescription, String infrastructure, String startDate, String endDate, String status, String id, String user) {
super();
this.name = name;
this.operator = operator;
this.operatorDescription = operatorDescription;
this.infrastructure = infrastructure;
this.startDate = startDate;
this.endDate = endDate;
this.status = status;
this.id = id;
this.user=user;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getOperator() {
return operator;
}
public void setOperator(String operator) {
this.operator = operator;
}
public String getOperatorDescription() {
return operatorDescription;
}
public void setOperatorDescription(String operatorDescription) {
this.operatorDescription = operatorDescription;
}
public String getInfrastructure() {
return infrastructure;
}
public void setInfrastructure(String infrastructure) {
this.infrastructure = infrastructure;
}
public String getStartDate() {
return startDate;
}
public void setStartDate(String startDate) {
this.startDate = startDate;
}
public String getEndDate() {
return endDate;
}
public void setEndDate(String endDate) {
this.endDate = endDate;
}
public String getStatus() {
return status;
}
public void setStatus(String status) {
this.status = status;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public String getException() {
return exception;
}
public void setException(String exception) {
this.exception = exception;
}
public String exception;
public String name;
public String operator;
public String operatorDescription;
public String infrastructure;
public String startDate;
public String endDate;
public String status;
public String id;
public String user;
}

View File

@ -0,0 +1,5 @@
package org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace;
public enum DataProvenance {
IMPORTED,COMPUTED
}

View File

@ -0,0 +1,345 @@
package org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.UUID;
import org.gcube.common.homelibrary.home.Home;
import org.gcube.common.homelibrary.home.HomeLibrary;
import org.gcube.common.homelibrary.home.HomeManager;
import org.gcube.common.homelibrary.home.HomeManagerFactory;
import org.gcube.common.homelibrary.home.User;
import org.gcube.common.homelibrary.home.workspace.Workspace;
import org.gcube.common.homelibrary.home.workspace.WorkspaceFolder;
import org.gcube.common.homelibrary.home.workspace.WorkspaceItem;
import org.gcube.common.homelibrary.home.workspace.folder.FolderItem;
import org.gcube.common.homelibrary.util.WorkspaceUtil;
import org.gcube.contentmanagement.lexicalmatcher.utils.AnalysisLogger;
import org.gcube.contentmanagement.lexicalmatcher.utils.FileTools;
import org.gcube.dataanalysis.ecoengine.configuration.AlgorithmConfiguration;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;
public class DataspaceManager implements Runnable {
public static String dataminerFolder = "DataMiner";
public static String importedDataFolder = "Input Data Sets";
public static String computedDataFolder = "Output Data Sets";
public static String computationsFolder = "Computations";
AlgorithmConfiguration config;
ComputationData computation;
List<StoredData> inputData;
List<StoredData> outputData;
List<File> generatedFiles;
public static String computation_id = "computation_id";
public static String data_id = "data_id";
public static String data_type = "data_type";
public static String operator_name = "operator_name";
public static String operator_description = "operator_description";
public static String data_description = "data_description";
public static String creation_date = "creation_date";
public static String start_date = "start_date";
public static String end_date = "end_date";
public static String status = "status";
public static String execution_platform = "execution_type";
public static String error = "error";
public static String IO = "IO";
public static String operator = "operator";
public static String payload = "payload";
public DataspaceManager(AlgorithmConfiguration config, ComputationData computation, List<StoredData> inputData, List<StoredData> outputData, List<File> generatedFiles) {
this.config = config;
this.computation = computation;
this.inputData = inputData;
this.outputData = outputData;
this.generatedFiles = generatedFiles;
}
public void run() {
try {
AnalysisLogger.getLogger().debug("Dataspace->Deleting running computation");
try {
deleteRunningComputationData();
} catch (Exception e) {
AnalysisLogger.getLogger().debug("Dataspace->No running computation available");
}
AnalysisLogger.getLogger().debug("Dataspace->Writing provenance information");
writeProvenance(computation, inputData, outputData);
} catch (Exception e) {
e.printStackTrace();
AnalysisLogger.getLogger().debug("Dataspace-> error writing provenance information " + e.getLocalizedMessage());
AnalysisLogger.getLogger().debug(e);
}
}
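// Creates the DataMiner folder network (main folder, Input Data Sets, Output Data Sets, Computations) under the Workspace root when missing.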
public void createFoldersNetwork(Workspace ws, WorkspaceFolder root) throws Exception {
AnalysisLogger.getLogger().debug("Dataspace->Creating folders for DataMiner");
// manage folders: create the folders network
if (!ws.exists(dataminerFolder, root.getId())) {
AnalysisLogger.getLogger().debug("Dataspace->Creating DataMiner main folder");
root.createFolder(dataminerFolder, "A folder collecting DataMiner experiments data and computation information");
}
WorkspaceFolder dataminerFolderWS = (WorkspaceFolder) root.find(dataminerFolder);
if (!ws.exists(importedDataFolder, dataminerFolderWS.getId())) {
AnalysisLogger.getLogger().debug("Dataspace->Creating DataMiner imported data folder");
dataminerFolderWS.createFolder(importedDataFolder, "A folder collecting DataMiner imported data");
}
if (!ws.exists(computedDataFolder, dataminerFolderWS.getId())) {
AnalysisLogger.getLogger().debug("Dataspace->Creating DataMiner computed data folder");
dataminerFolderWS.createFolder(computedDataFolder, "A folder collecting DataMiner computed data");
}
if (!ws.exists(computationsFolder, dataminerFolderWS.getId())) {
AnalysisLogger.getLogger().debug("Dataspace->Creating DataMiner computations folder");
dataminerFolderWS.createFolder(computationsFolder, "A folder collecting DataMiner computations information");
}
}
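// Uploads one StoredData item: DATA payloads are read from a local file or fetched from a URL and stored as an external file annotated with provenance properties; STRING payloads are returned as-is.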
public String uploadData(StoredData data, WorkspaceFolder wsFolder) throws Exception {
String filenameonwsString = WorkspaceUtil.getUniqueName(data.name, wsFolder);
InputStream in = null;
String url = "";
if (data.type == StoredType.DATA) {
if (new File(data.payload).exists()) {
AnalysisLogger.getLogger().debug("Dataspace->Uploading file " + data.payload);
in = new FileInputStream(new File(data.payload));
} else {
AnalysisLogger.getLogger().debug("Dataspace->Uploading via URL " + data.payload);
URL urlc = new URL(data.payload);
HttpURLConnection urlConnection = (HttpURLConnection) urlc.openConnection();
in = new BufferedInputStream(urlConnection.getInputStream());
}
// AnalysisLogger.getLogger().debug("Dataspace->final file name on ws " + data.name+" description "+data.description);
FolderItem fileItem = WorkspaceUtil.createExternalFile(wsFolder, filenameonwsString, data.description, null, in);
fileItem.getProperties().addProperty(computation_id, data.computationId);
fileItem.getProperties().addProperty(creation_date, data.creationDate);
fileItem.getProperties().addProperty(operator, data.operator);
fileItem.getProperties().addProperty(data_id, data.id);
fileItem.getProperties().addProperty(data_description, data.description);
fileItem.getProperties().addProperty(IO, data.provenance.name());
fileItem.getProperties().addProperty(data_type, data.type.name());
url = fileItem.getPublicLink(true);
fileItem.getProperties().addProperty(payload, url);
try {
in.close();
} catch (Exception e) {
}
AnalysisLogger.getLogger().debug("Dataspace->File created " + data.name);
} else {
AnalysisLogger.getLogger().debug("Dataspace->Uploading string " + data.payload);
url = data.payload;
}
return url;
}
public List<String> uploadInputData(List<StoredData> inputData, WorkspaceFolder dataminerFolder) throws Exception {
AnalysisLogger.getLogger().debug("Dataspace->uploading input data " + inputData.size());
WorkspaceItem folderItem = dataminerFolder.find(importedDataFolder);
List<String> urls = new ArrayList<String>();
if (folderItem != null && folderItem.isFolder()) {
WorkspaceFolder destinationFolder = (WorkspaceFolder) folderItem;
for (StoredData input : inputData) {
String url = uploadData(input, destinationFolder);
urls.add(url);
}
} else
AnalysisLogger.getLogger().debug("Dataspace->folder is not valid");
AnalysisLogger.getLogger().debug("Dataspace->finished uploading input data");
return urls;
}
public List<String> uploadOutputData(List<StoredData> outputData, WorkspaceFolder dataminerFolder) throws Exception {
AnalysisLogger.getLogger().debug("Dataspace->uploading output data" + outputData.size());
WorkspaceItem folderItem = dataminerFolder.find(computedDataFolder);
List<String> urls = new ArrayList<String>();
if (folderItem != null && folderItem.isFolder()) {
WorkspaceFolder destinationFolder = (WorkspaceFolder) folderItem;
for (StoredData output : outputData) {
String url = uploadData(output, destinationFolder);
urls.add(url);
}
} else
AnalysisLogger.getLogger().debug("Dataspace->folder is not valid");
AnalysisLogger.getLogger().debug("Dataspace->finished uploading output data");
return urls;
}
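// Builds a per-computation folder with copies of the inputs and outputs, attaches the computation properties, stores a ProvO XML summary and registers a gCube COMPUTATION item.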
public void uploadComputationData(ComputationData computation, List<StoredData> inputData, List<StoredData> outputData, WorkspaceFolder dataminerFolder, Workspace ws) throws Exception {
AnalysisLogger.getLogger().debug("Dataspace->uploading computation data");
WorkspaceItem folderItem = dataminerFolder.find(computationsFolder);
if (folderItem != null && folderItem.isFolder()) {
// create a folder in here
AnalysisLogger.getLogger().debug("Dataspace->Creating computation folder " + computation.id);
WorkspaceFolder cfolder = ((WorkspaceFolder) folderItem);
String cfoldername = WorkspaceUtil.getUniqueName(computation.id, cfolder);
WorkspaceFolder newcomputationFolder = cfolder.createFolder(cfoldername, computation.operatorDescription);
String itemType = "COMPUTATION";
// create IO folders
AnalysisLogger.getLogger().debug("Dataspace->creating IO folders under "+cfoldername);
newcomputationFolder.createFolder(importedDataFolder, importedDataFolder);
newcomputationFolder.createFolder(computedDataFolder, computedDataFolder);
// copy IO in those folders
List<String> inputurls = uploadInputData(inputData, newcomputationFolder);
List<String> outputurls = uploadOutputData(outputData, newcomputationFolder);
AnalysisLogger.getLogger().debug("Dataspace->creating gCube Item");
// write a computation item for the computation
LinkedHashMap<String, String> properties = new LinkedHashMap<String, String>();
properties.put(computation_id, computation.id);
newcomputationFolder.getProperties().addProperty(computation_id, computation.id);
properties.put(operator_name, config.getAgent());
newcomputationFolder.getProperties().addProperty(operator_name, config.getAgent());
properties.put(operator_description, computation.operatorDescription);
newcomputationFolder.getProperties().addProperty(operator_description, computation.operatorDescription);
properties.put(start_date, computation.startDate);
newcomputationFolder.getProperties().addProperty(start_date, computation.startDate);
properties.put(end_date, computation.endDate);
newcomputationFolder.getProperties().addProperty(end_date, computation.endDate);
properties.put(status, computation.status);
newcomputationFolder.getProperties().addProperty(status, computation.status);
properties.put(execution_platform, computation.infrastructure);
newcomputationFolder.getProperties().addProperty(execution_platform, computation.infrastructure);
int ninput = inputurls.size();
int noutput = outputurls.size();
AnalysisLogger.getLogger().debug("Dataspace->Adding input properties for "+ninput+" inputs");
for (int i = 1; i <= ninput; i++) {
properties.put("input"+i+"_"+inputData.get(i-1).name, inputurls.get(i-1));
newcomputationFolder.getProperties().addProperty("input"+i+"_"+inputData.get(i-1).name, inputurls.get(i-1));
}
AnalysisLogger.getLogger().debug("Dataspace->Adding output properties for "+noutput+" outputs");
for (int i = 1; i <= noutput; i++) {
properties.put("output"+i+"_"+outputData.get(i-1).name, outputurls.get(i-1));
newcomputationFolder.getProperties().addProperty("output"+i+"_"+outputData.get(i-1).name, outputurls.get(i-1));
}
AnalysisLogger.getLogger().debug("Dataspace->Saving properties to ProvO XML file "+noutput+" outputs");
/*XStream xstream = new XStream();
String xmlproperties = xstream.toXML(properties);
*/
try {
String xmlproperties = ProvOGenerator.toProvO(computation, inputData, outputData);
File xmltosave = new File(config.getPersistencePath(),"properties_"+UUID.randomUUID());
FileTools.saveString(xmltosave.getAbsolutePath(), xmlproperties, true, "UTF-8");
InputStream sis = new FileInputStream(xmltosave);
WorkspaceUtil.createExternalFile(newcomputationFolder, computation.id+".xml", computation.operatorDescription, null, sis);
sis.close();
} catch (Exception e) {
AnalysisLogger.getLogger().debug("Dataspace->Failed creating ProvO XML file "+e.getLocalizedMessage());
AnalysisLogger.getLogger().debug(e);
e.printStackTrace();
}
List<String> scopes = new ArrayList<String>();
scopes.add(config.getGcubeScope());
ws.createGcubeItem(computation.id, computation.operatorDescription, scopes, computation.user, itemType, properties, newcomputationFolder.getId());
}
AnalysisLogger.getLogger().debug("Dataspace->finished uploading computation data");
}
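// Entry point of the provenance thread: opens the user's Workspace, ensures the folder network, uploads output and computation data (input upload is currently commented out) and finally deletes the locally generated files.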
public void writeProvenance(ComputationData computation, List<StoredData> inputData, List<StoredData> outputData) throws Exception {
AnalysisLogger.getLogger().debug("Dataspace->connecting to Workspace");
HomeManagerFactory factory = HomeLibrary.getHomeManagerFactory();
HomeManager manager = factory.getHomeManager();
AnalysisLogger.getLogger().debug("Dataspace->getting user");
User user = manager.createUser(computation.user);
Home home = manager.getHome(user);
AnalysisLogger.getLogger().debug("Dataspace->getting root folder");
Workspace ws = home.getWorkspace();
WorkspaceFolder root = ws.getRoot();
AnalysisLogger.getLogger().debug("Dataspace->create folders network");
createFoldersNetwork(ws, root);
WorkspaceFolder dataminerItem = (WorkspaceFolder) root.find(dataminerFolder);
AnalysisLogger.getLogger().debug("Dataspace->uploading input files");
// uploadInputData(inputData, dataminerItem);
AnalysisLogger.getLogger().debug("Dataspace->uploading output files");
uploadOutputData(outputData, dataminerItem);
AnalysisLogger.getLogger().debug("Dataspace->uploading computation files");
uploadComputationData(computation, inputData, outputData, dataminerItem, ws);
AnalysisLogger.getLogger().debug("Dataspace->provenance management finished");
AnalysisLogger.getLogger().debug("Dataspace->deleting generated files");
AbstractEcologicalEngineMapper.deleteGeneratedFiles(generatedFiles);
AnalysisLogger.getLogger().debug("Dataspace->generated files deleted");
}
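// Publishes a gCube item describing the running computation in the Computations folder, after removing any previous item with the same id.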
public void writeRunningComputationData() throws Exception {
try {
deleteRunningComputationData();
} catch (Exception e) {
AnalysisLogger.getLogger().debug("Dataspace->impossible to delete running computation");
}
// AnalysisLogger.getLogger().debug("Dataspace->updating computation status");
// AnalysisLogger.getLogger().debug("Dataspace->connecting to Workspace");
HomeManagerFactory factory = HomeLibrary.getHomeManagerFactory();
HomeManager manager = factory.getHomeManager();
// AnalysisLogger.getLogger().debug("Dataspace->getting user");
User user = manager.createUser(computation.user);
Home home = manager.getHome(user);
// AnalysisLogger.getLogger().debug("Dataspace->getting root folder");
Workspace ws = home.getWorkspace();
WorkspaceFolder root = ws.getRoot();
// AnalysisLogger.getLogger().debug("Dataspace->create folders network");
createFoldersNetwork(ws, root);
WorkspaceFolder dataminerFolderWS = (WorkspaceFolder) root.find(dataminerFolder);
WorkspaceItem computationsFolderItem = dataminerFolderWS.find(computationsFolder);
// AnalysisLogger.getLogger().debug("Dataspace->Creating computation item " + computation.id+" with status"+computation.status);
String itemType = "COMPUTATION";
// write a computation item for the computation
LinkedHashMap<String, String> properties = new LinkedHashMap<String, String>();
properties.put(computation_id, computation.id);
properties.put(operator_name, config.getAgent());
properties.put(operator_description, computation.operatorDescription);
properties.put(start_date, computation.startDate);
properties.put(end_date, computation.endDate);
properties.put(status, computation.status);
properties.put(execution_platform, computation.infrastructure);
if (computation.exception != null && computation.exception.length() > 0)
properties.put(error, computation.exception);
List<String> scopes = new ArrayList<String>();
scopes.add(config.getGcubeScope());
ws.createGcubeItem(computation.id, computation.operatorDescription, scopes, computation.user, itemType, properties, computationsFolderItem.getId());
AnalysisLogger.getLogger().debug("Dataspace->finished uploading computation data");
}
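// Removes the item describing the running computation from the Computations folder.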
public void deleteRunningComputationData() throws Exception {
AnalysisLogger.getLogger().debug("Dataspace->deleting computation item");
AnalysisLogger.getLogger().debug("Dataspace->connecting to Workspace");
HomeManagerFactory factory = HomeLibrary.getHomeManagerFactory();
HomeManager manager = factory.getHomeManager();
AnalysisLogger.getLogger().debug("Dataspace->getting user");
User user = manager.createUser(computation.user);
Home home = manager.getHome(user);
AnalysisLogger.getLogger().debug("Dataspace->getting root folder");
Workspace ws = home.getWorkspace();
WorkspaceFolder root = ws.getRoot();
WorkspaceFolder dataminerFolderWS = (WorkspaceFolder) root.find(dataminerFolder);
WorkspaceItem computationsFolderItem = dataminerFolderWS.find(computationsFolder);
AnalysisLogger.getLogger().debug("Dataspace->removing computation data");
((WorkspaceFolder) computationsFolderItem).find(computation.id).remove();
AnalysisLogger.getLogger().debug("Dataspace->finished removing computation data");
}
}

View File

@ -0,0 +1,219 @@
package org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace;
import java.io.StringReader;
import java.io.StringWriter;
import java.util.ArrayList;
import java.util.List;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import org.xml.sax.InputSource;
public class ProvOGenerator {
static String document ="<prov:document xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\" xmlns:xsd=\"http://www.w3.org/2001/XMLSchema\" " +
"xmlns:prov=\"http://www.w3.org/ns/prov#\" xmlns:d4s=\"http://d4science.org/#\">" +
"#DOCUMENT#" +
"</prov:document>";
static String activity = "<prov:activity prov:id=\"#ID#\">"+
"<prov:startTime>#START_TIME#</prov:startTime>"+
"<prov:endTime>#END_TIME#</prov:endTime>"+
"<prov:type xsi:type=\"xsd:QName\">d4s:computation</prov:type>"+
"<prov:softwareAgent prov:id=\"d4s:dataminer.d4science.org\" />"+
"<prov:person prov:id=\"d4s:#PERSON#\" />"+
"#ENTITIES#"+
"</prov:activity>";
static String entity="<prov:entity prov:id=\"d4s:#ENTITY_NAME#\">"+
//"<prov:type xsi:type=\"xsd:QName\"></prov:type>"+
"<prov:value xsi:type=\"xsd:string\">#ENTITY_VALUE#</prov:value>"+
"#SUBENTITY#"+
"</prov:entity>";
static String entityWithTime="<prov:entity prov:id=\"d4s:#ENTITY_NAME#\">"+
//"<prov:type xsi:type=\"xsd:QName\"></prov:type>"+
"<prov:value xsi:type=\"xsd:string\">#ENTITY_VALUE#</prov:value>"+
"<prov:time>#TIME#</prov:time>"+
"#SUBENTITY#"+
"</prov:entity>";
static String attribute = "<prov:type xsi:type=\"xsd:QName\">d4s:#NAME#</prov:type>";
static String referenceActivity = "<prov:activity prov:ref=\"d4s:#ID#\"/>";
static String referenceEntity = "<prov:entity prov:ref=\"d4s:#ID#\"/>";
public static void main(String[] args) {
String name = "DBSCAN_1234";
String startDate = "17/03/2016 11:32:22";
String endDate = "17/03/2016 12:42:22";
String operator = "DBSCAN";
String operatorDescription = "example test";
String infrastructure = "LOCAL";
String status = "100";
String id = name;
String user = "gianpaolo.coro";
ComputationData computation = new ComputationData(name, operator, operatorDescription, infrastructure, startDate, endDate, status, name,user);
/*
public static String operator_description="operator_description";
public static String data_description="data_description";
public static String creation_date="creation_date";
public static String start_date="start_date";
public static String end_date="end_date";
public static String status="status";
public static String execution_type="execution_type";
public static String error="error";
public static String IO="IO";
public static String operator="operator";
*/
List<StoredData> inputData = new ArrayList<StoredData>();
List<StoredData> outputData = new ArrayList<StoredData>();
StoredData in = new StoredData("inputT1","descrT1", "inputT1", DataProvenance.IMPORTED, "15/03/2016 11:32:22", operator, id, StoredType.STRING, "hello");
inputData.add(in);
StoredData out = new StoredData("outputT1","descrT1", "outputT1", DataProvenance.COMPUTED, "16/03/2016 11:32:22", operator, id, StoredType.STRING, "hellooutput");
outputData.add(out);
//System.out.println(dataToEntity(in));
System.out.println(toProvO(computation, inputData, outputData));
}
public static String getDataIOAttribute(String IO){
return attribute(IO);
}
public static String getDataTypeAttribute(String type){
return attribute(type);
}
public static String getDataDescriptionEntity(String datadescription){
return entity(DataspaceManager.data_description, datadescription);
}
public static String getOperatorRefEntity(String operator_id){
return refentity(operator_id);
}
public static String getComputationRefEntity(String computation_id){
return refactivity(computation_id);
}
public static String dataToEntity(StoredData data){
String io = getDataIOAttribute(data.provenance.name());
String type = getDataTypeAttribute(data.type.name());
String description = getDataDescriptionEntity(data.description);
String operator = getOperatorRefEntity(data.operator);
String computation = getComputationRefEntity(data.computationId);
String subentity = computation+operator+description+io+type;
String dataEntity = completeEntityWithTime(data.id, data.payload, data.creationDate, subentity);
return dataEntity;
}
public static String getStatusEntity(String status){
return entity(DataspaceManager.status, status);
}
public static String getExecutionPlatformEntity(String executionPlatform){
return entity(DataspaceManager.execution_platform, executionPlatform);
}
public static String getOperatorDescriptionEntity(String description){
return entity(DataspaceManager.operator_description, description);
}
public static String getOperatorEntity(String operator){
return entity(DataspaceManager.operator, operator);
}
public static String computationToAction(ComputationData computation,String subEntities){
String status = getStatusEntity(computation.status);
String description = getOperatorDescriptionEntity(computation.operatorDescription);
String operator = getOperatorEntity(computation.operator);
String subents =operator+description+status+subEntities;
String activity = completeActivity(computation.id,computation.startDate,computation.endDate,computation.user,subents);
return activity;
}
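// Assembles the PROV-O document: each input and output is rendered as an entity nested in the computation activity, and the result is pretty-printed XML.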
public static String toProvO(ComputationData computation, List<StoredData> input, List<StoredData> output){
StringBuffer sb = new StringBuffer();
for (StoredData in:input){
sb.append(dataToEntity(in));
}
for (StoredData out:output){
sb.append(dataToEntity(out));
}
String action = computationToAction(computation, sb.toString());
String documentString = document.replace("#DOCUMENT#", action);
documentString = formatXML(documentString);
return documentString;
}
public static String entity(String name, String value){
return entity.replace("#ENTITY_NAME#", name).replace("#ENTITY_VALUE#", value).replace("#SUBENTITY#","");
}
public static String refentity(String id){
return referenceEntity.replace("#ID#", id);
}
public static String refactivity(String id){
return referenceActivity.replace("#ID#", id);
}
public static String attribute(String name){
return attribute.replace("#NAME#", name);
}
public static String entityWithTime(String name, String value, String time){
return entityWithTime.replace("#ENTITY_NAME#", name).replace("#ENTITY_VALUE#", value).replace("#TIME#", time).replace("#SUBENTITY#","");
}
public static String completeEntityWithTime(String name, String value, String time, String subEntity){
return entityWithTime.replace("#ENTITY_NAME#", name).replace("#ENTITY_VALUE#", value).replace("#TIME#", time).replace("#SUBENTITY#",subEntity);
}
public static String completeActivity(String id, String startTime,String endTime,String person, String subEntity){
return activity.replace("#ID#", id).replace("#PERSON#", person).replace("#START_TIME#", startTime).replace("#END_TIME#", endTime).replace("#ENTITIES#",subEntity);
}
public static String formatXML(String input)
{
try
{
Transformer transformer = TransformerFactory.newInstance().newTransformer();
transformer.setOutputProperty(OutputKeys.INDENT, "yes");
transformer.setOutputProperty("{http://xml.apache.org/xslt}indent-amount", "3");
StreamResult result = new StreamResult(new StringWriter());
DOMSource source = new DOMSource(parseXml(input));
transformer.transform(source, result);
return result.getWriter().toString();
} catch (Exception e)
{
e.printStackTrace();
return input;
}
}
private static org.w3c.dom.Document parseXml(String in)
{
try
{
DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
DocumentBuilder db = dbf.newDocumentBuilder();
InputSource is = new InputSource(new StringReader(in));
return db.parse(is);
} catch (Exception e)
{
throw new RuntimeException(e);
}
}
}

View File

@ -0,0 +1,26 @@
package org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace;
public class StoredData {
public StoredData(String name, String description, String id, DataProvenance provenance, String creationDate, String operator, String computationId, StoredType type, String payload) {
super();
this.name = name;
this.id = id;
this.description = description;
this.provenance = provenance;
this.creationDate = creationDate;
this.operator = operator;
this.computationId = computationId;
this.type = type;
this.payload=payload;
}
String name;
String description;
String id;
DataProvenance provenance;
String creationDate;
String operator;
String computationId;
StoredType type;
String payload;
}

View File

@ -0,0 +1,5 @@
package org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.dataspace;
public enum StoredType {
DATA,STRING
}

View File

@ -0,0 +1,171 @@
package org.gcube.dataanalysis.wps.statisticalmanager.synchserver.web;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintWriter;
import java.util.UUID;
import javax.servlet.ServletConfig;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang.StringUtils;
import org.gcube.contentmanagement.lexicalmatcher.utils.AnalysisLogger;
import org.n52.wps.commons.XMLUtil;
import org.n52.wps.server.database.DatabaseFactory;
import org.n52.wps.server.database.IDatabase;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class CancelComputation extends HttpServlet {
private final static Logger LOGGER = LoggerFactory.getLogger(CancelComputation.class);
private static final long serialVersionUID = -268198171054599696L;
// This is required for URL generation for response documents.
public final static String SERVLET_PATH = "RetrieveResultServlet";
// in future parameterize
private final boolean indentXML = false;
private final int uuid_length = 36;
@Override
public void init(ServletConfig config) throws ServletException {
super.init(config);
}
public static String empty = "<wps:ExecuteResponse service=\"WPS\" " +
"serviceInstance=\"\" statusLocation=\"\" version=\"1.0.0\" xml:lang=\"en-US\" xsi:schemaLocation=\"http://www.opengis.net/wps/1.0.0 http://schemas.opengis.net/wps/1.0.0/wpsExecute_response.xsd\" " +
"xmlns:ows=\"http://www.opengis.net/ows/1.1\" xmlns:wps=\"http://www.opengis.net/wps/1.0.0\" " +
"xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"> " +
"<wps:Process wps:processVersion=\"1.1.0\"/> " +
"</wps:ExecuteResponse>";
@Override
protected void doGet(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException {
// id of result to retrieve.
String id = request.getParameter("id");
AnalysisLogger.getLogger().debug("CANCEL COMPUTATION -> RETRIEVING ID " + id);
if (StringUtils.isEmpty(id)) {
errorResponse("id parameter missing", response);
} else {
AnalysisLogger.getLogger().debug("CANCEL COMPUTATION -> ID RETRIEVED" + id);
if (!isIDValid(id)) {
errorResponse("id parameter not valid", response);
return;
}
AnalysisLogger.getLogger().debug("CANCEL COMPUTATION -> ID IS VALID " + id);
IDatabase db = DatabaseFactory.getDatabase();
try {
AnalysisLogger.getLogger().debug("CANCEL COMPUTATION -> DELETING ID " + id);
try {
// String empty = "";
InputStream stream = new ByteArrayInputStream(empty.getBytes("UTF-8"));
db.updateResponse(id, stream);
stream.close();
} catch (Exception e) {
e.printStackTrace();
AnalysisLogger.getLogger().debug(e);
}
AnalysisLogger.getLogger().debug("CANCEL COMPUTATION -> ID DELETED " + id);
long len = db.getContentLengthForStoreResponse(id);
AnalysisLogger.getLogger().debug("CANCEL COMPUTATION -> ID RESPONSE LENGTH " + len);
} catch (Exception e) {
e.printStackTrace();
logException(e);
AnalysisLogger.getLogger().debug(e);
} finally {
}
}
}
protected void errorResponse(String error, HttpServletResponse response) throws IOException {
response.setContentType("text/html");
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
PrintWriter writer = response.getWriter();
writer.write("<html><title>Error</title><body>" + error + "</body></html>");
writer.flush();
LOGGER.warn("Error processing response: " + error);
}
protected void copyResponseStream(InputStream inputStream, OutputStream outputStream, String id, long contentLength) throws IOException {
long contentWritten = 0;
try {
byte[] buffer = new byte[8192];
int bufferRead;
while ((bufferRead = inputStream.read(buffer)) != -1) {
outputStream.write(buffer, 0, bufferRead);
contentWritten += bufferRead;
}
} catch (IOException e) {
String exceptionMessage = contentLength > -1 ? String.format("Error writing response to output stream for id %s, %d of %d bytes written", id, contentWritten, contentLength) : String.format("Error writing response to output stream for id %s, %d bytes written", id, contentWritten);
throw new IOException(exceptionMessage, e);
}
LOGGER.info("{} bytes written in response to id {}", contentWritten, id);
}
protected void copyResponseAsXML(InputStream inputStream, OutputStream outputStream, boolean indent, String id) throws IOException {
try {
XMLUtil.copyXML(inputStream, outputStream, indent);
} catch (IOException e) {
throw new IOException("Error writing XML response for id " + id, e);
}
}
private void logException(Exception exception) {
StringBuilder errorBuilder = new StringBuilder(exception.getMessage());
Throwable cause = getRootCause(exception);
if (cause != exception) {
errorBuilder.append(", exception message: ").append(cause.getMessage());
}
LOGGER.error(errorBuilder.toString());
}
public static Throwable getRootCause(Throwable t) {
return t.getCause() == null ? t : getRootCause(t.getCause());
}
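// Accepts either a plain UUID or a longer id whose first and last 36 characters are both valid UUIDs.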
public boolean isIDValid(String id) {
if (id.length() <= uuid_length) {
try {
UUID checkUUID = UUID.fromString(id);
if (checkUUID.toString().equals(id)) {
return true;
} else {
return false;
}
} catch (Exception e) {
return false;
}
} else {
String uuidPartOne = id.substring(0, uuid_length);
String uuidPartTwo = id.substring(id.length() - uuid_length, id.length());
return isUUIDValid(uuidPartOne) && isUUIDValid(uuidPartTwo);
}
}
public boolean isUUIDValid(String uuid) {
// the following can be used to check whether the id is a valid UUID
try {
UUID checkUUID = UUID.fromString(uuid);
if (checkUUID.toString().equals(uuid)) {
return true;
} else {
return false;
}
} catch (Exception e) {
return false;
}
}
}

View File

@ -38,10 +38,12 @@ import org.apache.commons.io.IOUtils;
import org.apache.xmlbeans.XmlException;
import org.apache.xmlbeans.XmlObject;
import org.apache.xmlbeans.XmlOptions;
import org.gcube.dataanalysis.wps.statisticalmanager.synchserver.mapping.AbstractEcologicalEngineMapper;
import org.n52.wps.commons.context.ExecutionContext;
import org.n52.wps.commons.context.ExecutionContextFactory;
import org.n52.wps.io.data.IComplexData;
import org.n52.wps.io.data.IData;
import org.n52.wps.io.data.binding.literal.LiteralStringBinding;
import org.n52.wps.server.AbstractTransactionalAlgorithm;
import org.n52.wps.server.ExceptionReport;
import org.n52.wps.server.IAlgorithm;
@ -78,6 +80,7 @@ public class ExecuteRequest extends Request implements IObserver {
*/
public ExecuteRequest(Document doc) throws ExceptionReport {
super(doc);
initWpsID();
try {
System.out.println("Preparing the ExecuteRequest for POST");
XmlOptions option = new XmlOptions();
@ -112,6 +115,7 @@ public class ExecuteRequest extends Request implements IObserver {
*/
public ExecuteRequest(CaseInsensitiveMap ciMap) throws ExceptionReport {
super(ciMap);
initWpsID();
initForGET(ciMap);
// validate the client input
validate();
@ -543,6 +547,11 @@ public class ExecuteRequest extends Request implements IObserver {
*
* @throws ExceptionReport
*/
String wpsid = null;
private void initWpsID(){
wpsid = getUniqueId().toString();
}
public Response call() throws ExceptionReport {
IAlgorithm algorithm = null;
Map<String, List<IData>> inputMap = null;
@ -587,7 +596,10 @@ public class ExecuteRequest extends Request implements IObserver {
subject.addObserver(this);
}
if (algorithm instanceof AbstractEcologicalEngineMapper) {
((AbstractEcologicalEngineMapper) algorithm).setWpsExternalID(wpsid);
}
if (algorithm instanceof AbstractTransactionalAlgorithm) {
returnResults = ((AbstractTransactionalAlgorithm) algorithm).run(execDom);
} else {
@ -767,7 +779,7 @@ public class ExecuteRequest extends Request implements IObserver {
InputStream is = null;
try {
is = executeResponse.getAsStream();
DatabaseFactory.getDatabase().storeResponse(getUniqueId().toString(), is);
DatabaseFactory.getDatabase().storeResponse(wpsid, is);
} finally {
IOUtils.closeQuietly(is);
}
@ -782,7 +794,7 @@ public class ExecuteRequest extends Request implements IObserver {
InputStream is = null;
try {
is = executeDocument.newInputStream();
DatabaseFactory.getDatabase().insertRequest(getUniqueId().toString(), is, true);
DatabaseFactory.getDatabase().insertRequest(wpsid, is, true);
} catch (Exception e) {
LOGGER.error("Exception storing ExecuteRequest", e);
} finally {
@ -811,7 +823,7 @@ public class ExecuteRequest extends Request implements IObserver {
}
w.flush();
is = new ByteArrayInputStream(os.toByteArray());
DatabaseFactory.getDatabase().insertRequest(getUniqueId().toString(), is, false);
DatabaseFactory.getDatabase().insertRequest(wpsid, is, false);
} catch (Exception e) {
LOGGER.error("Exception storing ExecuteRequest", e);
} finally {