package org.gcube.dataanalysis.dataminer.poolmanager.service;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Scanner;

import org.gcube.common.scope.api.ScopeProvider;
import org.gcube.dataanalysis.dataminer.poolmanager.datamodel.Algorithm;
import org.gcube.dataanalysis.dataminer.poolmanager.datamodel.Cluster;
import org.gcube.dataanalysis.dataminer.poolmanager.util.ClusterBuilder;
import org.gcube.dataanalysis.dataminer.poolmanager.util.ServiceConfiguration;
import org.gcube.dataanalysis.dataminer.poolmanager.util.SVNUpdater;
import org.tmatesoft.svn.core.SVNException;

public class DataminerPoolManager {

    private SVNUpdater svnUpdater;

    public DataminerPoolManager() {
        try {
            // TODO: read this from configuration
            this.svnUpdater = new SVNUpdater(new ServiceConfiguration());
        } catch (SVNException e) {
            // note: if SVN initialization fails, svnUpdater stays null
            e.printStackTrace();
        }
    }

    /**
     * Deploys the given algorithm on the staging DataMiner cluster and on the
     * rProto cluster, in the scope currently set on the ScopeProvider.
     *
     * @return the id of the started staging job
     */
    public String stageAlgorithm(Algorithm algo) throws IOException, InterruptedException {
        Cluster stagingCluster = ClusterBuilder.getStagingDataminerCluster();
        Cluster rProtoCluster = ClusterBuilder.getRProtoCluster();
        DMPMJob job = new StagingJob(this.svnUpdater, algo, stagingCluster, rProtoCluster,
                ScopeProvider.instance.get());
        return job.start();
    }

    /**
     * Publishes the given algorithm on the production cluster of the target VRE.
     *
     * @return the id of the started publishing job
     */
    public String publishAlgorithm(Algorithm algo, String targetVREToken, String targetVRE)
            throws IOException, InterruptedException {
        Cluster prodCluster = ClusterBuilder.getVRECluster(targetVREToken, targetVRE);
        DMPMJob job = new ProductionPublishingJob(this.svnUpdater, algo, prodCluster, targetVRE);
        return job.start();
    }

    /**
     * Returns the full log of the job with the given id.
     */
    public String getLogById(String id) throws FileNotFoundException {
        // TODO: load dir from configuration file
        File path = new File(System.getProperty("user.home") + File.separator
                + "dataminer-pool-manager/jobs/" + id);
        // "\\Z" (end of input) makes next() return the whole file as a single token;
        // try-with-resources closes the Scanner and avoids a resource leak
        try (Scanner scanner = new Scanner(path)) {
            return scanner.useDelimiter("\\Z").next();
        }
    }
}
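
/*
 * Usage sketch (illustrative, not part of the service): shows how a caller
 * might drive the pool manager through the stage/log cycle. The scope string,
 * the no-arg Algorithm constructor, and the way its fields get populated are
 * assumptions made for this example, not APIs confirmed by this file.
 */
class DataminerPoolManagerExample {

    public static void main(String[] args) throws Exception {
        // assumption: the caller sets the gCube scope before staging;
        // stageAlgorithm reads it back via ScopeProvider.instance.get()
        ScopeProvider.instance.set("/gcube/devNext");

        DataminerPoolManager manager = new DataminerPoolManager();

        Algorithm algo = new Algorithm(); // hypothetical: fields filled in by the caller
        String jobId = manager.stageAlgorithm(algo);

        // the job writes its log under ~/dataminer-pool-manager/jobs/<id>
        System.out.println(manager.getLogById(jobId));
    }
}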