Roberto Cirillo 6 years ago
commit 7d0f512d13

@ -0,0 +1,36 @@
<?xml version="1.0" encoding="UTF-8"?>
<classpath>
<classpathentry kind="src" output="target/classes" path="src/main/java">
<attributes>
<attribute name="optional" value="true"/>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry excluding="**" kind="src" output="target/classes" path="src/main/resources">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="src" output="target/test-classes" path="src/test/java">
<attributes>
<attribute name="optional" value="true"/>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry excluding="**" kind="src" output="target/test-classes" path="src/test/resources">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.7">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="con" path="org.eclipse.m2e.MAVEN2_CLASSPATH_CONTAINER">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="output" path="target/classes"/>
</classpath>

@ -0,0 +1,23 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>storage-manager-core</name>
<comment></comment>
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.m2e.core.maven2Builder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.jdt.core.javanature</nature>
<nature>org.eclipse.m2e.core.maven2Nature</nature>
</natures>
</projectDescription>

@ -0,0 +1,7 @@
#Wed May 02 15:06:23 CEST 2012
eclipse.preferences.version=1
encoding//src/main/java=UTF-8
encoding//src/main/resources=UTF-8
encoding//src/test/java=UTF-8
encoding//src/test/resources=UTF-8
encoding/<project>=UTF-8

@ -0,0 +1,12 @@
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.7
org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
org.eclipse.jdt.core.compiler.compliance=1.7
org.eclipse.jdt.core.compiler.debug.lineNumber=generate
org.eclipse.jdt.core.compiler.debug.localVariable=generate
org.eclipse.jdt.core.compiler.debug.sourceFile=generate
org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
org.eclipse.jdt.core.compiler.source=1.7

@ -0,0 +1,5 @@
#Wed May 02 14:48:43 CEST 2012
activeProfiles=
eclipse.preferences.version=1
resolveWorkspaceProjects=true
version=1

@ -0,0 +1,25 @@
v 2.4.0 (21-04-2015)
* upgrade mongo-java-driver to version 3.0.3
v 2.2.0 (21-04-2015)
* change smp protocol
* id as remote identifier for RFile methods
v 2.1.2 (31-10-2014)
* added methods for folder and user storage calculation
v 2.1.0 (01-04-2014)
* added metadata for every remote file in agreement with storage-manager-trigger module
v 2.0.3 (24-10-2013)
* added mongoOption to reinforce network failover #245 openBio
v 2.0.2 (19-06-2013)
* bug fix input stream with replace flag #208 openBio
v 2.0.1 (29-05-2013)
* bug fix incident: connection pending #606
v 2.0.0 (19-05-2013)
* added move, copy and link operations
* FWS integration
v 1.0.2 (15-01-2013)
* Integration with new gCube release 2.12.0 version system
v 1.0.1 (11-10-2012)
* added VOLATILE area
v 1.0.0 (04-05-2012)
* First release

@ -0,0 +1,2 @@
${gcube.license}

Binary file not shown.

@ -0,0 +1,84 @@
<ReleaseNotes>
<Changeset component="org.gcube.content-management.storage-manager-core.2-9-0" date="2018-09-24">
<Change> SSL enabled</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2-8-1" date="2018-09-21">
<Change> fix throw RemoteBackendException in Resource class</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2-8-0" date="2018-05-16">
<Change>add softcopy operation</Change>
<Change>upgrade to mongo-java-driver 3.6.0</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2-7-0" date="2018-04-20">
<Change>add duplicate file operation</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2-6-1" date="2017-02-01">
<Change>fix: bug #5598</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2-6-0" date="2016-11-03">
<Change>Added method for retrieving the relative remote path by id</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2-5-0" date="2016-09-04">
<Change>fix: bug #5625</Change>
<Change>added suffix to url for object stored on VolatileArea</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2-4-1" date="2016-03-01">
<Change>fix: http url with encode resolution</Change>
<Change>fix-pom: downgrade commons-io to version 1.4</Change>
<Change>change: mongo authentication mechanism from CR to negotiation</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2-4-0" date="2016-01-17">
<Change>clean code</Change>
<Change>added getHttpUrl method</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2-3-0" date="2015-10-21">
<Change>code refactoring: new class MongoIO</Change>
<Change>move operation alternative version: using fs.files collection</Change>
<Change>add mimeType manager</Change>
<Change>add retry mechanism when retrieving remote objects</Change>
<Change>upgrade mongo-java-driver library to version 3.0.0</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2-2-1" date="2015-07-01">
<Change>enable smp uri without payload</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2.2.0" date="2015-04-21">
<Change>change smp protocol</Change>
<Change>id as remote identifier for RFile methods</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2.1.2" date="2014-04-01">
<Change>added methods for folder and user storage calculation</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2.1.1" date="2014-04-01">
<Change>removed dependency from slf4j-noop</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2.1.0" date="2014-04-01">
<Change>added metadata for every remote file in agreement with storage-manager-trigger module</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2.0.4" date="2013-10-24">
<Change>Integration with new gCube release 2.17.0 version system</Change>
<Change>added operations on folder</Change>
</Changeset>
<Changeset component="org.gcube.content-management.storage-manager-core.2.0.3" date="2013-10-24">
<Change>Integration with new gCube release 2.17.0 version system</Change>
<Change>added mongoOption to reinforce network failover #245 openBio</Change>
</Changeset>
<Changeset component="${build.finalName}" date="2013-05-29">
<Change>Integration with new gCube release 2.15.0 version system</Change>
<Change>bugfix incident on pending connections #606</Change>
</Changeset>
<Changeset component="${build.finalName}" date="2013-04-19">
<Change>Integration with new gCube release 2.14.0 version system</Change>
<Change>FWS integration</Change>
<Change>added move, copy and link operations</Change>
</Changeset>
<Changeset component="${build.finalName}" date="2013-01-15">
<Change>Integration with new gCube release 2.12.0 version system</Change>
</Changeset>
<Changeset component="${build.finalName}" date="2012-10-11">
<Change>Added VOLATILE area</Change>
</Changeset>
<Changeset component="${build.finalName}" date="2012-05-04">
<Change>First Release</Change>
</Changeset>
</ReleaseNotes>

@ -0,0 +1,32 @@
<assembly
xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
<id>servicearchive</id>
<formats>
<format>tar.gz</format>
</formats>
<baseDirectory>/</baseDirectory>
<fileSets>
<fileSet>
<directory>${distroDirectory}</directory>
<outputDirectory>/</outputDirectory>
<useDefaultExcludes>true</useDefaultExcludes>
<includes>
<include>README</include>
<include>LICENSE</include>
<include>changelog.xml</include>
<include>profile.xml</include>
</includes>
<fileMode>755</fileMode>
<filtered>true</filtered>
</fileSet>
</fileSets>
<files>
<file>
<source>target/${build.finalName}.${project.packaging}</source>
<outputDirectory>/${artifactId}</outputDirectory>
</file>
</files>
</assembly>

@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<Resource xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ID />
<Type>Service</Type>
<Profile>
<Description>${description}</Description>
<Class>ContentManagement</Class>
<Name>${artifactId}</Name>
<Version>1.0.0</Version>
<Packages>
<Software>
<Name>${artifactId}</Name>
<Version>${version}</Version>
<MavenCoordinates>
<groupId>${groupId}</groupId>
<artifactId>${artifactId}</artifactId>
<version>${version}</version>
</MavenCoordinates>
<Files>
<File>${build.finalName}.jar</File>
</Files>
</Software>
</Packages>
</Profile>
</Resource>

@ -0,0 +1,109 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>maven-parent</artifactId>
<groupId>org.gcube.tools</groupId>
<version>1.0.0</version>
<relativePath />
</parent>
<groupId>org.gcube.contentmanagement</groupId>
<artifactId>storage-manager-core</artifactId>
<version>2.9.0-SNAPSHOT</version>
<properties>
<distroDirectory>${project.basedir}/distro</distroDirectory>
</properties>
<scm>
<connection>scm:svn:http://svn.d4science.research-infrastructures.eu/gcube/trunk/content-management/${project.artifactId}</connection>
<developerConnection>scm:svn:https://svn.d4science.research-infrastructures.eu/gcube/trunk/content-management/${project.artifactId}</developerConnection>
<url> http://svn.research-infrastructures.eu/public/d4science/gcube/trunk/content-management/${project.artifactId}</url>
</scm>
<dependencyManagement>
<dependencies>
<dependency>
<groupId>org.gcube.distribution</groupId>
<artifactId>gcube-bom</artifactId>
<version>LATEST</version>
<type>pom</type>
<scope>import</scope>
</dependency>
</dependencies>
</dependencyManagement>
<dependencies>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-api</artifactId>
<!-- <version>1.6.4</version> -->
</dependency>
<dependency>
<groupId>org.mongodb</groupId>
<artifactId>mongo-java-driver</artifactId>
<version>3.6.0</version>
<!-- <version>[3.0.0, 3.2.0-rc0)</version> -->
</dependency>
<dependency>
<groupId>org.gcube.core</groupId>
<artifactId>common-encryption</artifactId>
<!-- <version>[1.0.0-SNAPSHOT, 2.0.0-SNAPSHOT)</version> -->
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
<version>1.4</version>
</dependency>
<dependency>
<groupId>commons-codec</groupId>
<artifactId>commons-codec</artifactId>
<version>1.8</version>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-resources-plugin</artifactId>
<version>2.5</version>
<executions>
<execution>
<id>copy-profile</id>
<phase>install</phase>
<goals>
<goal>copy-resources</goal>
</goals>
<configuration>
<outputDirectory>target</outputDirectory>
<resources>
<resource>
<directory>${distroDirectory}</directory>
<filtering>true</filtering>
<includes>
<include>profile.xml</include>
</includes>
</resource>
</resources>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<!-- <version>2.2</version> -->
<configuration>
<descriptors>
<descriptor>${distroDirectory}/descriptor.xml</descriptor>
</descriptors>
</configuration>
<executions>
<execution>
<id>servicearchive</id>
<phase>install</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
</plugins>
</build>
</project>

@ -0,0 +1,15 @@
log4j.rootLogger=INFO, A1, stdout
log4j.appender.A1=org.apache.log4j.RollingFileAppender
log4j.appender.A1.File=log.txt
log4j.appender.A1.layout=org.apache.log4j.PatternLayout
log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
# ***** Max file size is set to 100MB
log4j.appender.A1.MaxFileSize=100MB
# ***** Keep one backup file
log4j.appender.A1.MaxBackupIndex=1
#CONSOLE
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Threshold=INFO
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%t] %-5p %c %d{dd MMM yyyy ;HH:mm:ss.SSS} - %m%n

@ -0,0 +1,48 @@
package org.gcube.contentmanagement.blobstorage.coding;
import java.io.InputStream;
import java.util.List;
/**
* Interface for encoding a generic file into bytes or base64. Used by the Terrastore system
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public interface IEncode {
/**
* Encode a generic file into a byte array
* @param path file's path
* @param isChunk indicates if the file is chunked
* @param isBase64 the base64 coding option
* @return a byte array
*/
public byte[] encodeGenericFile(String path, boolean isChunk, boolean isBase64);
/**
* Reads a file storing intermediate data into a list. Fast method.
* @param path
* @param isChunk
* @param chunkDimension
* @return the list that contains the file
*/
public List<String> encodeFileChunked(String path, boolean isChunk, int chunkDimension);
/**
* Reads a file storing intermediate data into an array.
* @param in
* @param path
* @param isChunk
* @param chunkDimension
* @return the byte array that contains the file
*/
public byte[] encodeFileChunked2(InputStream in, String path, boolean isChunk, long chunkDimension);
/**
* Decode a byte array into a file
* @param encode
* @param path
* @param isBase64
*/
public void decodeByte2File(byte[] encode, String path, boolean isBase64);
}
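
For illustration, a minimal sketch of an IEncode implementation that treats every file as a single chunk and ignores the chunking flags; the class name SingleChunkEncoder is hypothetical and not part of this commit, and the base64 handling uses commons-codec, which the pom below already declares as a dependency.

package org.gcube.contentmanagement.blobstorage.coding;
import java.io.ByteArrayOutputStream;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
import java.util.List;
import org.apache.commons.codec.binary.Base64;
// Hypothetical sketch, not part of this commit: a single-chunk IEncode
// implementation; a real implementation would honor isChunk/chunkDimension.
public class SingleChunkEncoder implements IEncode {
    public byte[] encodeGenericFile(String path, boolean isChunk, boolean isBase64) {
        try {
            byte[] raw = readAll(new FileInputStream(path));
            return isBase64 ? Base64.encodeBase64(raw) : raw;
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    public List<String> encodeFileChunked(String path, boolean isChunk, int chunkDimension) {
        // everything in a single list element instead of chunkDimension-sized pieces
        return Collections.singletonList(Base64.encodeBase64String(encodeGenericFile(path, false, false)));
    }
    public byte[] encodeFileChunked2(InputStream in, String path, boolean isChunk, long chunkDimension) {
        try {
            return (in != null) ? readAll(in) : encodeGenericFile(path, false, false);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    public void decodeByte2File(byte[] encode, String path, boolean isBase64) {
        try {
            FileOutputStream out = new FileOutputStream(path);
            try {
                out.write(isBase64 ? Base64.decodeBase64(encode) : encode);
            } finally {
                out.close();
            }
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
    }
    // drains the stream into memory and closes it
    private static byte[] readAll(InputStream in) throws IOException {
        try {
            ByteArrayOutputStream buffer = new ByteArrayOutputStream();
            byte[] buf = new byte[8192];
            int n;
            while ((n = in.read(buf)) != -1) {
                buffer.write(buf, 0, n);
            }
            return buffer.toByteArray();
        } finally {
            in.close();
        }
    }
}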

@ -0,0 +1,36 @@
package org.gcube.contentmanagement.blobstorage.report;
/**
* Generic interface for accounting reports
* @author Roberto Cirillo (ISTI-CNR)
*
*/
@Deprecated
public interface Report {
/**
* Set generic properties of the report
* @param consumerId
* @param resourceScope
*/
public void init(String consumerId, String resourceScope);
/**
* set the start time of the operation
*/
public void timeUpdate();
/**
* Set the end time of the operation and other specific properties
*/
public void ultimate(String owner, String uri, String operation, String size );
/**
* send the report
*/
public void send();
}

@ -0,0 +1,28 @@
package org.gcube.contentmanagement.blobstorage.report;
/**
* No-op implementation of the Report interface
* @author Roberto Cirillo (ISTI-CNR)
*
*/
@Deprecated
public class ReportAccountingImpl implements Report {
@Override
public void init(String consumerId, String resourceScope) {
}
@Override
public void timeUpdate() {
}
@Override
public void ultimate(String owner, String uri, String operation, String size ) {
}
@Override
public void send() {
}
}

@ -0,0 +1,8 @@
package org.gcube.contentmanagement.blobstorage.report;
@Deprecated
public class ReportConfig {
// /** Report type - used by : Report factory class */
}

@ -0,0 +1,42 @@
package org.gcube.contentmanagement.blobstorage.report;
/**
* Report Exception class
* @author Roberto Cirillo (ISTI-CNR)
*
*/
public class ReportException extends Exception {
/**
*
*/
private static final long serialVersionUID = -7852250665598838026L;
private Exception exc = null;
/** The no-arg constructor */
public ReportException() {
}
/**
* Construct a ReportException with an error message
* @param message the error message
*/
public ReportException(String message) {
super(message);
}
public ReportException (Exception e)
{
this.setExc(e);
}
public Exception getExc() {
return exc;
}
public void setExc(Exception exc) {
this.exc = exc;
}
}

@ -0,0 +1,42 @@
package org.gcube.contentmanagement.blobstorage.report;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
/**
*
* @author Roberto Cirillo (ISTI-CNR)
*
*/
public class ReportFactory {
final static Logger logger=LoggerFactory.getLogger(ReportFactory.class);
/**
* <p> Instantiate the Report implementation specified by the user </p>
* @return the Report implementation
* @throws ReportException
*/
public static Report getReport(int ReportType) throws ReportException {
Report report = null;
try {
switch(ReportType) {
case Costants.ACCOUNTING_TYPE :
report = new ReportAccountingImpl();
break;
default :
throw new ReportException("MyDAOFactory.getDAO: ["+ReportType+"] is an UNKNOWN TYPE !");
}
logger.trace("ReportFactory.getDao : returning class ["+report.getClass().getName()+"]...");
} catch (Exception e) {
e.printStackTrace();
throw new ReportException("ReportFactory.getReport: Exception while getting Report type : \n" + e.getMessage());
}
logger.trace("MyReportFactory.getReport : returning class ["+report.getClass().getName()+"]...");
return report;
}
}
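
A hedged usage sketch of the deprecated reporting chain, assuming Costants.ACCOUNTING_TYPE is the only supported type (as the switch above suggests); all argument values below are illustrative.

package org.gcube.contentmanagement.blobstorage.report;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
// Demo only, not part of this commit: the lifecycle is init -> timeUpdate -> ultimate -> send.
public class ReportDemo {
    public static void main(String[] args) throws ReportException {
        Report report = ReportFactory.getReport(Costants.ACCOUNTING_TYPE);
        report.init("consumer-id", "/gcube/devsec");   // generic properties (made-up values)
        report.timeUpdate();                           // mark the start time
        // ... perform the remote operation being accounted ...
        report.ultimate("owner", "smp://host/path", "upload", "1024");
        report.send();                                 // dispatch the report
    }
}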

@ -0,0 +1,14 @@
package org.gcube.contentmanagement.blobstorage.resource;
/**
* Defines the kind of access to the storage manager:
* private: the uploaded files are visible only to their owner
* shared: the files are visible to all users that have the same serviceClass and serviceName
* public: the files are visible to the whole infrastructure area
*
* @author Roberto Cirillo (ISTI-CNR)
*
*/
public enum AccessType {
PUBLIC, SHARED, PRIVATE
}

@ -0,0 +1,12 @@
package org.gcube.contentmanagement.blobstorage.resource;
/**
* Defines the memory type used by the storage backend
* @author Roberto Cirillo (ISTI-CNR)
*
*/
public enum MemoryType {
PERSISTENT, VOLATILE, BOTH
}

@ -0,0 +1,694 @@
package org.gcube.contentmanagement.blobstorage.resource;
import java.io.InputStream;
import java.io.OutputStream;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.LOCAL_RESOURCE;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Class that defines a file entity object. This entity contains file properties and metadata.
* This type of resource is built by the ServiceEngine class and used by the TransportManager for requests to the remote system.
* This class also contains the definition of the current operation:
* @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition
*
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class MyFile {
// file name
private String name;
// owner
private String owner;
// payload Terrastore
private byte[] content;
// local path
private String localPath;
// remote path
private String remotePath;
// absolute remote path
private String absoluteRemotePath;
// num of chunks client side Terrastore
private int numChunks;
// name of the key in the remote bucket Terrastore
private String key;
// local directory
private String localDir;
// inputStream of the resource
private InputStream inputStream;
//outputStream of the resource
private OutputStream outputStream;
// type of stream
private String type;
//if true the file is locked
private boolean lock;
// the key for unlocking the file
private String lockedKey;
private String remoteDir;
private long lifeTime;
private String id;
private String id2;
private long size;
private String extension;
private String creationTime;
// parameters for GCube instance Url calculation
private String serviceName;
private String serviceClass;
private String ownerGcube;
private String gcubeScope;
private AccessType gcubeAccessType;
private MemoryType gcubeMemoryType;
/**
* define the operation type on the current resource
*/
private OperationDefinition operation;
private String resolverHost;
private boolean forceCreation;
private String mimeType;
private String genericPropertyField;
private String genericPropertyValue;
private String passPhrase;
private String writeConcern;
private String readPreference;
private String rootPath;
private boolean replace=false;
final Logger logger = LoggerFactory.getLogger(MyFile.class);
public MyFile(boolean lock){
setLock(lock);
}
/**
* set some properties on the current resource
* @param author author name
* @param name name of the file
* @param pathClient local path of the file
*/
public MyFile(String author, String name, String pathClient, MemoryType memoryType){
this.setOwner(author);
this.setName(name);
this.setLocalPath(pathClient);
setGcubeMemoryType(memoryType);
}
/**
* set some properties on the current resource
* @param author author name
* @param name name of the file
* @param pathClient local path of the file
* @param pathServer remote path of the file
*/
public MyFile(String author, String name, String pathClient, String pathServer, MemoryType memoryType){
this.setOwner(author);
this.setName(name);
this.setLocalPath(pathClient);
this.setRemotePath(pathServer);
setGcubeMemoryType(memoryType);
}
public MyFile(MemoryType memoryType) {
setGcubeMemoryType(memoryType);
}
/**
* build a new object with only the name set
* @param name file name
*/
public MyFile(String name, MemoryType memoryType){
setName(name);
setGcubeMemoryType(memoryType);
}
/**
* get the number of chunks if the file is split into chunks
* @return number of chunks
*/
public int getNumChunks() {
return numChunks;
}
/**
* set the number of file chunks. default is 1
* @param numChunks
*/
public void setNumChunks(int numChunks) {
this.numChunks = numChunks;
}
/**
* get the local path of the resource
* @return local path
*/
public String getLocalPath() {
return localPath;
}
/**
* set the local path of the resource
* @param path the absolute path of the resource
*/
public void setLocalPath(String path) {
this.localPath = path;
}
/**
* get the file name
* @return file name
*/
public String getName() {
return name;
}
/**
* set the file name
* @param name file name
*/
public void setName(String name) {
this.name = name;
}
/**
* get the file owner
* @return file owner
*/
public String getOwner() {
return owner;
}
/**
* set the file owner
* @param author file owner
*/
public void setOwner(String author) {
this.owner = author;
}
/**
* get the file payload or null
*
* @return a byte array that contains the file payload
*/
public byte[] getContent() {
return content;
}
/**
* set the payload file
* @param currentChunk payload file
*/
public void setContent(byte[] currentChunk) {
this.content = currentChunk;
}
/**
* used only for chunk files. indicates the name of the current chunk
* @return the name of the current chunk
*/
public String getKey() {
return key;
}
/**
* used only for chunk files. indicates the name of the current chunk
* @param key chunk name
*/
public void setKey(String key) {
this.key = key;
}
/**
* returns a copy of the current resource
* @return the file copy
*/
public MyFile copyProperties(){
MyFile dest=new MyFile(getGcubeMemoryType());
dest.setOwner(getOwner());
dest.setLocalDir(this.getLocalDir());
dest.setRemoteDir(this.getRemoteDir());
dest.setKey(this.key);
dest.setName(this.name);
dest.setNumChunks(this.numChunks);
dest.setLocalPath(this.localPath);
dest.setRemotePath(this.remotePath);
return dest;
}
/**
* get the remote path of the resource
* @return remote path
*/
public String getRemotePath() {
return remotePath;
}
/**
* set the remote path of the resource
* @param pathServer remote path
*/
public void setRemotePath(String pathServer) {
this.remotePath = pathServer;
}
/**
* get the inputStream of the resource
* @return inputStream of the resource
*/
public InputStream getInputStream() {
return inputStream;
}
/**
* set the inputStream of the resource
* @param inputStream inputStream of the resource
*/
public void setInputStream(InputStream inputStream) {
this.inputStream = inputStream;
}
/**
* get the outputStream of the resource
* @return outputStream associated to the resource
*/
public OutputStream getOutputStream() {
return outputStream;
}
/**
* set the outputStream of the resource
* @param outputStream outputstream associated to the resource
*/
public void setOutputStream(OutputStream outputStream) {
this.outputStream = outputStream;
}
/**
* whether the resource is locked
* @return true if it is locked
*/
public boolean isLock() {
return lock;
}
/**
* set locking on the resource
* @param lock
*/
public void setLock(boolean lock) {
this.lock = lock;
}
/**
* get the object type of the resource
* @return the class type of the resource
*/
public String getType() {
return type;
}
/**
* set the object type of the resource
* @param type
*/
public void setType(String type) {
this.type = type;
}
/**
* get the local directory where the file is located
* @return the local directory
*/
public String getLocalDir() {
return localDir;
}
/**
* set the local directory where the file is located, or the source directory in case of move or copy operations
* @param localDir
*/
public void setLocalDir(String localDir) {
this.localDir = localDir;
}
/**
* get the remote directory where the resource will be stored or the destination directory in case of copy, move operations
* @return the remote directory
*/
public String getRemoteDir() {
return remoteDir;
}
/**
* set the remote directory where the resource will be stored
* @param remoteDir the remote directory
*/
public void setRemoteDir(String remoteDir) {
this.remoteDir = remoteDir;
}
/**
* get the lock key or null
* @return the lock key
*/
public String getLockedKey() {
return lockedKey;
}
/**
* set the lock key
* @param lockedKey lock key
*/
public void setLockedKey(String lockedKey) {
this.lockedKey = lockedKey;
}
/**
* get the serviceName associated with the resource. This is needed to build the remote root path.
* @return the serviceName
*/
public String getServiceName() {
return serviceName;
}
/**
* set the serviceName associated with the resource. This is needed to build the remote root path.
* @param serviceName serviceName associated to the resource
*/
public void setServiceName(String serviceName) {
this.serviceName = serviceName;
}
/**
* get the serviceClass associated with the resource. This is needed to build the remote root path.
* @return service class
*/
public String getServiceClass() {
return serviceClass;
}
/**
* set the serviceClass associated with the resource. This is needed to build the remote root path.
* @param serviceClass serviceClass associated to the resource
*/
public void setServiceClass(String serviceClass) {
this.serviceClass = serviceClass;
}
/**
* get the file owner
* @return the file owner
*/
public String getOwnerGcube() {
return ownerGcube;
}
/**
* set the file owner
* @param ownerGcube file owner
*/
public void setOwnerGcube(String ownerGcube) {
this.ownerGcube = ownerGcube;
}
/**
* get the gCube scope, needed to build the remote root path
* @return gcube scope string
*/
public String getGcubeScope() {
return gcubeScope;
}
/**
* set the gCube scope
* @param gcubeScope gcube scope
*/
public void setGcubeScope(String gcubeScope) {
this.gcubeScope = gcubeScope;
}
/**
* get the gcube accessType: PRIVATE, SHARED, PUBLIC
* @return gcube access type
*
*/
public AccessType getGcubeAccessType() {
return gcubeAccessType;
}
/**
* set the gcube accessType: PRIVATE, SHARED, PUBLIC
* @param gcubeAccessType
*/
public void setGcubeAccessType(AccessType gcubeAccessType) {
this.gcubeAccessType = gcubeAccessType;
}
/**
* get the gcube memoryType: PERSISTENT, VOLATILE
* @return the memory type
*/
public MemoryType getGcubeMemoryType() {
return gcubeMemoryType;
}
/**
* set the gcube memoryType: PERSISTENT, VOLATILE
* @param gcubeMemoryType
*/
public void setGcubeMemoryType(MemoryType gcubeMemoryType) {
this.gcubeMemoryType = gcubeMemoryType;
}
/**
* set the kind of operation
* @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition
* @param operation operation type
*/
public void setOperation(OperationDefinition operation) {
this.operation = operation;
}
/**
* set the kind of operation
* @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition#setOperation(OPERATION)
* @param operation
*/
public void setOperation(OPERATION operation) {
this.operation = new OperationDefinition(operation);
}
/**
* get the kind of operation
* @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition
* @return the operation definition on this resource
*/
public OperationDefinition getOperationDefinition(){
return operation;
}
/**
* get the local resource identifier
* @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition#getLocalResource()
* @return the local Resource identifier
*/
public LOCAL_RESOURCE getLocalResource() {
return operation.getLocalResource();
}
/**
* set the local resource identifier
* @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition#setLocalResource(LOCAL_RESOURCE)
* @param localResource local resource identifier
*/
public void setLocalResource(LOCAL_RESOURCE localResource) {
if(operation==null)
operation=new OperationDefinition(OPERATION.VOID);
operation.setLocalResource(localResource);
}
/**
* get the remote resource identifier
* @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition#getRemoteResource()
* @return the remote Resource identifier
*/
public REMOTE_RESOURCE getRemoteResource() {
return operation.getRemoteResource();
}
/**
* set the remote resource identifier
* @see org.gcube.contentmanagement.blobstorage.resource.OperationDefinition#setRemoteResource(REMOTE_RESOURCE)
* @param remoteResource remote resource identifier */
public void setRemoteResource(REMOTE_RESOURCE remoteResource) {
if(operation==null)
operation=new OperationDefinition(OPERATION.VOID);
operation.setRemoteResource(remoteResource);
}
public String getAbsoluteRemotePath() {
return absoluteRemotePath;
}
public void setAbsoluteRemotePath(String absoluteRemotePath) {
this.absoluteRemotePath = absoluteRemotePath;
}
public long getLifeTime() {
return lifeTime;
}
public void setLifeTime(long lifeTime) {
this.lifeTime = lifeTime;
}
public OperationDefinition getOperation() {
return operation;
}
public String getId() {
return id;
}
public void setId(String id) {
this.id = id;
}
public long getSize() {
return size;
}
public void setSize(long size) {
this.size = size;
}
public String getExtension() {
return extension;
}
public void setExtension(String extension) {
this.extension = extension;
}
public String getCreationTime() {
return creationTime;
}
public void setCreationTime(String creationTime) {
this.creationTime = creationTime;
}
public void setResolverHost(String resolverHost) {
this.resolverHost=resolverHost;
}
public String getResolverHOst() {
return resolverHost;
}
public void forceCreation(boolean forceCreation) {
this.forceCreation=forceCreation;
}
public boolean isForceCreation(){
return this.forceCreation;
}
public String getMimeType(){
return this.mimeType;
}
public void setMimeType(String mime) {
this.mimeType=mime;
}
public String getGenericPropertyField() {
return genericPropertyField;
}
public void setGenericPropertyField(String genericPropertyField) {
this.genericPropertyField = genericPropertyField;
}
public String getGenericPropertyValue() {
return genericPropertyValue;
}
public void setGenericPropertyValue(String genericPropertyValue) {
this.genericPropertyValue = genericPropertyValue;
}
public String getPassPhrase() {
return passPhrase;
}
public void setPassPhrase(String passPhrase) {
this.passPhrase = passPhrase;
}
public String getWriteConcern() {
return writeConcern;
}
public void setWriteConcern(String writeConcern) {
this.writeConcern = writeConcern;
}
public String getReadPreference() {
return readPreference;
}
public void setReadPreference(String readConcern) {
this.readPreference = readConcern;
}
public void setRootPath(String rootPath) {
this.rootPath=rootPath;
}
public String getRootPath(){
return rootPath;
}
public void setReplaceOption(boolean replace) {
this.replace=replace;
}
public boolean isReplace(){
return replace;
}
public void print(){
logger.info("\n Object: \n\t path: "+this.getRemotePath()+ "\n\t id: "+this.getId());
}
public String getId2() {
return id2;
}
public void setId2(String id2) {
this.id2 = id2;
}
}
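
To show how the pieces fit together, a hypothetical sketch that builds a MyFile for the upload case described in OperationDefinition's javadoc below; all values are made up.

package org.gcube.contentmanagement.blobstorage.resource;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.LOCAL_RESOURCE;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
// Demo only, not part of this commit.
public class MyFileDemo {
    public static void main(String[] args) {
        MyFile file = new MyFile("rcirillo", "report.pdf",
                "/home/user/report.pdf", "/app/docs/report.pdf", MemoryType.PERSISTENT);
        file.setOperation(OPERATION.UPLOAD);           // kind of operation
        file.setLocalResource(LOCAL_RESOURCE.PATH);    // local side identified by a path
        file.setRemoteResource(REMOTE_RESOURCE.PATH);  // remote side identified by a path
        file.setGcubeAccessType(AccessType.SHARED);
        file.print();                                  // logs remote path and id
    }
}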

@ -0,0 +1,140 @@
package org.gcube.contentmanagement.blobstorage.resource;
/**
* Defines the identity of a remote operation.
* The enumerations OPERATION, LOCAL_RESOURCE and REMOTE_RESOURCE contain everything needed to identify the kind of operation
* ex:
*
*
* if the operation is defined in this way:
*
*
* OPERATION: UPLOAD;
* LOCAL_RESOURCE: PATH;
* REMOTE_RESOURCE: PATH;
*
*
* It means that the client wants to upload a file whose absolute local path is defined in the pathClient field,
* to the remote location identified by the pathServer field of the MyFile resource
* @see org.gcube.contentmanagement.blobstorage.resource.MyFile
*
* @author Roberto Cirillo (ISTI-CNR)
*
*/
public class OperationDefinition {
/**
* Indicates the type of current operation
*
*/
public enum OPERATION {UPLOAD,DOWNLOAD, REMOVE, REMOVE_DIR, SHOW_DIR, GET_URL, UNLOCK, GET_TTL, RENEW_TTL, GET_SIZE, VOID, LOCK, COPY, COPY_DIR, LINK, MOVE, MOVE_DIR, GET_META_FILE, GET_TOTAL_USER_VOLUME, GET_USER_TOTAL_ITEMS, GET_FOLDER_TOTAL_VOLUME, GET_FOLDER_TOTAL_ITEMS, GET_FOLDER_LAST_UPDATE, CLOSE, GET_META_INFO, SET_META_INFO, GET_HTTP_URL, GET_HTTPS_URL, GET_REMOTE_PATH, EXIST, DUPLICATE, SOFT_COPY}
/**
* Indicates how the local resource is identified
*
*/
public enum LOCAL_RESOURCE {INPUT_STREAM, OUTPUT_STREAM, PATH, VOID, ID}
/**
* Indicates how the remote resource is identified
*
*/
public enum REMOTE_RESOURCE {INPUT_STREAM, OUTPUT_STREAM, PATH, VOID, PATH_FOR_INPUT_STREAM, PATH_FOR_OUTPUTSTREAM, ID, DIR}
/**
* Indicates the type of current operation
*/
private OPERATION operation;
/**
* Indicates how the local resource is identified
*/
private LOCAL_RESOURCE localResource;
/**
* Indicates how the remote resource is identified
*/
private REMOTE_RESOURCE remoteResource;
/**
* Set the complete operation definition
* @param op operation type
* @param lr local resource type
* @param rr remote resource type
*/
public OperationDefinition(OPERATION op, LOCAL_RESOURCE lr, REMOTE_RESOURCE rr){
setOperation(op);
setLocalResource(lr);
setRemoteResource(rr);
}
/**
* Set the operation definition without specifying the local resource and the remote resource
* @param op operation type
*/
public OperationDefinition(OPERATION op){
setOperation(op);
setLocalResource(LOCAL_RESOURCE.VOID);
setRemoteResource(REMOTE_RESOURCE.VOID);
}
/**
* Get the operation type
* @return the operation type
*/
public OPERATION getOperation() {
return operation;
}
/**
* set the operation type
* @param operation operation type
*/
public void setOperation(OPERATION operation) {
this.operation = operation;
}
/**
* get the local resource type
* @return the local resource type
*/
public LOCAL_RESOURCE getLocalResource() {
return localResource;
}
/**
* set the local resource type
* @param localResource local resource type
*/
public void setLocalResource(LOCAL_RESOURCE localResource) {
this.localResource = localResource;
}
/**
* get the remote resource type
* @return the remote resource type
*/
public REMOTE_RESOURCE getRemoteResource() {
return remoteResource;
}
/**
* set the remote resource type
* @param remoteResource
*/
public void setRemoteResource(REMOTE_RESOURCE remoteResource) {
this.remoteResource = remoteResource;
}
@Override
public String toString() {
return "OperationDefinition [operation=" + operation
+ ", localResource=" + localResource + ", remoteResource="
+ remoteResource + "]";
}
}
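
The UPLOAD/PATH/PATH example from the class javadoc translates directly into code; a minimal sketch (the printed line follows from the toString override above):

package org.gcube.contentmanagement.blobstorage.resource;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.LOCAL_RESOURCE;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
// Demo only, not part of this commit.
public class OperationDefinitionDemo {
    public static void main(String[] args) {
        OperationDefinition op =
                new OperationDefinition(OPERATION.UPLOAD, LOCAL_RESOURCE.PATH, REMOTE_RESOURCE.PATH);
        // prints: OperationDefinition [operation=UPLOAD, localResource=PATH, remoteResource=PATH]
        System.out.println(op);
    }
}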

@ -0,0 +1,76 @@
package org.gcube.contentmanagement.blobstorage.resource;
/**
* Class that defines an entity object (a file or a directory).
* This entity contains file properties and methods for client queries.
* This type of resource is built by the TransportManager to answer the client,
* e.g. if the client asks for the contents of a remote folder, a List of StorageObject is returned
*
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class StorageObject {
private String type;
private String name;
private String owner;
private String creationTime;
private String id;
public String getCreationTime() {
return creationTime;
}
public void setCreationTime(String creationTime) {
this.creationTime = creationTime;
}
public StorageObject(String name, String type){
setType(type);
setName(name);
}
public StorageObject(String name, String type, String owner, String creationTime){
setType(type);
setName(name);
setOwner(owner);
setCreationTime(creationTime);
}
public boolean isDirectory() {
return type.equalsIgnoreCase("dir");
}
public boolean isFile() {
return type.equalsIgnoreCase("file");
}
private void setType(String type) {
this.type = type;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getOwner() {
return owner;
}
public void setOwner(String owner) {
this.owner = owner;
}
public void setId(String id) {
this.id=id;
}
public String getId(){
return this.id;
}
}
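
A small sketch of how a listing of StorageObject instances might be consumed; in practice such a list would come from a showDir-style call, and the values below are invented.

package org.gcube.contentmanagement.blobstorage.resource;
import java.util.Arrays;
import java.util.List;
// Demo only, not part of this commit.
public class StorageObjectDemo {
    public static void main(String[] args) {
        List<StorageObject> listing = Arrays.asList(
                new StorageObject("docs", "dir"),
                new StorageObject("report.pdf", "file", "rcirillo", "2012-05-04"));
        for (StorageObject so : listing) {
            // isDirectory()/isFile() just compare the type field set at construction
            System.out.println((so.isDirectory() ? "[dir]  " : "[file] ") + so.getName());
        }
    }
}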

@ -0,0 +1,262 @@
package org.gcube.contentmanagement.blobstorage.service;
import org.gcube.contentmanagement.blobstorage.service.impl.AmbiguousResource;
import org.gcube.contentmanagement.blobstorage.service.impl.LocalResource;
import org.gcube.contentmanagement.blobstorage.service.impl.RemoteResource;
import org.gcube.contentmanagement.blobstorage.service.impl.RemoteResourceBoolean;
import org.gcube.contentmanagement.blobstorage.service.impl.RemoteResourceComplexInfo;
import org.gcube.contentmanagement.blobstorage.service.impl.RemoteResourceFolderInfo;
import org.gcube.contentmanagement.blobstorage.service.impl.RemoteResourceInfo;
import org.gcube.contentmanagement.blobstorage.service.impl.RemoteResourceSource;
/**
* User interface.
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public interface IClient {
/**
*
* Method for the download
*/
public abstract LocalResource get();
/**
*
* Method for locking a remote resource (file)
*/
public abstract AmbiguousResource lock();
/**
*
* Method to ask the file size
*/
public abstract RemoteResourceInfo getSize();
/**
* Method for the upload
* @param replace indicates if the file must be replaced if it is already present in the storage
* @return LocalResource object
*/
public abstract LocalResource put(boolean replace);
/**
* Method for the upload
* @param replace indicates if the file must be replaced if it is already present in the storage
* @param mimeType the file mime type
* @return LocalResource object
*/
public abstract LocalResource put(boolean replace, String mimeType);
/**
*
* Method for unlocking a remote resource
*/
public abstract AmbiguousResource unlock(String key);
/**
* TTL query
* @return the TTL left in ms for a remote resource if it is locked
*/
public abstract RemoteResourceInfo getTTL();
/**
*
* Remove a remote resource from the storage System
* @return RemoteResource object
*/
public abstract RemoteResource remove();
/**
* Show all the objects in a specified remote folder
* @return RemoteResource object
*/
public RemoteResource showDir();
/**
*
* remove a folder from the storage System
* @return RemoteResource object
*/
public RemoteResource removeDir();
/**
* renew a TTL for a specific resource. This operation is allowed a limited number of times
* @return RemoteResourceInfo object
*/
public RemoteResourceInfo renewTTL(String key);
/**
*
* @return RemoteResource object
*/
RemoteResource getUrl();
/**
* Link a file from a remote resource to another new remote resource. If the new remote resource exists,
* this resource will be removed and replaced with the new resource
* @return RemoteResource object
*/
public RemoteResourceSource linkFile();
/**
* Copy a file from a remote resource to another new remote resource. If the new remote resource exists,
* this resource will be removed and replaced with the new resource
* @return RemoteResource object
*/
public RemoteResourceSource copyFile();
/**
* Copy a file from a remote resource to another new remote resource. If the new remote resource exists,
* this resource will be removed and replaced with the new resource
* @return RemoteResource object
*/
public RemoteResourceSource copyFile(boolean replace);
public RemoteResourceSource copyFile(String backendType);
public RemoteResourceSource copyFile(String backendType, boolean replaceOption);
/**
* Move a file from a remote resource to another new remote resource. If the new remote resource exists,
* this resource will be removed and replaced with the new resource
* @return RemoteResource object
*/
public RemoteResourceSource moveFile();
public LocalResource get(String backendType);
public RemoteResourceInfo getSize(String backendType);
public RemoteResourceFolderInfo getFolderTotalVolume();
public RemoteResourceFolderInfo getFolderTotalItems();
public String getTotalUserVolume();
public String getUserTotalItems();
public RemoteResourceFolderInfo getFolderLastUpdate();
public RemoteResource remove(String backendType);
public RemoteResource showDir(String backendType);
public RemoteResource removeDir(String backendType);
public RemoteResource getUrl(String backendType);
public RemoteResourceInfo getTTL(String backendType);
public AmbiguousResource unlock(String key, String backendType);
public RemoteResourceInfo renewTTL(String key, String backendType);
public RemoteResourceSource linkFile(String backendType);
//public RemoteResourceSource copyFile(String backendType);
public RemoteResource duplicateFile();
public RemoteResource duplicateFile(String backendType);
public RemoteResourceSource softCopy();
public RemoteResourceSource softCopy(boolean replace);
public RemoteResourceSource softCopy(String backendType);
public RemoteResourceSource softCopy(String backendType, boolean replaceOption);
public RemoteResourceSource moveFile(String backendType);
public RemoteResourceSource moveDir(String backendType);
public RemoteResourceSource moveDir();
public RemoteResourceSource copyDir(String backendType);
public RemoteResourceSource copyDir();
public RemoteResourceComplexInfo getMetaFile();
/**
* close the connections to backend storage system
*/
public void close();
public RemoteResource getUrl(boolean forceCreation);
public RemoteResource getUrl(String backendType, boolean forceCreation);
public RemoteResource getMetaInfo(String field);
public RemoteResource getMetaInfo(String field, String backendType);
public RemoteResource setMetaInfo(String field, String value);
public RemoteResource setMetaInfo(String field, String value, String backendType);
public String getId(String id);
public RemoteResource getRemotePath();
public RemoteResource getHttpUrl(boolean forceCreation);
public RemoteResource getHttpUrl(String backendType, boolean forceCreation);
public RemoteResource getHttpUrl(String backendType);
public RemoteResource getHttpUrl();
public RemoteResource getHttpsUrl(boolean forceCreation);
public RemoteResource getHttpsUrl(String backendType, boolean forceCreation);
public RemoteResource getHttpsUrl(String backendType);
public RemoteResource getHttpsUrl();
public void setWriteConcern(String write);
public void setReadConcern(String read);
public void setOwner(String owner);
public void setDbNames(String [] dbs);
/**
*
* Method for checking if the file exists
*/
public abstract RemoteResourceBoolean exist();
public abstract RemoteResourceBoolean exist(String backendType);
}
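
How these declarations chain together is not shown in this commit (the concrete implementation is the ServiceEngine class mentioned in MyFile's javadoc), so the sketch below is an assumption: the LFile/RFile terminal methods on LocalResource/RemoteResource are inferred from the changelog entry "id as remote identifier for RFile methods".

// ASSUMED usage, not confirmed by this commit: client construction and the
// LFile()/RFile() terminal calls are hypothetical here.
IClient client = null; // in real code: a configured ServiceEngine instance
String id = client.put(true).LFile("/tmp/report.pdf").RFile("docs/report.pdf"); // upload
client.get().LFile("/tmp/copy.pdf").RFile("docs/report.pdf");                   // download
client.close();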

@ -0,0 +1,188 @@
package org.gcube.contentmanagement.blobstorage.service.directoryOperation;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* This class contains all the coding and decoding for a bucket name:
* bucketId: TO DO
* bucketName (if is a dir):
* bucketName (if is a file):
*
* @author Roberto Cirillo (ISTI - CNR)
*/
public class BucketCoding {
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(BucketCoding.class);
/**
* Coding the name of a file object in a file-type bucket
* @param path the path on the cluster
* @param rootArea the remote root path
* @return the coded bucketName
*/
public String bucketFileCoding(String path, String rootArea) {
logger.debug("Coding name: path: "+path+" rootArea "+rootArea);
// if(!ObjectId.isValid(path)){
String absolutePath =path;
if(rootArea.length()>0){
absolutePath = mergingPathAndFile(rootArea, path);
path=absolutePath;
}
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
absolutePath=absolutePath.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR);
}
logger.debug("coding name done");
// }
return path;
}
/**
* rootArea + path form an absolute path
*
* @param path remote relative path
* @param rootArea remote root path
* @return absolute remote path
*/
public String mergingPathAndDir(String rootArea, String path ) {
char c=rootArea.charAt(rootArea.length()-1);
if((c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
rootArea=rootArea.substring(0, rootArea.length()-1);
}
c=path.charAt(0);
if(!(c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
path=Costants.FILE_SEPARATOR+path;
}
c=path.charAt(path.length()-1);
if(!(c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
path=path+Costants.FILE_SEPARATOR;
}
String bucketName=rootArea+path;
return bucketName;
}
/**
* check and correct the directory format
* @param path remote dir path
* @return remote dir path
*/
public String checkSintaxDir(String path ) {
char c=path.charAt(0);
if(!(c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
path=Costants.FILE_SEPARATOR+path;
}
c=path.charAt(path.length()-1);
if(!(c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
path=path+Costants.FILE_SEPARATOR;
}
String bucketName=path;
return bucketName;
}
/**
* rootArea + path form an absolute path
*
* @param path relative path
* @param rootArea root path
* @return complete path
*/
private String mergingPathAndFile(String rootArea, String path ) {
char c=rootArea.charAt(rootArea.length()-1);
if((c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
rootArea=rootArea.substring(0, rootArea.length()-1);
}
if(path == null) return null;
c=path.charAt(0);
if(!(c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
path=Costants.FILE_SEPARATOR+path;
}
c=path.charAt(path.length()-1);
if((c+"").equalsIgnoreCase(Costants.FILE_SEPARATOR)){
path=path.substring(0, path.length()-1);
}
String bucketName=rootArea+path;
return bucketName;
}
/**
*
* Decoding the name of a file object in a file-type bucket
* @param key relative remote path
* @param rootArea the remote root path
* @return the decoded file name
*/
public String bucketFileDecoding(String key, String rootArea) {
String nameDecoded=key;
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
String[] splits=key.split(Costants.SEPARATOR);
nameDecoded=splits[splits.length-1];
}
if (logger.isDebugEnabled()) {
logger.debug("decodeBucketFile(String) - end");
}
return nameDecoded;
}
/**
* Coding the name of a directory object in a directory-type bucket
* @param dir remote directory
* @param rootArea the remote root path
* @return the complete remote path
*/
public String bucketDirCoding(String dir, String rootArea) {
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
dir=dir.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR);
}
dir=mergingPathAndDir(rootArea, dir);
return dir;
}
/**
* Decoding the name in a directory-type bucket.
* In a directory-type bucket you can find either a file object or a directory object
* @param key remote path
* @return the remote path
*/
public String bucketDirDecoding(String key, String rootArea) {
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
String lastChar=key.substring(key.length()-3);
// if is a dir object
if(lastChar.equalsIgnoreCase(Costants.SEPARATOR)){
String[] extractPath=key.split(Costants.SEPARATOR);
String[] rootPath= rootArea.split(Costants.FILE_SEPARATOR);
key="";
for(int i=rootPath.length;i<extractPath.length;i++){
key=key+Costants.FILE_SEPARATOR+extractPath[i];
}
key=key+Costants.FILE_SEPARATOR;
if(logger.isInfoEnabled())
logger.info("found directory: "+key);
// if is a file object
}else{
if(logger.isDebugEnabled())
logger.debug("found object coded: "+key);
key=bucketFileDecoding(key, rootArea);
if(logger.isInfoEnabled())
logger.info("found object: "+key);
}
}
return key;
}
/**
* Return true if the key is a file-bucket object, false if it is a directory-bucket object
* @param key remote path
* @return true if the key is a file-bucket object, false otherwise
*/
public boolean isFileObject(String key) {
String lastChar=key.substring(key.length()-3);
if(lastChar.equalsIgnoreCase(Costants.SEPARATOR))
return false;
return true;
}
}
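
The merging rules are easiest to see on concrete values; a small sketch, assuming Costants.FILE_SEPARATOR is "/":

BucketCoding bc = new BucketCoding();
// the trailing separator on the root is dropped, missing separators on the path are added
String dirBucket = bc.mergingPathAndDir("/root/area/", "my/dir"); // -> "/root/area/my/dir/"
String checked = bc.checkSintaxDir("my/dir");                     // -> "/my/dir/"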

@ -0,0 +1,237 @@
package org.gcube.contentmanagement.blobstorage.service.directoryOperation;
import java.net.UnknownHostException;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.StorageObject;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.MongoException;
/**
*
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class DirectoryBucket {
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(DirectoryBucket.class);
String author;
String fileName;
String path;
String[] server;
String user, password;
public DirectoryBucket(String[] server, String user, String password, String path, String author){
if(logger.isDebugEnabled())
logger.debug("DirectoryBucket PATH: "+path);
//coding the path
this.path=path;
this.author=author;
this.server=server;
this.user=user;
this.password=password;
}
/**
* generate the names of the upper tree directory buckets
* @return The list of tree directory buckets: ex: if the path is /this/is/my/path/myFile.txt
* the list will contain: /this, /this/is, /this/is/my, /this/is/my/path
*/
public String[] retrieveBucketsName(String path, String rootArea){
if (logger.isDebugEnabled()) {
logger.debug("retrieveBucketsName() - start");
}
String pathCoded=new BucketCoding().mergingPathAndDir(rootArea, path);
String[] splits=pathCoded.split(Costants.FILE_SEPARATOR);
String[] buckets=new String[splits.length];
for(int i=0;i<splits.length;i++){
if(logger.isDebugEnabled())
logger.debug("splits["+i+"] = "+splits[i]);
if(i>0){
if(i==(splits.length-1)){
if(logger.isDebugEnabled())
logger.debug("splits["+i+"]= "+splits[i]);
fileName=buckets[i-1]+splits[i];
buckets[i]=buckets[i-1]+splits[i];
if(logger.isDebugEnabled())
logger.debug("fileName: "+fileName);
break;
}else{
buckets[i]=buckets[i-1]+splits[i]+Costants.SEPARATOR;
}
}else{
buckets[i]=Costants.SEPARATOR;
}
if (logger.isDebugEnabled())
logger.debug("buckets["+i+"]= "+buckets[i]);
}
if (logger.isDebugEnabled()) {
logger.debug("retrieveBucketsName() - end");
}
return buckets;
}
/**
* remove a file from a remote directory
* @param bucket remote file to remove
*/
@Deprecated
public void removeKeysOnDirBucket(MyFile resource, String bucket, String rootArea, String backendType, String[] dbNames){
if(logger.isDebugEnabled())
logger.debug("CHECK REMOVE: "+bucket);
String[] bucketList=null;
bucketList=retrieveBucketsName(path, rootArea);
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, resource.getGcubeMemoryType(), dbNames, resource.getWriteConcern(), resource.getReadPreference());
// TerrastoreClient client=new TerrastoreClient( new OrderedHostManager(Arrays.asList(server)), new HTTPConnectionFactory());
for(int i=0;i<bucketList.length;i++){
if(logger.isDebugEnabled())
logger.debug("REMOVE: check "+bucketList[i]);
if(bucketList[i].equalsIgnoreCase(bucket)){
if(logger.isDebugEnabled())
logger.debug("Removing key file: "+bucketList[i]+" from dir: "+bucketList[i-1]);
try {
tm.getValues(resource, bucketList[i-1], DirectoryEntity.class);
} catch (MongoException e) {
tm.close();
e.printStackTrace();
}
}
}
}
/**
* remove a remote directory and all the files that the remote directory contains
* @param bucket
*/
public String removeDirBucket(MyFile resource, String bucket, String rootArea, String backendType, String[] dbNames){
if(logger.isDebugEnabled())
logger.debug("CHECK REMOVE: "+bucket);
String[] bucketList=null;
BucketCoding bc=new BucketCoding();
String bucketDirCoded =bc.bucketDirCoding(bucket, rootArea);
if(logger.isDebugEnabled())
logger.debug("bucketDir Coded: "+bucketDirCoded);
bucketList=retrieveBucketsName(bucket, rootArea);
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, resource.getGcubeMemoryType(), dbNames, resource.getWriteConcern(),resource.getReadPreference());
for(int i=0;i<bucketList.length;i++){
if(logger.isDebugEnabled())
logger.debug("REMOVE: check "+bucketList[i]+" bucketDirCoded: "+bucketDirCoded );
if(bucketDirCoded.contains(bucketList[i])){
Map<String, StorageObject> map=null;
try {
map = tm.getValues(resource, bucketList[i], DirectoryEntity.class);
} catch (MongoException e) {
tm.close();
e.printStackTrace();
}
Set<String> keys=map.keySet();
for(Iterator<String> it=keys.iterator(); it.hasNext();){
String key=(String)it.next();
if(key.equalsIgnoreCase(bucketDirCoded)){
if(logger.isDebugEnabled())
logger.debug("key matched: "+key+" remove");
//recursively remove
try {
map=tm.getValues(resource, key, DirectoryEntity.class);
} catch (MongoException e) {
tm.close();
e.printStackTrace();
}
keys=map.keySet();
for(Iterator<String> it2=keys.iterator(); it2.hasNext();){
String key2=(String)it2.next();
if(logger.isDebugEnabled())
logger.debug("the bucket: "+key+" have a son: "+key2);
if(bc.isFileObject(key2)){
if(logger.isDebugEnabled()){
logger.debug("remove "+key2+" in the bucket: "+key);
}
if(logger.isDebugEnabled())
logger.debug("remove all keys in the bucket: "+key2);
try {
tm.removeRemoteFile(key2, resource);
} catch (UnknownHostException e) {
tm.close();
e.printStackTrace();
} catch (MongoException e) {
tm.close();
e.printStackTrace();
}
}else{
if(logger.isDebugEnabled())
logger.debug(key2+" is a directory");
String bucketDecoded=bc.bucketDirDecoding(key2, rootArea);
removeDirBucket(resource, bucketDecoded, rootArea, backendType, dbNames);
}
}
if(logger.isDebugEnabled())
logger.debug("remove "+key+" in the bucket: "+bucketList[i]);
if(logger.isDebugEnabled())
logger.debug("remove all keys in the bucket: "+key);
try {
tm.removeRemoteFile(key, resource);
} catch (UnknownHostException e) {
tm.close();
e.printStackTrace();
} catch (MongoException e) {
tm.close();
e.printStackTrace();
}
}
}
}
}
return bucketDirCoded;
}
/**
* recursively search in directory buckets; return a key if found, else return null
* @param name fileName
* @param bucketCoded bucketName coded
* @param tm a client for the cluster
*/
public String searchInBucket(MyFile resource, String name, String bucketCoded,
TransportManager tm, String rootArea) {
Map <String, StorageObject> dirs=null;
try{
dirs=tm.getValues(resource, bucketCoded, DirectoryEntity.class);
}catch(Exception e){
tm.close();
logger.info("object not found");
return null;
}
Set<String> set=dirs.keySet();
for(Iterator<String> it= set.iterator(); it.hasNext();){
String key=(String)it.next();
if(logger.isDebugEnabled())
logger.debug("try in "+key);
String nameDecoded = new BucketCoding().bucketFileDecoding(key, rootArea);
if(logger.isDebugEnabled())
logger.debug("name decoded: "+nameDecoded+" name searched is: "+name);
if((nameDecoded!=null ) && (nameDecoded.equalsIgnoreCase(name))){
if(logger.isDebugEnabled())
logger.debug("FOUND in "+bucketCoded+" objectId returned: "+key);
return key;
}else{
searchInBucket(resource,name, key, tm, rootArea);
}
}
return null;
}
}
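
To make the upper-tree expansion concrete, a sketch of retrieveBucketsName on the javadoc's example path, assuming both Costants.FILE_SEPARATOR and Costants.SEPARATOR are "/" and a non-empty root area (the constructor arguments are illustrative):

DirectoryBucket db = new DirectoryBucket(
        new String[]{"server1"}, "user", "password", "/root", "rcirillo");
String[] buckets = db.retrieveBucketsName("/this/is/my/path/myFile.txt", "/root");
// buckets: "/", "/root/", "/root/this/", "/root/this/is/", "/root/this/is/my/",
//          "/root/this/is/my/path/", "/root/this/is/my/path/myFile.txt"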

@ -0,0 +1,80 @@
package org.gcube.contentmanagement.blobstorage.service.directoryOperation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
/**
* Used only by the Terrastore client: defines a directory object.
* Useful for the operations on the directory tree
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class DirectoryEntity {
private String directory;
private String name;
private String author;
private String a;
private String b;
private String c;
private String d;
private String e;
public String getDirectory() {
return directory;
}
public void setDirectory(String directory) {
this.directory = directory;
}
public DirectoryEntity(){
}
public DirectoryEntity(String dir, String author){
setDirectory(dir);
setAuthor(author);
}
public DirectoryEntity(String dir, String author, MyFile file){
setDirectory(dir);
setAuthor(author);
}
public String getAuthor() {
return author;
}
public void setAuthor(String author) {
this.author = author;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
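/**
 * Stores a name-value pair, encoded as "name%value", in the first free generic slot (a..e)
 * @param name the variable name
 * @param value the variable value
 * @return 0 on success, -1 if all five slots are already full
 */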
public int setGenericVariable(String name, String value){
int codeError=0;
if(a==null){
a=name+"%"+value;
}else if(b==null){
b=name+"%"+value;
}else if(c==null){
c=name+"%"+value;
}else if(d==null){
d=name+"%"+value;
}else if(e==null){
e=name+"%"+value;
}else
codeError=-1;
return codeError;
}
}
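/* Usage sketch (hypothetical values): the generic slots are filled in order,
 * and the returned code signals when they are exhausted:
 *
 *   DirectoryEntity dir = new DirectoryEntity("/myDir", "rcirillo");
 *   dir.setGenericVariable("scope", "devsec");              // stored in slot 'a' as "scope%devsec"
 *   int rc = dir.setGenericVariable("mime", "text/plain");  // stored in slot 'b'; rc == 0
 *   // after five pairs every slot is taken and setGenericVariable returns -1
 */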

@ -0,0 +1,129 @@
package org.gcube.contentmanagement.blobstorage.service.directoryOperation;
import java.io.UnsupportedEncodingException;
import java.security.InvalidKeyException;
import java.security.NoSuchAlgorithmException;
import java.security.spec.KeySpec;
import javax.crypto.SecretKey;
import javax.crypto.SecretKeyFactory;
import javax.crypto.spec.DESKeySpec;
import javax.crypto.spec.DESedeKeySpec;
import org.gcube.common.encryption.StringEncrypter;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
/**
* This class can be used to encrypt and decrypt using DES or DESede and a given key
*@author Roberto Cirillo (ISTI-CNR)
*
*/
public class Encrypter {
private KeySpec keySpec;
private SecretKeyFactory keyFactory;
private static final String UNICODE_FORMAT = "UTF8";
@Deprecated
public Encrypter( String encryptionScheme ) throws EncryptionException
{
this( encryptionScheme, null );
}
public Encrypter( String encryptionScheme, String encryptionKey )
throws EncryptionException
{
if ( encryptionKey == null )
throw new IllegalArgumentException( "encryption key was null" );
if ( encryptionKey.trim().length() < 24 )
throw new IllegalArgumentException(
"encryption key was less than 24 characters" );
try
{
byte[] keyAsBytes = encryptionKey.getBytes( UNICODE_FORMAT );
if ( encryptionScheme.equals( Costants.DESEDE_ENCRYPTION_SCHEME) )
{
keySpec = new DESedeKeySpec( keyAsBytes );
}
else if ( encryptionScheme.equals( Costants.DES_ENCRYPTION_SCHEME ) )
{
keySpec = new DESKeySpec( keyAsBytes );
}
else
{
throw new IllegalArgumentException( "Encryption scheme not supported: "
+ encryptionScheme );
}
keyFactory = SecretKeyFactory.getInstance( encryptionScheme);
}
catch (InvalidKeyException e)
{
throw new EncryptionException( e );
}
catch (UnsupportedEncodingException e)
{
throw new EncryptionException( e );
}
catch (NoSuchAlgorithmException e)
{
throw new EncryptionException( e );
}
}
/**
* Encrypt a string
* @param unencryptedString string to encrypt
* @return encrypted string
* @throws EncryptionException
*/
public String encrypt( String unencryptedString ) throws EncryptionException
{
if ( unencryptedString == null || unencryptedString.trim().length() == 0 )
throw new IllegalArgumentException(
"unencrypted string was null or empty" );
try
{
SecretKey key = keyFactory.generateSecret( keySpec );
return StringEncrypter.getEncrypter().encrypt(unencryptedString, key);
}
catch (Exception e)
{
throw new EncryptionException( e );
}
}
/**
* decrypt a string
* @param encryptedString encrypted string
* @return decrypted string
* @throws EncryptionException
*/
public String decrypt( String encryptedString ) throws EncryptionException
{
if ( encryptedString == null || encryptedString.trim().length() <= 0 )
throw new IllegalArgumentException( "encrypted string was null or empty" );
try
{
SecretKey key = keyFactory.generateSecret( keySpec );
return StringEncrypter.getEncrypter().decrypt(encryptedString, key);
}
catch (Exception e)
{
throw new EncryptionException( e );
}
}
@SuppressWarnings("serial")
public static class EncryptionException extends Exception
{
public EncryptionException( Throwable t )
{
super( t );
}
}
}
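/* Usage sketch: a round trip, assuming a key of at least 24 characters and the
 * scheme constants defined in Costants (the key below is illustrative only):
 *
 *   Encrypter enc = new Encrypter(Costants.DESEDE_ENCRYPTION_SCHEME, "0123456789abcdefghilmnopqr");
 *   String cipher = enc.encrypt("clear text");
 *   String clear  = enc.decrypt(cipher);   // yields "clear text" again
 */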

@ -0,0 +1,33 @@
package org.gcube.contentmanagement.blobstorage.service.impl;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.LOCAL_RESOURCE;
/**
* This class is used by methods that can accept either a RemoteResource or a LocalResource
*
* @author Roberto Cirillo (ISTI-CNR)
*
*/
public class AmbiguousResource extends RemoteResource {
public AmbiguousResource(MyFile file, ServiceEngine engine) {
super(file, engine);
}
/**
* define local resource
* @param path : local absolute path of resource
* @return remoteResource object
*/
public RemoteResource LFile(String path){
if(getMyFile() != null){
getMyFile().setLocalPath(path);
}else{
setMyFile(setGenericProperties("", "", path, "local"));
getMyFile().setLocalPath(path);
}
getMyFile().setLocalResource(LOCAL_RESOURCE.PATH);
return new RemoteResource(getMyFile(), getEngine());
}
}

@ -0,0 +1,139 @@
package org.gcube.contentmanagement.blobstorage.service.impl;
import java.io.InputStream;
import java.io.OutputStream;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.LOCAL_RESOURCE;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
/**
* Defines the operations for selecting a local resource.
* e.g. a local path for a download operation, or an inputStream
*
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class LocalResource extends Resource{
public LocalResource(MyFile file, ServiceEngine engine) {
super(file, engine);
}
/**
* define local resource
* @param path : local absolute path of resource
* @return remoteResource object
*/
public RemoteResource LFile(String path){
if(getMyFile() != null){
getMyFile().setLocalPath(path);
}else{
setMyFile(setGenericProperties("", "", path, "local"));
getMyFile().setLocalPath(path);
}
getMyFile().setLocalResource(LOCAL_RESOURCE.PATH);
return new RemoteResource(getMyFile(), getEngine());
}
/**
* define local resource
* @param is : inputStream of resource
* @return remoteResource object
*/
public RemoteResource LFile(InputStream is) {
if(getMyFile() != null){
getMyFile().setInputStream(is);
}else{
setMyFile(new MyFile(engine.getGcubeMemoryType()));
getMyFile().setInputStream(is);
}
getMyFile().setLocalResource(LOCAL_RESOURCE.INPUT_STREAM);
return new RemoteResource(getMyFile(), getEngine());
}
/**
* define local resource
* @param os output stream of resource
* @return remoteResource object
*/
public RemoteResource LFile(OutputStream os) {
if(getMyFile() != null){
getMyFile().setOutputStream(os);
}else{
setMyFile(new MyFile(engine.getGcubeMemoryType()));
getMyFile().setOutputStream(os);
}
getMyFile().setLocalResource(LOCAL_RESOURCE.OUTPUT_STREAM);
return new RemoteResource(getMyFile(), getEngine());
}
/**
* Method that returns an inputStream of a remote resource
* @param path remote path of remote resource
* @return inputStream of the remote resource identified by the path argument
*
*/
public InputStream RFileAsInputStream(String path){
file = setGenericProperties(engine.getContext(), engine.owner, path, "remote");
file.setRemotePath(path);
file.setOwner(engine.owner);
file.setType("input");
file.setLocalResource(LOCAL_RESOURCE.VOID);
file.setRemoteResource(REMOTE_RESOURCE.PATH_FOR_INPUT_STREAM);
setMyFile(file);
engine.service.setResource(getMyFile());
getRemoteObject(file, engine.primaryBackend, engine.volatileBackend);
InputStream is= file.getInputStream();
file.setInputStream(null);
return is;
}
/**
* Method that returns an inputStream of a remote resource
* @param path remote path of remote resource
* @return inputStream of the remote resource identified by the path argument
*
*/
@Deprecated
public InputStream RFileAStream(String path){
file = setGenericProperties(engine.getContext(), engine.owner, path, "remote");
file.setRemotePath(path);
file.setOwner(engine.owner);
file.setType("input");
file.setLocalResource(LOCAL_RESOURCE.VOID);
file.setRemoteResource(REMOTE_RESOURCE.PATH_FOR_INPUT_STREAM);
setMyFile(file);
engine.service.setResource(getMyFile());
getRemoteObject(file, engine.primaryBackend, engine.volatileBackend);
InputStream is= file.getInputStream();
file.setInputStream(null);
return is;
}
/**
* Method that returns an outputStream of a remote resource, used for upload operation
* @param path remote path of remote resource
* @return outputStream of the remote resource identified by the path argument
*
*/
public OutputStream RFileAsOutputStream(String path){
file = setGenericProperties(engine.getContext(), engine.owner, path, "remote");
file.setRemotePath(path);
file.setOwner(engine.owner);
file.setType("output");
file.setLocalResource(LOCAL_RESOURCE.VOID);
file.setRemoteResource(REMOTE_RESOURCE.PATH_FOR_OUTPUTSTREAM);
setMyFile(file);
engine.service.setResource(getMyFile());
getRemoteObject(file, engine.primaryBackend, engine.volatileBackend);
OutputStream os=file.getOutputStream();
file.setOutputStream(null);
return os;
}
}
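/* Usage sketch, assuming an already configured ServiceEngine instance 'engine'
 * (its construction is elided here):
 *
 *   LocalResource local = new LocalResource(null, engine);
 *   RemoteResource remote = local.LFile("/tmp/report.pdf"); // selects the local side
 *   // the returned RemoteResource is then used to select the remote side of the operation
 */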

@ -0,0 +1,156 @@
package org.gcube.contentmanagement.blobstorage.service.impl;
import java.net.UnknownHostException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryEntity;
import org.gcube.contentmanagement.blobstorage.service.operation.OperationManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.resource.StorageObject;
/**
* Defines the operations for selecting a remote resource.
* e.g. a remote path for a download operation.
* This selection is made for all types of operation
*
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class RemoteResource extends Resource{
public RemoteResource(MyFile file, ServiceEngine engine) {
super(file, engine);
logger.info("file gCube parameter costructor: "+file.getGcubeAccessType()+" "+file.getGcubeScope());
}
/**
* identify a remote resource by path (a file or a directory)
* @param path the remote path
* @return remote resource id
* @throws RemoteBackendException if there are runtime exceptions from the remote backend
*/
public String RFile(String path) throws RemoteBackendException{
return RFile(path, false);
}
/**
* identify a remote resource by path (a file or a directory)
* @param path the remote path
* @return remote resource id
* @throws RemoteBackendException if there are runtime exceptions from the remote backend
*/
public String RFile(String path, boolean backendTypeReturned) throws RemoteBackendException{
logger.info("file gCube parameter before: "+file.getGcubeAccessType()+" "+file.getGcubeScope());
file = setGenericProperties(engine.getContext(), engine.owner, path, "remote");
file.setRemotePath(path);
file.setOwner(engine.owner);
getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH);
setMyFile(file);
engine.service.setResource(getMyFile());
Object obj=getRemoteObject(getMyFile(),engine.primaryBackend,engine.volatileBackend);
String id=null;
if(obj!=null)
id=obj.toString();
if (backendTypeReturned && (id!=null))
return id+BACKEND_STRING_SEPARATOR+engine.getBackendType();
return id;
}
/**
* identify a remote resource by object id
* @param id that identifies a remote resource
* @return remote resource id
* @throws RemoteBackendException if there are runtime exceptions from the remote backend
*/
public String RFileById(String id) throws RemoteBackendException{
return RFileById(id, false);
}
/**
* identify a remote resource by object id
* @param id that identifies a remote resource
* @return remote resource id
* @throws RemoteBackendException if there are runtime exceptions from the remote backend
*/
public String RFileById(String id, boolean backendTypeReturned) throws RemoteBackendException{
getMyFile().setRemoteResource(REMOTE_RESOURCE.ID);
Object obj = executeOperation(id);
String idReturned=null;
if(obj!=null)
idReturned=obj.toString();
if (backendTypeReturned && idReturned != null)
return idReturned+BACKEND_STRING_SEPARATOR+engine.getBackendType();
return idReturned;
}
/**
* Identify a remote folder by path
* @param dir dir remote path
* @return list of object contained in the remote dir
*/
public List<StorageObject> RDir(String dir){
getMyFile().setRemoteResource(REMOTE_RESOURCE.DIR);
getMyFile().setOwner(engine.owner);
if(engine.getCurrentOperation().equalsIgnoreCase("showdir")){
dir = new BucketCoding().bucketDirCoding(dir, engine.getContext());
TransportManagerFactory tmf= new TransportManagerFactory(engine.primaryBackend, engine.getBackendUser(), engine.getBackendPassword());
TransportManager tm=tmf.getTransport(engine.getBackendType(), engine.getGcubeMemoryType(), engine.getDbNames(), engine.getWriteConcern(), engine.getReadConcern());
Map<String, StorageObject> mapDirs=null;
try {
mapDirs = tm.getValues(getMyFile(), dir, DirectoryEntity.class);
} catch (RemoteBackendException e) {
logger.error("unable to retrieve the directory entries for "+dir, e);
}
List<StorageObject> dirs=null;
if(mapDirs!=null){
dirs = engine.addObjectsDirBucket(mapDirs);
}
if(dirs==null)
dirs=Collections.emptyList();
return dirs;
}else if(engine.getCurrentOperation().equalsIgnoreCase("removedir")){
if((dir != null) && (engine.owner != null)){
DirectoryBucket dirBuc=new DirectoryBucket(engine.primaryBackend, engine.getBackendUser(), engine.getBackendPassword(), dir, engine.owner);
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo"))
dirBuc.removeDirBucket(getMyFile(), dir, engine.getContext(), engine.getBackendType(), engine.getDbNames());
else{
TransportManagerFactory tmf=new TransportManagerFactory(engine.primaryBackend, engine.getBackendUser(), engine.getBackendPassword());
TransportManager tm=tmf.getTransport(Costants.CLIENT_TYPE, engine.getGcubeMemoryType(), engine.getDbNames(), engine.getWriteConcern(), engine.getReadConcern());
dir=new BucketCoding().bucketFileCoding(dir, engine.getContext());
try {
tm.removeDir(dir, getMyFile());
} catch (UnknownHostException e) {
throw new RemoteBackendException(e.getMessage());
}
}
}else{
logger.error("REMOVE Operation not valid:\n\t specify a valid bucketID or an author and a path on the cluster ");
}
return null;
}else{
throw new IllegalArgumentException("The method RDir is not applicable for the operation selected");
}
}
}
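/* Usage sketch ('file' and 'engine' assumed already initialized): RFile resolves
 * a remote path to an object id; with the flag set, the backend type is appended
 * after the '%' separator:
 *
 *   RemoteResource rr = new RemoteResource(file, engine);
 *   String id = rr.RFile("/gcube/devsec/report.pdf", true); // e.g. "<objectId>%<backendType>"
 */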

@ -0,0 +1,77 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.service.impl;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public class RemoteResourceBoolean extends Resource{
/**
* @param file
* @param engine
*/
public RemoteResourceBoolean(MyFile file, ServiceEngine engine) {
super(file, engine);
logger.info("file gCube parameter costructor: "+file.getGcubeAccessType()+" "+file.getGcubeScope());
}
/**
 * identify a remote resource by path (a file or a directory)
 * @param path the remote path
 * @return the boolean result of the remote operation
 * @throws RemoteBackendException if there are runtime exceptions from the remote backend
 */
public boolean RFile(String path) throws RemoteBackendException{
return RFile(path, false);
}
/**
 * identify a remote resource by path (a file or a directory)
 * @param path the remote path
 * @return the boolean result of the remote operation
 * @throws RemoteBackendException if there are runtime exceptions from the remote backend
 */
public boolean RFile(String path, boolean backendTypeReturned) throws RemoteBackendException{
getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH);
Object obj = executeOperation(path);
return Boolean.parseBoolean(obj.toString());
}
/**
 * identify a remote resource by object id
 * @param id that identifies a remote resource
 * @return the boolean result of the remote operation
 * @throws RemoteBackendException if there are runtime exceptions from the remote backend
 * @deprecated this method could be replaced by the RFile method
 */
public boolean RFileById(String id) throws RemoteBackendException{
getMyFile().setRemoteResource(REMOTE_RESOURCE.ID);
Object obj = executeOperation(id);
return Boolean.parseBoolean(obj.toString());
}
}

@ -0,0 +1,35 @@
package org.gcube.contentmanagement.blobstorage.service.impl;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
/**
* Manages operations that return a structured object
* @author Roberto Cirillo (ISTI-CNR)
*
*/
public class RemoteResourceComplexInfo extends Resource{
public RemoteResourceComplexInfo(MyFile file, ServiceEngine engine) {
super(file, engine);
}
/**
 * identify a remote resource by path (a file or a directory)
 * @param path the remote path
 * @return a MyFile object that describes the remote resource
 * @throws RemoteBackendException if there are runtime exceptions from the remote backend
 */
public MyFile RFile(String path) throws RemoteBackendException{
setMyFile(setGenericProperties(engine.getContext(), engine.owner, path, "remote"));
getMyFile().setRemotePath(path);
getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH);
engine.service.setResource(getMyFile());
getRemoteObject(getMyFile(),engine.primaryBackend,engine.volatileBackend);
return getMyFile();
}
}

@ -0,0 +1,45 @@
package org.gcube.contentmanagement.blobstorage.service.impl;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
/**
* Manages operation results of String type
* @author Roberto Cirillo (ISTI-CNR)
*
*/
public class RemoteResourceDestination extends Resource{
public RemoteResourceDestination(MyFile file, ServiceEngine engine) {
super(file, engine);
}
/**
 * define the remote destination of the operation
 * @param remoteDestination it can be a remote path or an id
 * @return the bucket name of the destination resource
 * @throws RemoteBackendException if there are runtime exceptions from the remote backend
 */
public String to(String remoteDestination) throws RemoteBackendException{
logger.info("file gCube parameter before: "+file.getGcubeAccessType()+" "+file.getGcubeScope());
file = setGenericProperties(engine.getContext(), engine.owner, remoteDestination, "remote");
file.setRemotePath(remoteDestination);
file.setOwner(engine.owner);
setMyFile(file);
if((remoteDestination != null) &&(ObjectId.isValid(remoteDestination))){
getMyFile().setRemoteResource(REMOTE_RESOURCE.ID);
getMyFile().setId2(remoteDestination);
}else{
getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH);
}
engine.service.setResource(getMyFile());
String bucketName=null;
logger.info("file gCube parameter after: "+file.getGcubeAccessType()+" "+file.getGcubeScope());
bucketName=getRemoteObject(getMyFile(),engine.primaryBackend,engine.volatileBackend).toString();
return bucketName;
}
}

@ -0,0 +1,84 @@
package org.gcube.contentmanagement.blobstorage.service.impl;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
/**
* Manages folder operation results of String type
* @author Roberto Cirillo (ISTI-CNR)
*
*/
public class RemoteResourceFolderInfo extends Resource {
private String serviceName;
private String ownerGcube;
private String gcubeScope;
private String gcubeAccessType;
private String gcubeMemoryType;
public RemoteResourceFolderInfo(MyFile file, ServiceEngine engine) {
super(file, engine);
}
/**
 * identify a remote folder by path
 * @param path the remote path
 * @return the operation result as a String
 * @throws RemoteBackendException if there are runtime exceptions from the remote backend
 */
public String RDir(String path) throws RemoteBackendException{
file = setGenericProperties(engine.getContext(), engine.owner, path, "remote");
file.setRemotePath(path);
file.setOwner(engine.owner);
getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH);
setMyFile(file);
engine.service.setResource(getMyFile());
return getRemoteObject(getMyFile(),engine.primaryBackend,engine.volatileBackend).toString();
}
public String getServiceName() {
return serviceName;
}
public void setServiceName(String serviceName) {
this.serviceName = serviceName;
}
public String getOwnerGcube() {
return ownerGcube;
}
public void setOwnerGcube(String ownerGcube) {
this.ownerGcube = ownerGcube;
}
public String getGcubeScope() {
return gcubeScope;
}
public void setGcubeScope(String gcubeScope) {
this.gcubeScope = gcubeScope;
}
public String getGcubeAccessType() {
return gcubeAccessType;
}
public void setGcubeAccessType(String gcubeAccessType) {
this.gcubeAccessType = gcubeAccessType;
}
public String getGcubeMemoryType() {
return gcubeMemoryType;
}
public void setGcubeMemoryType(String gcubeMemoryType) {
this.gcubeMemoryType = gcubeMemoryType;
}
}

@ -0,0 +1,53 @@
package org.gcube.contentmanagement.blobstorage.service.impl;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
/**
 * Unlike the RemoteResource class, returns information to the client, such as a TTL or a size.
 * This class is used for the operations on TTL
 * @author rcirillo
 *
 */
public class RemoteResourceInfo extends Resource{
public RemoteResourceInfo(MyFile file, ServiceEngine engine) {
super(file, engine);
}
/**
 * identify a remote resource by path (a file or a directory)
 * @param path the remote path
 * @return a long value for the remote resource, e.g. its size or TTL
 * @throws RemoteBackendException if there are runtime exceptions from the remote backend
 */
public long RFile(String path) throws RemoteBackendException{
getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH);
Object info= executeOperation(path);
if(info!=null)
return Long.parseLong(info.toString());
else
return -1;
}
/**
 * identify a remote resource by object id
 * @param id identifies a remote file
 * @return a long value for the remote resource, e.g. its size or TTL
 * @throws RemoteBackendException if there are runtime exceptions from the remote backend
 */
public long RFileById(String id) throws RemoteBackendException{
getMyFile().setRemoteResource(REMOTE_RESOURCE.ID);
Object info=executeOperation(id);
if(info!=null)
return Long.parseLong(info.toString());
else
return -1;
}
}

@ -0,0 +1,39 @@
package org.gcube.contentmanagement.blobstorage.service.impl;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.LOCAL_RESOURCE;
/**
*
* @author Roberto Cirillo (ISTI-CNR)
*
*/
public class RemoteResourceSource extends Resource {
public RemoteResourceSource(MyFile file, ServiceEngine engine) {
super(file, engine);
}
/**
 * define the remote source of the operation
 * @param remoteIdentifier it can be an id or a remote path
 * @return a RemoteResourceDestination used to select the destination
 */
public RemoteResourceDestination from(String remoteIdentifier){
if(getMyFile() != null){
getMyFile().setLocalPath(remoteIdentifier);
}else{
setMyFile(setGenericProperties("", "", remoteIdentifier, "local"));
getMyFile().setLocalPath(remoteIdentifier);
}
if(ObjectId.isValid(remoteIdentifier)){
getMyFile().setLocalResource(LOCAL_RESOURCE.ID);
getMyFile().setId(remoteIdentifier);
}else{
getMyFile().setLocalResource(LOCAL_RESOURCE.PATH);
}
return new RemoteResourceDestination(file, engine);
}
}
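/* Usage sketch ('file' and 'engine' assumed initialized): a source/destination
 * chain for a copy-like operation; from() accepts an id or a remote path and
 * to() returns the destination bucket name:
 *
 *   String bucket = new RemoteResourceSource(file, engine)
 *                       .from("512a7b0e...")      // id or remote path (value illustrative)
 *                       .to("/new/remote/path");
 */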

@ -0,0 +1,138 @@
package org.gcube.contentmanagement.blobstorage.service.impl;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* defines a common set of operations to identify a remote resource or a local resource
*
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class Resource {
final Logger logger = LoggerFactory.getLogger(Resource.class);
protected static final String BACKEND_STRING_SEPARATOR="%";
protected MyFile file;
protected ServiceEngine engine;
public Resource(MyFile file, ServiceEngine engine){
setMyFile(file);
setEngine(engine);
}
protected ServiceEngine getEngine() {
return engine;
}
protected void setEngine(ServiceEngine engine) {
this.engine = engine;
}
protected MyFile getMyFile(){
return file;
}
protected void setMyFile(MyFile f){
if (f!=null)
file=f;
else
logger.warn("instantiated an empty file object");
}
/**
* Set generic properties on MyFile object
* @param context remote root path
* @param owner file author
* @param path remote/local relative path
* @param type remote or local
* @return the current resource
*/
protected MyFile setGenericProperties(String context, String owner, String path, String type) {
if((path != null) && (path.length()>0)){
if(ObjectId.isValid(path)){
if(file==null)
file= new MyFile(path, engine.getGcubeMemoryType());
String id = file.getId();
if((id != null) && (!id.isEmpty()))
file.setId2(path);
else
file.setId(path);
file.setRootPath(context);
file.setAbsoluteRemotePath(context);
} else{
String[] dirs= path.split(Costants.FILE_SEPARATOR);
String name=dirs[dirs.length-1];
if (logger.isDebugEnabled()) {
logger.debug("path(String) - name: " + name);
}
if(file == null){
file= new MyFile(name, engine.getGcubeMemoryType());
}else{
file.setName(name);
}
if(type.equalsIgnoreCase("remote") && (context!=null) && context.length()>0){
file.setRootPath(context);
path=new BucketCoding().bucketFileCoding(path, context);
file.setAbsoluteRemotePath(path);
}
String dir=path.substring(0, (path.length()-name.length()));
if (logger.isDebugEnabled()) {
logger.debug("path(String) - path: " + dir);
}
if(type.equalsIgnoreCase("local")){
if(file.getLocalDir()== null)
file.setLocalDir(dir);
}else{
if(file.getRemoteDir()== null)
file.setRemoteDir(dir);
}
}
file.setOwner(owner);
}else{
file.setOwner(owner);
file.setRootPath(context);
file.setAbsoluteRemotePath(context);
}
return file;
}
protected Object getRemoteObject(MyFile file, String[] backend, String[] vltBackend)throws RemoteBackendException {
Object obj=null;
obj=retrieveRemoteObject(file, backend);
if((obj == null) && (vltBackend !=null && vltBackend.length>0))
obj=retrieveRemoteObject(file, vltBackend);
return obj;
}
protected Object retrieveRemoteObject(MyFile file, String[] backend) throws RemoteBackendException {
Object obj=null;
if(((file.getInputStream() != null) || (file.getOutputStream()!=null)) || ((file.getLocalPath() != null) || (file.getRemotePath() != null)))
obj=engine.service.startOperation(file,file.getRemotePath(), file.getOwner(), backend, Costants.DEFAULT_CHUNK_OPTION, file.getRootPath(), file.isReplace());
else{
logger.error("parameters incompatible ");
}
return obj;
}
protected Object executeOperation(String path) {
logger.info("file gCube parameter before: "+file.getGcubeAccessType()+" "+file.getGcubeScope());
file = setGenericProperties(engine.getContext(), engine.owner, path, "remote");
file.setRemotePath(path);
file.setOwner(engine.owner);
setMyFile(file);
engine.service.setResource(getMyFile());
Object obj=getRemoteObject(getMyFile(),engine.primaryBackend,engine.volatileBackend);
return obj;
}
}

@ -0,0 +1,146 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.operation.UploadOperator;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Uploads the chunks in a concurrency-safe mode; used only for terrastore
 * @author rcirillo
 *
 */
public class ChunkConsumer implements Runnable {
/**
* Logger for this class
*/
final Logger logger = LoggerFactory.getLogger(ChunkConsumer.class);
private Monitor monitor;
private int id;
private String[] server;
private String user;
private String password;
private static String bucketName;
boolean isChunk=false;
String[] dbNames;
public static ThreadLocal<TransportManager> client=new ThreadLocal<TransportManager>();
public static ThreadLocal<MyFile> resource=new ThreadLocal<MyFile>();
private boolean replaceOpt;
Thread producer;
public void run(){
if (logger.isDebugEnabled()) {
logger.debug("run() - start");
}
MyFile request = null;
synchronized (ChunkConsumer.class) {
request=monitor.getRequest();
resource.set(request);
}
// ... handle the fetched request ...
connection(resource.get());
if (logger.isDebugEnabled()) {
logger.debug("run() - end");
}
}
private void connection(MyFile richiesta) {
if (logger.isDebugEnabled()) {
logger.debug("connection(MyFile) - start");
}
try{
if (logger.isDebugEnabled()) {
logger.debug("connection(MyFile) - request fetched: "
+ resource.get().getKey()
+ " current Thread: "
+ Thread.currentThread());
}
putInTerrastore(resource.get());
}catch(Exception e){
logger.warn("connection(MyFile)- upload"+ e.getMessage());
if (logger.isDebugEnabled()) {
logger.debug("connection(MyFile) - retry PUT");
}
connection(resource.get());
}
if (logger.isDebugEnabled()) {
logger.debug("connection(MyFile) - end");
}
}
public ChunkConsumer(Monitor monitor, int id, String[] server, String user, String password, String[] dbNames, boolean isChunk, String bucket, boolean replaceOption){
this.monitor = monitor;
this.id = id;
this.server=server;
this.user=user;
this.password=password;
bucketName=bucket;
this.isChunk=isChunk;
this.dbNames=dbNames;
this.replaceOpt=replaceOption;
}
private String[] randomizeServer(String[] server) {
int len=server.length;
if(logger.isDebugEnabled())
logger.debug("array server length: "+len);
int n = (int)(Math.random()*10);
if(logger.isDebugEnabled())
logger.debug("random number: "+n);
int start=0;
if(n>0){
start=len%n;
if(start>0)
start--;
if(logger.isDebugEnabled())
logger.debug("start index: "+start);
String temp=server[0];
server[0]=server[start];
server[start]=temp;
}
if(logger.isDebugEnabled())
logger.debug("Server 0: "+server[0]);
return server;
}
private void putInTerrastore(MyFile myFile) {
if (logger.isDebugEnabled()) {
logger.debug("putInTerrastore(MyFile) - start");
}
long start=0;
if(client.get()==null){
start=System.currentTimeMillis();
synchronized(ChunkConsumer.class){
String [] randomServer=randomizeServer(server);
TransportManagerFactory tmf=new TransportManagerFactory(randomServer, null, null);
client.set(tmf.getTransport(Costants.CLIENT_TYPE, null, null, myFile.getWriteConcern(), myFile.getReadPreference()));
}
if(logger.isDebugEnabled()){
logger.debug("waiting time for upload: "
+ (System.currentTimeMillis() - start) + " su: "
+ resource.get().getKey());
}
}
start=System.currentTimeMillis();
try{
UploadOperator upload=new UploadOperator(server, user, password, bucketName, monitor, isChunk , null, dbNames);
client.get().put(upload);
}catch(Exception e){
logger.error("ERROR IN CLUSTER CONNECTION ", e);
monitor.putRequest(resource.get());
}
if(logger.isDebugEnabled()){
logger.debug("Time for upload: "
+ (System.currentTimeMillis() - start) + " on: "
+ resource.get().getKey());
}
if (logger.isDebugEnabled()) {
logger.debug("putInTerrastore(MyFile) - end");
}
}
}

@ -0,0 +1,49 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implements a simple algorithm for calculating the size of the chunk
* @author Roberto Cirillo (ISTI-CNR)
*
*/
public class ChunkOptimization {
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(ChunkOptimization.class);
private long fileSize;
public ChunkOptimization(long dimensioneFile){
if (logger.isDebugEnabled()) {
logger.debug("ChunkOptimization(long) - Dimensione del file: "
+ dimensioneFile);
}
this.fileSize=dimensioneFile;
}
public int chunkCalculation(){
long chunkSize=0;
if(fileSize<= Costants.sogliaDimensioneMinima){
chunkSize=fileSize;
}else{
// number of chunks
for(int i=Costants.sogliaNumeroMinimo; i<Costants.sogliaNumeroMassimo; i++){
chunkSize=(fileSize/i);
if((chunkSize < Costants.sogliaDimensioneMassima) && (chunkSize> Costants.sogliaDimensioneMinima)){
break;
}else if(chunkSize<Costants.sogliaDimensioneMinima){
chunkSize=Costants.sogliaDimensioneMinima;
break;
}
}
}
if(logger.isDebugEnabled()){
logger.debug("The chunk size is "+chunkSize);
}
return (int)chunkSize;
}
}
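/* Usage sketch: the thresholds come from Costants, so the computed size depends
 * on the configured minimum/maximum values:
 *
 *   ChunkOptimization opt = new ChunkOptimization(50L * 1024 * 1024); // a 50 MB file
 *   int chunkSize = opt.chunkCalculation(); // falls between the configured thresholds
 */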

@ -0,0 +1,127 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.io.BufferedInputStream;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.apache.commons.io.IOUtils;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Produces the chunks for large files. This class is used only for terrastore
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class ChunkProducer implements Runnable{
MyFile resource;
long dimensionChunk;
int totChunks;
int nThreads;
ChunkConsumer task;
Monitor monitor;
String bucketName;
final Logger logger=LoggerFactory.getLogger(ChunkProducer.class);
public ChunkProducer(Monitor monitor, MyFile resource, long dimensionChunk, int totChunks,
int nThreads, String bucket, ChunkConsumer consumer ) throws FileNotFoundException{
this.resource=resource;
this.dimensionChunk=dimensionChunk;
this.totChunks=totChunks;
this.nThreads=nThreads;
this.monitor=monitor;
this.task=consumer;
this.bucketName=bucket;
}
@Override
public synchronized void run() {
long start=System.currentTimeMillis();
ExecutorService executor = Executors.newFixedThreadPool (nThreads);
InputStream in=null;
try {
in = new BufferedInputStream(new FileInputStream(resource.getLocalPath()));
} catch (FileNotFoundException e) {
logger.error("local file not found: "+resource.getLocalPath(), e);
return;
}
for (int i=0; i<totChunks; i++)
{
// produce a chunk
byte[] chunk=null;
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
// chunk=encodeFile2ByteChunk2(in, resource.getPathClient(), true , dimensionChunk);
try {
chunk=IOUtils.toByteArray(in);
} catch (IOException e) {
logger.error("unable to read chunk "+i+" from the input stream", e);
}
}
if(logger.isDebugEnabled() && (chunk != null))
logger.debug("Chunk produced "+i+" with size: "+chunk.length);
if (logger.isDebugEnabled()) {
logger.debug("put(MyFile, boolean, boolean) - Produced chunk: "
+ i);
}
//---- create the tasks and submit them to the thread pool ----
String key= getBucketName()+i;
resource.setKey(key);
MyFile copy=resource.copyProperties();
copy.setContent(chunk);
if(logger.isDebugEnabled()){
logger.debug("request in queue: "+key);
}
//CHUNK ready to write
monitor.putRequest(copy);
executor.submit (task);
}
System.gc();
executor.shutdown ();
try {
executor.awaitTermination (Long.MAX_VALUE, TimeUnit.SECONDS);
} catch (InterruptedException e) {
logger.error("interrupted while awaiting executor termination", e);
}
try {
in.close();
} catch (IOException e) {
logger.warn("unable to close the input stream", e);
}
if (logger.isDebugEnabled()) {
logger.debug(" Time for file uploading: "+(System.currentTimeMillis()-start));
logger.debug("Used "
+ nThreads + " threads"+"\n\n");
}
}
public byte[] encodeFile2ByteChunk2(InputStream in, String path, boolean isChunk, long chunkDimension) {
byte[] encode=null;
try{
encode=IOUtils.toByteArray(in);
}catch(IOException e){
logger.error("unable to encode the input stream", e);
}
return encode;
}
public String getBucketName() {
return bucketName;
}
public void setBucketName(String bucketName) {
this.bucketName = bucketName;
}
}

@ -0,0 +1,49 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class Close extends Operation{
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(Close.class);
public Close(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
try {
tm.close();
} catch (Exception e) {
throw new RemoteBackendException(" Error in close operation ", e.getCause());
}
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return null;
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
return null;
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
return null;
}
}

@ -0,0 +1,100 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.net.UnknownHostException;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class Copy extends Operation{
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(Copy.class);
protected String sourcePath;
protected String destinationPath;
protected MyFile resource;
public Copy(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
this.sourcePath=file.getLocalPath();
this.destinationPath=remotePath;
sourcePath = new BucketCoding().bucketFileCoding(file.getLocalPath(), rootArea);
destinationPath = new BucketCoding().bucketFileCoding(remotePath, rootArea);
setResource(file);
getResource().setLocalPath(sourcePath);
getResource().setRemotePath(destinationPath);
return bucket=destinationPath;
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String id=null;
try {
id=tm.copy(this);
} catch (UnknownHostException e) {
tm.close();
logger.error("Problem in copy from: "+sourcePath+" to: "+destinationPath+": "+e.getMessage());
throw new RemoteBackendException(" Error in copy operation ", e.getCause());
}
return id;
}
@Override
public String initOperation(MyFile resource, String remotePath,
String author, String[] server, String rootArea) {
// For terrastore, the name of bucket is formed: path_____fileName_____author
this.sourcePath=resource.getLocalPath();
this.destinationPath=resource.getRemotePath();
sourcePath = new BucketCoding().bucketFileCoding(resource.getLocalPath(), rootArea);
destinationPath = new BucketCoding().bucketFileCoding(resource.getRemotePath(), rootArea);
setResource(resource);
getResource().setLocalPath(sourcePath);
getResource().setRemotePath(destinationPath);
return bucket=destinationPath;
}
public abstract String execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException;
public MyFile getResource() {
return resource;
}
public void setResource(MyFile resource) {
this.resource = resource;
}
public String getSourcePath() {
return sourcePath;
}
public void setSourcePath(String sourcePath) {
this.sourcePath = sourcePath;
}
public String getDestinationPath() {
return destinationPath;
}
public void setDestinationPath(String destinationPath) {
this.destinationPath = destinationPath;
}
}

@ -0,0 +1,100 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.net.UnknownHostException;
import java.util.List;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implements the copy dir operation
* @author Roberto Cirillo (ISTI-CNR)
*
*/
public abstract class CopyDir extends Operation{
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(CopyDir.class);
private String sourcePath;
private String destinationPath;
private MyFile resource;
public CopyDir(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
this.sourcePath=file.getLocalPath();
this.destinationPath=remotePath;
sourcePath = new BucketCoding().bucketFileCoding(file.getLocalPath(), rootArea);
destinationPath = new BucketCoding().bucketFileCoding(remotePath, rootArea);
setResource(file);
return bucket=destinationPath;
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
List<String> ids=null;
try {
ids=tm.copyDir(this);
} catch (UnknownHostException e) {
tm.close();
logger.error("Problem in copyDir from: "+sourcePath+" to: "+destinationPath+": "+e.getMessage());
throw new RemoteBackendException(" Error in copyDir operation ", e.getCause());
}
return ids.toString();
}
@Override
public String initOperation(MyFile resource, String remotePath,
String author, String[] server, String rootArea) {
// For terrastore, the name of bucket is formed: path_____fileName_____author
this.sourcePath=resource.getLocalPath();
this.destinationPath=resource.getRemotePath();
sourcePath = new BucketCoding().bucketFileCoding(resource.getLocalPath(), rootArea);
destinationPath = new BucketCoding().bucketFileCoding(resource.getRemotePath(), rootArea);
setResource(resource);
return bucket=destinationPath;
}
public abstract List<String> execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException;
public String getSourcePath() {
return sourcePath;
}
public void setSourcePath(String sourcePath) {
this.sourcePath = sourcePath;
}
public String getDestinationPath() {
return destinationPath;
}
public void setDestinationPath(String destinationPath) {
this.destinationPath = destinationPath;
}
public MyFile getResource() {
return resource;
}
public void setResource(MyFile resource) {
this.resource = resource;
}
}

@ -0,0 +1,111 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.OutputStream;
/**
* Implements a download operation from the cluster: download a file object
*
*@author Roberto Cirillo (ISTI - CNR)
*/
public abstract class Download extends Operation{
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(Download.class);
protected String localPath;
protected String remotePath;
protected OutputStream os;
protected MyFile resource;
public Download(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
this.localPath=file.getLocalPath();
this.remotePath=remotePath;
setResource(file);
return getRemoteIdentifier(remotePath, rootArea);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
String id=null;
if (logger.isDebugEnabled()) {
logger.debug(" DOWNLOAD " + myFile.getRemotePath()
+ " in bucket: " + getBucket());
}
try {
id=get(this, myFile, false);
} catch (Throwable e) {
TransportManagerFactory tmf=new TransportManagerFactory(getServer(), getUser(), getPassword());
TransportManager tm=tmf.getTransport(getBackendType(), myFile.getGcubeMemoryType(), getDbNames(), myFile.getWriteConcern(), myFile.getReadPreference());
tm.close();
logger.error("Problem in download from: "+myFile.getRemotePath()+": "+e.getMessage());
throw new RemoteBackendException(" Problem in download operation ", e.getCause());
}
return id;
}
@Override
public String initOperation(MyFile resource, String remotePath,
String author, String[] server, String rootArea) {
// For terrastore, the name of bucket is formed: path_____fileName_____author
String bucketName=new BucketCoding().bucketFileCoding(remotePath, rootArea);
this.os=resource.getOutputStream();
this.os=resource.getOutputStream();
setBucket(bucketName);
return bucketName;
}
public abstract ObjectId execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance) throws IOException;
public MyFile getResource() {
return resource;
}
public void setResource(MyFile resource) {
this.resource = resource;
}
public String getLocalPath() {
return localPath;
}
public void setLocalPath(String localPath) {
this.localPath = localPath;
}
public String getRemotePath() {
return remotePath;
}
public void setRemotePath(String remotePath) {
this.remotePath = remotePath;
}
public OutputStream getOs() {
return os;
}
public void setOs(OutputStream os) {
this.os = os;
}
}

@ -0,0 +1,67 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.io.OutputStream;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.operation.DownloadOperator;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class DownloadAndLock extends Operation {
final Logger logger=LoggerFactory.getLogger(DownloadAndLock.class);
private String localPath;
private String remotePath;
private OutputStream os;
/**
* @deprecated
* @param server
* @param bucket
* @param monitor
* @param isChunk
*
*/
public DownloadAndLock(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
@Override
public String doIt(MyFile myFile) throws RemoteBackendException {
if (logger.isDebugEnabled()) {
logger.debug(" DOWNLOAD " + myFile.getRemotePath()
+ " in bucket: " + getBucket());
}
Download download = new DownloadOperator(getServer(), getUser(), getPassword(), getBucket(), getMonitor(), isChunk(), getBackendType(), getDbNames());
try {
//TODO add field for file lock
get(download,myFile, true);
} catch (Exception e) {
TransportManagerFactory tmf=new TransportManagerFactory(getServer(), getUser(), getPassword());
TransportManager tm=tmf.getTransport(getBackendType(), myFile.getGcubeMemoryType(), getDbNames(), myFile.getWriteConcern(), myFile.getReadPreference());
tm.close();
throw new RemoteBackendException(" Error in downloadAndLock operation ", e.getCause());
}
return null;
}
@Override
public String initOperation(MyFile file, String RemotePath,
String author, String[] server, String rootArea,
boolean replaceOption) {
this.localPath=file.getLocalPath();
this.remotePath=RemotePath;
return getRemoteIdentifier(this.remotePath, rootArea);
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
// TODO Auto-generated method stub
return null;
}
}

@ -0,0 +1,93 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public abstract class DuplicateFile extends Operation {
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(DuplicateFile.class);
protected String sourcePath;
protected MyFile resource;
public DuplicateFile(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String id=null;
try {
id = tm.duplicateFile(this);
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in duplicateFile operation ", e.getCause());
}
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return id;
}
@Override
public String initOperation(MyFile file, String remotePath, String author, String[] server, String rootArea, boolean replaceOption) {
if(remotePath != null){
boolean isId=ObjectId.isValid(remotePath);
setResource(file);
if(!isId){
if(logger.isDebugEnabled())
logger.debug("remotePath: "+remotePath);
String buck=null;
buck = new BucketCoding().bucketFileCoding(remotePath, rootArea);
return bucket=buck;
}else{
return bucket=remotePath;
}
}else throw new RemoteBackendException("argument cannot be null");
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation");
}
public abstract String execute(MongoIOManager mongoPrimaryInstance);
public String getSourcePath() {
return sourcePath;
}
public void setSourcePath(String sourcePath) {
this.sourcePath = sourcePath;
}
public MyFile getResource() {
return resource;
}
public void setResource(MyFile resource) {
this.resource = resource;
}
}

@ -0,0 +1,72 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implements an Exist operation: checks if a given object exists
* @author Roberto Cirillo (ISTI - CNR) 2018
*
*/
public class Exist extends Operation{
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(Exist.class);
public Exist(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
boolean isPresent=false;
try {
isPresent = tm.exist(bucket);
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in Exist operation ", e.getCause()); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return isPresent+"";
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
if(logger.isDebugEnabled())
logger.debug("remotePath: "+remotePath);
String buck=null;
boolean isId=ObjectId.isValid(remotePath);
if(!isId){
buck = new BucketCoding().bucketFileCoding(remotePath, rootArea);
return bucket=buck;
}else{
return bucket=remotePath;
}
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
throw new IllegalArgumentException("Input/Output stream is not compatible with Exist operation");
}
}
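// --- Hedged usage sketch (not part of this commit) ---
// How a caller might drive the Exist operation directly; in the real service the
// ServiceEngine supplies these coordinates. The null bucket/Monitor arguments and the
// "mongo" backend label are assumptions based on the constructor signature above and
// the Costants.CLIENT_TYPE checks elsewhere in this commit.
class ExistSketch {
static boolean check(org.gcube.contentmanagement.blobstorage.resource.MyFile resource, String pathOrId, String[] servers, String user, String pwd, String rootArea, String[] dbs) {
Exist exist = new Exist(servers, user, pwd, null, null, false, "mongo", dbs);
// initOperation resolves a path through BucketCoding, or passes a valid ObjectId straight through
exist.initOperation(resource, pathOrId, null, servers, rootArea, false);
return Boolean.parseBoolean(exist.doIt(resource));
}
}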

@ -0,0 +1,99 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
//import org.apache.log4j.Logger;
//import org.gcube.common.core.utils.logging.GCUBELog;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.io.OutputStream;
/**
* A thread that writes a chunk to the specified output stream
*
*@author Roberto Cirillo (ISTI - CNR)
*
*/
public class FileWriter extends Thread{
/**
* Logger for this class
*/
// private static final GCUBELog logger = new GCUBELog(FileWriter.class);
final Logger logger=LoggerFactory.getLogger(FileWriter.class);
private Monitor monitor;
private int id;
// private MyFile myFile;
// private byte[] encode;
// private int offset;
// private static int len=0;
private OutputStream out;
// private String path;
private byte[] full;
public synchronized void run(){
if (logger.isDebugEnabled()) {
logger.debug("run() - start");
}
MyFile request = monitor.getRequest();
synchronized (FileWriter.class) {
if(logger.isDebugEnabled()){
logger.debug("recover request: "+request.getKey()+" length: "+request.getContent().length);
}
try {
decodeByte2File(request.getContent());
out.flush();
} catch (Exception e) {
logger.error("run()", e);
}
}
if (logger.isDebugEnabled()) {
logger.debug("run() - end");
}
}
public FileWriter(Monitor monitor, OutputStream out, byte[] fullEncode){
this.monitor=monitor;
this.out=out;
this.full=fullEncode;
}
public FileWriter(Monitor monitor, OutputStream out){
this.monitor = monitor;
this.out=out;
}
public FileWriter(Monitor monitor, int id){
this.monitor = monitor;
this.id = id;
}
public void decodeByte2File(byte[] encode, int offset, int len){
try {
out.write(encode, offset, len);
if(logger.isDebugEnabled())
logger.debug("write from pos:"+offset+" to pos: "+len);
} catch (IOException e) {
logger.error("decodeByte2File(byte[], int, int)", e);
}
if(logger.isDebugEnabled())
logger.debug("New file created!");
}
public void decodeByte2File(byte[] encode){
if (logger.isDebugEnabled()) {
logger.debug("decodeByte2File(byte[]) - start");
logger.debug("encode.length: "+encode.length);
}
try {
out.write(encode);
} catch (Exception e) {
logger.error("scrittura chunk non riuscita!!");
logger.error("decodeByte2File(byte[])", e);
}
if (logger.isDebugEnabled()) {
logger.debug("decodeByte2File(byte[]) - end");
}
}
}
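// --- Hedged usage sketch (not part of this commit) ---
// FileWriter pairs with Monitor as a consumer: a producer enqueues a MyFile chunk
// request and the writer thread drains it into the given OutputStream. It assumes the
// chunk payload is already set on the MyFile content field, as run() expects.
class FileWriterSketch {
static byte[] drainOne(Monitor monitor, org.gcube.contentmanagement.blobstorage.resource.MyFile chunk) throws InterruptedException {
java.io.ByteArrayOutputStream sink = new java.io.ByteArrayOutputStream();
FileWriter writer = new FileWriter(monitor, sink);
monitor.putRequest(chunk); // enqueue one chunk request
writer.start(); // run() takes it from the monitor and writes it to the sink
writer.join();
return sink.toByteArray();
}
}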

@ -0,0 +1,64 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class GetFolderCount extends Operation {
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(GetFolderCount.class);
public GetFolderCount(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType,dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
long dim=0;
try {
dim = tm.getFolderTotalItems(bucket);
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in getFolderTotalItems operation ", e.getCause()); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return ""+dim;
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
if(logger.isDebugEnabled())
logger.debug("remotePath: "+remotePath);
String buck=null;
BucketCoding bc=new BucketCoding();
buck=bc.bucketFileCoding(remotePath, rootArea);
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
buck=buck.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR);
//remove directory bucket
DirectoryBucket dirBuc=new DirectoryBucket(server,user, password, remotePath, author);
dirBuc.removeKeysOnDirBucket(file, buck, rootArea, backendType, dbNames);
}
return bucket=buck;
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation");
}
}

@ -0,0 +1,52 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class GetFolderLastUpdate extends Operation {
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(GetFolderLastUpdate.class);
public GetFolderLastUpdate(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
return null;
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
// String[] dirs= remotePath.split(file_separator);
if(logger.isDebugEnabled())
logger.debug("remotePath: "+remotePath);
String buck=null;
BucketCoding bc=new BucketCoding();
buck=bc.bucketFileCoding(remotePath, rootArea);
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
buck=buck.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR);
//remove directory bucket
DirectoryBucket dirBuc=new DirectoryBucket(server,user, password, remotePath, author);
dirBuc.removeKeysOnDirBucket(file, buck, rootArea, backendType, dbNames);
}
return bucket=buck;
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation");
}
}

@ -0,0 +1,65 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class GetFolderSize extends Operation {
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(GetFolderSize.class);
public GetFolderSize(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
long dim=0;
try {
dim = tm.getFolderTotalVolume(bucket);
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in getFolderTotalVolume operation ", e.getCause()); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return ""+dim;
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
// String[] dirs= remotePath.split(file_separator);
if(logger.isDebugEnabled())
logger.debug("remotePath: "+remotePath);
String buck=null;
BucketCoding bc=new BucketCoding();
buck=bc.bucketFileCoding(remotePath, rootArea);
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
buck=buck.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR);
//remove directory bucket
DirectoryBucket dirBuc=new DirectoryBucket(server,user, password, remotePath, author);
dirBuc.removeKeysOnDirBucket(file, buck, rootArea, backendType, dbNames);
// String bucketName=null;
}
return bucket=buck;
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation");
}
}
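// --- Hedged usage sketch (not part of this commit) ---
// Reading the folder accounting values with GetFolderCount (above) and GetFolderSize
// together; the null arguments and "mongo" label follow the same assumptions as the
// other sketches. Both doIt methods return their numbers as strings.
class FolderAccountingSketch {
static String report(org.gcube.contentmanagement.blobstorage.resource.MyFile dir, String remoteDir, String author, String[] servers, String user, String pwd, String rootArea, String[] dbs) {
GetFolderCount count = new GetFolderCount(servers, user, pwd, null, null, false, "mongo", dbs);
count.initOperation(dir, remoteDir, author, servers, rootArea, false);
GetFolderSize size = new GetFolderSize(servers, user, pwd, null, null, false, "mongo", dbs);
size.initOperation(dir, remoteDir, author, servers, rootArea, false);
return "items: " + count.doIt(dir) + ", bytes: " + size.doIt(dir);
}
}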

@ -0,0 +1,103 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.io.IOException;
import java.net.URL;
import org.apache.commons.codec.binary.Base64;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter.EncryptionException;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
public class GetHttpUrl extends Operation {
// private OutputStream os;
TransportManager tm;
public GetHttpUrl(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
@Override
public String initOperation(MyFile file, String remotePath, String author,
String[] server, String rootArea, boolean replaceOption) {
return getRemoteIdentifier(remotePath, rootArea);
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
// not used by this operation
return null;
}
@Override
public Object doIt(MyFile myFile) throws RemoteBackendException {
String resolverHost=myFile.getResolverHOst();
String urlBase="smp://"+resolverHost+Costants.URL_SEPARATOR;
String urlParam="";
try {
String id=getId(myFile.getAbsoluteRemotePath(), myFile.isForceCreation(), myFile.getGcubeMemoryType(), myFile.getWriteConcern(), myFile.getReadPreference());
String phrase=myFile.getPassPhrase();
// urlParam =new StringEncrypter("DES", phrase).encrypt(id);
urlParam = new Encrypter("DES", phrase).encrypt(id);
// String urlEncoded=URLEncoder.encode(urlParam, "UTF-8");
} catch (EncryptionException e) {
throw new RemoteBackendException(" Error in getUrl operation problem to encrypt the string", e.getCause());
}
logger.info("URL generated: "+urlBase+urlParam);
String smpUrl=urlBase+urlParam;
logger.info("URL generated: "+smpUrl);
smpUrl=smpUrl.replace("smp://", "http://");
URL httpUrl=null;
try {
httpUrl=translate(new URL(smpUrl));
} catch (IOException e) {
logger.error("smp to http URL translation failed", e);
}
logger.info("URL translated: "+httpUrl);
if(myFile.getGcubeMemoryType().equals(MemoryType.VOLATILE)){
return httpUrl.toString()+Costants.VOLATILE_URL_IDENTIFICATOR;
}
return httpUrl.toString();
}
private String getId(String path, boolean forceCreation, MemoryType memoryType, String writeConcern, String readPreference){
String id=null;
if(tm ==null){
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(backendType, memoryType, dbNames, writeConcern, readPreference);
}
try {
id = tm.getId(bucket, forceCreation);
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in GetUrl operation. Problem to discover remote file:"+bucket+" "+ e.getMessage(), e.getCause()); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return id;
}
private URL translate(URL url) throws IOException {
logger.debug("translating: "+url);
String urlString=url.toString();
String baseUrl="http://"+url.getHost()+"/";
logger.debug("base Url extracted is: "+baseUrl);
// int index=urlString.lastIndexOf(".org/");
String params = urlString.substring(baseUrl.length());
logger.debug("get params: "+baseUrl+" "+params);
//encode params
params=Base64.encodeBase64URLSafeString(params.getBytes("UTF-8"));
// URLEncoder.encode(params, "UTF-8");
// merge string
urlString=baseUrl+params;
logger.info("uri translated in http url: "+urlString);
return new URL(urlString);
}
}
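// --- Hedged worked example (illustrative values, not from this commit) ---
// The translate() step above keeps the host and Base64-URL-safe-encodes everything
// after it. For instance, for a hypothetical http://resolver.example.org/abc+/= the
// params substring is "abc+/=", and
// Base64.encodeBase64URLSafeString("abc+/=".getBytes("UTF-8")) yields "YWJjKy89",
// so the translated URL is http://resolver.example.org/YWJjKy89.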

@ -0,0 +1,103 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.io.IOException;
import java.net.URL;
import org.apache.commons.codec.binary.Base64;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter.EncryptionException;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
public class GetHttpsUrl extends Operation {
// private OutputStream os;
TransportManager tm;
public GetHttpsUrl(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
@Override
public String initOperation(MyFile file, String remotePath, String author,
String[] server, String rootArea, boolean replaceOption) {
return getRemoteIdentifier(remotePath, rootArea);
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
// not used by this operation
return null;
}
@Override
public Object doIt(MyFile myFile) throws RemoteBackendException {
String resolverHost=myFile.getResolverHOst();
String urlBase="smp://"+resolverHost+Costants.URL_SEPARATOR;
String urlParam="";
try {
String id=getId(myFile.getAbsoluteRemotePath(), myFile.isForceCreation(), myFile.getGcubeMemoryType(), myFile.getWriteConcern(), myFile.getReadPreference());
String phrase=myFile.getPassPhrase();
// urlParam =new StringEncrypter("DES", phrase).encrypt(id);
urlParam = new Encrypter("DES", phrase).encrypt(id);
// String urlEncoded=URLEncoder.encode(urlParam, "UTF-8");
} catch (EncryptionException e) {
throw new RemoteBackendException(" Error in getUrl operation problem to encrypt the string", e.getCause());
}
logger.info("URL generated: "+urlBase+urlParam);
String smpUrl=urlBase+urlParam;
logger.info("URL generated: "+smpUrl);
smpUrl=smpUrl.replace("smp://", "https://");
URL httpsUrl=null;
try {
httpsUrl=translate(new URL(smpUrl));
} catch (IOException e) {
logger.error("smp to https URL translation failed", e);
}
logger.info("URL translated: "+httpsUrl);
if(myFile.getGcubeMemoryType().equals(MemoryType.VOLATILE)){
return httpsUrl.toString()+Costants.VOLATILE_URL_IDENTIFICATOR;
}
return httpsUrl.toString();
}
private String getId(String path, boolean forceCreation, MemoryType memoryType, String writeConcern, String readPreference){
String id=null;
if(tm ==null){
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(backendType, memoryType, dbNames, writeConcern, readPreference);
}
try {
id = tm.getId(bucket, forceCreation);
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in GetUrl operation. Problem to discover remote file:"+bucket+" "+ e.getMessage(), e.getCause()); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return id;
}
private URL translate(URL url) throws IOException {
logger.debug("translating: "+url);
String urlString=url.toString();
String baseUrl="https://"+url.getHost()+"/";
logger.debug("base Url extracted is: "+baseUrl);
// int index=urlString.lastIndexOf(".org/");
String params = urlString.substring(baseUrl.length());
logger.debug("get params: "+baseUrl+" "+params);
//encode params
params=Base64.encodeBase64URLSafeString(params.getBytes("UTF-8"));
// URLEncoder.encode(params, "UTF-8");
// merge string
urlString=baseUrl+params;
logger.info("uri translated in https url: "+urlString);
return new URL(urlString);
}
}

@ -0,0 +1,86 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class GetMetaFile extends Operation{
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(GetMetaFile.class);
public GetMetaFile(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
/**
* Only the following values will be returned:
* mimeType,
* owner,
* id,
* name,
* remotePath,
* size.
*
*/
public MyFile doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
long dim=0;
String id=null;
String mime=null;
try {
dim = tm.getSize(bucket);
id=tm.getId(bucket, false);
mime=tm.getFileProperty(bucket, "mimetype");
myFile.setOwner(tm.getFileProperty(bucket, "owner"));
if(tm.isValidId(bucket)){
myFile.setRemotePath(tm.getFileProperty(bucket, "filename"));
myFile.setAbsoluteRemotePath(tm.getFileProperty(bucket, "filename"));
myFile.setName(tm.getFileProperty(bucket, "name"));
}
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in GetMetaFile operation ", e.getCause()); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
myFile.setSize(dim);
myFile.setId(id);
myFile.setMimeType(mime);
return myFile;
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
// String[] dirs= remotePath.split(file_separator);
if(logger.isDebugEnabled())
logger.debug("remotePath: "+remotePath);
String buck=null;
boolean isId=ObjectId.isValid(remotePath);
if(!isId){
buck = new BucketCoding().bucketFileCoding(remotePath, rootArea);
return bucket=buck;
}else{
return bucket=remotePath;
}
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation");
}
}
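// --- Hedged usage sketch (not part of this commit) ---
// After doIt the passed MyFile carries size, id, mimeType and owner (plus name and
// remote path when the bucket key is a valid id). The getter names used here are
// assumptions mirroring the setters called above.
class GetMetaFileSketch {
static String describe(org.gcube.contentmanagement.blobstorage.resource.MyFile resource, String pathOrId, String[] servers, String user, String pwd, String rootArea, String[] dbs) {
GetMetaFile op = new GetMetaFile(servers, user, pwd, null, null, false, "mongo", dbs);
op.initOperation(resource, pathOrId, null, servers, rootArea, false);
org.gcube.contentmanagement.blobstorage.resource.MyFile meta = op.doIt(resource);
return meta.getId() + " " + meta.getSize() + " " + meta.getMimeType(); // assumed getters
}
}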

@ -0,0 +1,62 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class GetMetaInfo extends Operation {
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(GetMetaInfo.class);
public GetMetaInfo(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String value=null;
try {
value=tm.getFileProperty(bucket, myFile.getGenericPropertyField());
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in GetMetaFile operation ", e.getCause()); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return value;
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
// String[] dirs= remotePath.split(file_separator);
if(logger.isDebugEnabled())
logger.debug("remotePath: "+remotePath);
String buck=null;
boolean isId=ObjectId.isValid(remotePath);
if(!isId){
buck = new BucketCoding().bucketFileCoding(remotePath, rootArea);
return bucket=buck;
}else{
return bucket=remotePath;
}
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
throw new IllegalArgumentException("method not compatible with getMetaInfo operation");
}
}

@ -0,0 +1,70 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class GetRemotePath extends Operation{
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(GetRemotePath.class);
private String rootPath;
public GetRemotePath(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType,dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String path=null;
try {
path = tm.getRemotePath(bucket);
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in GetSize operation ", e.getCause()); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
logger.debug("\t path "+path+"\n\t rootPath: "+rootPath);
int rootLength=rootPath.length();
if((path.length() >= rootLength)){
path=path.substring(rootLength-1);
System.out.println("new relative path "+ path);
return path;
}else{
throw new RuntimeException("expected rootPath or expected relative path are malformed: rootPath: "+rootPath+ " relativePath: "+path);
}
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
rootPath=file.getRootPath();
logger.trace("rootArea is "+file.getRootPath()+ " absoluteremotepath is "+file.getAbsoluteRemotePath());
if(logger.isDebugEnabled())
logger.debug("remotePath: "+remotePath);
boolean isId=ObjectId.isValid(remotePath);
if(!isId){
throw new RuntimeException("the getRemotePath method have an invalid id"+ remotePath);
}else{
return bucket=remotePath;
}
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation");
}
}
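// --- Hedged worked example (illustrative values, not from this commit) ---
// doIt strips the root-area prefix from the stored path: with rootPath "/home/user/"
// (length 11) and a stored path "/home/user/dir/file.txt", substring(rootLength-1)
// returns "/dir/file.txt", keeping the leading separator.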

@ -0,0 +1,68 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implements a getSize operation on the remote system: returns the size of a remote file
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class GetSize extends Operation{
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(GetSize.class);
public GetSize(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
long dim=0;
try {
dim = tm.getSize(bucket);
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in GetSize operation ", e.getCause()); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return ""+dim;
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
if(logger.isDebugEnabled())
logger.debug("remotePath: "+remotePath);
String buck=null;
boolean isId=ObjectId.isValid(remotePath);
if(!isId){
buck = new BucketCoding().bucketFileCoding(remotePath, rootArea);
return bucket=buck;
}else{
return bucket=remotePath;
}
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation");
}
}
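// --- Hedged usage sketch (not part of this commit) ---
// GetSize accepts either a remote path or a valid ObjectId; initOperation encodes
// paths through BucketCoding and passes ids straight through. The null arguments and
// "mongo" label follow the same assumptions as the other sketches.
class GetSizeSketch {
static long size(org.gcube.contentmanagement.blobstorage.resource.MyFile resource, String pathOrId, String[] servers, String user, String pwd, String rootArea, String[] dbs) {
GetSize op = new GetSize(servers, user, pwd, null, null, false, "mongo", dbs);
op.initOperation(resource, pathOrId, null, servers, rootArea, false);
return Long.parseLong(op.doIt(resource));
}
}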

@ -0,0 +1,69 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.io.OutputStream;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implements a getTTL operation on a resource locked in the remote system: returns the remaining TTL
* @author Roberto Cirillo (ISTI - CNR)
*/
public class GetTTL extends Operation {
final Logger logger=LoggerFactory.getLogger(GetTTL.class);
private String localPath;
private String remotePath;
private OutputStream os;
public GetTTL(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
@Override
public String doIt(MyFile myFile) throws RemoteBackendException {
if (logger.isDebugEnabled()) {
logger.debug(" DOWNLOAD " + myFile.getRemotePath()
+ " in bucket: " + bucket);
}
long currentTTL=-1;
TransportManager tm=null;
try {
// add a field for the file lock
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
currentTTL=tm.getTTL(bucket);
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in getTTL operation ", e.getCause());
}
return currentTTL+"";
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea,
boolean replaceOption) {
this.localPath=file.getLocalPath();
this.remotePath=remotePath;
return getRemoteIdentifier(remotePath, rootArea);
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
// not used by this operation
return null;
}
}

@ -0,0 +1,76 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter.EncryptionException;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
public class GetUrl extends Operation{
// private OutputStream os;
TransportManager tm;
public GetUrl(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
@Override
public String initOperation(MyFile file, String remotePath, String author,
String[] server, String rootArea, boolean replaceOption) {
return getRemoteIdentifier(remotePath, rootArea);
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
// not used by this operation
return null;
}
@Override
public Object doIt(MyFile myFile) throws RemoteBackendException {
String resolverHost=myFile.getResolverHOst();
String urlBase="smp://"+resolverHost+Costants.URL_SEPARATOR;
String urlParam="";
try {
String id=getId(myFile.getAbsoluteRemotePath(), myFile.isForceCreation(), myFile.getGcubeMemoryType(), myFile.getWriteConcern(), myFile.getReadPreference());
String phrase=myFile.getPassPhrase();
// urlParam =new StringEncrypter("DES", phrase).encrypt(id);
urlParam = new Encrypter("DES", phrase).encrypt(id);
// String urlEncoded=URLEncoder.encode(urlParam, "UTF-8");
} catch (EncryptionException e) {
throw new RemoteBackendException(" Error in getUrl operation problem to encrypt the string", e.getCause());
}
String url=urlBase+urlParam;
logger.info("URL generated: "+url);
if(myFile.getGcubeMemoryType().equals(MemoryType.VOLATILE)){
return url.toString()+Costants.VOLATILE_URL_IDENTIFICATOR;
}
return url;
}
private String getId(String path, boolean forceCreation, MemoryType memoryType, String writeConcern, String readPreference){
String id=null;
if(tm ==null){
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(backendType, memoryType, dbNames, writeConcern, readPreference);
}
try {
id = tm.getId(bucket, forceCreation);
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in GetUrl operation. Problem to discover remote file:"+bucket+" "+ e.getMessage(), e.getCause()); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return id;
}
}

@ -0,0 +1,72 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class GetUserTotalItems extends Operation {
final Logger logger=LoggerFactory.getLogger(GetUserTotalItems.class);
public GetUserTotalItems(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String dim=null;
logger.info("check user total items for user: "+getOwner()+ " user is "+user);
try {
dim = tm.getUserTotalItems(getOwner());
} catch (Exception e) {
logger.error("getUserTotalItems failed", e);
tm.close();
throw new RemoteBackendException(" Error in getUserTotalItems operation ", e.getCause());
}
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket+" for user: "+getOwner());
}
return dim;
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
setOwner(author);
if((remotePath != null) && (remotePath.length() > 0)){
// String[] dirs= remotePath.split(file_separator);
if(logger.isDebugEnabled())
logger.debug("remotePath: "+remotePath);
String buck=null;
BucketCoding bc=new BucketCoding();
buck=bc.bucketFileCoding(remotePath, rootArea);
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
buck=buck.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR);
//remove directory bucket
DirectoryBucket dirBuc=new DirectoryBucket(server,user, password, remotePath, author);
dirBuc.removeKeysOnDirBucket(file, buck, rootArea, backendType, dbNames);
// String bucketName=null;
}
return bucket=buck;
}else{
logger.info("found empty remote path in input ");
return bucket;
}
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation");
}
}

@ -0,0 +1,71 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class GetUserTotalVolume extends Operation {
final Logger logger=LoggerFactory.getLogger(GetUserTotalVolume.class);
// public String file_separator = ServiceEngine.FILE_SEPARATOR;//System.getProperty("file.separator");
public GetUserTotalVolume(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String dim=null;
logger.info("check user total volume for user: "+getOwner()+ " user is "+user);
try {
dim = tm.getUserTotalVolume(getOwner());
} catch (Exception e) {
logger.error("getUserTotalVolume failed", e);
tm.close();
throw new RemoteBackendException(" Error in getUserTotalVolume operation ", e.getCause());
}
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return dim;
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
setOwner(author);
if(remotePath!= null && remotePath.length()>0){
// String[] dirs= remotePath.split(file_separator);
if(logger.isDebugEnabled())
logger.debug("remotePath: "+remotePath);
String buck=null;
BucketCoding bc=new BucketCoding();
buck=bc.bucketFileCoding(remotePath, rootArea);
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
buck=buck.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR);
//remove directory bucket
DirectoryBucket dirBuc=new DirectoryBucket(server,user, password, remotePath, author);
dirBuc.removeKeysOnDirBucket(file, buck, rootArea, backendType, dbNames);
}
return bucket=buck;
}else{
return bucket;
}
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation");
}
}

@ -0,0 +1,95 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.net.UnknownHostException;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class Link extends Operation{
/**
* Logger for this class
*/
// private static final GCUBELog logger = new GCUBELog(Download.class);
final Logger logger=LoggerFactory.getLogger(Link.class);
private String sourcePath;
private String destinationPath;
private MyFile resource;
public Link(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
this.sourcePath=file.getLocalPath();
this.destinationPath=remotePath;
sourcePath = new BucketCoding().bucketFileCoding(file.getLocalPath(), rootArea);
destinationPath = new BucketCoding().bucketFileCoding(remotePath, rootArea);
setResource(file);
return bucket=destinationPath;
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String id=null;
try {
id=tm.link(this);
} catch (UnknownHostException e) {
tm.close();
logger.error("Problem in link from: "+sourcePath+" to: "+destinationPath+": "+e.getMessage());
throw new RemoteBackendException(" Error in link operation ", e.getCause());
}
return id;
}
@Override
public String initOperation(MyFile resource, String remotePath,
String author, String[] server, String rootArea) {
// For Terrastore, the bucket name is formed as: path_____fileName_____author
// String bucketName=new BucketCoding().bucketFileCoding(remotePath, rootArea);
this.sourcePath=resource.getLocalPath();
this.destinationPath=resource.getRemotePath();
setResource(resource);
sourcePath = new BucketCoding().bucketFileCoding(resource.getLocalPath(), rootArea);
destinationPath = new BucketCoding().bucketFileCoding(resource.getRemotePath(), rootArea);
return bucket=destinationPath;
}
public abstract String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException;
public String getSourcePath() {
return sourcePath;
}
public void setSourcePath(String sourcePath) {
this.sourcePath = sourcePath;
}
public String getDestinationPath() {
return destinationPath;
}
public void setDestinationPath(String destinationPath) {
this.destinationPath = destinationPath;
}
public MyFile getResource() {
return resource;
}
public void setResource(MyFile resource) {
this.resource = resource;
}
}
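// --- Hedged skeleton (not part of this commit) ---
// Link is a template: doIt delegates to tm.link(this), and a backend binds the
// abstract execute(...) above. This minimal subclass only illustrates the shape; the
// real implementation lives in the transport backend.
class LinkSketch extends Link {
LinkSketch(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
@Override
public String execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager mongoPrimaryInstance, org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager mongoSecondaryInstance, org.gcube.contentmanagement.blobstorage.resource.MyFile resource, String sourcePath, String destinationPath) throws java.net.UnknownHostException {
// a backend-specific implementation would create the link and return the new object id
return null;
}
}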

@ -0,0 +1,124 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.io.OutputStream;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.operation.DownloadOperator;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implements a lock operation on a remote resource
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public abstract class Lock extends Operation {
final Logger logger=LoggerFactory.getLogger(Lock.class);
protected String localPath;
protected String remotePath;
protected OutputStream os;
protected MyFile resource;
protected Download download;
public Lock(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user,pwd, bucket, monitor, isChunk, backendType, dbs);
}
@Override
public String doIt(MyFile myFile) throws RemoteBackendException {
if (logger.isDebugEnabled()) {
logger.debug(" DOWNLOAD " + myFile.getRemotePath()
+ " in bucket: " + getBucket());
}
String unlockKey=null;
try {
// add a field for the file lock
Download download = new DownloadOperator(getServer(), getUser(), getPassword(), getBucket(), getMonitor(), isChunk(), getBackendType(), getDbNames());
unlockKey=get(download, myFile, true);
} catch (Exception e) {
TransportManagerFactory tmf=new TransportManagerFactory(getServer(), getUser(), getPassword());
TransportManager tm=tmf.getTransport(getBackendType(), myFile.getGcubeMemoryType(), getDbNames(), myFile.getWriteConcern(), myFile.getReadPreference());
tm.close();
throw new RemoteBackendException(" Error in lock operation ", e.getCause());
}
return unlockKey;
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea,
boolean replaceOption) {
String bucketName=null;
setResource(file);
// create the directory bucket
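// heuristic: a Mongo ObjectId is a 24-character hex string, so a shorter value, or one containing a file separator, is treated as a path to be encoded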
if((remotePath.length()<23) || (remotePath.contains(Costants.FILE_SEPARATOR))){
this.localPath=file.getLocalPath();
this.remotePath=remotePath;
bucketName = new BucketCoding().bucketFileCoding(remotePath, rootArea);
}else{
bucketName=remotePath;
}
setBucket(bucketName);
return bucketName;
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
// not used by this operation
return null;
}
public abstract String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String serverLocation) throws Exception;
public String getLocalPath() {
return localPath;
}
public void setLocalPath(String localPath) {
this.localPath = localPath;
}
public String getRemotePath() {
return remotePath;
}
public void setRemotePath(String remotePath) {
this.remotePath = remotePath;
}
public OutputStream getOs() {
return os;
}
public void setOs(OutputStream os) {
this.os = os;
}
public MyFile getResource() {
return resource;
}
public void setResource(MyFile resource) {
this.resource = resource;
}
public Download getDownload() {
return download;
}
public void setDownload(Download download) {
this.download = download;
}
}

@ -0,0 +1,88 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
//import org.apache.log4j.Logger;
//import org.gcube.common.core.utils.logging.GCUBELog;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Vector;
/**
* A monitor class for the concurrent operations
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class Monitor {
/**
* Logger for this class
*/
// private static final GCUBELog logger = new GCUBELog(Monitor.class);
final Logger logger=LoggerFactory.getLogger(Monitor.class);
// request queue
private Vector<MyFile> requestQueue = new Vector<MyFile>();
// fetch the first request in the queue
public synchronized MyFile getRequest(){
if (logger.isDebugEnabled()) {
logger.debug("getRequest() - start");
}
while (requestQueue.size() == 0){
try {
wait(10000);
}
catch (InterruptedException e){
logger.error("getRequest()", e);
}
}
MyFile myFile=requestQueue.remove(0);
notifyAll();
if (logger.isDebugEnabled()) {
logger.debug("getRequest() - end");
}
return myFile;
}
public synchronized MyFile getRequest(ChunkProducer producer){
if (logger.isDebugEnabled()) {
logger.debug("getRequest(ChunkProducer) - start");
}
while (requestQueue.size() == 0){
try {
wait();
}
catch (InterruptedException e){
logger.error("getRequest(ChunkProducer)", e);
}
}
MyFile myFile=requestQueue.remove(0);
notifyAll();
if (logger.isDebugEnabled()) {
logger.debug("getRequest(ChunkProducer) - end");
}
return myFile;
}
// enqueue a new request
public synchronized void putRequest(MyFile richiesta){
if (logger.isDebugEnabled()) {
logger.debug("putRequest(MyFile) - start");
logger.debug("request in queue, queue size: "+requestQueue.size());
}
while (requestQueue.size() > Costants.MAX_THREAD){
try {
wait();
}
catch (InterruptedException e){
logger.error("putRequest(MyFile)", e);
}
}
requestQueue.addElement(richiesta);
notifyAll();
if (logger.isDebugEnabled()) {
logger.debug("putRequest(MyFile) - end");
}
}
}
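// --- Hedged usage sketch (not part of this commit) ---
// Monitor is a classic bounded hand-off built on wait/notifyAll: producers block in
// putRequest once more than Costants.MAX_THREAD requests are pending, and consumers
// block in getRequest until a request arrives (re-checking every 10 seconds).
class MonitorSketch {
static void demo(final Monitor monitor, final org.gcube.contentmanagement.blobstorage.resource.MyFile request) {
Thread consumer = new Thread(new Runnable() {
public void run() {
org.gcube.contentmanagement.blobstorage.resource.MyFile next = monitor.getRequest(); // blocks until available
System.out.println("got request: " + next.getKey());
}
});
consumer.start();
monitor.putRequest(request); // hands the request to the waiting consumer
}
}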

@ -0,0 +1,96 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.io.OutputStream;
import java.net.UnknownHostException;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class Move extends Operation{
/**
* Logger for this class
*/
// private static final GCUBELog logger = new GCUBELog(Download.class);
final Logger logger=LoggerFactory.getLogger(Move.class);
protected String sourcePath;
protected String destinationPath;
protected MyFile resource;
public Move(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
this.sourcePath=file.getLocalPath();
this.destinationPath=remotePath;
sourcePath = new BucketCoding().bucketFileCoding(file.getLocalPath(), rootArea);
destinationPath = new BucketCoding().bucketFileCoding(remotePath, rootArea);
setResource(file);
return bucket=destinationPath;
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String id=null;
try {
// id=tm.move(myFile, sourcePath, destinationPath);
id=tm.move(this);
} catch (UnknownHostException e) {
tm.close();
logger.error("Problem in move from: "+sourcePath+" to: "+destinationPath+": "+e.getMessage());
throw new RemoteBackendException(" Error in move operation ", e.getCause());
}
return id;
}
@Override
public String initOperation(MyFile resource, String remotePath,
String author, String[] server, String rootArea) {
this.sourcePath=resource.getLocalPath();
this.destinationPath=resource.getRemotePath();
sourcePath = new BucketCoding().bucketFileCoding(resource.getLocalPath(), rootArea);
destinationPath = new BucketCoding().bucketFileCoding(resource.getRemotePath(), rootArea);
return bucket=destinationPath;
}
public abstract String execute(MongoIOManager mongoPrimaryInstance, MemoryType memoryType, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException;
public String getSourcePath() {
return sourcePath;
}
public void setSourcePath(String sourcePath) {
this.sourcePath = sourcePath;
}
public String getDestinationPath() {
return destinationPath;
}
public void setDestinationPath(String destinationPath) {
this.destinationPath = destinationPath;
}
public MyFile getResource() {
return resource;
}
public void setResource(MyFile resource) {
this.resource = resource;
}
}

@ -0,0 +1,95 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.net.UnknownHostException;
import java.util.List;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public abstract class MoveDir extends Operation{
/**
* Logger for this class
*/
// private static final GCUBELog logger = new GCUBELog(Download.class);
final Logger logger=LoggerFactory.getLogger(MoveDir.class);
private String sourcePath;
private String destinationPath;
private MyFile resource;
// private OutputStream os;
public MoveDir(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
this.sourcePath=file.getLocalPath();
this.destinationPath=remotePath;
sourcePath = new BucketCoding().bucketFileCoding(file.getLocalPath(), rootArea);
destinationPath = new BucketCoding().bucketFileCoding(remotePath, rootArea);
setResource(file);
return bucket=destinationPath;
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
List<String>ids=null;
try {
ids=tm.moveDir(this);
} catch (UnknownHostException e) {
tm.close();
logger.error("Problem in moveDir from: "+sourcePath+" to: "+destinationPath+": "+e.getMessage());
throw new RemoteBackendException(" Error in moveDir operation ", e.getCause());
}
return ids.toString();
}
@Override
public String initOperation(MyFile resource, String remotePath,
String author, String[] server, String rootArea) {
this.sourcePath=resource.getLocalPath();
this.destinationPath=resource.getRemotePath();
sourcePath = new BucketCoding().bucketFileCoding(resource.getLocalPath(), rootArea);
destinationPath = new BucketCoding().bucketFileCoding(resource.getRemotePath(), rootArea);
setResource(resource);
return bucket=destinationPath;
}
public abstract List<String> execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath, String destinationPath, MemoryType memoryType) throws UnknownHostException;
public String getSourcePath() {
return sourcePath;
}
public void setSourcePath(String sourcePath) {
this.sourcePath = sourcePath;
}
public String getDestinationPath() {
return destinationPath;
}
public void setDestinationPath(String destinationPath) {
this.destinationPath = destinationPath;
}
public MyFile getResource() {
return resource;
}
public void setResource(MyFile resource) {
this.resource = resource;
}
}

@ -0,0 +1,385 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
/**
* Defines utility functions shared by the operation subclasses
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public abstract class Operation {
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(Operation.class);
String[] server;
String user;
private String owner;
String password;
String bucket;
String[] dbNames;
private Monitor monitor;
private boolean isChunk;
String backendType;
public Operation(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs){
this.server=server;
this.user=user;
this.password=pwd;
this.bucket=bucket;
this.monitor=monitor;
this.isChunk=isChunk;
this.backendType=backendType;
this.dbNames=dbs;
}
protected int numOfThread(int totChunks) {
if((totChunks> Costants.MIN_THREAD) &&(totChunks < Costants.MAX_THREAD)){
int returnint = totChunks - 1;
return returnint;
}else if(totChunks > Costants.MAX_THREAD){
return Costants.MAX_THREAD;
}else{
return 1;
}
}
protected int getLengthCurrentChunk(long len, int i, int dimChunk) {
int lengthCurrentChunk=0;
if(((i+1)*dimChunk) <= len){
lengthCurrentChunk=dimChunk;
}else{
lengthCurrentChunk=(int) (len - (i*dimChunk));
}
return lengthCurrentChunk;
}
protected int getNumberOfChunks(long len, long dimChunk) {
if(len< dimChunk)
return 1;
else if((len%dimChunk)>0){
long returnint = (len / dimChunk) + 1;
return (int)returnint;
}else{
long returnint = (len / dimChunk);
return (int)returnint;
}
}
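// Worked example (illustrative values, not from this commit): with len = 10 MB and
// dimChunk = 4 MB, getNumberOfChunks returns 3 (10/4 = 2 plus one for the remainder);
// getLengthCurrentChunk then yields 4 MB for i=0 and i=1 and 2 MB for i=2; and,
// assuming Costants.MIN_THREAD < 3 < Costants.MAX_THREAD, numOfThread(3) returns 2.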
/**
* Upload operation
* @param resource object that contains the resource coordinates
* @param isChunk true if the file is split into chunks
* @param isBase64 true if the content is base64-encoded
* @param replaceOption true if an existing file should be replaced
* @param isLock true if the file is locked
* @return a String that identifies a file
* @throws Exception
*/
public String put(Upload upload, MyFile resource, boolean isChunk, boolean isBase64, boolean replaceOption, boolean isLock) throws Exception{
if (logger.isDebugEnabled()) {
logger.debug("put(MyFile, boolean, boolean) - start");
}
long len=1;
if(resource.getLocalPath()!=null)
len=new File(resource.getLocalPath()).length();
if(logger.isDebugEnabled()){
logger.debug("file size: "+len);
}
long dimensionChunk=0;
if(logger.isDebugEnabled())
logger.debug("PUT is chukn? "+isChunk);
if(isChunk){
ChunkOptimization chunkOptimization=new ChunkOptimization(len);
dimensionChunk=chunkOptimization.chunkCalculation();
}else{
if(len==0){
dimensionChunk=1;
len=1;
}else{
dimensionChunk=len;
}
}
if (logger.isDebugEnabled()) {
logger.debug("put(MyFile, boolean, boolean) - encode length: "
+ len);
}
// number of chunks calculation
int totChunks=1;
if(logger.isDebugEnabled())
logger.debug("len File: "+len+" len chunk: "+dimensionChunk);
totChunks=getNumberOfChunks(len, dimensionChunk);
if (logger.isDebugEnabled()) {
logger.debug("put(MyFile, boolean, boolean) - number of chunks: "
+ totChunks);
}
int nThreads=1;
if(totChunks>1){
nThreads=numOfThread(totChunks);
}
if (logger.isDebugEnabled()) {
logger.debug("put(MyFile, boolean, boolean) - number of thread: "
+ nThreads);
}
if(logger.isDebugEnabled())
logger.debug("consumer have a bucket name: "+bucket);
if(totChunks>1){
if(logger.isDebugEnabled())
logger.debug("THREAD POOL USED");
ChunkConsumer consumer= new ChunkConsumer(monitor, 1, server, user, password, dbNames, isChunk, bucket, replaceOption);
Thread producer=new Thread(new ChunkProducer(monitor, resource, dimensionChunk, totChunks, nThreads, bucket, consumer));
producer.start();
if (logger.isDebugEnabled()) {
logger.debug("put(MyFile, boolean, boolean) - end");
}
producer.join();
return null;
}else{
if(logger.isDebugEnabled())
logger.debug("NO THREAD POOL USED");
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, resource.getGcubeMemoryType(), dbNames, resource.getWriteConcern(), resource.getReadPreference());
String objectId=tm.uploadManager(upload, resource, bucket, bucket+"_1", replaceOption);
return objectId;
}
}
/**
 * Download operation
 * @param download the download operator to execute
 * @param myFile object that contains the resource coordinates
 * @param isLock true if the remote file must be locked after the download
 * @return the key returned by the backend (the unlock key when a lock is requested)
 * @throws IOException
 * @throws InterruptedException
*/
public String get(Download download, MyFile myFile, boolean isLock) throws IOException, InterruptedException, Exception {
if (logger.isDebugEnabled()) {
logger.debug("get(String) - start");
}
String unlocKey=null;
TransportManagerFactory tmf=null;
// if(server.length >1)
tmf=new TransportManagerFactory(server, user, password);
// else
// tmf=new TransportManagerFactory(server, null, null);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
long start=System.currentTimeMillis();
String path=myFile.getLocalPath();
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
startPThreadChunk(download, myFile, tm, path);
}else{
unlocKey=tm.downloadManager(download, myFile, bucket, MyFile.class);
}
if((path!=null) && (new File(path).length()>0)){
if (logger.isDebugEnabled()) {
logger.debug("*** Time for downloading: "
+ (System.currentTimeMillis() - start) + " ms "+"\n\n");
}
}
return unlocKey;
}
/**
 * Downloads the file chunk by chunk using a small thread pool and writes it to path
 * @param download the download operator to execute
 * @param myFile object that contains the resource coordinates
 * @param tm the transport manager connected to the backend
 * @param path local destination path
* @throws FileNotFoundException
* @throws InterruptedException
* @throws IOException
*/
protected void startPThreadChunk(Download download,MyFile myFile, TransportManager tm,
String path) throws FileNotFoundException, InterruptedException,
IOException {
ExecutorService executor = Executors.newFixedThreadPool (2);
int j=0;
MyFile value=null;
if(logger.isInfoEnabled())
logger.info("localPath: "+path+" bucket: "+bucket);
OutputStream out =null;
if((path !=null) && (!path.isEmpty()))
out = new FileOutputStream(new File(path));
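// fetch chunks one at a time until the backend returns no more data;
// each chunk is queued on the monitor and written out by a FileWriter task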
do{
value=null;
// String currentKey=bucket+j;
if (logger.isDebugEnabled()) {
logger.debug("get(String) -");
}
try{
value=(MyFile) tm.get(download);
}catch(Exception e){
if (logger.isDebugEnabled()) {
logger.debug("get(String) - \n Trovate " + (j) + " key");
}
value=null;
}
if(value!=null){
if (logger.isDebugEnabled()) {
logger.debug("get(String) - write chunk , author: "
+ value.getOwner());
}
monitor.putRequest(value);
System.gc();
executor.submit (new FileWriter(monitor, out));
}
j++;
}while(value!=null);
executor.shutdown ();
executor.awaitTermination (Long.MAX_VALUE, TimeUnit.SECONDS);
if(out!=null){
out.flush();
out.close();
}
}
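// Maps a remote path to its bucket name; if remotePath is already a valid
// MongoDB ObjectId (a 24-character hex string) it is used as-is.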
protected String getRemoteIdentifier(String remotePath, String rootArea) {
String buck=null;
boolean isId=ObjectId.isValid(remotePath);
if(!isId){
buck = new BucketCoding().bucketFileCoding(remotePath, rootArea);
return bucket=buck;
}else{
return bucket=remotePath;
}
}
protected String appendFileSeparator(String source) {
if(source.lastIndexOf(Costants.FILE_SEPARATOR) != (source.length()-1))
source=source+Costants.FILE_SEPARATOR;
return source;
}
protected String extractParent(String source) {
source=source.substring(0, source.length()-1);
String parent=source.substring(source.lastIndexOf(Costants.FILE_SEPARATOR)+1);
logger.debug("parent folder extracted: "+parent);
return parent;
}
/**
 * Executes the operation
 * @param myFile object that contains the resource coordinates
 * @return a generic object that contains the operation results
 * @throws RemoteBackendException
*/
public abstract Object doIt(MyFile myFile) throws RemoteBackendException;
/**
 * Initializes the operation
* @param file object that contains the resource coordinates
* @param remoteIdentifier remote path of the resource
* @param author file owner
* @param server server list
* @param rootArea remote root path
* @param replaceOption if true the file will be replaced
* @return a string that identifies the operation
*/
public abstract String initOperation(MyFile file, String remoteIdentifier, String author, String[] server, String rootArea, boolean replaceOption);
/**
 * Initializes the operation
* @param resource object that contains the resource coordinates
* @param remoteIdentifier remote path of the resource
* @param author file owner
* @param server server list
* @param rootArea remote root path
* @return a string that identifies the operation
*/
public abstract String initOperation(MyFile resource, String remoteIdentifier, String author, String[] server, String rootArea);
public String getOwner() {
return owner;
}
public void setOwner(String owner) {
this.owner = owner;
}
public String getPassword() {
return password;
}
public void setPassword(String password) {
this.password = password;
}
public String getBucket() {
return bucket;
}
public void setBucket(String bucket) {
this.bucket = bucket;
}
public String[] getDbNames() {
return dbNames;
}
public void setDbNames(String[] dbNames) {
this.dbNames = dbNames;
}
public Monitor getMonitor() {
return monitor;
}
public void setMonitor(Monitor monitor) {
this.monitor = monitor;
}
public boolean isChunk() {
return isChunk;
}
public void setChunk(boolean isChunk) {
this.isChunk = isChunk;
}
public String getBackendType() {
return backendType;
}
public void setBackendType(String backendType) {
this.backendType = backendType;
}
public String[] getServer() {
return server;
}
public void setServer(String[] server) {
this.server = server;
}
public String getUser() {
return user;
}
public void setUser(String user) {
this.user = user;
}
}

@ -0,0 +1,114 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.transport.backend.operation.*;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
//import terrastore.client.TerrastoreClient;
/**
 * Factory that maps an operation name to the corresponding Operation implementation
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class OperationFactory {
/**
* Logger for this class
*/
// private static final GCUBELog logger = new GCUBELog(OperationFactory.class);
final Logger logger=LoggerFactory.getLogger(OperationFactory.class);
// TerrastoreClient client;
String[] server;
String bucket;
String user;
String password;
String[] dbNames;
Monitor monitor;
boolean isChunk;
private String backendType;
public OperationFactory(String server[], String user, String pwd, String bucket, Monitor monitor2, boolean isChunk, String backendType, String[] dbs){
this.server=server;
this.user=user;
this.password=pwd;
this.bucket=bucket;
this.monitor=monitor2;
this.isChunk=isChunk;
this.backendType=backendType;
this.dbNames=dbs;
}
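// Typical use (hypothetical values), mirroring OperationManager.startOperation:
//   OperationFactory factory = new OperationFactory(server, user, pwd, bucket, monitor, false, "MongoDB", dbs);
//   Operation op = factory.getOperation("upload");
//   op.initOperation(file, remotePath, author, server, rootArea, true);
//   Object result = op.doIt(file);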
public Operation getOperation(String operation){
if (logger.isInfoEnabled()) {
logger.info("getOperation(String) - start "+operation);
}
Operation op=null;
if(operation.equalsIgnoreCase("upload")){
op=new UploadOperator(server, user, password, bucket , monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("download")){
op= new DownloadOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("remove")){
op=new Remove(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getSize")){
op=new GetSize(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("duplicate")){
op=new DuplicateOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("softcopy")){
op=new SoftCopyOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getFolderSize")){
op=new GetFolderSize(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getFolderCount")){
op=new GetFolderCount(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getFolderLastUpdate")){
op=new GetFolderLastUpdate(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getTotalUserItems")){
op=new GetUserTotalItems(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getTotalUserVolume")){
op=new GetUserTotalVolume(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("download+lock")){
op=new DownloadAndLock(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("upload+unlock")){
op=new UploadAndUnlock(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("lock")){
op=new LockOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("unlock")){
op=new UnlockOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getTTL")){
op=new GetTTL(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("renewTTL")){
op=new RenewTTL(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getUrl")){
op=new GetUrl(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getHttpUrl")){
op=new GetHttpUrl(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getHttpsUrl")){
op=new GetHttpsUrl(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("link")){
op=new LinkOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("copy")){
op=new SoftCopyOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("move")){
op=new MoveOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("copy_dir")){
op=new CopyDirOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("move_dir")){
op=new MoveDirOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getMetaFile")){
op=new GetMetaFile(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getMetaInfo")){
op=new GetMetaInfo(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("setMetaInfo")){
op=new SetMetaInfo(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("exist")){
op=new Exist(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getRemotePath")){
op=new GetRemotePath(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else{
logger.error("getOperation(String) - Invalid Operation");
}
if (logger.isDebugEnabled()) {
logger.debug("getOperation(String) - end");
}
return op;
}
}

@ -0,0 +1,148 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
//import terrastore.client.TerrastoreClient;
/**
 * This is the manager of the operations on file-objects.
 * The number of upload threads and the chunk threshold are determined in this class
* (TODO) build and send accounting report
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class OperationManager {
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(OperationManager.class);
private String[] server;
// private int dimension;
private String operation;
private MyFile resource;
private boolean isChunk;
private String bucketName;
private String fileDest;
private String backendType;
private boolean isBase64;
private String user;
private String password;
private String[] dbNames;
public OperationManager(String[] server, String user, String password, String operation, MyFile myFile, String backendType, String[] dbs){
this.setServer(server);
this.setUser(user);
this.setPassword(password);
this.setTypeOperation(operation);
this.setResource(myFile);
this.setDbNames(dbs);
this.backendType=backendType;
}
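/**
 * Resolves the requested operation through the OperationFactory, initializes it
 * with the remote coordinates and executes it
 * @param file object that contains the resource coordinates
 * @param remotePath remote path or objectId of the resource
 * @param author file owner
 * @param server server list
 * @param chunkOpt if true the transfer may be split into chunks
 * @param rootArea remote root path
 * @param replaceOption if true the remote file will be replaced
 * @return a generic object that contains the operation results
 * @throws RemoteBackendException
 */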
public Object startOperation(MyFile file, String remotePath, String author, String[] server, boolean chunkOpt, String rootArea, boolean replaceOption) throws RemoteBackendException{
// setUser(author);
if (logger.isDebugEnabled()) {
logger.debug("connection(boolean) - start");
}
logger.info("startOpertion getResource..getGcubeAccessType()= "+getResource().getGcubeAccessType()+" file..getGcubeAccessType() "+file.getGcubeAccessType());
// creo il monitor
Monitor monitor = new Monitor();
OperationFactory of=new OperationFactory(server, getUser(), getPassword(), getBucketName(), monitor, chunkOpt, getBackendType(), getDbNames());
Operation op=of.getOperation(getTypeOperation());
//start specific operation
setBucketName(op.initOperation(file, remotePath, author, server, rootArea, replaceOption));
Object object=op.doIt(getResource());
return object;
}
private String getBackendType() {
return backendType;
}
public String getBucketName() {
return bucketName;
}
public void setBucketName(String bucketName) {
this.bucketName = bucketName;
}
public String getFileDest() {
return fileDest;
}
public void setFileDest(String fileDest) {
this.fileDest = fileDest;
}
public boolean isChunk() {
return isChunk;
}
public void setChunk(boolean isChunk) {
this.isChunk = isChunk;
}
public String[] getServer() {
return server;
}
public void setServer(String[] server) {
this.server = server;
}
public String getUser() {
return user;
}
public String getPassword() {
return password;
}
public void setUser(String user) {
this.user = user;
}
public void setPassword(String pwd) {
this.password = pwd;
}
public String getTypeOperation() {
return operation;
}
public void setTypeOperation(String operation) {
this.operation = operation;
}
public MyFile getResource() {
return resource;
}
public void setResource(MyFile resource) {
this.resource = resource;
}
public boolean isBase64() {
return isBase64;
}
public void setBase64(boolean isBase64) {
this.isBase64 = isBase64;
}
public String[] getDbNames() {
return dbNames;
}
public void setDbNames(String[] dbNames) {
this.dbNames = dbNames;
}
}

@ -0,0 +1,88 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Implements a remove operation on the cluster: removes a file object
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class Remove extends Operation{
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(Remove.class);
public Remove(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server,user,pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
removeBucket(tm, bucket, myFile);
if (logger.isDebugEnabled()) {
logger.debug(" REMOVE " + bucket);
}
return "removed";
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
String[] dirs= remotePath.split(Costants.FILE_SEPARATOR);
if(logger.isDebugEnabled())
logger.debug("remotePath: "+remotePath);
String buck=null;
// in this case the remote path is really a remote path and not an objectId
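// a MongoDB ObjectId is a 24-character hex string, so multiple path tokens
// or a single token shorter than that cannot be an id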
if((dirs != null) && ((dirs.length >1) || ((dirs.length==1) && (dirs[0].length()<23)))){
BucketCoding bc=new BucketCoding();
buck=bc.bucketFileCoding(remotePath, rootArea);
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
buck=buck.replaceAll(Costants.FILE_SEPARATOR, Costants.SEPARATOR);
//remove directory bucket
}
}else{
// is an object id
buck=remotePath;
}
// bucketName=new BucketCoding().bucketFileCoding(remotePath, author, rootArea);
return bucket=buck;
}
/**
 * Removes the remote directory identified by bucketName
 * @param tm the transport manager connected to the backend
 * @param bucketName indicates the remote directory to remove
 * @param resource object that contains the resource coordinates
* @throws RemoteBackendException
*/
public void removeBucket(TransportManager tm, String bucketName, MyFile resource) throws RemoteBackendException {
if(logger.isDebugEnabled())
logger.debug("removing file bucket: "+bucketName);
try {
tm.removeRemoteFile(bucketName, resource);
} catch (Exception e) {
tm.close();
logger.error("Problem in remove: "+bucket+": "+e.getMessage());
throw new RemoteBackendException(" Error in remove operation ", e.getCause());
}
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
throw new IllegalArgumentException("Input/Output stream is not compatible with remove operation");
}
}

@ -0,0 +1,64 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.io.OutputStream;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Implements a Renew TTL operation for a locked remote resource
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class RenewTTL extends Operation {
final Logger logger=LoggerFactory.getLogger(RenewTTL.class);
private String localPath;
private String remotePath;
private OutputStream os;
public RenewTTL (String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
@Override
public String doIt(MyFile myFile) throws RemoteBackendException {
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
long ttl=-1;
try {
myFile.setRemotePath(bucket);
ttl = tm.renewTTL(myFile);
} catch (Throwable e) {
tm.close();
throw new RemoteBackendException(" Error in renew TTL operation ", e.getCause());
}
return ttl+"";
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea,
boolean replaceOption) {
this.localPath=file.getLocalPath();
this.remotePath=remotePath;
String bucketName = new BucketCoding().bucketFileCoding(remotePath, rootArea);
return bucket=bucketName;
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
// TODO Auto-generated method stub
return null;
}
}

@ -0,0 +1,61 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class SetMetaInfo extends Operation {
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(SetMetaInfo.class);
public SetMetaInfo(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
try {
tm.setFileProperty(bucket, myFile.getGenericPropertyField(), myFile.getGenericPropertyValue());
} catch (Exception e) {
tm.close();
logger.error("Problem in setFileProperty on: "+bucket+": "+e.getMessage());
throw new RemoteBackendException(" Error in SetMetaInfo operation ", e.getCause());
}
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return "1";
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea, boolean replaceOption) {
if(logger.isDebugEnabled())
logger.debug("remotePath: "+remotePath);
String buck=null;
boolean isId=ObjectId.isValid(remotePath);
if(!isId){
buck = new BucketCoding().bucketFileCoding(remotePath, rootArea);
return bucket=buck;
}else{
return bucket=remotePath;
}
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
throw new IllegalArgumentException("Input/Output stream is not compatible with getSize operation");
}
}

@ -0,0 +1,130 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.net.UnknownHostException;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public abstract class SoftCopy extends Operation {
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(SoftCopy.class);
private String sourcePath;
private String destinationPath;
private MyFile resource;
public SoftCopy(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String initOperation(MyFile file, String remotePath, String author, String[] server, String rootArea, boolean replaceOption) {
// if(remotePath != null){
// boolean isId=ObjectId.isValid(remotePath);
// setResource(file);
// if(!isId){
//// String[] dirs= remotePath.split(file_separator);
// if(logger.isDebugEnabled())
// logger.debug("remotePath: "+remotePath);
// String buck=null;
// buck = new BucketCoding().bucketFileCoding(remotePath, rootArea);
// return bucket=buck;
// }else{
// return bucket=remotePath;
// }
// }return bucket=null;//else throw new RemoteBackendException("argument cannot be null");
this.sourcePath=file.getLocalPath();
this.destinationPath=remotePath;
sourcePath = new BucketCoding().bucketFileCoding(file.getLocalPath(), rootArea);
destinationPath = new BucketCoding().bucketFileCoding(remotePath, rootArea);
setResource(file);
return bucket=destinationPath;
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String id=null;
try {
id=tm.softCopy(this);
} catch (UnknownHostException e) {
tm.close();
logger.error("Problem in copy from: "+sourcePath+" to: "+destinationPath+": "+e.getMessage());
throw new RemoteBackendException(" Error in copy operation ", e.getCause());
}
return id;
}
@Override
public String initOperation(MyFile resource, String remotePath, String author, String[] server, String rootArea) {
// For terrastore, the name of bucket is formed: path_____fileName_____author
this.sourcePath=resource.getLocalPath();
this.destinationPath=resource.getRemotePath();
sourcePath = new BucketCoding().bucketFileCoding(resource.getLocalPath(), rootArea);
destinationPath = new BucketCoding().bucketFileCoding(resource.getRemotePath(), rootArea);
setResource(resource);
return bucket=destinationPath;
}
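/**
 * Backend-specific soft copy executed by the concrete operator
 * @param mongoPrimaryInstance primary backend connection
 * @param resource object that contains the resource coordinates
 * @param sourcePath coded path of the source resource
 * @param destinationPath coded path of the destination resource
 * @return id of the new resource
 * @throws UnknownHostException
 */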
public abstract String execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException;
public String getSourcePath() {
return sourcePath;
}
public void setSourcePath(String sourcePath) {
this.sourcePath = sourcePath;
}
public String getDestinationPath() {
return destinationPath;
}
public void setDestinationPath(String destinationPath) {
this.destinationPath = destinationPath;
}
public MyFile getResource() {
return resource;
}
public void setResource(MyFile resource) {
this.resource = resource;
}
}

@ -0,0 +1,129 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.io.OutputStream;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.operation.UploadOperator;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
/**
* Implements the unlock operation for a locked remote resource
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public abstract class Unlock extends Operation {
private String keyUnlock;
protected String localPath;
protected String remotePath;
protected OutputStream os;
protected MyFile resource;
protected Upload upload;
public Unlock(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
@Override
public String doIt(MyFile myFile) throws RemoteBackendException {
if (logger.isDebugEnabled()) {
logger.debug(" UPLOAD " + myFile.getLocalPath()
+ " author: " + myFile.getOwner());
}
String objectId=null;
try {
Upload upload= new UploadOperator(getServer(), getUser(), getPassword(), getBucket(), getMonitor(), isChunk(), getBackendType(), getDbNames());
// TODO: add a parameter for the lock
objectId=put(upload, myFile, isChunk(), false, false, true);
} catch (Exception e) {
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
tm.close();
throw new RemoteBackendException(" Error in unlock operation ", e.getCause());
}
return objectId;
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea,
boolean replaceOption) {
String bucketName=null;
// create the directory bucket
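// an ObjectId is a 24-character hex string: anything shorter, or containing
// a separator, is treated as a remote path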
if((remotePath.length()<23) || (remotePath.contains(Costants.FILE_SEPARATOR))){
// the name of bucket is formed: path_____fileName_____author
bucketName=new BucketCoding().bucketFileCoding(remotePath, rootArea);
}else{
//is an ObjectId
bucketName=remotePath;
}
return bucket=bucketName;
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
// TODO Auto-generated method stub
return null;
}
public abstract String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String bucket, String key4unlock) throws Exception;
public String getLocalPath() {
return localPath;
}
public void setLocalPath(String localPath) {
this.localPath = localPath;
}
public String getRemotePath() {
return remotePath;
}
public void setRemotePath(String remotePath) {
this.remotePath = remotePath;
}
public OutputStream getOs() {
return os;
}
public void setOs(OutputStream os) {
this.os = os;
}
public MyFile getResource() {
return resource;
}
public void setResource(MyFile resource) {
this.resource = resource;
}
public Upload getUpload() {
return upload;
}
public void setUpload(Upload upload) {
this.upload = upload;
}
public String getKeyUnlock() {
return keyUnlock;
}
public void setKeyUnlock(String keyUnlock) {
this.keyUnlock = keyUnlock;
}
}

@ -0,0 +1,165 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Implements an upload operation to the cluster: uploads a file object
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public abstract class Upload extends Operation {
/**
* Logger for this class
*/
// private static final GCUBELog logger = new GCUBELog(Upload.class);
final Logger logger=LoggerFactory.getLogger(Upload.class);
protected InputStream is;
private boolean replaceOption;
protected String localPath;
protected String remotePath;
protected OutputStream os;
protected MyFile resource;
public Upload(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String bck, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, bck, dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
if (logger.isDebugEnabled()) {
logger.debug(" UPLOAD " + myFile.getLocalPath()
+ " author: " + myFile.getOwner());
}
String objectId=null;
try {
objectId=put(this, myFile, isChunk(), false, replaceOption, false);
} catch (Throwable e) {
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
tm.close();
logger.error("Problem in upload from: "+myFile.getLocalPath()+": "+e.getMessage());
throw new RemoteBackendException(" Error in upload operation ", e.getCause());
}
return objectId;
}
@Override
public String initOperation(MyFile file, String remotePath, String author, String[] server, String rootArea, boolean replaceOption) {
// set replace option
this.replaceOption=replaceOption;
setResource(file);
//patch id: check if remotePath is not an id
if(remotePath.contains(Costants.FILE_SEPARATOR)){
// the name of bucket is formed: path_____fileName_____author
String bucketName=new BucketCoding().bucketFileCoding(remotePath, rootArea);
return bucket=bucketName;
}else{
return bucket=remotePath;
}
}
@Override
public String initOperation(MyFile resource, String remotePath,
String author, String[] server, String rootArea) {
// the name of bucket is formed: path_____fileName_____author
String bucketName=new BucketCoding().bucketFileCoding(remotePath, rootArea);
setResource(resource);
this.is=resource.getInputStream();
return bucket=bucketName;
}
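/**
 * Backend-specific upload executed by the concrete operator
 * @param mongoPrimaryInstance primary backend connection
 * @param mongoSecondaryInstance secondary backend connection
 * @param resource object that contains the resource coordinates
 * @param bucket remote path or objectId
 * @param replace if true the remote file will be replaced
 * @return the id of the uploaded resource
 * @throws IOException
 */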
public abstract String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String bucket, boolean replace) throws IOException;
public InputStream getIs() {
return is;
}
public void setIs(InputStream is) {
this.is = is;
}
public boolean isReplaceOption() {
return replaceOption;
}
public void setReplaceOption(boolean replaceOption) {
this.replaceOption = replaceOption;
}
public String getLocalPath() {
return localPath;
}
public void setLocalPath(String localPath) {
this.localPath = localPath;
}
public String getRemotePath() {
return remotePath;
}
public void setRemotePath(String remotePath) {
this.remotePath = remotePath;
}
public OutputStream getOs() {
return os;
}
public void setOs(OutputStream os) {
this.os = os;
}
public MyFile getResource() {
return resource;
}
public void setResource(MyFile resource) {
this.resource = resource;
}
}

@ -0,0 +1,62 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.operation.UploadOperator;
/**
* @deprecated
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class UploadAndUnlock extends Operation {
// private String keyUnlock;
public UploadAndUnlock(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server,user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
@Override
public String doIt(MyFile myFile) throws RemoteBackendException {
if (logger.isDebugEnabled()) {
logger.debug(" UPLOAD " + myFile.getLocalPath()
+ " author: " + myFile.getOwner());
}
Upload upload= new UploadOperator(getServer(), getUser(), getPassword(), getBucket(), getMonitor(), isChunk(), getBackendType(), getDbNames());
String objectId=null;
try {
// TODO: add a parameter for the lock
objectId=put(upload, myFile, isChunk(), false, false, true);
} catch (Exception e) {
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
tm.close();
throw new RemoteBackendException(" Error in uploadAndUnlock operation ", e.getCause()); }
return objectId;
}
@Override
public String initOperation(MyFile file, String remotePath,
String author, String[] server, String rootArea,
boolean replaceOption) {
// set replace option
// this.replaceOption=replaceOption;
// the name of bucket is formed: path_____fileName_____author
String bucketName=new BucketCoding().bucketFileCoding(remotePath, rootArea);
return bucket=bucketName;
}
@Override
public String initOperation(MyFile resource, String RemotePath,
String author, String[] server, String rootArea) {
// TODO Auto-generated method stub
return null;
}
}

@ -0,0 +1,28 @@
package org.gcube.contentmanagement.blobstorage.test;
import java.util.List;
import org.gcube.contentmanagement.blobstorage.service.IClient;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.resource.StorageObject;
public class SimpleTest2 {
public static void main(String[] args) throws RemoteBackendException{
String[] server=new String[]{"146.48.123.73","146.48.123.74" };
IClient client=new ServiceEngine(server, "rcirillo", "cnr", "private", "rcirillo");
// String localFile="/home/rcirillo/FilePerTest/CostaRica.jpg";
String remoteFile="/img/shared9.jpg";
String newFile="/home/rcirillo/FilePerTest/repl4.jpg";
client.get().LFile(newFile).RFile(remoteFile);
List<StorageObject> list=client.showDir().RDir("/img/");
for(StorageObject obj : list){
System.out.println("obj found: "+obj.getName());
}
String uri=client.getUrl().RFile(remoteFile);
System.out.println(" uri file: "+uri);
}
}

@ -0,0 +1,357 @@
package org.gcube.contentmanagement.blobstorage.transport;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.UnknownHostException;
import java.util.List;
import java.util.Map;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.StorageObject;
import org.gcube.contentmanagement.blobstorage.service.operation.*;
import org.gcube.contentmanagement.blobstorage.transport.backend.operation.LockOperator;
import org.gcube.contentmanagement.blobstorage.transport.backend.operation.UnlockOperator;
import com.mongodb.MongoException;
/**
 * The Transport Manager presents the methods for the connection to the remote system. This class should be instantiated for connecting to a remote backend
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public abstract class TransportManager {
/**
 * This method specifies the type of the backend for dynamic loading.
 * For MongoDB, the default backend, the name is MongoDB
* @return the backend name
*/
public abstract String getName();
/**
 * This method initializes and configures the backend servers
 * @param server array that contains the IP addresses of the backend servers
 * @param user backend username
 * @param pass backend password
*/
public abstract void initBackend(String[] server, String user, String pass, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern);
/**
* Start the download operation. It contains logic to determine the correct operation based on the input parameters
* @param myFile object that contains the resource coordinates
* @param key remote path or objectId
* @param type class type of myFile object
* @return the key of remote resource
* @throws IOException if there are IO problems
*/
public String downloadManager(Download download, MyFile myFile, String key, Class <? extends Object> type) throws Exception{
String key4lock=null;
if(myFile.isLock()){
download.setResource(myFile);
get(download);
Lock lock= new LockOperator(download.getServer(), download.getUser(), download.getPassword(), download.getBucket(), download.getMonitor(), download.isChunk(), download.getBackendType(), download.getDbNames());
lock.setResource(myFile);
key4lock=lock(lock);
return key4lock;
}else{
// return get(myFile, key, type).toString();
return get(download).toString();
}
}
/**
* Start the upload operation. It contains logic to determine the correct operation based on the input parameters
* @param resource object that contains the resource coordinates
* @param bucket remote path or objectId
* @param key used only for chunk index operation
* @param replace if is true the file will be replaced
* @return the id of the remote resource
* @throws FileNotFoundException
* @throws UnknownHostException
*/
public String uploadManager(Upload upload, Object resource, String bucket, String key, boolean replace) throws Exception{
String id=null;
MyFile file=(MyFile)resource;
if((file.getLockedKey()!=null) && (!file.getLockedKey().isEmpty())){
Unlock unlock= new UnlockOperator(upload.getServer(), upload.getUser(), upload.getPassword(), upload.getBucket(), upload.getMonitor(), upload.isChunk(), upload.getBackendType(), upload.getDbNames());
unlock.setResource(file);
unlock.setKeyUnlock(file.getLockedKey());
id=unlock(unlock);
upload.setResource(file);
id=put(upload);
}else{
// id=put(resource, bucket, key, replace);
id=put(upload);
}
return id;
}
// public abstract Object get(MyFile myFile, String key, Class <? extends Object> type) throws FileNotFoundException, IOException;
/**
 * get an object from the cluster
 * @param download the download operator that contains the resource coordinates
 * and the key that identifies a server location object:
 * in Terrastore it corresponds to a key, in MongoDB to an objectId or a remote path
 * @return generic object that identifies a remote resource
 * @throws FileNotFoundException
 * @throws IOException
 */
public abstract Object get(Download download) throws FileNotFoundException, IOException;
/**
 * put an object on the cluster
 * @param upload the upload operator that contains the resource coordinates,
 * the target bucket (remote path or objectId) and the chunk or unlock key
 * @return the id of the remote resource
 * @throws FileNotFoundException
 * @throws IOException
*/
// public abstract String put(Object resource, String bucket, String key, boolean replace) throws UnknownHostException;
public abstract String put(Upload upload) throws FileNotFoundException, IOException;
/**
* get all values contained in a remote bucket (or remote directory)
 * @param resource object that contains the resource coordinates
 * @param bucket remote path or objectId
 * @param type class type of myFile object
 * @return map that contains the objects in the directory
* @throws UnknownHostException
*/
public abstract Map<String, StorageObject> getValues(MyFile resource, String bucket, Class< ? extends Object> type);
/**
* delete a remote file
* @param bucket identifies the remote file
* @throws UnknownHostException
*/
public abstract void removeRemoteFile(String bucket, MyFile resource) throws UnknownHostException;
/**
* delete a remote directory
* @param remoteDir remote Directory path
* @param myFile
* @throws IllegalStateException
* @throws UnknownHostException
*
*/
public abstract void removeDir(String remoteDir, MyFile myFile) throws UnknownHostException;
/**
* get the size of the remote file
* @param bucket identifies the remote file path
* @return the size of the remote file
* @throws UnknownHostException
*/
public abstract long getSize(String bucket);
/**
* lock a remote file
 * @param lock the lock operator that contains the resource coordinates
 * and the remote path
* @return the key that permits the object's unlock
* @throws IOException
* @throws Exception
*/
// public abstract String lock(MyFile resource, String serverLocation,
// Class<? extends Object> type) throws IOException;
public abstract String lock(Lock lock) throws Exception;
/**
* unlock a remote file
 * @param unlock the unlock operator that contains the resource coordinates,
 * the remote path and the key used to unlock the remote file
* @return key for lock or null
* @throws FileNotFoundException
* @throws UnknownHostException
* @throws MongoException
* @throws Exception
*/
// public abstract String unlock(Object resource, String bucket, String key,
// String key4unlock) throws FileNotFoundException,
// UnknownHostException, MongoException;
public abstract String unlock(Unlock unlock) throws FileNotFoundException,
UnknownHostException, MongoException, Exception;
/**
* returns the TTL associated with a remote file
* @param pathServer file remote path
* @return the time of ttl
* @throws UnknownHostException
*/
public abstract long getTTL(String pathServer) throws UnknownHostException;
/**
* renew the TTL associated with a remote file
* @param resource
* @return the TTL time left
* @throws UnknownHostException
* @throws IllegalAccessException
*/
public abstract long renewTTL(MyFile resource) throws UnknownHostException, IllegalAccessException;
/**
 * link the destination resource to the source resource. The payload of the file is shared between the two resources; only the metadata changes
* @param resource resource object
* @param source complete path of the source resource
* @param destination complete path of the destination resource
* @return id of the new resource
* @throws UnknownHostException
*/
// public abstract String link(MyFile resource, String source, String destination) throws UnknownHostException;
public abstract String link(Link link) throws UnknownHostException;
/**
* copy a remote resource from source path to destination path. In this case the payload will be duplicated
* @param resource resource object
* @param source complete path of the source resource
* @param destination complete path of the destination resource
* @return id of the new resource
* @throws UnknownHostException
*/
// public abstract String copy(MyFile resource, String source, String destination) throws UnknownHostException;
/**
* copy a remote resource from source path to destination path. In this case the payload will be duplicated
* @param resource resource object
* @param source complete path of the source resource
* @param destination complete path of the destination resource
* @return id of the new resource
* @throws UnknownHostException
*/
public abstract String copy(Copy copy) throws UnknownHostException;
/**
* Move a remote resource from source path to destination path
* @param resource resource object
* @param source complete path of the source resource
* @param destination complete path of the destination resource
* @return id of the new resource
* @throws UnknownHostException
*/
// public abstract String move(MyFile resource, String source, String destination) throws UnknownHostException;
public abstract String move(Move move) throws UnknownHostException;
/**
* copy a remote folder from source path to destination path.
* @param resource resource object
* @param source complete path of the source resource
* @param destination complete path of the destination resource
* @return id of the new resource
* @throws UnknownHostException
*/
// public abstract List<String> copyDir(MyFile resource, String source, String destination) throws UnknownHostException;
public abstract List<String> copyDir(CopyDir copy) throws UnknownHostException;
/**
* Move a remote folder from source path to destination path
* @param resource resource object
* @param source complete path of the source resource
* @param destination complete path of the destination resource
* @return id of the new resource
* @throws UnknownHostException
*/
// public abstract List<String> moveDir(MyFile resource, String source, String destination) throws UnknownHostException;
public abstract List<String> moveDir(MoveDir move) throws UnknownHostException;
/**
 * Get a generic metadata field from a remote file, e.g. owner, creationDate, link
* @param remotePath remote file path
* @param property property key
* @return property value
* @throws UnknownHostException
*/
public abstract String getFileProperty(String remotePath, String property);
/**
* Get the number of files in a folder
* @param folderPath: the folder path
* @return the number of files contained in the folder
* @throws UnknownHostException
*/
public abstract long getFolderTotalItems(String folderPath);
/**
* Get the total Volume in the folder specified by input parameter folderPath
* @param folderPath: the path of the folder
* @return the folder size
* @throws UnknownHostException
*/
public abstract long getFolderTotalVolume(String folderPath);
/**
* Get the total Volume of files uploaded by a user specified in input parameter user
* @param user: the username
 * @return the total volume uploaded by the user
* @throws UnknownHostException
*/
public abstract String getUserTotalVolume(String user);
/**
* Get the number of files uploaded by a user
* @param user: username
 * @return the total number of files uploaded by the user
* @throws UnknownHostException
*/
public abstract String getUserTotalItems(String user);
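/**
 * Checks whether id is a valid backend identifier
 * (for MongoDB, a 24-character hex ObjectId)
 * @param id the identifier to check
 * @return true if the id is valid
 */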
public abstract boolean isValidId(String id);
public abstract String getId(String remoteIdentifier, boolean forceCreation);
public abstract String getField(String remoteIdentifier, String fieldName) throws UnknownHostException ;
public abstract void close();
public abstract void setFileProperty(String remotePath, String propertyField, String propertyValue);
public abstract String getRemotePath(String bucket)throws UnknownHostException;
/**
 * Checks if a remote resource exists
 * @param bucket remote path or objectId
 * @return true if the resource exists
*/
public abstract boolean exist(String bucket);
/**
 * Duplicates a remote file
 * @param duplicate the duplicate operator that contains the resource coordinates
 * @return the id of the new resource
*/
// public abstract String duplicateFile(MyFile resource, String bucket);
public abstract String duplicateFile(DuplicateFile duplicate);
// public String softCopy(MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException{return null;}
public String softCopy(SoftCopy copy) throws UnknownHostException{return null;}
}

@ -0,0 +1,76 @@
package org.gcube.contentmanagement.blobstorage.transport;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.ServiceLoader;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
//import terrastore.client.TerrastoreClient;
/**
* Transport manager factory
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class TransportManagerFactory {
/**
* Logger for this class
*/
// private static final Logger logger = Logger.getLogger(OperationFactory.class);
final Logger logger = LoggerFactory.getLogger(TransportManagerFactory.class);
// TerrastoreClient client;
String[] server;
String user;
String password;
public TransportManagerFactory(String server[], String user, String password){
this.server=server;
this.user=user;
this.password=password;
}
public TransportManager getTransport(String backendType, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern){
if (logger.isDebugEnabled()) {
logger.debug("getOperation(String) - start");
}
return load(backendType, memoryType, dbNames, writeConcern, readConcern);
}
private TransportManager load(String backendType, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern){
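// ServiceLoader discovers TransportManager implementations declared in
// META-INF/services/org.gcube.contentmanagement.blobstorage.transport.TransportManager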
ServiceLoader<TransportManager> loader = ServiceLoader.load(TransportManager.class);
Iterator<TransportManager> iterator = loader.iterator();
List<TransportManager> impls = new ArrayList<TransportManager>();
while(iterator.hasNext())
impls.add(iterator.next());
int implementationCounted=impls.size();
// System.out.println("size: "+implementationCounted);
if(implementationCounted==0){
logger.info(" 0 implementation found. Load default implementation of TransportManager");
return new MongoOperationManager(server, user, password, memoryType, dbNames, writeConcern, readConcern);
}else if(implementationCounted==1){
TransportManager tm = impls.get(0);
logger.info("1 implementation of TransportManager found. Load it. "+tm.getName());
tm.initBackend(server, user, password, memoryType, dbNames, writeConcern, readConcern);
return tm;
}else{
logger.info("found "+implementationCounted+" implementations of TransportManager");
logger.info("search: "+backendType);
for(TransportManager tm : impls){
if(tm.getName().equalsIgnoreCase(backendType)){
logger.info("Found implementation "+backendType);
return tm;
}
}
throw new IllegalStateException("Mismatch Backend Type and RuntimeResource Type. The backend type expected is "+backendType);
}
}
}

@ -0,0 +1,49 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.transport.backend;
import java.util.List;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.BasicDBObject;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public class BsonOperator {
private GridFS gfs;
// private GridFSDBFile gfsFile;
// private String dbName;
// private BasicDBObject dbObject;
private Logger logger = LoggerFactory.getLogger(BsonOperator.class);
public BsonOperator(GridFS gfs){
this.gfs=gfs;
// this.dbName=dbName;
}
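/**
 * Retrieves all GridFS files whose dir field starts with folderPath
 * @param folderPath remote folder path used as prefix
 * @return the list of files contained in the folder
 */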
protected List<GridFSDBFile> getFilesOnFolder(String folderPath) {
BasicDBObject queryFile = new BasicDBObject();
queryFile.put("dir", java.util.regex.Pattern.compile(folderPath+"*"));
List<GridFSDBFile> list=gfs.find(queryFile);
logger.info("retrieveRemoteFileObject found "+list.size()+" objects ");
return list;
}
protected List<GridFSDBFile> getOwnedFiles(String username){
BasicDBObject queryFile = new BasicDBObject();
queryFile.put("owner", username);
List<GridFSDBFile> list=gfs.find(queryFile);
logger.info("retrieveUsersFileObjectfound "+list.size()+" objects ");
return list;
}
}

@ -0,0 +1,50 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.transport.backend;
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.gridfs.GridFS;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public class CollectionOperator {
private GridFS gfs;
private DBCollection collection;
private BasicDBObject dbObject;
public CollectionOperator(GridFS gfs){
setGfs(gfs);
}
public GridFS getGfs() {
return gfs;
}
public void setGfs(GridFS gfs) {
this.gfs = gfs;
}
public DBCollection getCollection() {
return collection;
}
public void setCollection(DBCollection collection) {
this.collection = collection;
}
public BasicDBObject getDbObject() {
return dbObject;
}
public void setDbObject(BasicDBObject dbObject) {
this.dbObject = dbObject;
}
}

@ -0,0 +1,87 @@
//package org.gcube.contentmanagement.blobstorage.transport.backend;
//
//
//import java.net.UnknownHostException;
//import java.util.Arrays;
//import java.util.Map;
//
//import org.gcube.contentmanagement.blobstorage.resource.MyFile;
//import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
//
//import com.mongodb.MongoException;
//
//import terrastore.client.TerrastoreClient;
//import terrastore.client.connection.OrderedHostManager;
//import terrastore.client.connection.resteasy.HTTPConnectionFactory;
//
///**
// * Terrastore Transport layer
// * @author Roberto Cirillo (ISTI - CNR)
// *
// */
//public class HttpTerrastoreClient extends TransportManager{
//
// private String[] server;
// private TerrastoreClient client;
//
// public HttpTerrastoreClient(String[] server) {
// client=new TerrastoreClient(new OrderedHostManager(Arrays.asList(server)), new HTTPConnectionFactory());
// }
//
// @Override
// public Object get(MyFile resource, String key, Class<? extends Object> type) {
// Object ret=null;
// if((resource.getPathClient()!=null) && (!resource.getPathClient().isEmpty()))
// ret=client.bucket(resource.getPathClient()).key(key).get(type);
// else
// throw new IllegalArgumentException("Local path not found");
// return ret;
// }
//
// @Override
// public String put(Object resource, String bucket, String key, boolean replaceOption) {
// //replace option is ignored
// client.bucket(bucket).key(key).put(resource);
// return null;
// }
//
// @Override
// public Map getValues(String bucket, Class<? extends Object> type) {
// return client.bucket(bucket).values().get(type);
// }
//
// @Override
// public void clearBucket(String bucket) {
// client.bucket(bucket).clear();
//
// }
//
// @Override
// public void removeKey(String bucket, String key) {
// client.bucket(bucket).key(key).remove();
//
// }
//
// @Override
// public Map getValuesPredicate(String bucket, Class< ? extends Object> type, String predicate) {
// return client.bucket(bucket).predicate(predicate).get(type);
//
// }
//
// @Override
// public void removeDir(String remoteDir){}
//
// @Override
// public long getTTL(String pathServer) throws UnknownHostException,
// MongoException {
// throw new IllegalArgumentException("This operation is not compatible with this client");
// }
//
// @Override
// public long renewTTL(MyFile resource) throws UnknownHostException,
// MongoException {
// throw new IllegalArgumentException("This operation is not compatible with this client");
//}
//
//
//}

@ -0,0 +1,687 @@
package org.gcube.contentmanagement.blobstorage.transport.backend;
import org.bson.types.ObjectId;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.service.operation.*;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.gcube.contentmanagement.blobstorage.resource.StorageObject;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.MongoException;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;
import com.mongodb.gridfs.GridFSInputFile;
/**
* MongoDB transport layer
* @author Roberto Cirillo (ISTI - CNR)
*
*/
public class MongoOperationManager extends TransportManager{
/**
* Logger for this class
*/
final Logger logger = LoggerFactory.getLogger(MongoOperationManager.class);
// private MongoClient mongo;
private MongoIOManager mongoPrimaryInstance;
private MongoIOManager mongoSecondaryInstance;
private MemoryType memoryType;
protected static String[] dbNames;
public MongoOperationManager(String[] server, String user, String password, MemoryType memoryType, String[] dbNames,String writeConcern, String readConcern){
initBackend(server,user,password, memoryType,dbNames, writeConcern, readConcern);
}
@Override
public void initBackend(String[] server, String user, String pass, MemoryType memoryType , String[] dbNames, String writeConcern, String readConcern) {
try {
this.memoryType=memoryType;
MongoOperationManager.dbNames=dbNames;
logger.debug("check mongo configuration");
if (dbNames!=null){
if(dbNames.length==1){
logger.info("found one mongo db to connect");
mongoPrimaryInstance= getMongoInstance(server, user, pass, memoryType, dbNames[0], writeConcern, readConcern);
}else if (dbNames.length== 0){
logger.warn("primary db not discovered correctly. Backend will be instantiated with default value");
mongoPrimaryInstance= getMongoInstance(server, user, pass, memoryType, null, writeConcern, readConcern);
} else if (dbNames.length== 2){
logger.info("found two mongo db to connect");
mongoPrimaryInstance= getMongoInstance(server, user, pass, memoryType, dbNames[0], writeConcern, readConcern);
mongoSecondaryInstance=getMongoInstance(server, user, pass, memoryType, dbNames[1], writeConcern, readConcern);
}else{
throw new RuntimeException("Found more than 2 collection on the ServiceEndopint. This case is not managed");
}
}else{
logger.debug("primary db not discovered. Backend will be instantiated with default value");
mongoPrimaryInstance= getMongoInstance(server, user, pass, memoryType, null, writeConcern, readConcern);
}
} catch (UnknownHostException e) {
logger.error("initBackend failed: unknown host", e);
} catch (MongoException e) {
logger.error("initBackend failed: MongoException", e);
}
}
private MongoIOManager getMongoInstance(String[] server, String user, String password, MemoryType memoryType, String dbName, String writeConcern, String readPreference)
throws UnknownHostException {
MongoIOManager mongoInstance=new MongoIOManager(server, user, password, memoryType, dbName, writeConcern, readPreference);//MongoIO.getInstance(server, user, password);
mongoInstance.clean();
DBCollection coll =mongoInstance.getMetaDataCollection();// io.getDB().getCollection("fs.files");
coll.createIndex(new BasicDBObject("filename", 1)); // create index on "filename", ascending
coll.createIndex(new BasicDBObject("dir", 1)); // create index on "filename", ascending
coll.createIndex(new BasicDBObject("owner", 1)); // create index on "owner", ascending
return mongoInstance;
}
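// The three createIndex calls above are roughly equivalent to the following shell
// commands (collection name assumed to be the GridFS default fs.files):
//   db.fs.files.createIndex({filename: 1})
//   db.fs.files.createIndex({dir: 1})
//   db.fs.files.createIndex({owner: 1})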
/**
* @param serverLocation can be a remote path on the cluster or an object id
* @throws IOException
*/
@Override
public ObjectId get(Download download) throws IOException {
return download.execute(mongoPrimaryInstance, mongoSecondaryInstance);
}
/**
* Returns the key that permits unlocking the object
* @throws IOException
*/
@Override
public String lock(Lock lock) throws Exception {
return lock.execute(mongoPrimaryInstance, mongoSecondaryInstance, lock.getResource(), lock.getBucket());
}
@Override
public String put(Upload upload) throws IOException {
return upload.execute(mongoPrimaryInstance, mongoSecondaryInstance, upload.getResource(), upload.getBucket(), upload.isReplaceOption());
}
public void close() {
mongoPrimaryInstance.close();
// mongoSecondaryInstance.close();
}
/**
* Unlocks the specified object; this method accepts the key field for the unlock operation
* @throws FileNotFoundException
* @throws UnknownHostException
*/
@Override
public String unlock(Unlock unlock) throws Exception {
return unlock.execute(mongoPrimaryInstance, mongoSecondaryInstance,unlock.getResource(), unlock.getBucket(), unlock.getKeyUnlock());
}
@Override
public Map<String, StorageObject> getValues(MyFile resource, String bucket, Class<? extends Object> type){
Map<String, StorageObject> map=null;
try{
OperationDefinition op=resource.getOperationDefinition();
logger.info("MongoClient getValues method: "+op.toString());
// DB db=mongoPrimaryInstance.getConnectionDB(resource.getWriteConcern(), resource.getReadPreference(), getPrimaryCollectionName(), true);
GridFS gfs = mongoPrimaryInstance.getGfs(getPrimaryCollectionName(), true);
if(logger.isDebugEnabled()){
logger.debug("Mongo get values of dir: "+bucket);
}
BasicDBObject query = new BasicDBObject();
query.put("dir", bucket);
List<GridFSDBFile> list = gfs.find(query);
// Patch for incompatibility v 1-2
list=mongoPrimaryInstance.patchRemoteDirPathVersion1(bucket, gfs, query, list);
//end
logger.info("find all object (files/dirs) in the directory "+bucket);
for(Iterator<GridFSDBFile> it=list.iterator(); it.hasNext();){
GridFSDBFile f=(GridFSDBFile)it.next();
if(map==null){
map=new HashMap<String, StorageObject>();
}
StorageObject s_obj=null;
// remains null if the entry should not appear in the listing (e.g. an empty directory)
if((f.get("type")==null) || (f.get("type").toString().equalsIgnoreCase("file"))){
if(logger.isDebugEnabled())
logger.debug("found object: "+f.get("name")+" type: "+f.get("type"));
s_obj=new StorageObject(f.get("name").toString(), "file");
String owner=(String)f.get("owner");
if(owner !=null)
s_obj.setOwner(owner);
String creationTime=(String)f.get("creationTime");
if(creationTime!=null)
s_obj.setCreationTime(creationTime);
s_obj.setId(f.getId().toString());
}else{
if(logger.isDebugEnabled())
logger.debug("found directory: "+f.get("name")+" type: "+f.get("type"));
// check if it is an empty dir; empty dirs are filtered out of the result
BasicDBObject queryDir = new BasicDBObject();
queryDir.put("dir", f.get("dir").toString()+f.get("name").toString());
List<GridFSDBFile> listDir = gfs.find(queryDir);
if((listDir != null) && (listDir.size() > 0))
s_obj=new StorageObject(f.get("name").toString(), "dir");
else{
// the dir does not contain subdirectories
//check if it contains subfiles
BasicDBObject queryFile = new BasicDBObject();
queryFile.put("filename", java.util.regex.Pattern.compile(f.get("dir").toString()+"*"));
logger.info("find all files in the directory "+f.get("name"));
List<GridFSDBFile> listFile = gfs.find(queryFile);
logger.info("search completed");
if((listFile != null) && (listFile.size() > 0)){
// then it contains subFile. Insert it in the result map
s_obj=new StorageObject(f.get("name").toString(), "dir");
}else s_obj=null;
}
}
if(s_obj !=null)
map.put(f.get("name").toString(), s_obj);
}
logger.info("search completed");
}catch(Exception e ){
close();
throw new RemoteBackendException("problem to retrieve objects in the folder: "+bucket+" exception message: "+e.getMessage());
}
close();
return map;
}
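// Illustrative result: listing the bucket "/home/user/" could return a map like
// {"file.txt" -> StorageObject of type "file", "subdir" -> StorageObject of type "dir"},
// with empty directories filtered out of the listing.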
@Override
public void removeRemoteFile(String bucket, MyFile resource) throws UnknownHostException{
logger.info("Check file: "+bucket+ " for removing operation");
GridFSDBFile f=mongoPrimaryInstance.retrieveRemoteDescriptor(bucket, null, true);
if(f!=null){
mongoPrimaryInstance.checkAndRemove(f, resource);
}else{
if(logger.isDebugEnabled())
logger.debug("File Not Found. Try to delete by ObjectID");
if(bucket.length()>23){
ObjectId id=new ObjectId(bucket);
GridFSDBFile fID=mongoPrimaryInstance.findGFSCollectionObject(id);
if(fID != null){
mongoPrimaryInstance.checkAndRemove(fID, resource);
if(logger.isInfoEnabled())
logger.info("object deleted by ID");
}
}
}
close();
}
@Override
public void removeDir(String remoteDir, MyFile resource){
ArrayList<String> dirs=new ArrayList<String>();
dirs.add(remoteDir);
// patch for incompatibility v 1-2
patchCompatibilityOldLibraryVersion(remoteDir, dirs);
// end patch
// DB db=mongoPrimaryInstance.getConnectionDB(resource.getWriteConcern(), resource.getReadPreference(),getPrimaryCollectionName(), true);
GridFS gfs =mongoPrimaryInstance.getGfs(getPrimaryCollectionName(), true);//new GridFS(db);
for(String directory : dirs){
if(logger.isDebugEnabled())
logger.debug("Mongo start operation delete bucket: "+directory);
// remove subfolders
if(logger.isDebugEnabled())
logger.debug("remove subfolders of folder: "+directory);
BasicDBObject query = new BasicDBObject();
String regex=directory+"*";
query.put("dir", java.util.regex.Pattern.compile(regex));
mongoPrimaryInstance.removeObject(gfs, query,resource);
query=new BasicDBObject();
String[] dir=directory.split(Costants.FILE_SEPARATOR);
StringBuffer parentDir=new StringBuffer();
for(int i=0;i<dir.length-1;i++){
parentDir.append(dir[i]+Costants.FILE_SEPARATOR);
}
String name=dir[dir.length-1];
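// e.g. for directory "/home/user/folder" (assuming Costants.FILE_SEPARATOR is "/"):
// parentDir becomes "/home/user/" and name becomes "folder"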
query.put("dir", parentDir.toString());
query.put("name", name);
if(logger.isDebugEnabled())
logger.debug("now remove the folder: "+name+" from folder "+parentDir);
mongoPrimaryInstance.removeObject(gfs, query, resource);
if(logger.isDebugEnabled())
logger.debug("Mongo end operation delete bucket: "+directory);
}
close();
}
private void patchCompatibilityOldLibraryVersion(String remoteDir, ArrayList<String> dirs) {
if((remoteDir.contains(Costants.ROOT_PATH_PATCH_V1)) || (remoteDir.contains(Costants.ROOT_PATH_PATCH_V2))){
if(remoteDir.contains(Costants.ROOT_PATH_PATCH_V1)){
String remoteDirV1=remoteDir.replace(Costants.ROOT_PATH_PATCH_V1, Costants.ROOT_PATH_PATCH_V2);
dirs.add(remoteDirV1);
}else{
String remoteDirV2= remoteDir.replace(Costants.ROOT_PATH_PATCH_V2, Costants.ROOT_PATH_PATCH_V1);
dirs.add(remoteDirV2);
String remoteDirV2patch=Costants.FILE_SEPARATOR+remoteDirV2;
dirs.add(remoteDirV2patch);
}
}
}
@Override
public long getSize(String remotePath){
long length=-1;
if(logger.isDebugEnabled())
logger.debug("MongoDB - get Size for pathServer: "+remotePath);
GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(remotePath, null, true);
if(f!=null){
length=f.getLength();
}
close();
return length;
}
@Override
public boolean exist(String remotePath){
boolean isPresent=false;
if(logger.isDebugEnabled())
logger.debug("MongoDB - get Size for pathServer: "+remotePath);
GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(remotePath, null, true);
if(f!=null){
isPresent=true;
}
close();
return isPresent;
}
@Override
public long getTTL(String remotePath) throws UnknownHostException{
long timestamp=-1;
long currentTTL=-1;
long remainsTTL=-1;
if(logger.isDebugEnabled())
logger.debug("MongoDB - pathServer: "+remotePath);
GridFSDBFile f=mongoPrimaryInstance.retrieveRemoteDescriptor(remotePath, null, true);
if(f!=null){
timestamp=(Long)f.get("timestamp");
if(timestamp > 0){
currentTTL=System.currentTimeMillis() - timestamp;
remainsTTL=Costants.TTL- currentTTL;
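// e.g. if Costants.TTL were 180000 ms (illustrative value only) and the lock was taken
// 60000 ms ago, remainsTTL would be 120000 ms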
}
}
close();
return remainsTTL;
}
@Override
public long renewTTL(MyFile resource) throws UnknownHostException, IllegalAccessException{
long ttl=-1;
MyFile file=(MyFile)resource;
REMOTE_RESOURCE remoteResourceIdentifier=file.getOperation().getRemoteResource();
String key=file.getLockedKey();
String remotePath=file.getRemotePath();
GridFSDBFile f=mongoPrimaryInstance.retrieveRemoteDescriptor(remotePath, remoteResourceIdentifier, true);
if(f!=null){
String lock=(String)f.get("lock");
//check if the old file is locked
if((lock !=null) && (!lock.isEmpty())){
String lck=(String)f.get("lock");
if(lck.equalsIgnoreCase(key)){
if((f.containsField("countRenew")) && (f.get("countRenew") != null)){
int count=(Integer)f.get("countRenew");
if(count < Costants.TTL_RENEW){
f.put("countRenew", count+1);
}else{
close();
// max number of TTL renew operations reached; the operation is blocked
throw new IllegalAccessException("The max number of TTL renewals has been reached. The max is: "+Costants.TTL_RENEW);
}
}else{
// first renew operation
f.put("countRenew", 1);
}
f.put("timestamp", System.currentTimeMillis());
f.save();
ttl=Costants.TTL;
}else{
close();
throw new IllegalAccessError("bad key for unlock");
}
}
}
close();
return ttl;
}
/**
* link operation
*
*/
@Override
public String link(Link link) throws UnknownHostException{
return link.execute(mongoPrimaryInstance, mongoSecondaryInstance, link.getResource(), link.getSourcePath(), link.getDestinationPath());
}
@Override
public String copy(Copy copy) throws UnknownHostException{
logger.info("CopyFile operation from "+copy.getSourcePath()+" to "+ copy.getDestinationPath());
return copy.execute(mongoPrimaryInstance, copy.getResource(), copy.getSourcePath(), copy.getDestinationPath());
}
@Override
public String move(Move move) throws UnknownHostException{
logger.info("MoveFile operation from "+move.getSourcePath()+" to "+ move.getDestinationPath());
return move.execute(mongoPrimaryInstance, memoryType, move.getResource(), move.getSourcePath(), move.getDestinationPath());
}
@Override
public String getName() {
return Costants.DEFAULT_TRANSPORT_MANAGER;
}
@Override
public List<String> copyDir(CopyDir copy) throws UnknownHostException {
return copy.execute(mongoPrimaryInstance, copy.getResource(), copy.getSourcePath(), copy.getDestinationPath());
}
@Override
public List<String> moveDir(MoveDir move) throws UnknownHostException {
return move.execute(mongoPrimaryInstance, move.getResource(), move.getSourcePath(), move.getDestinationPath(), memoryType);
}
@Override
public String getFileProperty(String remotePath, String property){
GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(remotePath, null, true);
if(f!=null){
String value=(String)f.get(property);
close();
return value;
}else{
close();
throw new RemoteBackendException("remote file not found at path: "+remotePath);
}
}
@Override
public void setFileProperty(String remotePath, String propertyField, String propertyValue){
logger.trace("setting field "+propertyField+" with value: "+propertyValue);
try {
updateMetaObject(remotePath, propertyField, propertyValue);
} catch (UnknownHostException e) {
throw new RemoteBackendException("UnknownHostException: "+e.getMessage(), e);
}
}
/**
* This method performs a query to MongoDB in order to add a new property to the metadata object
* @param remoteIdentifier objectID or remote path of the remote object
* @param propertyField new field name
* @param propertyValue value of the new field
* @throws UnknownHostException
*/
private void updateMetaObject(String remoteIdentifier, String propertyField, String propertyValue)
throws UnknownHostException {
BasicDBObject remoteMetaCollectionObject;
logger.debug("find object...");
remoteMetaCollectionObject = mongoPrimaryInstance.findMetaCollectionObject(remoteIdentifier);
if(remoteMetaCollectionObject!=null){
logger.debug("object found");
remoteMetaCollectionObject.put(propertyField, propertyValue);
logger.info("set query field: "+propertyField+" with value: "+propertyValue);
BasicDBObject updateQuery= new BasicDBObject();
updateQuery.put("$set", remoteMetaCollectionObject);
// retrieve original object
BasicDBObject querySourceObject = getQuery(remoteIdentifier);
//getCollection
logger.debug("get Collection ");
DBCollection metaCollectionInstance=mongoPrimaryInstance.getMetaDataCollection(mongoPrimaryInstance.getConnectionDB(getPrimaryCollectionName(), false));
//update field
logger.debug("update Collection ");
if (!(memoryType== MemoryType.VOLATILE))
metaCollectionInstance.update(querySourceObject, updateQuery, false, true, Costants.DEFAULT_WRITE_TYPE);
else
metaCollectionInstance.update(querySourceObject, updateQuery, false, true);
logger.info("update completed");
close();
}else{
logger.debug("object not found");
close();
throw new RemoteBackendException("remote file not found at path: "+remoteIdentifier);
}
}
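// The update above is roughly equivalent to the following shell command (no upsert,
// multi-document); placeholders in angle brackets are illustrative:
//   db.<metaCollection>.update({filename: "<remoteIdentifier>"},
//       {$set: {"<propertyField>": "<propertyValue>"}}, {multi: true})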
/**
*
* @param remoteIdentifier objectID or remote path of the remote object
* @return the BasicDBObject of the remote object
*/
private BasicDBObject getQuery(String remoteIdentifier) {
BasicDBObject querySourceObject = new BasicDBObject();
logger.debug("check identifier object: "+remoteIdentifier);
if(ObjectId.isValid(remoteIdentifier)){
logger.debug("object is a valid id");
querySourceObject.put( "_id" , new ObjectId(remoteIdentifier));
}else{
logger.debug("object is a remotepath");
querySourceObject.put( "filename" , remoteIdentifier);
}
return querySourceObject;
}
@Override
public long getFolderTotalItems(String folderPath){
logger.debug("getFolderTotalItems for folder "+folderPath);
long totalItems=0;
try{
List<GridFSDBFile> list= mongoPrimaryInstance.getFilesOnFolder(folderPath);
totalItems=getCount(list);
logger.info("getFolderTotalItems found "+list.size()+" objects for folder "+folderPath);
}catch(Exception e ){
close();
throw new RemoteBackendException(e.getMessage());
}
return totalItems;
}
@Override
public long getFolderTotalVolume(String folderPath){
logger.debug("getFolderTotalVolume for folder "+folderPath);
long totalVolume=0;
try{
List<GridFSDBFile> list= mongoPrimaryInstance.getFilesOnFolder(folderPath);
totalVolume=getVolume(list);
logger.info("getFolderTotalVolume "+totalVolume+" for folder "+folderPath);
}catch(Exception e ){
close();
throw new RemoteBackendException(e.getMessage());
}
return totalVolume;
}
@Override
public String getUserTotalVolume(String user){
logger.debug("getUserTotalVolume for folder "+user);
long volume=0;
try{
List<GridFSDBFile> list= mongoPrimaryInstance.getOwnedFiles(user);
volume=getVolume(list);
logger.info("getUserTotalVolume found "+volume+" for user "+user);
}catch(Exception e ){
close();
throw new RemoteBackendException(e.getMessage());
}
return ""+volume;
}
@Override
public String getUserTotalItems(String user){
logger.debug("getUserTotalItems for folder "+user);
long count=0;
try{
List<GridFSDBFile> list= mongoPrimaryInstance.getOwnedFiles(user);
logger.info("getUserTotalItems found "+list.size()+" objects for user "+user);
count=getCount(list);
}catch(Exception e ){
close();
throw new RemoteBackendException(e.getMessage());
}
return ""+count;
}
@Override
public String getId(String path, boolean forceCreation){
ObjectId id=null;
if(logger.isDebugEnabled())
logger.debug("MongoDB - pathServer: "+path);
GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(path, null, true);
if(f!=null){
id=(ObjectId)f.getId();
}else if(forceCreation){
logger.warn("The remote file doesn't exist. An empty file will be created");
// if the file doesn't exist. An empty file will be created
id = forceCreation(path, id);
}else{
close();
throw new RemoteBackendException("the file "+path+" is not present on storage. The uri is not created ");
}
close();
return id.toString();
}
private long getCount(List<GridFSDBFile> list){
return list.size();
}
private long getVolume(List<GridFSDBFile> list){
long partialVolume=0;
for(GridFSDBFile f : list){
long fileVolume=f.getLength();
partialVolume=partialVolume+fileVolume;
}
return partialVolume;
}
private ObjectId forceCreation(String path, ObjectId id) {
if(!ObjectId.isValid(path)){
byte[] data=new byte[1];
GridFSInputFile f2 = null;
if (path.startsWith("/VOLATILE")){
f2=mongoPrimaryInstance.createGFSFileObject(data);//gfs.createFile(data);
}else{
f2=mongoPrimaryInstance.createGFSFileObject(data, null, null);//gfs.createFile(data);
}
int indexName=path.lastIndexOf(Costants.FILE_SEPARATOR);
String name=path.substring(indexName+1);
String dir=path.substring(0, indexName+1);
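// e.g. for path "/home/user/file.txt" (assuming Costants.FILE_SEPARATOR is "/"):
// name becomes "file.txt" and dir becomes "/home/user/"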
f2.put("filename", path);
f2.put("name", name);
f2.put("dir", dir);
id=(ObjectId)f2.getId();
f2.save();
close();
}else{
logger.error("Cannot force creation of smp uri without a remote path. The input parameter is not a remotePath valid: "+path);
close();
throw new RemoteBackendException("The uri is not created. Cannot force creation of smp uri without a remote path. The input parameter is not a remotePath: "+path);
}
return id;
}
@Override
public boolean isValidId(String id){
return ObjectId.isValid(id);
}
@Override
public String getRemotePath(String bucket) throws UnknownHostException{
if(!ObjectId.isValid(bucket))
throw new RuntimeException("The following id is not valid: "+bucket);
String path=null;
path=getField(bucket, "filename");
return path;
}
@Override
public String getField(String remoteIdentifier, String fieldName) throws UnknownHostException {
String fieldValue=null;
if(logger.isDebugEnabled())
logger.debug("MongoDB - pathServer: "+remoteIdentifier);
GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(remoteIdentifier, null, true);
if(f!=null){
fieldValue=f.get(fieldName).toString();
}
close();
return fieldValue;
}
public static String getPrimaryCollectionName(){
if ((dbNames != null) && (dbNames.length>0))
return dbNames[0];
else
return null;
}
protected static String getSecondaryCollectionName(){
if ((dbNames != null) && (dbNames.length>1))
return dbNames[1];
else
return null;
}
/**
* Create a new file with the same remotepath and the suffix -dpl
*/
@Override
public String duplicateFile(DuplicateFile duplicate) {
return duplicate.execute(mongoPrimaryInstance);
}
@Override
public String softCopy(SoftCopy copy) throws UnknownHostException{
return copy.execute(mongoPrimaryInstance, copy.getResource(), copy.getSourcePath(), copy.getDestinationPath());
}
}

@ -0,0 +1,28 @@
package org.gcube.contentmanagement.blobstorage.transport.backend;
public class RemoteBackendException extends RuntimeException {
private static final long serialVersionUID = 1L;
public RemoteBackendException()
{
super("Remote backend problem: impossible to complete operation ");
}
public RemoteBackendException(String msg)
{
super(" Remote backend problem: impossible to complete operation "+msg );
}
public RemoteBackendException(Throwable cause)
{
super(" Remote backend problem: impossible to complete operation "+cause );
}
public RemoteBackendException(String msg , Throwable cause){
super(msg, cause);
}
}

@ -0,0 +1,103 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.transport.backend.operation;
import java.io.InputStream;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.service.operation.CopyDir;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;
import com.mongodb.gridfs.GridFSInputFile;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public class CopyDirOperator extends CopyDir {
Logger logger=LoggerFactory.getLogger(CopyDirOperator.class);
/**
* @param server
* @param user
* @param pwd
* @param bucket
* @param monitor
* @param isChunk
* @param backendType
* @param dbs
*/
public CopyDirOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk,
String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
// TODO Auto-generated constructor stub
}
/* (non-Javadoc)
* @see org.gcube.contentmanagement.blobstorage.service.operation.CopyDir#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO, org.gcube.contentmanagement.blobstorage.resource.MyFile, java.lang.String, java.lang.String)
*/
@Override
public List<String> execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath, String destinationPath)
throws UnknownHostException {
String source=sourcePath;
source = appendFileSeparator(source);
String destination=destinationPath;
destination = appendFileSeparator(destination);
String parentFolder=extractParent(source);
String destinationId=null;
List<String> idList=null;
logger.debug("copyDir operation on Mongo backend, parameters: source path: "+source+" destination path: "+destination);
if((source != null) && (!source.isEmpty()) && (destination != null) && (!destination.isEmpty())){
DB db = mongoPrimaryInstance.getConnectionDB(MongoOperationManager.getPrimaryCollectionName(), true);// getDB(resource);
GridFS gfs = mongoPrimaryInstance.getGfs();
// create query for dir field
BasicDBObject query = new BasicDBObject();
query.put( "dir" , new BasicDBObject("$regex", source+"*"));
List<GridFSDBFile> folder = gfs.find(query);
if(folder!=null){
idList=new ArrayList<String>(folder.size());
for(GridFSDBFile f : folder){
if(f.get("type").equals("file")){
String oldFilename=(String)f.get("filename");
String oldDir=(String)f.get("dir");
f=mongoPrimaryInstance.retrieveLinkPayload(f);
InputStream is= f.getInputStream();
int relativePathIndex=source.length();
String relativeDirTree=parentFolder+Costants.FILE_SEPARATOR+oldDir.substring(relativePathIndex);
String relativePath=parentFolder+Costants.FILE_SEPARATOR+oldFilename.substring(relativePathIndex);
String filename=destination+relativePath;
String dir=destination+relativeDirTree;
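// e.g. with source "/old/dir/" and destination "/new/", and assuming extractParent("/old/dir/")
// returns "dir": the file "/old/dir/sub/f.txt" is recreated as "/new/dir/sub/f.txt"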
GridFSInputFile destinationFile=gfs.createFile(is);
destinationFile.put("filename", filename);
destinationFile.put("type", "file");
destinationFile.put("dir", dir);
mongoPrimaryInstance.updateCommonFields(destinationFile, resource, OPERATION.COPY_DIR);
destinationId=destinationFile.getId().toString();
idList.add(destinationId);
if(logger.isDebugEnabled())
logger.debug("ObjectId: "+destinationId);
mongoPrimaryInstance.buildDirTree(mongoPrimaryInstance.getMetaDataCollection(db), dir);
destinationFile.save();
}
}
}
mongoPrimaryInstance.close();
}
return idList;
}
}

@ -0,0 +1,129 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.transport.backend.operation;
import java.io.InputStream;
import java.net.UnknownHostException;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.service.operation.Copy;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.gridfs.GridFSDBFile;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public class CopyOperator extends Copy {
final Logger logger=LoggerFactory.getLogger(CopyOperator.class);
/**
* @param server
* @param user
* @param pwd
* @param bucket
* @param monitor
* @param isChunk
* @param backendType
* @param dbs
*/
public CopyOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk,
String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
// TODO Auto-generated constructor stub
}
/* (non-Javadoc)
* @see org.gcube.contentmanagement.blobstorage.service.operation.Copy#execute()
*/
@Override
// public String execute(MongoIO mongoPrimaryInstance) throws UnknownHostException {
public String execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException {
String source=sourcePath;
String destination=destinationPath;
String dir=((MyFile)resource).getRemoteDir();
String originalDir=((MyFile)resource).getLocalDir();
logger.debug("from directory: "+originalDir+ "to directory: "+dir);
String name=((MyFile)resource).getName();
REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource();
ObjectId destinationId=null;
logger.debug("copy operation on Mongo backend, parameters: source path: "+source+" destination path: "+destination);
if((source != null) && (!source.isEmpty()) && (destination != null) && (!destination.isEmpty())){
GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(source, remoteResourceIdentifier, true);
if(f != null){
// if it is a copy of a hard link, then retrieve and copy the payload associated with the link
f = mongoPrimaryInstance.retrieveLinkPayload(f);
InputStream is= f.getInputStream();
resource.setInputStream(is);
// check if the destination is a dir or a file and if the destination exists
GridFSDBFile dest = mongoPrimaryInstance.retrieveRemoteDescriptor(destination, remoteResourceIdentifier, false);//gfs.findOne(destination);
// GridFSInputFile destinationFile=mongoPrimaryInstance.createGFSFileObject(is, resource.getWriteConcern(), resource.getReadPreference());//gfs.createFile(is);
ObjectId removedId=null;
if (dest != null){
//overwrite the file
// removedId=mongoPrimaryInstance.checkAndRemove(f, resource);
// the third parameter set to true replaces the file
removedId = mongoPrimaryInstance.removeFile(resource, null, resource.isReplace(), null, dest);
if((remoteResourceIdentifier != null) && ((remoteResourceIdentifier.equals(REMOTE_RESOURCE.ID))) && (removedId != null)){
destinationId = mongoPrimaryInstance.createNewFile(resource, null, dir, name, removedId);
}else{
destinationId = mongoPrimaryInstance.createNewFile(resource, destination, dir , name, removedId);
}
if(logger.isDebugEnabled())
logger.debug("ObjectId: "+destinationId);
mongoPrimaryInstance.close();
}else{
destinationId = mongoPrimaryInstance.createNewFile(resource, destination, dir , name, null);
mongoPrimaryInstance.close();
}
}else{
mongoPrimaryInstance.close();
throw new RemoteBackendException(" the source path is wrong. There isn't a file at "+source);
}
} else throw new RemoteBackendException("Invalid arguments: source "+source+" destination "+destination);
return destinationId.toString();
}
public String safePut(MongoIOManager mongoPrimaryInstance, Object resource, String bucket, String key, boolean replace) throws UnknownHostException{
OperationDefinition op=((MyFile)resource).getOperationDefinition();
REMOTE_RESOURCE remoteResourceIdentifier=((MyFile)resource).getOperation().getRemoteResource();
logger.info("MongoClient put method: "+op.toString());
String dir=((MyFile)resource).getRemoteDir();
String name=((MyFile)resource).getName();
ObjectId id=null;
ObjectId oldId=null;
// id of the remote file if present
GridFSDBFile fold = mongoPrimaryInstance.retrieveRemoteDescriptor(bucket, remoteResourceIdentifier, false);
if(fold != null){
// if a file is present
logger.info("a file is already present at: "+bucket);
// keep old id
oldId=(ObjectId) fold.getId();
logger.info("get old id: "+oldId);
// create new file
id = mongoPrimaryInstance.createNewFile(resource, bucket, dir, name, null);
// remove old file
oldId = mongoPrimaryInstance.removeFile(resource, key, replace, oldId, fold);
// oldId = removeOldMetadataFile(oldId);
// update the id to the new file
id=mongoPrimaryInstance.updateId(id, oldId);
}else{
// create new file
id = mongoPrimaryInstance.createNewFile(resource, bucket, dir, name, oldId);
}
return id.toString();
}
}

@ -0,0 +1,75 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.transport.backend.operation;
import java.io.FileNotFoundException;
import java.io.IOException;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.service.operation.Download;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.gridfs.GridFSDBFile;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public class DownloadOperator extends Download {
final Logger logger=LoggerFactory.getLogger(DownloadOperator.class);
/**
* @param server
* @param user
* @param pwd
* @param bucket
* @param monitor
* @param isChunk
* @param backendType
* @param dbs
*/
public DownloadOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk,
String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
// TODO Auto-generated constructor stub
}
/* (non-Javadoc)
* @see org.gcube.contentmanagement.blobstorage.service.operation.Download#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO)
*/
@Override
public ObjectId execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance) throws IOException {
OperationDefinition op=resource.getOperationDefinition();
logger.info("MongoClient get method: "+op.toString());
mongoPrimaryInstance.getConnectionDB( MongoOperationManager.getPrimaryCollectionName(), true);// getDB(resource);
// GridFS gfs=mongoPrimaryInstance.getGfs(getPrimaryCollectionName(), true);
//if the operation is requested by id, we avoid checking whether the object is available by path
REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource();
logger.info("operation required by "+remoteResourceIdentifier);
GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(getBucket(), remoteResourceIdentifier, false); //previous value was true
ObjectId id=null;
if(f!=null){
id = mongoPrimaryInstance.getRemoteObject(resource, f);
//check if the file is present on another db in the same backend
}else if(mongoSecondaryInstance!=null){
// DB secondaryDb =mongoSecondaryInstance.getConnectionDB(resource.getWriteConcern(), resource.getReadPreference(), getSecondaryCollectionName(), true);// getDB(resource);
// GridFS secondaryGfs = mongoSecondaryInstance.getGfs(getSecondaryCollectionName(), true);
GridFSDBFile secondaryF = mongoSecondaryInstance.retrieveRemoteDescriptor(getRemotePath(), remoteResourceIdentifier, true);
if(secondaryF !=null){
id = mongoSecondaryInstance.getRemoteObject( resource, secondaryF);
}
}else{
mongoPrimaryInstance.close();
throw new FileNotFoundException("REMOTE FILE NOT FOUND: WRONG PATH OR WRONG OBJECT ID");
}
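// note: if a secondary instance is configured but the file is absent there as well,
// a null id is returned instead of a FileNotFoundException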
return id;
}
}

@ -0,0 +1,74 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.transport.backend.operation;
import java.io.IOException;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.service.operation.DuplicateFile;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.gridfs.GridFSDBFile;
import com.mongodb.gridfs.GridFSInputFile;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public class DuplicateOperator extends DuplicateFile {
Logger logger=LoggerFactory.getLogger(DuplicateOperator.class);
/**
* @param server
* @param user
* @param pwd
* @param bucket
* @param monitor
* @param isChunk
* @param backendType
* @param dbs
*/
public DuplicateOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk,
String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
// TODO Auto-generated constructor stub
}
/* (non-Javadoc)
* @see org.gcube.contentmanagement.blobstorage.service.operation.DuplicateFile#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO)
*/
@Override
public String execute(MongoIOManager mongoPrimaryInstance){
String destination=((MyFile)getResource()).getRemotePath()+Costants.DUPLICATE_SUFFIX;
String dir=((MyFile)getResource()).getRemoteDir();
// String name=((MyFile)getResource()).getName();
if((getBucket() != null) && (!getBucket().isEmpty())){
REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource();
GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(getBucket(), remoteResourceIdentifier, true);
GridFSInputFile destinationFile=null;
try {
// GridFSInputFile f2 = mongoPrimaryInstance.createGFSFileObject(f.getFilename());
destinationFile=mongoPrimaryInstance.createGFSFileObject(f.getInputStream(), resource.getWriteConcern(), resource.getReadPreference());//gfs.createFile(is);
mongoPrimaryInstance.setGenericProperties(getResource(), destination, dir,
destinationFile, destination.substring(destination.lastIndexOf(Costants.FILE_SEPARATOR)+1));
} catch (IOException e) {
mongoPrimaryInstance.close();
throw new RemoteBackendException("duplicate operation failed: "+e.getMessage(), e);
}
String destinationId=destinationFile.getId().toString();
destinationFile.save();
if(logger.isDebugEnabled())
logger.debug("ObjectId: "+destinationId);
mongoPrimaryInstance.close();
return destinationId;
} throw new RemoteBackendException("argument cannot be null for duplicate operation");
}
}

@ -0,0 +1,131 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.transport.backend.operation;
import java.net.UnknownHostException;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.service.operation.Link;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.service.operation.Operation;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.gridfs.GridFSDBFile;
import com.mongodb.gridfs.GridFSInputFile;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public class LinkOperator extends Link {
Logger logger=LoggerFactory.getLogger(LinkOperator.class);
/**
* @param server
* @param user
* @param pwd
* @param bucket
* @param monitor
* @param isChunk
* @param backendType
* @param dbs
*/
public LinkOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk,
String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
// TODO Auto-generated constructor stub
}
/* (non-Javadoc)
* @see org.gcube.contentmanagement.blobstorage.service.operation.Link#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO, org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO, org.gcube.contentmanagement.blobstorage.resource.MyFile, java.lang.String)
*/
@Override
public String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException {
boolean replace=true;
String source=sourcePath;
String destination=destinationPath;
String dir=resource.getRemoteDir();
String name=resource.getName();
REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource();
String destinationId=null;
String sourceId=null;
logger.debug("link operation on Mongo backend, parameters: source path: "+source+" destination path: "+destination);
if((source != null) && (!source.isEmpty()) && (destination != null) && (!destination.isEmpty())){
GridFSDBFile f = mongoPrimaryInstance.retrieveRemoteDescriptor(source, remoteResourceIdentifier, false);
if(f != null){
int count=1;
if((f.containsField(Costants.COUNT_IDENTIFIER)) && ((f.get(Costants.COUNT_IDENTIFIER) != null))){
count=(Integer)f.get(Costants.COUNT_IDENTIFIER);
count++;
}
f.put(Costants.COUNT_IDENTIFIER, count);
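// e.g. the first link on a plain file stores count 1; every further link increments it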
mongoPrimaryInstance.updateCommonFields(f, resource, OPERATION.LINK);
sourceId=f.getId().toString();
f.save();
}else{
mongoPrimaryInstance.close();
throw new IllegalArgumentException(" source remote file not found at: "+source);
}
// check if the destination file exists
// GridFSDBFile fold = gfs.findOne(destinationPath);
GridFSDBFile fold = mongoPrimaryInstance.retrieveRemoteDescriptor(destinationPath, remoteResourceIdentifier, false);
if(fold != null){
String oldir=(String)fold.get("dir");
if(logger.isDebugEnabled())
logger.debug("old dir found "+oldir);
if((oldir.equalsIgnoreCase(((MyFile)resource).getRemoteDir()))){
ObjectId oldId=(ObjectId) fold.getId();
if(!replace){
return oldId.toString();
}else{
if(logger.isDebugEnabled())
logger.debug("remove id: "+oldId);
String lock=(String)fold.get("lock");
//check if the old file is locked
if((lock !=null) && (!lock.isEmpty()) && (!mongoPrimaryInstance.isTTLUnlocked(fold))){
mongoPrimaryInstance.close();
throw new IllegalAccessError("The file is locked");
}else{
//remove old file
mongoPrimaryInstance.removeGFSFile(fold, oldId);
}
}
}
}
// create destination file
GridFSInputFile destinationFile=null;
//create new file
byte[] data=new byte[1];
if (resource.getGcubeMemoryType()== MemoryType.VOLATILE){
destinationFile = mongoPrimaryInstance.createGFSFileObject(data);//gfs.createFile(data);
}else{
destinationFile = mongoPrimaryInstance.createGFSFileObject(data, resource.getWriteConcern(), resource.getReadPreference());//gfs.createFile(data);
}
if(logger.isDebugEnabled())
logger.debug("Directory: "+dir);
mongoPrimaryInstance.setGenericProperties(resource, destinationPath, dir,
destinationFile, name);
destinationFile.put(Costants.LINK_IDENTIFIER, sourceId);
destinationId=destinationFile.getId().toString();
if(logger.isDebugEnabled())
logger.debug("ObjectId: "+destinationId);
mongoPrimaryInstance.buildDirTree(mongoPrimaryInstance.getMetaDataCollection(null), dir);
destinationFile.save();
mongoPrimaryInstance.close();
}else{
mongoPrimaryInstance.close();
throw new IllegalArgumentException(" invalid argument: source: "+source+" dest: "+destination+" the values must be not null and not empty");
}
return destinationId;
}
}

@ -0,0 +1,86 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.transport.backend.operation;
import java.io.FileNotFoundException;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.service.operation.Download;
import org.gcube.contentmanagement.blobstorage.service.operation.Lock;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.gridfs.GridFSDBFile;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public class LockOperator extends Lock {
final Logger logger=LoggerFactory.getLogger(LockOperator.class);
/**
* @param server
* @param user
* @param pwd
* @param bucket
* @param monitor
* @param isChunk
* @param backendType
* @param dbs
*/
public LockOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk,
String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
// TODO Auto-generated constructor stub
}
/* (non-Javadoc)
* @see org.gcube.contentmanagement.blobstorage.service.operation.Lock#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO, org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO)
*/
@Override
public String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String serverLocation) throws Exception {
OperationDefinition op=resource.getOperationDefinition();
REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource();
// if((resource.getLocalPath()!= null) && (!resource.getLocalPath().isEmpty())){
// resource.setOperation(OPERATION.DOWNLOAD);
// Download download= new DownloadOperator(getServer(), getUser(), getPassword(), getBucket(), getMonitor(), isChunk(), getBackendType(), getDbNames());
// setDownload(download);
// get(getDownload(), resource, true);
// resource.setOperation(op);
// mongoPrimaryInstance.close();
// mongoPrimaryInstance=null;
// }
logger.info("MongoClient lock method: "+op.toString());
String key=null;
if(logger.isDebugEnabled())
logger.debug("MongoDB - pathServer: "+resource.getAbsoluteRemotePath());
GridFSDBFile f=mongoPrimaryInstance.retrieveRemoteDescriptor(resource.getAbsoluteRemotePath(), remoteResourceIdentifier, true);
if(f!=null){
//timestamp is used to compare against the ttl of a file lock.
String lock=(String)f.get("lock");
if((lock==null || lock.isEmpty()) || (mongoPrimaryInstance.isTTLUnlocked(f))){
key=f.getId()+""+System.currentTimeMillis();
f.put("lock", key);
f.put("timestamp", System.currentTimeMillis());
mongoPrimaryInstance.updateCommonFields(f, resource, OPERATION.LOCK);
f.save();
}else{
mongoPrimaryInstance.checkTTL(f);
}
}else{
mongoPrimaryInstance.close();
throw new FileNotFoundException("REMOTE FILE NOT FOUND: WRONG PATH OR WRONG OBJECT ID");
}
return key;
}
}

@ -0,0 +1,110 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.transport.backend.operation;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.service.operation.MoveDir;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.BasicDBObject;
import com.mongodb.DB;
import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public class MoveDirOperator extends MoveDir {
Logger logger=LoggerFactory.getLogger(MoveDirOperator.class);
/**
* @param server
* @param user
* @param pwd
* @param bucket
* @param monitor
* @param isChunk
* @param backendType
* @param dbs
*/
public MoveDirOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk,
String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
// TODO Auto-generated constructor stub
}
/* (non-Javadoc)
* @see org.gcube.contentmanagement.blobstorage.service.operation.MoveDir#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO, org.gcube.contentmanagement.blobstorage.resource.MyFile, java.lang.String, java.lang.String)
*/
@Override
public List<String> execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath,
String destinationPath, MemoryType memoryType) throws UnknownHostException {
String source=sourcePath;
source = appendFileSeparator(source);
String parentFolder=extractParent(source);
String destination=destinationPath;
destination = appendFileSeparator(destination);
List<String> idList=null;
logger.debug("moveDir operation on Mongo backend, parameters: source path: "+source+" destination path: "+destination);
if((source != null) && (!source.isEmpty()) && (destination != null) && (!destination.isEmpty())){
DB db=mongoPrimaryInstance.getConnectionDB(MongoOperationManager.getPrimaryCollectionName(), true);
// GridFS meta = new GridFS(db);
DBCollection meta=mongoPrimaryInstance.getMetaDataCollection(db);
// create query for dir field
BasicDBObject query = new BasicDBObject();
query.put( "dir" , new BasicDBObject("$regex", source+"*"));
DBCursor folderCursor = meta.find(query);
if((folderCursor !=null)){
idList=new ArrayList<String>();
while(folderCursor.hasNext()){//GridFSDBFile f : folder){
DBObject f=folderCursor.next();
if(f.get("type").equals("file")){
String oldFilename=(String)f.get("filename");
String oldDir=(String)f.get("dir");
int relativePathIndex=source.length();
String relativeDirTree=parentFolder+Costants.FILE_SEPARATOR+oldDir.substring(relativePathIndex);
String relativePath=parentFolder+Costants.FILE_SEPARATOR+oldFilename.substring(relativePathIndex);
String filename=destination+relativePath;
String dir=destination+relativeDirTree;
f.put("filename", filename);
f.put("dir", dir);
mongoPrimaryInstance.updateCommonFields(f, resource, OPERATION.MOVE_DIR);
String id=f.get("_id").toString();
idList.add(id);
query = new BasicDBObject();
query.put( "_id" , new ObjectId(id));
if(!(memoryType== MemoryType.VOLATILE))
meta.update(query, f, true, false, Costants.DEFAULT_WRITE_TYPE);
else
meta.update(query, f, true, false);
// meta.update(query, f, true, true);
mongoPrimaryInstance.buildDirTree(meta, dir);
}
}
}
}else{
mongoPrimaryInstance.close();
throw new IllegalArgumentException("parameters not completed, source: "+source+", destination: "+destination);
}
mongoPrimaryInstance.close();
return idList;
}
}

@ -0,0 +1,201 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.transport.backend.operation;
import java.net.InetAddress;
import java.net.UnknownHostException;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.service.operation.Move;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public class MoveOperator extends Move {
Logger logger=LoggerFactory.getLogger(MoveOperator.class);
/**
* @param server
* @param user
* @param pwd
* @param bucket
* @param monitor
* @param isChunk
* @param backendType
* @param dbs
*/
public MoveOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk,
String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
// TODO Auto-generated constructor stub
}
/* (non-Javadoc)
* @see org.gcube.contentmanagement.blobstorage.service.operation.Move#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO)
*/
@Override
// public String execute(MongoIO mongoPrimaryInstance, MemoryType memoryType) throws UnknownHostException {
public String execute(MongoIOManager mongoPrimaryInstance, MemoryType memoryType, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException {
String source=sourcePath;
String destination=destinationPath;
resource.setLocalPath(sourcePath);
String dir=((MyFile)resource).getRemoteDir();
String name=((MyFile)resource).getName();
String destinationId=null;
String sourceId=null;
logger.info("move operation on Mongo backend, parameters: source path: "+source+" destination path: "+destination);
logger.debug("MOVE OPERATION operation defined: "+resource.getOperationDefinition().getOperation());
if((source != null) && (!source.isEmpty()) && (destination != null) && (!destination.isEmpty())){
BasicDBObject sourcePathMetaCollection = mongoPrimaryInstance.findMetaCollectionObject(source);
//check if the file exists in the destination path; if it exists, it will be deleted
if(sourcePathMetaCollection != null){
sourceId=sourcePathMetaCollection.get("_id").toString();
sourcePathMetaCollection=setCommonFields(sourcePathMetaCollection, resource, OPERATION.MOVE);
// updateCommonFields(sourcePathMetaCollection, resource);
BasicDBObject queryDestPath = new BasicDBObject();
queryDestPath.put( "filename" , destinationPath);
DBCollection metaCollectionInstance=null;
if(!(memoryType== MemoryType.VOLATILE))
metaCollectionInstance=mongoPrimaryInstance.getMetaDataCollection(mongoPrimaryInstance.getConnectionDB(MongoOperationManager.getPrimaryCollectionName(), true));
else
metaCollectionInstance=mongoPrimaryInstance.getMetaDataCollection(mongoPrimaryInstance.getConnectionDB(MongoOperationManager.getPrimaryCollectionName(), false));
DBObject destPathMetaCollection= mongoPrimaryInstance.executeQuery(metaCollectionInstance, queryDestPath);
// retrieve original object
BasicDBObject querySourcePath = new BasicDBObject();
querySourcePath.put( "filename" , sourcePath);
//update common fields
BasicDBObject updateQuery= new BasicDBObject();
updateQuery.put("$set", sourcePathMetaCollection);
if(!(memoryType== MemoryType.VOLATILE))
metaCollectionInstance.update(querySourcePath, updateQuery, false, true, Costants.DEFAULT_WRITE_TYPE);
else
metaCollectionInstance.update(querySourcePath, updateQuery, false, true);
if(destPathMetaCollection != null)
destinationId=destPathMetaCollection.get("_id").toString();
if((destPathMetaCollection!=null) && (destinationId != null) && (!destinationId.equals(sourceId))){
mongoPrimaryInstance.printObject(destPathMetaCollection);
// a file already exists at the destination: keep its id (this is a replace)
logger.info("file in destination path already present with id : "+destinationId);
//remove old one
GridFS gfs = mongoPrimaryInstance.getGfs(MongoOperationManager.getPrimaryCollectionName(), true);
GridFSDBFile fNewFSPath = gfs.findOne(queryDestPath);
mongoPrimaryInstance.checkAndRemove(fNewFSPath, resource);
// print
logger.debug("Changing filename metadata from:"+sourcePathMetaCollection.get("filename")+"\n to: "+destinationPath);
logger.debug("original objects:\n ");
logger.debug("source object: ");
mongoPrimaryInstance.printObject(sourcePathMetaCollection);
logger.info("destination object: ");
mongoPrimaryInstance.printObject(destPathMetaCollection);
// update fields
mongoPrimaryInstance.buildDirTree(mongoPrimaryInstance.getMetaDataCollection(mongoPrimaryInstance.getConnectionDB( MongoOperationManager.getPrimaryCollectionName(), true)), dir);
sourcePathMetaCollection= new BasicDBObject();
sourcePathMetaCollection.put("$set", new BasicDBObject().append("dir", dir).append("filename", destinationPath).append("name", name).append("owner", ((MyFile)resource).getOwner()));
logger.info("new object merged ");
mongoPrimaryInstance.printObject(sourcePathMetaCollection);
//applies the update
if(!(memoryType== MemoryType.VOLATILE))
metaCollectionInstance.update(querySourcePath, sourcePathMetaCollection, false, true, Costants.DEFAULT_WRITE_TYPE);
else
metaCollectionInstance.update(querySourcePath, sourcePathMetaCollection, false, true);
logger.info("update metadata done ");
logger.info("check update ");
DBObject newDestPathMetaCollection= mongoPrimaryInstance.executeQuery(metaCollectionInstance, queryDestPath);
mongoPrimaryInstance.printObject(newDestPathMetaCollection);
}else if((destinationId!= null) && (destinationId.equals(sourceId))){
logger.warn("the destination id and the source id are the same id. skip operation. ");
}else{
queryDestPath = new BasicDBObject();
queryDestPath.put( "dir" , destination );
DBObject folder = metaCollectionInstance.findOne(queryDestPath);//= gfs.find(query);
// if the destination is an existing folder
if((folder != null)){
destination=appendFileSeparator(destination);
sourcePathMetaCollection=mongoPrimaryInstance.setGenericMoveProperties(resource, destination+name, destination, name, sourcePathMetaCollection);
destinationId=sourcePathMetaCollection.get("_id").toString();
mongoPrimaryInstance.buildDirTree(metaCollectionInstance, destination);
}else{
// a destination path ending with a separator denotes a directory, otherwise a file
// case: the destination is a new folder
if(destination.lastIndexOf(Costants.FILE_SEPARATOR) == destination.length()-1){
sourcePathMetaCollection=mongoPrimaryInstance.setGenericMoveProperties(resource, destination+name, destination, name, sourcePathMetaCollection);
destinationId=sourcePathMetaCollection.get("_id").toString();
mongoPrimaryInstance.buildDirTree(metaCollectionInstance, destination);
}else{
String newName=destination.substring(destination.lastIndexOf(Costants.FILE_SEPARATOR)+1);
sourcePathMetaCollection=mongoPrimaryInstance.setGenericMoveProperties(resource, destination, dir, newName, sourcePathMetaCollection);
destinationId=sourcePathMetaCollection.get("_id").toString();
mongoPrimaryInstance.buildDirTree(metaCollectionInstance, dir);
}
queryDestPath = new BasicDBObject();
queryDestPath.put( "filename" , sourcePath);
//update common fields
updateQuery= new BasicDBObject();
updateQuery.put("$set", sourcePathMetaCollection);
if(!(memoryType== MemoryType.VOLATILE))
metaCollectionInstance.update(queryDestPath, updateQuery, true, true, Costants.DEFAULT_WRITE_TYPE);
else
metaCollectionInstance.update(queryDestPath, updateQuery, true, true);
}
}
mongoPrimaryInstance.close();
return destinationId;
}else{
mongoPrimaryInstance.close();
throw new RemoteBackendException(" the source path is wrong. There isn't a file at this path: "+source);
}
}else{
mongoPrimaryInstance.close();
throw new IllegalArgumentException("parameters not completed, source: "+source+", destination: "+destination);
}
}
private BasicDBObject setCommonFields(BasicDBObject f, MyFile resource, OPERATION op) {
String owner=resource.getOwner();
if(op == null){
op=resource.getOperationDefinition().getOperation();
}
logger.info("set last operation: "+op);
String from=null;
if(op.toString().equalsIgnoreCase(OPERATION.MOVE.toString())){
from=resource.getLocalPath();
}
String address=null;
try {
address=InetAddress.getLocalHost().getCanonicalHostName();
f.put("callerIP", address);
} catch (UnknownHostException e) {
logger.warn("unable to resolve the local host address", e);
}
f.append("lastAccess", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z")).append("lastUser", owner).append("lastOperation", op.toString()).append("callerIP", address);
if(from != null)
f.append("from", from);
return f;
}
}
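/**
 * Usage sketch only, not part of the original commit: shows how a MoveOperator
 * might be driven. Host name, credentials, bucket and paths below are
 * hypothetical; the Monitor and MongoIOManager collaborators are assumed to be
 * supplied by the surrounding service code.
 */
class MoveOperatorUsageSketch {
	static String moveExample(MongoIOManager io, MyFile file) throws UnknownHostException {
		MoveOperator move = new MoveOperator(
				new String[]{"mongo1.example.org"},   // assumed backend host
				"user", "pwd", "bucketName",          // assumed credentials and bucket
				null /* Monitor, assumed injected */, false, "MongoDB",
				new String[]{"remotefs"});
		// moves the remote file and returns the id of the moved descriptor
		return move.execute(io, file.getGcubeMemoryType(), file, "/old/path.txt", "/new/path.txt");
	}
}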

@ -0,0 +1,318 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.transport.backend.operation;
import java.io.InputStream;
import java.net.UnknownHostException;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.LOCAL_RESOURCE;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.service.operation.Operation;
import org.gcube.contentmanagement.blobstorage.service.operation.SoftCopy;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.DateUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.gridfs.GridFSDBFile;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public class SoftCopyOperator extends SoftCopy {
Logger logger=LoggerFactory.getLogger(SoftCopyOperator.class);
private MemoryType memoryType;
private MongoIOManager mongoPrimaryInstance;
private MyFile resource;
/**
* @param server
* @param user
* @param pwd
* @param bucket
* @param monitor
* @param isChunk
* @param backendType
* @param dbs
*/
public SoftCopyOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk,
String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
@Override
public String execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath, String destinationPath)
throws UnknownHostException {
REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource();
LOCAL_RESOURCE localResourceIdentifier=resource.getOperation().getLocalResource();
String source=null;
if(localResourceIdentifier.equals(LOCAL_RESOURCE.ID))
source=resource.getId();
else
source=sourcePath;
String destination=null;
if(remoteResourceIdentifier.equals(REMOTE_RESOURCE.ID))
destination=resource.getId();
else
destination=destinationPath;
setMemoryType(resource.getGcubeMemoryType());
// keep a reference to the resource: addToDuplicateMap reads it back via getResource()
setResource(resource);
setMongoPrimaryInstance(mongoPrimaryInstance);
ObjectId mapId=null;
GridFSDBFile destObject=null;
logger.debug("softCopy operation on Mongo backend, parameters: source path: "+source+" destination path: "+destination);
if((source != null) && (!source.isEmpty())){
GridFSDBFile sourceObject = mongoPrimaryInstance.retrieveRemoteDescriptor(source, remoteResourceIdentifier, true);
if(sourceObject != null){
// if the descriptor contains a link field, resolve it to the underlying payload
sourceObject = mongoPrimaryInstance.retrieveLinkPayload(sourceObject);
ObjectId sourceId=(ObjectId)sourceObject.getId();
InputStream is= sourceObject.getInputStream();
resource.setInputStream(is);
GridFSDBFile dest = null;
if((destination == null) || (destination.isEmpty())){
// if the destination param is null, the destination object will be filled with values extracted from sourceObject
if(sourceId==null) throw new RemoteBackendException("source object not found: "+source);
destination = fillGenericDestinationFields(resource, sourceId);
logger.warn("SoftCopy without destination parameter. The operation will be executed with the following destination path "+destination);
}else{
// check if the destination is a dir or a file and if the destination exist
dest = mongoPrimaryInstance.retrieveRemoteDescriptor(destination, remoteResourceIdentifier, false);//gfs.findOne(destination);
}
ObjectId removedId=null;
// if the destination location is not empty
if (dest != null){
// if the replace flag is set, remove the destination file and keep its id; otherwise return the existing remote id
if(resource.isReplace()){
removedId = mongoPrimaryInstance.removeFile(resource, null, resource.isReplace(), null, dest);
}else{
return dest.getId().toString();
}
}
// get metacollection instance
DBCollection metaCollectionInstance = getMetaCollection();
String md5=sourceObject.getMD5();
// check if the payload is already present on backend
ObjectId md5Id=getDuplicatesMap(md5);
// check if the source object is already a map
if(isMap(sourceObject)){
mapId=sourceId;
logger.debug("the sourceObject with the following id: "+mapId+" is already a map");
// then it's needed to add only the destObject to the map
//first: create link object to destination place
DBObject newObject=createNewLinkObject(resource, sourceObject, destination, metaCollectionInstance, md5, mapId, removedId);
destObject = mongoPrimaryInstance.retrieveRemoteDescriptor(destination, remoteResourceIdentifier, true);
// second: add the new object to the map
mapId = addToDuplicateMap(metaCollectionInstance, mapId, destObject);
// if the payload is already present on backend
}else if(md5Id!=null){
mapId=md5Id;
logger.debug("retrieved md5 on backend with the following id: "+mapId);
mapId = addToDuplicateMap(metaCollectionInstance, mapId, sourceObject);
DBObject newObject=createNewLinkObject(resource, sourceObject, destination, metaCollectionInstance, md5, mapId, removedId);
destObject = mongoPrimaryInstance.retrieveRemoteDescriptor(destination, remoteResourceIdentifier, true);
mapId = addToDuplicateMap(metaCollectionInstance, mapId, destObject);
}else{
// no map present no md5 present
mapId = createNewDuplicatesMap(metaCollectionInstance, resource, sourceObject, destination, sourceId);
mapId = addToDuplicateMap(metaCollectionInstance, mapId, sourceObject);
DBObject newObject=createNewLinkObject(resource, sourceObject,destination, metaCollectionInstance, md5, mapId, removedId);
destObject = mongoPrimaryInstance.retrieveRemoteDescriptor(destination, remoteResourceIdentifier, true);
mapId = addToDuplicateMap(metaCollectionInstance, mapId, destObject);
}
if(logger.isDebugEnabled())
logger.debug("mapId created/updated: "+mapId);
mongoPrimaryInstance.close();
}else{
mongoPrimaryInstance.close();
throw new RemoteBackendException(" the source path is wrong. There isn't a file at "+source);
}
}else throw new RemoteBackendException("Invalid arguments: source "+source+" destination "+destination);
return destObject.getId().toString();
}
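/*
 * Bookkeeping sketch inferred from the branches above (field names taken from
 * createNewObjectMap and createNewLinkObject below):
 *
 *   map document:  { "_id": <mapId>, "type": "map",  "count": <n>,
 *                    "md5": <md5>, "length": ..., "chunkSize": ... }
 *   link document: { "_id": <id>,   "type": "file", "filename": <path>,
 *                    "link": <mapId as string>, "md5": <md5>, ... }
 *
 * Each soft copy adds one link document pointing at the shared map and
 * increments the map's "count"; the GridFS chunks are re-attached to the
 * map's _id via updateChunksCollection.
 */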
private String fillGenericDestinationFields(MyFile resource, ObjectId sourceId) {
String destination=resource.getRootPath()+sourceId;
resource.setName(sourceId.toString());
resource.setRemoteDir(resource.getRootPath());
return destination;
}
/**
 * Creates a new duplicates map for a payload shared by several logical files
 * @param metaCollectionInstance metadata collection the map is inserted into
 * @param resource the resource being copied
 * @param sourceObject descriptor of the source payload
 * @param bucket destination path
 * @param sourceId id of the source object whose chunks will be attached to the map
 * @return id of the new map
 * @throws UnknownHostException
 */
private ObjectId createNewDuplicatesMap(DBCollection metaCollectionInstance, Object resource, GridFSDBFile sourceObject, String bucket, ObjectId sourceId) throws UnknownHostException {
ObjectId id = null;
String dir= ((MyFile)resource).getRemoteDir();
// create new dir (is it really needed in case of map object?)
if((dir !=null && !dir.isEmpty()) && (bucket !=null && !bucket.isEmpty())){
getMongoPrimaryInstance().buildDirTree(getMongoPrimaryInstance().getMetaDataCollection(null), dir);
}
// create new map object
id= createNewObjectMap(metaCollectionInstance, (MyFile)resource, sourceObject, sourceId);
return id;
}
private ObjectId createNewObjectMap(DBCollection metaCollectionInstance, MyFile resource, GridFSDBFile source, ObjectId sourceId) throws UnknownHostException {
String md5=source.getMD5();
// set type of object
DBObject document=new BasicDBObject("type", "map");
// initialize count field to 0
document.put("count", 0);
ObjectId id=new ObjectId();
document.put("_id", id);
logger.debug("generated id for new map"+id);
document=fillCommonfields(document, resource, source, metaCollectionInstance, md5);
// update chunks collection
getMongoPrimaryInstance().updateChunksCollection(sourceId, id);
return id;
}
private DBObject createNewLinkObject(MyFile resource, GridFSDBFile sourceObject, String destination, DBCollection metaCollectionInstance, String md5, ObjectId mapId, ObjectId newId){
DBObject document=new BasicDBObject("type", "file");
document.put("filename", destination);
document.put("name", resource.getName());
document.put("dir", resource.getRemoteDir());
document.put("owner", resource.getOwner());
document.put(Costants.LINK_IDENTIFIER, mapId.toString());
ObjectId id=null;
if(newId == null){
id=new ObjectId();
logger.debug("generated id for new object link"+id);
}else{
id=newId;
logger.debug("restored id for new object link"+id);
}
document.put("_id", id);
return fillCommonfields(document, resource, sourceObject, metaCollectionInstance, md5);
}
private DBObject fillCommonfields(DBObject document, MyFile resource, GridFSDBFile sourceObject, DBCollection metaCollectionInstance, String md5) {
document.put("mimetype", ((MyFile)resource).getMimeType());
document.put("creationTime", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z"));
document.put("md5", md5);
document.put("length", sourceObject.getLength());
// chunkSize is inherited from the original object
document.put("chunkSize", sourceObject.getChunkSize());
metaCollectionInstance.insert(document);
return document;
}
private DBCollection getMetaCollection() throws UnknownHostException {
DBCollection metaCollectionInstance=null;
if(!(getMemoryType() == MemoryType.VOLATILE))
metaCollectionInstance=mongoPrimaryInstance.getMetaDataCollection(mongoPrimaryInstance.getConnectionDB(MongoOperationManager.getPrimaryCollectionName(), true));
else
metaCollectionInstance=mongoPrimaryInstance.getMetaDataCollection(mongoPrimaryInstance.getConnectionDB(MongoOperationManager.getPrimaryCollectionName(), false));
return metaCollectionInstance;
}
private ObjectId addToDuplicateMap(DBCollection metaCollectionInstance, ObjectId mapId, GridFSDBFile f) throws UnknownHostException {
f.put(Costants.LINK_IDENTIFIER, mapId.toString());
mongoPrimaryInstance.updateCommonFields(f, getResource(), OPERATION.SOFT_COPY);
f.save();
incrementCountField(metaCollectionInstance, mapId);
return mapId;
}
private void incrementCountField(DBCollection metaCollectionInstance, ObjectId mapId) throws UnknownHostException {
logger.info("increment count field on"+mapId+ " object map");
BasicDBObject searchQuery= new BasicDBObject();
searchQuery.put("_id" ,mapId);
DBObject mapObject=mongoPrimaryInstance.findCollectionObject(metaCollectionInstance, searchQuery);
// BasicDBObject updateObject= new BasicDBObject().append("$inc",new BasicDBObject().append("count", 1));;
int count=(int)mapObject.get("count");
count++;
mapObject.put("count", count);
// metaCollectionInstance.update(mapObject, updateObject);
metaCollectionInstance.save(mapObject);
}
private ObjectId getDuplicatesMap(String md5){
ObjectId id= checkMd5(md5);
return id;
}
/**
* @param sourceObject
* @return
*/
private boolean isMap(GridFSDBFile sourceObject) {
Object type=sourceObject.get("type");
return (type != null) && ("map".equals(type.toString()));
}
/**
* Check if the backend already has the payload
* @param md5 string of the file
* @return the ObjectID of the md5 file found on the backend, else null
*/
private ObjectId checkMd5(String md5) {
// not implemented yet: always returns null, so the md5 deduplication branch in execute is never taken
return null;
}
public MemoryType getMemoryType() {
return memoryType;
}
public void setMemoryType(MemoryType memoryType) {
this.memoryType = memoryType;
}
public MongoIOManager getMongoPrimaryInstance() {
return mongoPrimaryInstance;
}
public void setMongoPrimaryInstance(MongoIOManager mongoPrimaryInstance) {
this.mongoPrimaryInstance = mongoPrimaryInstance;
}
public MyFile getResource() {
return resource;
}
public void setResource(MyFile resource) {
this.resource = resource;
}
}
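/**
 * Usage sketch only, not part of the original commit: driving a soft copy.
 * Host, credentials and paths are hypothetical; the MongoIOManager and MyFile
 * instances are assumed to come from the service layer.
 */
class SoftCopyUsageSketch {
	static String softCopyExample(MongoIOManager io, MyFile file) throws UnknownHostException {
		SoftCopyOperator copy = new SoftCopyOperator(
				new String[]{"mongo1.example.org"},   // assumed backend host
				"user", "pwd", "bucketName",          // assumed credentials and bucket
				null /* Monitor, assumed injected */, false, "MongoDB",
				new String[]{"remotefs"});
		// registers a second logical path for the same payload and returns the
		// id of the descriptor now reachable at the destination
		return copy.execute(io, file, "/old/path.txt", "/new/path.txt");
	}
}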

@ -0,0 +1,101 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.transport.backend.operation;
import java.io.FileNotFoundException;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.service.operation.Unlock;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.gridfs.GridFSDBFile;
import com.mongodb.gridfs.GridFSFile;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public class UnlockOperator extends Unlock {
Logger logger= LoggerFactory.getLogger(UnlockOperator.class);
/**
* @param server
* @param user
* @param pwd
* @param bucket
* @param monitor
* @param isChunk
* @param backendType
* @param dbs
*/
public UnlockOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk,
String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
/* (non-Javadoc)
* @see org.gcube.contentmanagement.blobstorage.service.operation.Unlock#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO, org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO)
*/
@Override
public String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String bucket, String key4unlock) throws Exception {
String id=null; // remains null: unlock does not produce a new identifier
OperationDefinition op=resource.getOperationDefinition();
REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource();
logger.info("MongoClient unlock method: "+op.toString());
String dir=resource.getRemoteDir();
String name=resource.getName();
String path=getBucket();
if(logger.isDebugEnabled())
logger.debug("DIR: "+dir+" name: "+name+" fullPath "+path+" bucket: "+bucket);
GridFSDBFile f=mongoPrimaryInstance.retrieveRemoteDescriptor(path, remoteResourceIdentifier, true);
if(f != null){
String oldDir=(String)f.get("dir");
if(logger.isDebugEnabled())
logger.debug("old dir found "+oldDir);
if((resource.getRemoteDir()==null) || ((oldDir != null) && (oldDir.equalsIgnoreCase(resource.getRemoteDir())))){
String lock=(String)f.get("lock");
//check whether the old file is currently locked
if((lock !=null) && (!lock.isEmpty())){
if(lock.equalsIgnoreCase(key4unlock)){
f.put("lock", null);
f.put("timestamp", null);
mongoPrimaryInstance.updateCommonFields(f, resource, OPERATION.UNLOCK);
f.save();
}else{
mongoPrimaryInstance.close();
throw new IllegalAccessError("bad key for unlock");
}
}else{
mongoPrimaryInstance.updateCommonFields(f, resource, OPERATION.UNLOCK);
f.save();
}
}else{
mongoPrimaryInstance.close();
throw new FileNotFoundException(path);
}
}else{
mongoPrimaryInstance.close();
throw new FileNotFoundException(path);
}
return id;
}
}
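/**
 * Usage sketch only, not part of the original commit: releasing a lock that
 * was previously acquired on a remote file. The key must match the value
 * stored in the descriptor's "lock" field; all concrete values are hypothetical.
 */
class UnlockUsageSketch {
	static void unlockExample(MongoIOManager primary, MongoIOManager secondary,
			MyFile file, String lockKey) throws Exception {
		UnlockOperator unlock = new UnlockOperator(
				new String[]{"mongo1.example.org"},   // assumed backend host
				"user", "pwd", "/locked/path.txt",    // bucket doubles as the remote path here
				null /* Monitor, assumed injected */, false, "MongoDB",
				new String[]{"remotefs"});
		unlock.execute(primary, secondary, file, "/locked/path.txt", lockKey);
	}
}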

@ -0,0 +1,112 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.transport.backend.operation;
import java.io.IOException;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.service.operation.Upload;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.gridfs.GridFSDBFile;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public class UploadOperator extends Upload {
Logger logger= LoggerFactory.getLogger(UploadOperator.class);
/**
* @param server
* @param user
* @param pwd
* @param bucket
* @param monitor
* @param isChunk
* @param bck
* @param dbs
*/
public UploadOperator(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk,
String bck, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, bck, dbs);
}
/* (non-Javadoc)
* @see org.gcube.contentmanagement.blobstorage.service.operation.Upload#execute(org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO, org.gcube.contentmanagement.blobstorage.transport.backend.MongoIO)
*/
@Override
public String execute(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance, MyFile resource, String bucket, boolean replace) throws IOException {
OperationDefinition op=resource.getOperationDefinition();
REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource();
logger.info("MongoClient put method: "+op.toString());
String dir=resource.getRemoteDir();
String name=resource.getName();
Object id=null;
ObjectId oldId=null;
// id of the remote file if present
GridFSDBFile fold = mongoPrimaryInstance.retrieveRemoteDescriptor(bucket, remoteResourceIdentifier, false);
if(fold != null){
// if a file is present
logger.info("a file is already present at: "+getBucket());
// keep old id
oldId=(ObjectId) fold.getId();
logger.info("get old id: "+oldId);
// remove old file
oldId = mongoPrimaryInstance.removeFile(resource, bucket, replace, oldId, fold);
// without the replace option, keep the existing file and return its id
if(!isReplaceOption()){
return oldId.toString();
}
}
// create new file
logger.info("create new file "+bucket);
if((remoteResourceIdentifier != null) && ((remoteResourceIdentifier.equals(REMOTE_RESOURCE.ID))) && (ObjectId.isValid(getBucket()))){
id = mongoPrimaryInstance.createNewFile(resource, null, dir, name, new ObjectId(getBucket()));
}else{
id = mongoPrimaryInstance.createNewFile(resource, getBucket(), dir , name, oldId);
}
return id.toString();
}
public String executeSafeMode(MongoIOManager mongoPrimaryInstance, MongoIOManager mongoSecondaryInstance) throws IOException {
OperationDefinition op=resource.getOperationDefinition();
REMOTE_RESOURCE remoteResourceIdentifier=resource.getOperation().getRemoteResource();
logger.info("MongoClient put method: "+op.toString());
String dir=resource.getRemoteDir();
String name=resource.getName();
ObjectId id=null;
ObjectId oldId=null;
// id of the remote file if present
GridFSDBFile fold = mongoPrimaryInstance.retrieveRemoteDescriptor(getBucket(), remoteResourceIdentifier, false);
if(fold != null){
// if a file is present
logger.info("a file is already present at: "+getBucket());
// keep old id
oldId=(ObjectId) fold.getId();
logger.info("get old id: "+oldId);
// create new file
id = mongoPrimaryInstance.createNewFile(resource, getBucket(), dir, name, null);
// remove old file
oldId = mongoPrimaryInstance.removeFile(resource, getBucket(), isReplaceOption(), oldId, fold);
// update the id to the new file
id=mongoPrimaryInstance.updateId(id, oldId);
}else{
// create new file
id = mongoPrimaryInstance.createNewFile(resource, getBucket(), dir, name, oldId);
}
return id.toString();
}
}
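/**
 * Usage sketch only, not part of the original commit: uploading a file and
 * replacing any remote file already stored at the same path. All concrete
 * values are hypothetical; the input stream is expected to be set on the
 * MyFile resource beforehand.
 */
class UploadUsageSketch {
	static String uploadExample(MongoIOManager primary, MongoIOManager secondary,
			MyFile file) throws IOException {
		UploadOperator upload = new UploadOperator(
				new String[]{"mongo1.example.org"},   // assumed backend host
				"user", "pwd", "/remote/path.txt",    // bucket doubles as the remote path here
				null /* Monitor, assumed injected */, false, "MongoDB",
				new String[]{"remotefs"});
		// replace=true: a file already present at the path is removed first
		return upload.execute(primary, secondary, file, "/remote/path.txt", true);
	}
}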

@ -0,0 +1,88 @@
/**
*
*/
package org.gcube.contentmanagement.blobstorage.transport.backend.util;
import com.mongodb.ReadPreference;
import com.mongodb.WriteConcern;
/**
* @author Roberto Cirillo (ISTI-CNR) 2018
*
*/
public final class Costants {
public static final String NO_SSL_VARIABLE_NAME="NO-SSL";
// allowed value are: "NO-SSL", "SSL"
public static final String DEFAULT_CONNECTION_MODE="NO-SSL";
public static final int CONNECTION_PER_HOST=30;
// milliseconds
public static final int CONNECT_TIMEOUT=30000;
/** Report type - used by the Report factory class */
public static final int ACCOUNTING_TYPE = 1;
// used by MyFile class
public static final boolean DEFAULT_REPLACE_OPTION=false;
// used by BucketCoding class and operation package
public static final String SEPARATOR="_-_";
// used by Encrypter class
public static final String DESEDE_ENCRYPTION_SCHEME = "DESede";
// used by Encrypter class
public static final String DES_ENCRYPTION_SCHEME = "DES";
// used by ServiceEngine class
public static final String FILE_SEPARATOR = "/";
public static final int CONNECTION_RETRY_THRESHOLD=5;
public static final String DEFAULT_SCOPE = "private";
public static final long TTL=180000;
public static final boolean DEFAULT_CHUNK_OPTION=false;
public static final int TTL_RENEW = 5;
public static final String DEFAULT_RESOLVER_HOST= "data.d4science.org";
// *****
// used by Operation class
public static final String COUNT_IDENTIFIER="count";
public static final String LINK_IDENTIFIER="link";
// used by MongoIOManager class
public static final String DEFAULT_META_COLLECTION="fs.files";
public static final String DEFAULT_DB_NAME="remotefs";
public static final String ROOT_PATH_PATCH_V1=Costants.FILE_SEPARATOR+"home"+Costants.FILE_SEPARATOR+"null"+Costants.FILE_SEPARATOR;
public static final String ROOT_PATH_PATCH_V2=Costants.FILE_SEPARATOR+"public"+Costants.FILE_SEPARATOR;
public static final String DEFAULT_CHUNKS_COLLECTION = "fs.chunks";
public static final WriteConcern DEFAULT_WRITE_TYPE=WriteConcern.REPLICA_ACKNOWLEDGED;
public static final ReadPreference DEFAULT_READ_PREFERENCE=ReadPreference.primaryPreferred();
public static final boolean DEFAULT_READWRITE_PREFERENCE= true;
// used by GetHttpsUrl class
public static final String URL_SEPARATOR="/";
public static final String VOLATILE_URL_IDENTIFICATOR = "-VLT";
// used by OperationManager class
// CONSTANTS FOR CLIENT FACTORY
public static final String CLIENT_TYPE="mongo";
// CONSTANTS FOR THREAD MANAGEMENT (not used by mongodb)
public static final int MIN_THREAD=1;
public static final int MAX_THREAD=10;
// CONSTANTS FOR CHUNK MANAGEMENT (not used by mongodb); "soglia" = threshold
public static final int sogliaNumeroMassimo=400;
public static final int sogliaNumeroMinimo=4;
// minimum chunk dimension, expressed in bytes
public static final int sogliaDimensioneMinima=1024*1024;
// maximum chunk dimension, expressed in bytes
public static final int sogliaDimensioneMassima= 4*1024*1024;
// used by DuplicateOperator class
public static final String DUPLICATE_SUFFIX="-dpl";
// placeholder for the GetPayload map field (currently unused)
public static final String MAP_FIELD="";
// used by TransportManager class
public static final String DEFAULT_TRANSPORT_MANAGER="MongoDB";
}
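/**
 * Usage sketch only, not part of the original commit: how these constants
 * might feed the driver's client options (mongo-java-driver 3.x builder API).
 */
final class CostantsUsageSketch {
	static com.mongodb.MongoClientOptions defaultOptions() {
		return com.mongodb.MongoClientOptions.builder()
				.connectionsPerHost(Costants.CONNECTION_PER_HOST)  // 30 connections per host
				.connectTimeout(Costants.CONNECT_TIMEOUT)          // 30000 ms
				.writeConcern(Costants.DEFAULT_WRITE_TYPE)         // REPLICA_ACKNOWLEDGED
				.readPreference(Costants.DEFAULT_READ_PREFERENCE)  // primaryPreferred
				.build();
	}
}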
