Compare commits

..

No commits in common. "master" and "v2.9.0" have entirely different histories.

66 changed files with 410 additions and 822 deletions

View File

@ -15,10 +15,14 @@
<attributes>
<attribute name="optional" value="true"/>
<attribute name="maven.pomderived" value="true"/>
<attribute name="test" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8">
<classpathentry excluding="**" kind="src" output="target/test-classes" path="src/test/resources">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.7">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
@ -26,7 +30,6 @@
<classpathentry kind="con" path="org.eclipse.m2e.MAVEN2_CLASSPATH_CONTAINER">
<attributes>
<attribute name="maven.pomderived" value="true"/>
<attribute name="org.eclipse.jst.component.nondependency" value=""/>
</attributes>
</classpathentry>
<classpathentry kind="output" path="target/classes"/>

4
.gitignore vendored
View File

@ -1,4 +0,0 @@
/target/
/.classpath
/*.project
/.settings

View File

@ -5,11 +5,6 @@
<projects>
</projects>
<buildSpec>
<buildCommand>
<name>org.eclipse.wst.common.project.facet.core.builder</name>
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.jdt.core.javabuilder</name>
<arguments>
@ -20,17 +15,9 @@
<arguments>
</arguments>
</buildCommand>
<buildCommand>
<name>org.eclipse.wst.validation.validationbuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.eclipse.jem.workbench.JavaEMFNature</nature>
<nature>org.eclipse.wst.common.modulecore.ModuleCoreNature</nature>
<nature>org.eclipse.jdt.core.javanature</nature>
<nature>org.eclipse.m2e.core.maven2Nature</nature>
<nature>org.eclipse.wst.common.project.facet.core.nature</nature>
</natures>
</projectDescription>

View File

@ -1,15 +1,12 @@
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.7
org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
org.eclipse.jdt.core.compiler.compliance=1.8
org.eclipse.jdt.core.compiler.compliance=1.7
org.eclipse.jdt.core.compiler.debug.lineNumber=generate
org.eclipse.jdt.core.compiler.debug.localVariable=generate
org.eclipse.jdt.core.compiler.debug.sourceFile=generate
org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
org.eclipse.jdt.core.compiler.problem.enablePreviewFeatures=disabled
org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
org.eclipse.jdt.core.compiler.problem.reportPreviewFeatures=ignore
org.eclipse.jdt.core.compiler.release=disabled
org.eclipse.jdt.core.compiler.source=1.8
org.eclipse.jdt.core.compiler.source=1.7

View File

@ -1,6 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?><project-modules id="moduleCoreId" project-version="1.5.0">
<wb-module deploy-name="storage-manager-core">
<wb-resource deploy-path="/" source-path="/src/main/java"/>
<wb-resource deploy-path="/" source-path="/src/main/resources"/>
</wb-module>
</project-modules>

View File

@ -1,5 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<faceted-project>
<installed facet="java" version="1.8"/>
<installed facet="jst.utility" version="1.0"/>
</faceted-project>

View File

@ -1,16 +0,0 @@
# Changelog for storage-manager-core
## [v2.9.3-SNAPSHOT] 2022-09-19
* set java to 1.8
## [v2.9.2] 2022-09-07
* restored close() method to IClient
* add slf4j-simple dependency with test scope
* update gcube-bom to 2.0.2
## [v2.9.1] 2022-06-28
* update to version 2.9.1 in order to have a fixed bom in the latest version of the range
## [v2.9.0] 2019-10-19
* SSL enabled

View File

@ -1,311 +0,0 @@
# European Union Public Licence V. 1.1
EUPL © the European Community 2007
This European Union Public Licence (the “EUPL”) applies to the Work or Software
(as defined below) which is provided under the terms of this Licence. Any use of
the Work, other than as authorised under this Licence is prohibited (to the
extent such use is covered by a right of the copyright holder of the Work).
The Original Work is provided under the terms of this Licence when the Licensor
(as defined below) has placed the following notice immediately following the
copyright notice for the Original Work:
Licensed under the EUPL V.1.1
or has expressed by any other mean his willingness to license under the EUPL.
## 1. Definitions
In this Licence, the following terms have the following meaning:
- The Licence: this Licence.
- The Original Work or the Software: the software distributed and/or
communicated by the Licensor under this Licence, available as Source Code and
also as Executable Code as the case may be.
- Derivative Works: the works or software that could be created by the Licensee,
based upon the Original Work or modifications thereof. This Licence does not
define the extent of modification or dependence on the Original Work required
in order to classify a work as a Derivative Work; this extent is determined by
copyright law applicable in the country mentioned in Article 15.
- The Work: the Original Work and/or its Derivative Works.
- The Source Code: the human-readable form of the Work which is the most
convenient for people to study and modify.
- The Executable Code: any code which has generally been compiled and which is
meant to be interpreted by a computer as a program.
- The Licensor: the natural or legal person that distributes and/or communicates
the Work under the Licence.
- Contributor(s): any natural or legal person who modifies the Work under the
Licence, or otherwise contributes to the creation of a Derivative Work.
- The Licensee or “You”: any natural or legal person who makes any usage of the
Software under the terms of the Licence.
- Distribution and/or Communication: any act of selling, giving, lending,
renting, distributing, communicating, transmitting, or otherwise making
available, on-line or off-line, copies of the Work or providing access to its
essential functionalities at the disposal of any other natural or legal
person.
## 2. Scope of the rights granted by the Licence
The Licensor hereby grants You a world-wide, royalty-free, non-exclusive,
sub-licensable licence to do the following, for the duration of copyright vested
in the Original Work:
- use the Work in any circumstance and for all usage, reproduce the Work, modify
- the Original Work, and make Derivative Works based upon the Work, communicate
- to the public, including the right to make available or display the Work or
- copies thereof to the public and perform publicly, as the case may be, the
- Work, distribute the Work or copies thereof, lend and rent the Work or copies
- thereof, sub-license rights in the Work or copies thereof.
Those rights can be exercised on any media, supports and formats, whether now
known or later invented, as far as the applicable law permits so.
In the countries where moral rights apply, the Licensor waives his right to
exercise his moral right to the extent allowed by law in order to make effective
the licence of the economic rights here above listed.
The Licensor grants to the Licensee royalty-free, non exclusive usage rights to
any patents held by the Licensor, to the extent necessary to make use of the
rights granted on the Work under this Licence.
## 3. Communication of the Source Code
The Licensor may provide the Work either in its Source Code form, or as
Executable Code. If the Work is provided as Executable Code, the Licensor
provides in addition a machine-readable copy of the Source Code of the Work
along with each copy of the Work that the Licensor distributes or indicates, in
a notice following the copyright notice attached to the Work, a repository where
the Source Code is easily and freely accessible for as long as the Licensor
continues to distribute and/or communicate the Work.
## 4. Limitations on copyright
Nothing in this Licence is intended to deprive the Licensee of the benefits from
any exception or limitation to the exclusive rights of the rights owners in the
Original Work or Software, of the exhaustion of those rights or of other
applicable limitations thereto.
## 5. Obligations of the Licensee
The grant of the rights mentioned above is subject to some restrictions and
obligations imposed on the Licensee. Those obligations are the following:
Attribution right: the Licensee shall keep intact all copyright, patent or
trademarks notices and all notices that refer to the Licence and to the
disclaimer of warranties. The Licensee must include a copy of such notices and a
copy of the Licence with every copy of the Work he/she distributes and/or
communicates. The Licensee must cause any Derivative Work to carry prominent
notices stating that the Work has been modified and the date of modification.
Copyleft clause: If the Licensee distributes and/or communicates copies of the
Original Works or Derivative Works based upon the Original Work, this
Distribution and/or Communication will be done under the terms of this Licence
or of a later version of this Licence unless the Original Work is expressly
distributed only under this version of the Licence. The Licensee (becoming
Licensor) cannot offer or impose any additional terms or conditions on the Work
or Derivative Work that alter or restrict the terms of the Licence.
Compatibility clause: If the Licensee Distributes and/or Communicates Derivative
Works or copies thereof based upon both the Original Work and another work
licensed under a Compatible Licence, this Distribution and/or Communication can
be done under the terms of this Compatible Licence. For the sake of this clause,
“Compatible Licence” refers to the licences listed in the appendix attached to
this Licence. Should the Licensee's obligations under the Compatible Licence
conflict with his/her obligations under this Licence, the obligations of the
Compatible Licence shall prevail.
Provision of Source Code: When distributing and/or communicating copies of the
Work, the Licensee will provide a machine-readable copy of the Source Code or
indicate a repository where this Source will be easily and freely available for
as long as the Licensee continues to distribute and/or communicate the Work.
Legal Protection: This Licence does not grant permission to use the trade names,
trademarks, service marks, or names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the copyright notice.
## 6. Chain of Authorship
The original Licensor warrants that the copyright in the Original Work granted
hereunder is owned by him/her or licensed to him/her and that he/she has the
power and authority to grant the Licence.
Each Contributor warrants that the copyright in the modifications he/she brings
to the Work are owned by him/her or licensed to him/her and that he/she has the
power and authority to grant the Licence.
Each time You accept the Licence, the original Licensor and subsequent
Contributors grant You a licence to their contributions to the Work, under the
terms of this Licence.
## 7. Disclaimer of Warranty
The Work is a work in progress, which is continuously improved by numerous
contributors. It is not a finished work and may therefore contain defects or
“bugs” inherent to this type of software development.
For the above reason, the Work is provided under the Licence on an “as is” basis
and without warranties of any kind concerning the Work, including without
limitation merchantability, fitness for a particular purpose, absence of defects
or errors, accuracy, non-infringement of intellectual property rights other than
copyright as stated in Article 6 of this Licence.
This disclaimer of warranty is an essential part of the Licence and a condition
for the grant of any rights to the Work.
## 8. Disclaimer of Liability
Except in the cases of wilful misconduct or damages directly caused to natural
persons, the Licensor will in no event be liable for any direct or indirect,
material or moral, damages of any kind, arising out of the Licence or of the use
of the Work, including without limitation, damages for loss of goodwill, work
stoppage, computer failure or malfunction, loss of data or any commercial
damage, even if the Licensor has been advised of the possibility of such
damage. However, the Licensor will be liable under statutory product liability
laws as far such laws apply to the Work.
## 9. Additional agreements
While distributing the Original Work or Derivative Works, You may choose to
conclude an additional agreement to offer, and charge a fee for, acceptance of
support, warranty, indemnity, or other liability obligations and/or services
consistent with this Licence. However, in accepting such obligations, You may
act only on your own behalf and on your sole responsibility, not on behalf of
the original Licensor or any other Contributor, and only if You agree to
indemnify, defend, and hold each Contributor harmless for any liability incurred
by, or claims asserted against such Contributor by the fact You have accepted
any such warranty or additional liability.
## 10. Acceptance of the Licence
The provisions of this Licence can be accepted by clicking on an icon “I agree”
placed under the bottom of a window displaying the text of this Licence or by
affirming consent in any other similar way, in accordance with the rules of
applicable law. Clicking on that icon indicates your clear and irrevocable
acceptance of this Licence and all of its terms and conditions.
Similarly, you irrevocably accept this Licence and all of its terms and
conditions by exercising any rights granted to You by Article 2 of this Licence,
such as the use of the Work, the creation by You of a Derivative Work or the
Distribution and/or Communication by You of the Work or copies thereof.
## 11. Information to the public
In case of any Distribution and/or Communication of the Work by means of
electronic communication by You (for example, by offering to download the Work
from a remote location) the distribution channel or media (for example, a
website) must at least provide to the public the information requested by the
applicable law regarding the Licensor, the Licence and the way it may be
accessible, concluded, stored and reproduced by the Licensee.
## 12. Termination of the Licence
The Licence and the rights granted hereunder will terminate automatically upon
any breach by the Licensee of the terms of the Licence.
Such a termination will not terminate the licences of any person who has
received the Work from the Licensee under the Licence, provided such persons
remain in full compliance with the Licence.
## 13. Miscellaneous
Without prejudice of Article 9 above, the Licence represents the complete
agreement between the Parties as to the Work licensed hereunder.
If any provision of the Licence is invalid or unenforceable under applicable
law, this will not affect the validity or enforceability of the Licence as a
whole. Such provision will be construed and/or reformed so as necessary to make
it valid and enforceable.
The European Commission may publish other linguistic versions and/or new
versions of this Licence, so far this is required and reasonable, without
reducing the scope of the rights granted by the Licence. New versions of the
Licence will be published with a unique version number.
All linguistic versions of this Licence, approved by the European Commission,
have identical value. Parties can take advantage of the linguistic version of
their choice.
## 14. Jurisdiction
Any litigation resulting from the interpretation of this License, arising
between the European Commission, as a Licensor, and any Licensee, will be
subject to the jurisdiction of the Court of Justice of the European Communities,
as laid down in article 238 of the Treaty establishing the European Community.
Any litigation arising between Parties, other than the European Commission, and
resulting from the interpretation of this License, will be subject to the
exclusive jurisdiction of the competent court where the Licensor resides or
conducts its primary business.
## 15. Applicable Law
This Licence shall be governed by the law of the European Union country where
the Licensor resides or has his registered office.
This licence shall be governed by the Belgian law if:
- a litigation arises between the European Commission, as a Licensor, and any
- Licensee; the Licensor, other than the European Commission, has no residence
- or registered office inside a European Union country.
## Appendix
“Compatible Licences” according to article 5 EUPL are:
- GNU General Public License (GNU GPL) v. 2
- Open Software License (OSL) v. 2.1, v. 3.0
- Common Public License v. 1.0
- Eclipse Public License v. 1.0
- Cecill v. 2.0

View File

@ -1,18 +0,0 @@
storage-manager-core
----
## Examples of use
## Deployment
Notes about how to deploy this component on an infrastructure or link to wiki doc (if any).
## Documentation
See storage-manager-core on [Wiki](https://gcube.wiki.gcube-system.org/gcube/Storage_Manager).
## License
TBP

65
pom.xml
View File

@ -8,11 +8,9 @@
</parent>
<groupId>org.gcube.contentmanagement</groupId>
<artifactId>storage-manager-core</artifactId>
<version>2.9.3-SNAPSHOT</version>
<version>2.9.0</version>
<properties>
<distroDirectory>${project.basedir}/distro</distroDirectory>
<maven.compiler.target>1.8</maven.compiler.target>
<maven.compiler.source>1.8</maven.compiler.source>
</properties>
<scm>
<connection>scm:git:https://code-repo.d4science.org/gCubeSystem/${project.artifactId}.git</connection>
@ -25,7 +23,7 @@
<dependency>
<groupId>org.gcube.distribution</groupId>
<artifactId>gcube-bom</artifactId>
<version>2.0.2</version>
<version>1.4.0</version>
<type>pom</type>
<scope>import</scope>
</dependency>
@ -39,7 +37,7 @@
<dependency>
<groupId>org.mongodb</groupId>
<artifactId>mongo-java-driver</artifactId>
<version>3.12.0</version>
<version>3.6.0</version>
</dependency>
<dependency>
<groupId>org.gcube.core</groupId>
@ -55,11 +53,54 @@
<artifactId>commons-codec</artifactId>
<version>1.8</version>
</dependency>
<dependency>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-simple</artifactId>
<version>1.7.32</version>
<scope>test</scope>
</dependency>
</dependencies>
</project>
<!-- <build> -->
<!-- <plugins> -->
<!-- <plugin> -->
<!-- <groupId>org.apache.maven.plugins</groupId> -->
<!-- <artifactId>maven-resources-plugin</artifactId> -->
<!-- <version>2.5</version> -->
<!-- <executions> -->
<!-- <execution> -->
<!-- <id>copy-profile</id> -->
<!-- <phase>install</phase> -->
<!-- <goals> -->
<!-- <goal>copy-resources</goal> -->
<!-- </goals> -->
<!-- <configuration> -->
<!-- <outputDirectory>target</outputDirectory> -->
<!-- <resources> -->
<!-- <resource> -->
<!-- <directory>${distroDirectory}</directory> -->
<!-- <filtering>true</filtering> -->
<!-- <includes> -->
<!-- <include>profile.xml</include> -->
<!-- </includes> -->
<!-- </resource> -->
<!-- </resources> -->
<!-- </configuration> -->
<!-- </execution> -->
<!-- </executions> -->
<!-- </plugin> -->
<!-- <plugin> -->
<!-- <groupId>org.apache.maven.plugins</groupId> -->
<!-- <artifactId>maven-assembly-plugin</artifactId> -->
<!-- -->
<!-- <configuration> -->
<!-- <descriptors> -->
<!-- <descriptor>${distroDirectory}/descriptor.xml</descriptor> -->
<!-- </descriptors> -->
<!-- </configuration> -->
<!-- <executions> -->
<!-- <execution> -->
<!-- <id>servicearchive</id> -->
<!-- <phase>install</phase> -->
<!-- <goals> -->
<!-- <goal>single</goal> -->
<!-- </goals> -->
<!-- </execution> -->
<!-- </executions> -->
<!-- </plugin> -->
<!-- </plugins> -->
<!-- </build> -->
</project>

View File

@ -0,0 +1,15 @@
log4j.rootLogger=INFO, A1, stdout
log4j.appender.A1=org.apache.log4j.RollingFileAppender
log4j.appender.A1.File=log.txt
log4j.appender.A1.layout=org.apache.log4j.PatternLayout
log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
# ***** Max file size is set to 100MB
log4j.appender.A1.MaxFileSize=100MB
# ***** Keep one backup file
log4j.appender.A1.MaxBackupIndex=1
#CONSOLE
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Threshold=INFO
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%t] %-5p %c %d{dd MMM yyyy ;HH:mm:ss.SSS} - %m%n

View File

@ -88,15 +88,7 @@ public class MyFile {
private String readPreference;
private String rootPath;
private boolean replace=false;
private String token;
private String region;
final Logger logger = LoggerFactory.getLogger(MyFile.class);
public MyFile(boolean lock){
setLock(lock);
@ -697,20 +689,6 @@ public class MyFile {
public void setId2(String id2) {
this.id2 = id2;
}
public String getToken() {
return token;
}
public void setToken(String token) {
this.token = token;
}
public void setRegion(String region) {
this.region=region;
}
public String getRegion() {
return region;
}
}

View File

@ -1,7 +1,6 @@
package org.gcube.contentmanagement.blobstorage.service;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.service.impl.AmbiguousResource;
import org.gcube.contentmanagement.blobstorage.service.impl.LocalResource;
import org.gcube.contentmanagement.blobstorage.service.impl.RemoteResource;
@ -97,7 +96,6 @@ public RemoteResourceInfo renewTTL(String key);
*
* @return RemoteResource object
*/
@Deprecated
RemoteResource getUrl();
/**
@ -207,11 +205,6 @@ public RemoteResourceComplexInfo getMetaFile();
/**
* close the connections to backend storage system
*/
public void forceClose();
/**
* close the connections to backend storage system. Method restored for backward compatibility
*/
public void close();
@ -231,16 +224,12 @@ public String getId(String id);
public RemoteResource getRemotePath();
@Deprecated
public RemoteResource getHttpUrl(boolean forceCreation);
@Deprecated
public RemoteResource getHttpUrl(String backendType, boolean forceCreation);
@Deprecated
public RemoteResource getHttpUrl(String backendType);
@Deprecated
public RemoteResource getHttpUrl();
public RemoteResource getHttpsUrl(boolean forceCreation);
@ -270,6 +259,4 @@ public abstract RemoteResourceBoolean exist();
public abstract RemoteResourceBoolean exist(String backendType);
public MemoryType getGcubeMemoryType();
}

View File

@ -29,7 +29,6 @@ public class DirectoryBucket {
String path;
String[] server;
String user, password;
TransportManager tm;
public DirectoryBucket(String[] server, String user, String password, String path, String author){
if(logger.isDebugEnabled())
logger.debug("DirectoryBucket PATH: "+path);
@ -92,7 +91,7 @@ public class DirectoryBucket {
String[] bucketList=null;
bucketList=retrieveBucketsName(path, rootArea);
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(tm, backendType, resource.getGcubeMemoryType(), dbNames, resource.getWriteConcern(), resource.getReadPreference());
TransportManager tm=tmf.getTransport(backendType, resource.getGcubeMemoryType(), dbNames, resource.getWriteConcern(), resource.getReadPreference());
// TerrastoreClient client=new TerrastoreClient( new OrderedHostManager(Arrays.asList(server)), new HTTPConnectionFactory());
for(int i=0;i<bucketList.length;i++){
if(logger.isDebugEnabled())
@ -125,7 +124,7 @@ public class DirectoryBucket {
logger.debug("bucketDir Coded: "+bucketDirCoded);
bucketList=retrieveBucketsName(bucket, rootArea);
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(tm, backendType, resource.getGcubeMemoryType(), dbNames, resource.getWriteConcern(),resource.getReadPreference());
TransportManager tm=tmf.getTransport(backendType, resource.getGcubeMemoryType(), dbNames, resource.getWriteConcern(),resource.getReadPreference());
for(int i=0;i<bucketList.length;i++){
if(logger.isDebugEnabled())
logger.debug("REMOVE: check "+bucketList[i]+" bucketDirCoded: "+bucketDirCoded );

View File

@ -8,6 +8,7 @@ import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryEntity;
import org.gcube.contentmanagement.blobstorage.service.operation.OperationManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
@ -26,7 +27,7 @@ import org.gcube.contentmanagement.blobstorage.resource.StorageObject;
*/
public class RemoteResource extends Resource{
TransportManager tm;
public RemoteResource(MyFile file, ServiceEngine engine) {
super(file, engine);
@ -111,7 +112,7 @@ public class RemoteResource extends Resource{
if(engine.getCurrentOperation().equalsIgnoreCase("showdir")){
dir = new BucketCoding().bucketDirCoding(dir, engine.getContext());
TransportManagerFactory tmf= new TransportManagerFactory(engine.primaryBackend, engine.getBackendUser(), engine.getBackendPassword());
tm=tmf.getTransport(tm, engine.getBackendType(), engine.getGcubeMemoryType(), engine.getDbNames(), engine.getWriteConcern(), engine.getReadConcern());
TransportManager tm=tmf.getTransport(engine.getBackendType(), engine.getGcubeMemoryType(), engine.getDbNames(), engine.getWriteConcern(), engine.getReadConcern());
Map<String, StorageObject> mapDirs=null;
try {
mapDirs = tm.getValues(getMyFile(), dir, DirectoryEntity.class);
@ -132,7 +133,7 @@ public class RemoteResource extends Resource{
dirBuc.removeDirBucket(getMyFile(), dir, engine.getContext(), engine.getBackendType(), engine.getDbNames());
else{
TransportManagerFactory tmf=new TransportManagerFactory(engine.primaryBackend, engine.getBackendUser(), engine.getBackendPassword());
tm=tmf.getTransport(tm, Costants.CLIENT_TYPE, engine.getGcubeMemoryType(), engine.getDbNames(), engine.getWriteConcern(), engine.getReadConcern());
TransportManager tm=tmf.getTransport(Costants.CLIENT_TYPE, engine.getGcubeMemoryType(), engine.getDbNames(), engine.getWriteConcern(), engine.getReadConcern());
dir=new BucketCoding().bucketFileCoding(dir, engine.getContext());
try {
tm.removeDir(dir, getMyFile());

View File

@ -1,9 +1,9 @@
package org.gcube.contentmanagement.blobstorage.service.impl;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import org.gcube.contentmanagement.blobstorage.resource.AccessType;
@ -17,6 +17,7 @@ import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Bucket
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.Encrypter.EncryptionException;
import org.gcube.contentmanagement.blobstorage.service.operation.*;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
@ -70,19 +71,13 @@ public class ServiceEngine implements IClient {
private String user;
//backend server password
private String password;
// if the backend is mongodb, this field is used for crypt/decrypt. If the backend is S3, this field is a token.
private String passPhrase;
private String resolverHost;
private String[] dbNames;
// private static final String DEFAULT_RESOLVER_HOST= "data.d4science.org";
private String write;
private String read;
private String token;
private String region;
public ServiceEngine(String[] server){
this.primaryBackend=server;
}
@ -147,24 +142,24 @@ public class ServiceEngine implements IClient {
}
private String getPublicArea() {
public String getPublicArea() {
return publicArea;
}
private void setPublicArea(String publicArea) {
public void setPublicArea(String publicArea) {
logger.trace("public area is "+publicArea);
this.publicArea = publicArea;
}
private String getHomeArea() {
public String getHomeArea() {
return homeArea;
}
private void setHomeArea(String rootPath) {
public void setHomeArea(String rootPath) {
this.homeArea = rootPath;
}
private String getEnvironment() {
public String getEnvironment() {
return environment;
}
@ -172,7 +167,7 @@ public class ServiceEngine implements IClient {
* set the remote root path
* @param environment
*/
private void setEnvironment(String environment) {
public void setEnvironment(String environment) {
// delete initial / from variable environment
String newEnv=environment;
int ind=newEnv.indexOf('/');
@ -184,11 +179,11 @@ public class ServiceEngine implements IClient {
this.environment = newEnv;
}
private String getBucketID() {
public String getBucketID() {
return bucketID;
}
private void setBucketID(String bucketID) {
public void setBucketID(String bucketID) {
this.bucketID=bucketID;
}
@ -215,7 +210,7 @@ public class ServiceEngine implements IClient {
logger.debug("get() - start");
}
setCurrentOperation("download");
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
file=setOperationInfo(file, OPERATION.DOWNLOAD);
return new LocalResource(file, this);
}
@ -240,7 +235,7 @@ public class ServiceEngine implements IClient {
logger.debug("get() - start");
}
setCurrentOperation("getSize");
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
file=setOperationInfo(file, OPERATION.GET_SIZE);
return new RemoteResourceInfo(file, this);
}
@ -253,7 +248,7 @@ public class ServiceEngine implements IClient {
logger.debug("get() - start");
}
setCurrentOperation("getMetaFile");
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
file=setOperationInfo(file, OPERATION.GET_META_FILE);
return new RemoteResourceComplexInfo(file, this);
}
@ -264,7 +259,7 @@ public class ServiceEngine implements IClient {
logger.debug("get() - start");
}
setCurrentOperation("getTotalUserVolume");
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
file=setOperationInfo(file, OPERATION.GET_TOTAL_USER_VOLUME);
file = new Resource(file, this).setGenericProperties(getContext(), owner, null, "remote");
file.setRemotePath("/");
@ -296,7 +291,7 @@ public class ServiceEngine implements IClient {
logger.debug("get() - start");
}
setCurrentOperation("getTotalUserItems");
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
file=setOperationInfo(file, OPERATION.GET_USER_TOTAL_ITEMS);
file = new Resource(file, this).setGenericProperties(getContext(), owner, "", "remote");
file.setRemotePath("/");
@ -328,7 +323,7 @@ public class ServiceEngine implements IClient {
logger.debug("get() - start");
}
setCurrentOperation("getFolderSize");
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
file=setOperationInfo(file, OPERATION.GET_FOLDER_TOTAL_VOLUME);
return new RemoteResourceFolderInfo(file, this);
}
@ -339,7 +334,7 @@ public class ServiceEngine implements IClient {
logger.debug("get() - start");
}
setCurrentOperation("getFolderCount");
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
file=setOperationInfo(file, OPERATION.GET_FOLDER_TOTAL_ITEMS);
return new RemoteResourceFolderInfo(file, this);
}
@ -350,7 +345,7 @@ public class ServiceEngine implements IClient {
logger.debug("get() - start");
}
setCurrentOperation("getFolderLastUpdate");
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
file=setOperationInfo(file, OPERATION.GET_FOLDER_LAST_UPDATE);
return new RemoteResourceFolderInfo(file, this);
}
@ -370,7 +365,7 @@ public class ServiceEngine implements IClient {
}
setCurrentOperation("upload");
setReplaceOption(replace);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames());
file=setOperationInfo(file, OPERATION.UPLOAD);
file.setReplaceOption(replace);
return new LocalResource(file, this);
@ -392,7 +387,7 @@ public class ServiceEngine implements IClient {
}
setCurrentOperation("upload");
setReplaceOption(replace);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames());
file=setOperationInfo(file, OPERATION.UPLOAD);
file=setMimeType(file, mimeType);
file.setReplaceOption(replace);
@ -421,7 +416,7 @@ public class ServiceEngine implements IClient {
// remove object operation
setCurrentOperation("remove");
file=setOperationInfo(file, OPERATION.REMOVE);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
return new RemoteResource(file, this);
}
@ -538,7 +533,7 @@ public class ServiceEngine implements IClient {
file.setPassPhrase(passPhrase);
setCurrentOperation("getUrl");
file=setOperationInfo(file, OPERATION.GET_URL);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
RemoteResource resource=new RemoteResource(file, this);
return resource;
}
@ -573,7 +568,7 @@ public class ServiceEngine implements IClient {
file.setPassPhrase(passPhrase);
setCurrentOperation("getHttpUrl");
file=setOperationInfo(file, OPERATION.GET_HTTP_URL);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
RemoteResource resource=new RemoteResource(file, this);
return resource;
}
@ -610,7 +605,7 @@ public class ServiceEngine implements IClient {
file.setPassPhrase(passPhrase);
setCurrentOperation("getHttpsUrl");
file=setOperationInfo(file, OPERATION.GET_HTTPS_URL);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
RemoteResource resource=new RemoteResource(file, this);
return resource;
}
@ -674,7 +669,7 @@ public class ServiceEngine implements IClient {
backendType=setBackendType(backendType);
file = new MyFile(true);
setCurrentOperation("lock");
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
file=setOperationInfo(file, OPERATION.LOCK);
return new AmbiguousResource(file, this);
}
@ -693,7 +688,7 @@ public class ServiceEngine implements IClient {
// put(true);
setCurrentOperation("unlock");
file=setOperationInfo(file, OPERATION.UNLOCK);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
return new AmbiguousResource(file, this);
}
@ -710,7 +705,7 @@ public class ServiceEngine implements IClient {
// put(true);
setCurrentOperation("getTTL");
file=setOperationInfo(file, OPERATION.GET_TTL);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
return new RemoteResourceInfo(file, this);
}
@ -728,7 +723,7 @@ public class ServiceEngine implements IClient {
file.setGenericPropertyField(field);
setCurrentOperation("getMetaInfo");
file=setOperationInfo(file, OPERATION.GET_META_INFO);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
return new RemoteResource(file, this);
}
@ -745,7 +740,7 @@ public class ServiceEngine implements IClient {
file.setGenericPropertyValue(value);
setCurrentOperation("setMetaInfo");
file=setOperationInfo(file, OPERATION.SET_META_INFO);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
return new RemoteResource(file, this);
}
@ -762,7 +757,7 @@ public class ServiceEngine implements IClient {
// put(true);
setCurrentOperation("renewTTL");
file=setOperationInfo(file, OPERATION.RENEW_TTL);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
return new RemoteResourceInfo(file, this);
}
@ -779,7 +774,7 @@ public class ServiceEngine implements IClient {
file=null;
setCurrentOperation("link");
file=setOperationInfo(file, OPERATION.LINK);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames());
return new RemoteResourceSource(file, this);
}
@ -808,7 +803,7 @@ public class ServiceEngine implements IClient {
setCurrentOperation("copy");
file=setOperationInfo(file, OPERATION.COPY);
file.setReplaceOption(replaceOption);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames());
return new RemoteResourceSource(file, this);
}
@ -825,7 +820,7 @@ public class ServiceEngine implements IClient {
file=null;
setCurrentOperation("duplicate");
file=setOperationInfo(file, OPERATION.DUPLICATE);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames());
return new RemoteResource(file, this);
}
@ -851,7 +846,7 @@ public class ServiceEngine implements IClient {
setCurrentOperation("softcopy");
file=setOperationInfo(file, OPERATION.SOFT_COPY);
file.setReplaceOption(replaceOption);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames());
return new RemoteResourceSource(file, this);
}
@ -870,7 +865,7 @@ public class ServiceEngine implements IClient {
file=null;
setCurrentOperation("move");
file=setOperationInfo(file, OPERATION.MOVE);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames());
return new RemoteResourceSource(file, this);
}
@ -887,7 +882,7 @@ public class ServiceEngine implements IClient {
file=null;
setCurrentOperation("copy_dir");
file=setOperationInfo(file, OPERATION.COPY_DIR);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames());
return new RemoteResourceSource(file, this);
}
@ -904,31 +899,10 @@ public class ServiceEngine implements IClient {
file=null;
setCurrentOperation("move_dir");
file=setOperationInfo(file, OPERATION.MOVE_DIR);
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), getMyFile(), backendType, getDbNames());
return new RemoteResourceSource(file, this);
}
@Override
public void forceClose(){
currentOperation="forceclose";
file.setOwner(owner);
getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH);
setMyFile(file);
service.setResource(getMyFile());
service.setTypeOperation("forceclose");
try {
if(((file.getInputStream() != null) || (file.getOutputStream()!=null)) || ((file.getLocalPath() != null) || (file.getRemotePath() != null)))
service.startOperation(file,file.getRemotePath(), owner, primaryBackend, Costants.DEFAULT_CHUNK_OPTION, getContext(), isReplaceOption());
else{
logger.error("parameters incompatible ");
}
} catch (Throwable t) {
logger.error("get()", t.getCause());
throw new RemoteBackendException(" Error in "+currentOperation+" operation ", t.getCause());
}
}
@Override
public void close(){
currentOperation="close";
@ -936,7 +910,7 @@ public class ServiceEngine implements IClient {
getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH);
setMyFile(file);
service.setResource(getMyFile());
service.setTypeOperation("forceclose");
service.setTypeOperation("close");
try {
if(((file.getInputStream() != null) || (file.getOutputStream()!=null)) || ((file.getLocalPath() != null) || (file.getRemotePath() != null)))
service.startOperation(file,file.getRemotePath(), owner, primaryBackend, Costants.DEFAULT_CHUNK_OPTION, getContext(), isReplaceOption());
@ -950,6 +924,7 @@ public class ServiceEngine implements IClient {
}
}
public String getServiceClass() {
return serviceClass;
@ -1027,10 +1002,6 @@ public class ServiceEngine implements IClient {
file.setWriteConcern(getWriteConcern());
if(getReadConcern() != null)
file.setReadPreference(getReadConcern());
if(!Objects.isNull(getToken()))
file.setToken(getToken());
if(!Objects.isNull(getRegion()))
file.setRegion(getRegion());
return file;
}
@ -1092,24 +1063,19 @@ public class ServiceEngine implements IClient {
public String getId(String id){
if (getBackendType().equals("MongoDB")){
if(ObjectId.isValid(id))
return id;
try {
if(Base64.isBase64(id)){
byte[] valueDecoded= Base64.decodeBase64(id);
String encryptedID = new String(valueDecoded);
return new Encrypter("DES", getPassPhrase()).decrypt(encryptedID);
}else{
return new Encrypter("DES", getPassPhrase()).decrypt(id);
}
} catch (EncryptionException e) {
e.printStackTrace();
if(ObjectId.isValid(id))
return id;
try {
if(Base64.isBase64(id)){
byte[] valueDecoded= Base64.decodeBase64(id);
String encryptedID = new String(valueDecoded);
return new Encrypter("DES", getPassPhrase()).decrypt(encryptedID);
}else{
return new Encrypter("DES", getPassPhrase()).decrypt(id);
}
}else {
throw new RemoteBackendException("THe backend is not mongodb, the id cannot be decrypted because it should be not crypted");
} catch (EncryptionException e) {
e.printStackTrace();
}
return null;
}
@ -1120,7 +1086,7 @@ public class ServiceEngine implements IClient {
setCurrentOperation("getRemotePath");
file=setOperationInfo(file, OPERATION.GET_REMOTE_PATH);
file.setRootPath(this.getPublicArea());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
return new RemoteResource(file, this);
}
@ -1168,7 +1134,7 @@ public class ServiceEngine implements IClient {
}
public String[] getDbNames(){
protected String[] getDbNames(){
return this.dbNames;
}
@ -1191,24 +1157,9 @@ public class ServiceEngine implements IClient {
logger.debug("get() - start");
}
setCurrentOperation("exist");
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames(), getToken());
this.service=new OperationManager(primaryBackend, user, password, getCurrentOperation(), file, backendType, getDbNames());
file=setOperationInfo(file, OPERATION.EXIST);
return new RemoteResourceBoolean(file, this);
}
public String getToken() {
return token;
}
public void setToken(String token) {
this.token = token;
}
public String getRegion() {
return region;
}
public void setRegion(String region) {
this.region = region;
}
}

View File

@ -117,7 +117,7 @@ public class ChunkConsumer implements Runnable {
synchronized(ChunkConsumer.class){
String [] randomServer=randomizeServer(server);
TransportManagerFactory tmf=new TransportManagerFactory(randomServer, null, null);
client.set(tmf.getTransport(null, Costants.CLIENT_TYPE, null, null, myFile.getWriteConcern(), myFile.getReadPreference()));
client.set(tmf.getTransport(Costants.CLIENT_TYPE, null, null, myFile.getWriteConcern(), myFile.getReadPreference()));
}
if(logger.isDebugEnabled()){
logger.debug("waiting time for upload: "

View File

@ -2,25 +2,28 @@ package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class ForceClose extends Operation{
public class Close extends Operation{
/**
* Logger for this class
*/
final Logger logger=LoggerFactory.getLogger(GetSize.class);
// public String file_separator = ServiceEngine.FILE_SEPARATOR;//System.getProperty("file.separator");
public ForceClose(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
public Close(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
try {
tm.forceClose();
tm.close();
} catch (Exception e) {
throw new RemoteBackendException(" Error in GetSize operation ", e.getCause()); }
if (logger.isDebugEnabled()) {

View File

@ -4,6 +4,7 @@ import java.net.UnknownHostException;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
@ -37,7 +38,8 @@ public abstract class Copy extends Operation{
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String id=null;
try {
// id=tm.copy(myFile, sourcePath, destinationPath);

View File

@ -5,6 +5,7 @@ import java.util.List;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
@ -40,7 +41,8 @@ public abstract class CopyDir extends Operation{
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm = getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
List<String> ids=null;
try {
// ids=tm.copyDir(myFile, sourcePath, destinationPath);
@ -52,8 +54,6 @@ public abstract class CopyDir extends Operation{
}
return ids.toString();
}
@Override

View File

@ -4,6 +4,7 @@ import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
@ -50,9 +51,8 @@ public abstract class Download extends Operation{
id=get(this, myFile, false);
} catch (Throwable e) {
// TransportManagerFactory tmf=new TransportManagerFactory(getServer(), getUser(), getPassword());
// TransportManager tm=tmf.getTransport(getBackendType(), myFile.getGcubeMemoryType(), getDbNames(), myFile.getWriteConcern(), myFile.getReadPreference());
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf=new TransportManagerFactory(getServer(), getUser(), getPassword());
TransportManager tm=tmf.getTransport(getBackendType(), myFile.getGcubeMemoryType(), getDbNames(), myFile.getWriteConcern(), myFile.getReadPreference());
tm.close();
logger.error("Problem in download from: "+myFile.getRemotePath()+": "+e.getMessage());
// e.printStackTrace();

View File

@ -4,6 +4,7 @@ import java.io.OutputStream;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.operation.DownloadOperator;
import org.slf4j.Logger;
@ -39,7 +40,8 @@ public class DownloadAndLock extends Operation {
//TODO add field for file lock
get(download,myFile, true);
} catch (Exception e) {
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf=new TransportManagerFactory(getServer(), getUser(), getPassword());
TransportManager tm=tmf.getTransport(getBackendType(), myFile.getGcubeMemoryType(), getDbNames(), myFile.getWriteConcern(), myFile.getReadPreference());
tm.close();
throw new RemoteBackendException(" Error in downloadAndLock operation ", e.getCause());
}

View File

@ -7,6 +7,7 @@ import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
@ -30,7 +31,8 @@ public abstract class DuplicateFile extends Operation {
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String id=null;
try {
// id = tm.duplicateFile(myFile, bucket);

View File

@ -8,6 +8,7 @@ import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendEx
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -30,7 +31,8 @@ public class Exist extends Operation{
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
boolean isPresent=false;
try {
isPresent = tm.exist(bucket);

View File

@ -23,7 +23,12 @@ public class FileWriter extends Thread{
final Logger logger=LoggerFactory.getLogger(FileWriter.class);
private Monitor monitor;
private int id;
// private MyFile myFile;
// private byte[] encode;
// private int offset;
// private static int len=0;
private OutputStream out;
// private String path;
private byte[] full;

View File

@ -3,7 +3,9 @@ package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
@ -21,7 +23,8 @@ public class GetFolderCount extends Operation {
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
long dim=0;
try {
dim = tm.getFolderTotalItems(bucket);

View File

@ -4,6 +4,7 @@ import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
@ -21,7 +22,8 @@ public class GetFolderSize extends Operation {
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
long dim=0;
try {
dim = tm.getFolderTotalVolume(bucket);

View File

@ -12,12 +12,6 @@ import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
/**
* this class is replaced by getHttpsUrl
* @author roberto
*
*/
@Deprecated
public class GetHttpUrl extends Operation {
// private OutputStream os;
@ -52,8 +46,7 @@ public class GetHttpUrl extends Operation {
String urlBase="smp://"+resolverHost+Costants.URL_SEPARATOR;
String urlParam="";
try {
// String id=getId(myFile.getAbsoluteRemotePath(), myFile.isForceCreation(), myFile.getGcubeMemoryType(), myFile.getWriteConcern(), myFile.getReadPreference());
String id=getId(myFile);
String id=getId(myFile.getAbsoluteRemotePath(), myFile.isForceCreation(), myFile.getGcubeMemoryType(), myFile.getWriteConcern(), myFile.getReadPreference());
String phrase=myFile.getPassPhrase();
// urlParam =new StringEncrypter("DES", phrase).encrypt(id);
urlParam = new Encrypter("DES", phrase).encrypt(id);
@ -78,11 +71,13 @@ public class GetHttpUrl extends Operation {
return httpUrl.toString();
}
@Deprecated
private String getId(String path, boolean forceCreation, MemoryType memoryType, String writeConcern, String readPreference){
String id=null;
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(tm, backendType, memoryType, dbNames, writeConcern, readPreference);
if(tm ==null){
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(backendType, memoryType, dbNames, writeConcern, readPreference);
}
try {
id = tm.getId(bucket, forceCreation);
} catch (Exception e) {
@ -94,21 +89,6 @@ public class GetHttpUrl extends Operation {
return id;
}
private String getId(MyFile myFile){
String id=null;
TransportManager tm=getTransport(myFile);
try {
id = tm.getId(bucket, myFile.isForceCreation());
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in GetUrl operation. Problem to discover remote file:"+bucket+" "+ e.getMessage(), e.getCause()); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return id;
}
private URL translate(URL url) throws IOException {
logger.debug("translating: "+url);
String urlString=url.toString();

View File

@ -49,7 +49,6 @@ public class GetHttpsUrl extends Operation {
String urlParam="";
try {
String id=getId(myFile.getAbsoluteRemotePath(), myFile.isForceCreation(), myFile.getGcubeMemoryType(), myFile.getWriteConcern(), myFile.getReadPreference());
// String id=getId(myFile);
String phrase=myFile.getPassPhrase();
// urlParam =new StringEncrypter("DES", phrase).encrypt(id);
urlParam = new Encrypter("DES", phrase).encrypt(id);
@ -74,25 +73,12 @@ public class GetHttpsUrl extends Operation {
return httpsUrl.toString();
}
private String getId(MyFile myFile){
String id=null;
TransportManager tm=getTransport(myFile);
try {
id = tm.getId(bucket, myFile.isForceCreation());
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in GetUrl operation. Problem to discover remote file:"+bucket+" "+ e.getMessage(), e.getCause()); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return id;
}
@Deprecated
private String getId(String path, boolean forceCreation, MemoryType memoryType, String writeConcern, String readPreference){
String id=null;
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(tm, backendType, memoryType, dbNames, writeConcern, readPreference);
if(tm ==null){
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(backendType, memoryType, dbNames, writeConcern, readPreference);
}
try {
id = tm.getId(bucket, forceCreation);
} catch (Exception e) {

View File

@ -4,6 +4,7 @@ import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -30,12 +31,13 @@ public class GetMetaFile extends Operation{
*
*/
public MyFile doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
long dim=0;
String id=null;
String mime=null;
try {
dim = tm.getSize(bucket, myFile);
dim = tm.getSize(bucket);
id=tm.getId(bucket, false);
mime=tm.getFileProperty(bucket, "mimetype");
myFile.setOwner(tm.getFileProperty(bucket, "owner"));

View File

@ -4,6 +4,7 @@ import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -20,7 +21,8 @@ public class GetMetaInfo extends Operation {
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String value=null;
try {
value=tm.getFileProperty(bucket, myFile.getGenericPropertyField());

View File

@ -3,6 +3,7 @@ package org.gcube.contentmanagement.blobstorage.service.operation;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -20,7 +21,8 @@ public class GetRemotePath extends Operation{
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String path=null;
try {
path = tm.getRemotePath(bucket);

View File

@ -4,6 +4,7 @@ import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -27,10 +28,11 @@ public class GetSize extends Operation{
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
long dim=0;
try {
dim = tm.getSize(bucket, myFile);
dim = tm.getSize(bucket);
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in GetSize operation ", e.getCause()); }

View File

@ -4,6 +4,7 @@ import java.io.OutputStream;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -35,7 +36,8 @@ public class GetTTL extends Operation {
TransportManager tm=null;
try {
//aggiungere field per il lock del file
tm=getTransport(myFile);
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
currentTTL=tm.getTTL(bucket);
} catch (Exception e) {
tm.close();

View File

@ -9,12 +9,8 @@ import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
/**
* this class is replaced by getHttpsUrl
* @author roberto
*
*/
@Deprecated
public class GetUrl extends Operation{
// private OutputStream os;
@ -44,10 +40,11 @@ public class GetUrl extends Operation{
String urlBase="smp://"+resolverHost+Costants.URL_SEPARATOR;
String urlParam="";
try {
// String id=getId(myFile.getAbsoluteRemotePath(), myFile.isForceCreation(), myFile.getGcubeMemoryType(), myFile.getWriteConcern(), myFile.getReadPreference());
String id=getId(myFile);
String id=getId(myFile.getAbsoluteRemotePath(), myFile.isForceCreation(), myFile.getGcubeMemoryType(), myFile.getWriteConcern(), myFile.getReadPreference());
String phrase=myFile.getPassPhrase();
// urlParam =new StringEncrypter("DES", phrase).encrypt(id);
urlParam = new Encrypter("DES", phrase).encrypt(id);
// String urlEncoded=URLEncoder.encode(urlParam, "UTF-8");
} catch (EncryptionException e) {
throw new RemoteBackendException(" Error in getUrl operation problem to encrypt the string", e.getCause());
}
@ -59,11 +56,12 @@ public class GetUrl extends Operation{
return url;
}
@Deprecated
private String getId(String path, boolean forceCreation, MemoryType memoryType, String writeConcern, String readPreference){
String id=null;
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(tm, backendType, memoryType, dbNames, writeConcern, readPreference);
if(tm ==null){
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(backendType, memoryType, dbNames, writeConcern, readPreference);
}
try {
id = tm.getId(bucket, forceCreation);
} catch (Exception e) {
@ -75,18 +73,4 @@ public class GetUrl extends Operation{
return id;
}
private String getId(MyFile myFile){
String id=null;
TransportManager tm=getTransport(myFile);
try {
id = tm.getId(bucket, myFile.isForceCreation());
} catch (Exception e) {
tm.close();
throw new RemoteBackendException(" Error in GetUrl operation. Problem to discover remote file:"+bucket+" "+ e.getMessage(), e.getCause()); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}
return id;
}
}

View File

@ -4,6 +4,7 @@ import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
@ -19,7 +20,8 @@ public class GetUserTotalItems extends Operation {
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String dim=null;
logger.info("check user total items for user: "+getOwner()+ " user is "+user);
try {

View File

@ -4,6 +4,7 @@ import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
@ -12,13 +13,15 @@ import org.slf4j.LoggerFactory;
public class GetUserTotalVolume extends Operation {
final Logger logger=LoggerFactory.getLogger(GetUserTotalVolume.class);
// public String file_separator = ServiceEngine.FILE_SEPARATOR;//System.getProperty("file.separator");
public GetUserTotalVolume(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String dim=null;
logger.info("check user total volume for user: "+getOwner()+ " user is "+user);
try {

View File

@ -5,6 +5,7 @@ import java.net.UnknownHostException;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
@ -36,7 +37,8 @@ public abstract class Link extends Operation{
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String id=null;
try {
id=tm.link(this);

View File

@ -44,7 +44,8 @@ public abstract class Lock extends Operation {
Download download = new DownloadOperator(getServer(), getUser(), getPassword(), getBucket(), getMonitor(), isChunk(), getBackendType(), getDbNames());
unlockKey=get(download, myFile, true);
} catch (Exception e) {
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf=new TransportManagerFactory(getServer(), getUser(), getPassword());
TransportManager tm=tmf.getTransport(getBackendType(), myFile.getGcubeMemoryType(), getDbNames(), myFile.getWriteConcern(), myFile.getReadPreference());
tm.close();
throw new RemoteBackendException(" Error in lock operation ", e.getCause());
}

View File

@ -1,11 +1,14 @@
package org.gcube.contentmanagement.blobstorage.service.operation;
import java.io.OutputStream;
import java.net.UnknownHostException;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.DirectoryBucket;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
@ -37,9 +40,11 @@ public abstract class Move extends Operation{
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String id=null;
try {
// id=tm.move(myFile, sourcePath, destinationPath);
id=tm.move(this);
} catch (UnknownHostException e) {
tm.close();

View File

@ -7,6 +7,7 @@ import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
@ -38,7 +39,8 @@ public abstract class MoveDir extends Operation{
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
List<String>ids=null;
try {
ids=tm.moveDir(this);

View File

@ -3,6 +3,7 @@ package org.gcube.contentmanagement.blobstorage.service.operation;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
@ -39,7 +40,6 @@ public abstract class Operation {
private Monitor monitor;
private boolean isChunk;
String backendType;
protected static TransportManager transport;
public Operation(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs){
this.server=server;
@ -159,7 +159,8 @@ public abstract class Operation {
}else{
if(logger.isDebugEnabled())
logger.debug("NO THREAD POOL USED");
TransportManager tm=getTransport(resource);
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, resource.getGcubeMemoryType(), dbNames, resource.getWriteConcern(), resource.getReadPreference());
String objectId=tm.uploadManager(upload, resource, bucket, bucket+"_1", replaceOption);
return objectId;
}
@ -176,7 +177,12 @@ public abstract class Operation {
logger.debug("get(String) - start");
}
String unlocKey=null;
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf=null;
// if(server.length >1)
tmf=new TransportManagerFactory(server, user, password);
// else
// tmf=new TransportManagerFactory(server, null, null);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
long start=System.currentTimeMillis();
String path=myFile.getLocalPath();
if(!Costants.CLIENT_TYPE.equalsIgnoreCase("mongo")){
@ -374,10 +380,6 @@ public abstract class Operation {
this.user = user;
}
protected TransportManager getTransport(MyFile myFile) {
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
transport=tmf.getTransport(transport, backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
return transport;
}
}
}

View File

@ -25,9 +25,7 @@ public class OperationFactory {
Monitor monitor;
boolean isChunk;
private String backendType;
private String token;
public OperationFactory(String server[], String user, String pwd, String bucket, Monitor monitor2, boolean isChunk, String backendType, String[] dbs){
this.server=server;
this.user=user;
@ -52,8 +50,6 @@ public class OperationFactory {
op=new Remove(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getSize")){
op=new GetSize(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("forceclose")){
op=new ForceClose(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("duplicate")){
op=new DuplicateOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("softcopy")){
@ -115,12 +111,4 @@ public class OperationFactory {
return op;
}
public String getToken() {
return token;
}
public void setToken(String token) {
this.token = token;
}
}

View File

@ -32,8 +32,7 @@ public class OperationManager {
private String[] dbNames;
public OperationManager(String[] server, String user, String password, String operation, MyFile myFile, String backendType, String[] dbs, String token){
public OperationManager(String[] server, String user, String password, String operation, MyFile myFile, String backendType, String[] dbs){
this.setServer(server);
this.setUser(user);
this.setPassword(password);
@ -42,7 +41,6 @@ public class OperationManager {
this.setTypeOperation(operation);
this.setDbNames(dbs);
this.backendType=backendType;
}
public Object startOperation(MyFile file, String remotePath, String author, String[] server, boolean chunkOpt, String rootArea, boolean replaceOption) throws RemoteBackendException{
@ -146,6 +144,5 @@ public class OperationManager {
this.dbNames = dbNames;
}
}

View File

@ -3,6 +3,7 @@ package org.gcube.contentmanagement.blobstorage.service.operation;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
@ -24,7 +25,8 @@ public class Remove extends Operation{
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
removeBucket(tm, bucket, myFile);
if (logger.isDebugEnabled()) {
logger.debug(" REMOVE " + bucket);

View File

@ -5,6 +5,7 @@ import java.io.OutputStream;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -29,7 +30,8 @@ public class RenewTTL extends Operation {
@Override
public String doIt(MyFile myFile) throws RemoteBackendException {
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
long ttl=-1;
try {
myFile.setRemotePath(bucket);

View File

@ -4,6 +4,7 @@ import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -20,14 +21,14 @@ public class SetMetaInfo extends Operation {
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
try {
tm.setFileProperty(bucket, myFile.getGenericPropertyField(), myFile.getGenericPropertyValue());
} catch (Exception e) {
tm.close();
e.printStackTrace();
logger.error("Problem setting file property", e);
throw new RemoteBackendException(" Error in SetMetaInfo operation ", e); }
throw new RemoteBackendException(" Error in SetMetaInfo operation ", e.getCause()); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}

View File

@ -5,9 +5,11 @@ package org.gcube.contentmanagement.blobstorage.service.operation;
import java.net.UnknownHostException;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.slf4j.Logger;
@ -33,6 +35,21 @@ public abstract class SoftCopy extends Operation {
}
public String initOperation(MyFile file, String remotePath, String author, String[] server, String rootArea, boolean replaceOption) {
// if(remotePath != null){
// boolean isId=ObjectId.isValid(remotePath);
// setResource(file);
// if(!isId){
//// String[] dirs= remotePath.split(file_separator);
// if(logger.isDebugEnabled())
// logger.debug("remotePath: "+remotePath);
// String buck=null;
// buck = new BucketCoding().bucketFileCoding(remotePath, rootArea);
// return bucket=buck;
// }else{
// return bucket=remotePath;
// }
// }return bucket=null;//else throw new RemoteBackendException("argument cannot be null");
this.sourcePath=file.getLocalPath();
this.destinationPath=remotePath;
sourcePath = new BucketCoding().bucketFileCoding(file.getLocalPath(), rootArea);
@ -43,7 +60,8 @@ public abstract class SoftCopy extends Operation {
}
public String doIt(MyFile myFile) throws RemoteBackendException{
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
String id=null;
try {
id=tm.softCopy(this);
@ -65,6 +83,20 @@ public abstract class SoftCopy extends Operation {
destinationPath = new BucketCoding().bucketFileCoding(resource.getRemotePath(), rootArea);
setResource(resource);
return bucket=destinationPath;
// if(remotePath != null){
// boolean isId=ObjectId.isValid(remotePath);
// setResource(resource);
// if(!isId){
//// String[] dirs= remotePath.split(file_separator);
// if(logger.isDebugEnabled())
// logger.debug("remotePath: "+remotePath);
// String buck=null;
// buck = new BucketCoding().bucketFileCoding(remotePath, rootArea);
// return bucket=buck;
// }else{
// return bucket=remotePath;
// }
// }return bucket=null;//else throw new RemoteBackendException("argument cannot be null");
}
public abstract String execute(MongoIOManager mongoPrimaryInstance, MyFile resource, String sourcePath, String destinationPath) throws UnknownHostException;

View File

@ -5,6 +5,7 @@ import java.io.OutputStream;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.operation.UploadOperator;
@ -42,7 +43,8 @@ public abstract class Unlock extends Operation {
//inserire parametro per il lock
objectId=put(upload, myFile, isChunk(), false, false, true);
} catch (Exception e) {
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
tm.close();
throw new RemoteBackendException(" Error in unlock operation ", e.getCause());
}

View File

@ -7,6 +7,7 @@ import java.io.OutputStream;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.service.directoryOperation.BucketCoding;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.TransportManagerFactory;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
@ -48,8 +49,8 @@ public abstract class Upload extends Operation {
try {
objectId=put(this, myFile, isChunk(), false, replaceOption, false);
} catch (Throwable e) {
e.printStackTrace();
TransportManager tm=getTransport(myFile);
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
tm.close();
logger.error("Problem in upload from: "+myFile.getLocalPath()+": "+e.getMessage());
throw new RemoteBackendException(" Error in upload operation ", e.getCause());

View File

@ -33,10 +33,9 @@ public class UploadAndUnlock extends Operation {
objectId=put(upload, myFile, isChunk(), false, false, true);
} catch (Exception e) {
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(transport, backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
tm.close();
throw new RemoteBackendException(" Error in uploadAndUnlock operation ", e);
}
throw new RemoteBackendException(" Error in uploadAndUnlock operation ", e.getCause()); }
return objectId;
}

View File

@ -0,0 +1,28 @@
package org.gcube.contentmanagement.blobstorage.test;
import java.util.List;
import org.gcube.contentmanagement.blobstorage.service.IClient;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendException;
import org.gcube.contentmanagement.blobstorage.resource.StorageObject;
public class SimpleTest2 {
public static void main(String[] args) throws RemoteBackendException{
String[] server=new String[]{"146.48.123.73","146.48.123.74" };
IClient client=new ServiceEngine(server, "rcirillo", "cnr", "private", "rcirillo");
// String localFile="/home/rcirillo/FilePerTest/CostaRica.jpg";
String remoteFile="/img/shared9.jpg";
String newFile="/home/rcirillo/FilePerTest/repl4.jpg";
client.get().LFile(newFile).RFile(remoteFile);
List<StorageObject> list=client.showDir().RDir("/img/");
for(StorageObject obj : list){
System.out.println("obj found: "+obj.getName());
}
String uri=client.getUrl().RFile(remoteFile);
System.out.println(" uri file: "+uri);
}
}

View File

@ -23,7 +23,7 @@ import com.mongodb.MongoException;
public abstract class TransportManager {
protected MemoryType memoryType;
/**
* This method specifies the type of the backend for dynamic loading
* For mongoDB, default backend, the name is MongoDB
@ -36,7 +36,6 @@ public abstract class TransportManager {
* @param server array that contains ip of backend server
* @param pass
* @param user
* @param token api token if is required by backend
*/
public abstract void initBackend(String[] server, String user, String pass, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern);
@ -156,11 +155,10 @@ public abstract class TransportManager {
/**
* get the size of the remote file
* @param bucket identifies the remote file path
* @param myFile the file wrapper
* @return the size of the remote file
* @throws UnknownHostException
*/
public abstract long getSize(String bucket, MyFile myFile);
public abstract long getSize(String bucket);
/**
* lock a remote file
@ -326,8 +324,6 @@ public abstract class TransportManager {
public abstract String getField(String remoteIdentifier, String fieldName) throws UnknownHostException ;
public abstract void close();
public abstract void forceClose();
public abstract void setFileProperty(String remotePath, String propertyField, String propertyValue);

View File

@ -4,12 +4,10 @@ package org.gcube.contentmanagement.blobstorage.transport;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Objects;
import java.util.ServiceLoader;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@ -27,12 +25,9 @@ public class TransportManagerFactory {
// private static final Logger logger = Logger.getLogger(OperationFactory.class);
final Logger logger = LoggerFactory.getLogger(TransportManagerFactory.class);
// TerrastoreClient client;
private String[] server;
private String user;
private String password;
private MemoryType memoryType;
private String dbNames;
TransportManager transport;
String[] server;
String user;
String password;
public TransportManagerFactory(String server[], String user, String password){
this.server=server;
@ -40,37 +35,25 @@ public class TransportManagerFactory {
this.password=password;
}
public TransportManager getTransport(TransportManager tm, String backendType, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern){
public TransportManager getTransport(String backendType, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern){
if (logger.isDebugEnabled()) {
logger.debug("getOperation(String) - start");
}
if(logger.isDebugEnabled() && (!Objects.isNull(transport)))
logger.debug("transportLayer with "+transport.memoryType+" already instatiated. New memoryType request is "+memoryType);
// if we haven't any transport layer instantiated or the transport layer is istantiated on another memory type (persistent, volatile),
// then a new transport layer is needed
if(Objects.isNull(tm) || Objects.isNull(tm.memoryType) || (!tm.memoryType.equals(memoryType))) {
logger.info("new transport layer instantiated for "+memoryType+" memory");
return load(backendType, memoryType, dbNames, writeConcern, readConcern);
}else {
logger.debug("new transport layer not instantiated.");
}
return tm;
return load(backendType, memoryType, dbNames, writeConcern, readConcern);
}
private TransportManager load(String backendType, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern){
ServiceLoader<TransportManager> loader = ServiceLoader.load(TransportManager.class);
Iterator<TransportManager> iterator = loader.iterator();
List<TransportManager> impls = new ArrayList<TransportManager>();
logger.info("Try to load the backend...");
logger.info("the specified backend passed as input param is "+backendType);
while(iterator.hasNext())
impls.add(iterator.next());
int implementationCounted=impls.size();
// System.out.println("size: "+implementationCounted);
if((implementationCounted==0) || backendType.equals(Costants.DEFAULT_TRANSPORT_MANAGER)){
if(implementationCounted==0){
logger.info(" 0 implementation found. Load default implementation of TransportManager");
return new MongoOperationManager(server, user, password, memoryType, dbNames, writeConcern, readConcern);
}else if((implementationCounted==1) && Objects.isNull(backendType)){
}else if(implementationCounted==1){
TransportManager tm = impls.get(0);
logger.info("1 implementation of TransportManager found. Load it. "+tm.getName());
tm.initBackend(server, user, password, memoryType, dbNames, writeConcern, readConcern);
@ -81,7 +64,6 @@ public class TransportManagerFactory {
for(TransportManager tm : impls){
if(tm.getName().equalsIgnoreCase(backendType)){
logger.info("Found implementation "+backendType);
tm.initBackend(server, user, password, memoryType, dbNames, writeConcern, readConcern);
return tm;
}
}

View File

@ -16,6 +16,8 @@ import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.service.operation.Operation;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.DateUtils;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.MongoInputStream;
@ -117,7 +119,7 @@ public class MongoIOManager {
logger.error("Problem to open the DB connection for gridfs file ");
throw new RemoteBackendException("Problem to open the DB connection: "+ e.getMessage());
}
logger.info("mongo connection ready");
logger.info("new mongo connection pool opened");
}
return db;
@ -308,7 +310,6 @@ public class MongoIOManager {
updateCommonFields(f, resource, OPERATION.REMOVE);
// check if the file is linked
if((f!=null) && (f.containsField(Costants.COUNT_IDENTIFIER)) && (f.get(Costants.COUNT_IDENTIFIER) != null)){
logger.debug("RemovingObject: the following object "+idToRemove+" contains a COUNT field");
// this field is only added for reporting tool: storage-manager-trigger
String filename=(String)f.get("filename");
f.put("onScope", filename);
@ -321,7 +322,6 @@ public class MongoIOManager {
// check if the file is a link
}else if((f.containsField(Costants.LINK_IDENTIFIER)) && (f.get(Costants.LINK_IDENTIFIER) != null )){
while((f!=null) && (f.containsField(Costants.LINK_IDENTIFIER)) && (f.get(Costants.LINK_IDENTIFIER) != null )){
logger.debug("RemovingObject: the following object "+idToRemove+" contains a LINK field");
// remove f and decrement linkCount field on linked object
String id=(String)f.get(Costants.LINK_IDENTIFIER);
GridFSDBFile fLink=findGFSCollectionObject(new ObjectId(id));
@ -547,13 +547,10 @@ public class MongoIOManager {
destinationFile.put("creationTime", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z"));
}
public DBObject setGenericMoveProperties(MyFile resource, String filename, String dir,
String name, DBObject sourcePathMetaCollection) {
sourcePathMetaCollection.put("filename", filename);
sourcePathMetaCollection.put("type", "file");
sourcePathMetaCollection.put("name", name);
sourcePathMetaCollection.put("dir", dir);
return sourcePathMetaCollection;
public BasicDBObject setGenericMoveProperties(MyFile resource, String filename, String dir,
String name, BasicDBObject f) {
f.append("filename", filename).append("type", "file").append("name", name).append("dir", dir);
return f;
}
@ -669,10 +666,6 @@ public class MongoIOManager {
f=null;
}
}
if (f==null) {
logger.warn("The objectID is not present. Going to abort the current operation");
throw new RemoteBackendException("Object id "+serverLocation+" not found.");
}
// if the remote identifier is not a specified as ID, try to check if it is a valid remote path
// in this case the remote identifier is a valid objectID but it indicates a path
}else if ((remoteResourceIdentifier != null) && (!(remoteResourceIdentifier.equals(REMOTE_RESOURCE.ID))) && (f==null)){
@ -785,10 +778,10 @@ public class MongoIOManager {
return list;
}
public DBObject findMetaCollectionObject(String source) throws UnknownHostException {
public BasicDBObject findMetaCollectionObject(String source) throws UnknownHostException {
DBCollection fileCollection=getConnectionDB(dbName, false).getCollection(Costants.DEFAULT_META_COLLECTION);
BasicDBObject query = new BasicDBObject();
DBObject obj=null;
BasicDBObject obj=null;
query.put( "filename" ,source);
DBCursor cursor=fileCollection.find(query);
if(cursor != null && !cursor.hasNext()){
@ -797,7 +790,7 @@ public class MongoIOManager {
cursor=fileCollection.find(query);
}
if(cursor.hasNext()){
obj=(DBObject) cursor.next();
obj=(BasicDBObject) cursor.next();
String path=(String)obj.get("filename");
logger.debug("path found "+path);
}
@ -1055,11 +1048,11 @@ public class MongoIOManager {
* the old close method
*/
protected void clean() {
// if(mongo!=null)
// mongo.close();
// mongo=null;
// if(db!=null)
// db=null;
if(mongo!=null)
mongo.close();
mongo=null;
if(db!=null)
db=null;
}
/**
@ -1069,22 +1062,12 @@ public class MongoIOManager {
*/
public void close() {
// if(mongo!=null)
// mongo.close();
logger.debug(" cleaning mongo objects");
// logger.info("Mongo has been closed");
// mongo=null;
gfs=null;
db=null;
}
public void forceClose() {
if(mongo!=null)
mongo.close();
logger.info("Mongo pool closed");
close();
logger.info("Mongo has been closed");
mongo=null;
gfs=null;
db=null;
}
public void removeGFSFile(GridFSDBFile f, ObjectId idF){

View File

@ -1,4 +1,4 @@
package org.gcube.contentmanagement.blobstorage.transport.backend;
package org.gcube.contentmanagement.blobstorage.transport.backend;
import org.bson.types.ObjectId;
@ -11,11 +11,11 @@ import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.service.operation.*;
import org.gcube.contentmanagement.blobstorage.transport.TransportManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
@ -26,7 +26,6 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.MongoException;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;
@ -56,10 +55,8 @@ public class MongoOperationManager extends TransportManager{
@Override
public void initBackend(String[] server, String user, String pass, MemoryType memoryType , String[] dbNames, String writeConcern, String readConcern) {
logger.debug("init storage backend with "+memoryType+" memory");
try {
this.memoryType=memoryType;
super.memoryType=memoryType;
MongoOperationManager.dbNames=dbNames;
logger.debug("check mongo configuration");
if (dbNames!=null){
@ -133,13 +130,6 @@ public class MongoOperationManager extends TransportManager{
// mongoSecondaryInstance.close();
}
public void forceClose() {
if(Objects.nonNull(mongoPrimaryInstance))
mongoPrimaryInstance.forceClose();
if(Objects.nonNull(mongoSecondaryInstance))
mongoSecondaryInstance.forceClose();
}
/**
* Unlock the object specified, this method accept the key field for the unlock operation
* @throws FileNotFoundException
@ -300,7 +290,7 @@ public class MongoOperationManager extends TransportManager{
}
@Override
public long getSize(String remotePath, MyFile file){
public long getSize(String remotePath){
long length=-1;
if(logger.isDebugEnabled())
logger.debug("MongoDB - get Size for pathServer: "+remotePath);
@ -466,7 +456,7 @@ public class MongoOperationManager extends TransportManager{
*/
private void updateMetaObject(String remoteIdentifier, String propertyField, String propertyValue)
throws UnknownHostException {
DBObject remoteMetaCollectionObject;
BasicDBObject remoteMetaCollectionObject;
logger.debug("find object...");
remoteMetaCollectionObject = mongoPrimaryInstance.findMetaCollectionObject(remoteIdentifier);
if(remoteMetaCollectionObject!=null){

View File

@ -10,6 +10,7 @@ import java.util.List;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.service.operation.CopyDir;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager;

View File

@ -12,6 +12,7 @@ import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPER
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.service.operation.Link;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.service.operation.Operation;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.util.Costants;
import org.slf4j.Logger;

View File

@ -8,6 +8,7 @@ import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.service.operation.Download;
import org.gcube.contentmanagement.blobstorage.service.operation.Lock;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;

View File

@ -11,6 +11,7 @@ import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.service.operation.MoveDir;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager;

View File

@ -9,6 +9,7 @@ import java.net.UnknownHostException;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
import org.gcube.contentmanagement.blobstorage.resource.MyFile;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.service.impl.ServiceEngine;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.service.operation.Move;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager;
@ -64,7 +65,7 @@ public class MoveOperator extends Move {
logger.info("move operation on Mongo backend, parameters: source path: "+source+" destination path: "+destination);
logger.debug("MOVE OPERATION operation defined: "+resource.getOperationDefinition().getOperation());
if((source != null) && (!source.isEmpty()) && (destination != null) && (!destination.isEmpty())){
DBObject sourcePathMetaCollection = mongoPrimaryInstance.findMetaCollectionObject(source);
BasicDBObject sourcePathMetaCollection = mongoPrimaryInstance.findMetaCollectionObject(source);
//check if the file exist in the destination path, if it exist then it will be deleted
if(sourcePathMetaCollection != null){
sourceId=sourcePathMetaCollection.get("_id").toString();
@ -174,7 +175,7 @@ public class MoveOperator extends Move {
}
private DBObject setCommonFields(DBObject sourcePathMetaCollection, MyFile resource, OPERATION op) {
private BasicDBObject setCommonFields(BasicDBObject f, MyFile resource, OPERATION op) {
String owner=resource.getOwner();
if(op == null){
op=resource.getOperationDefinition().getOperation();
@ -187,23 +188,14 @@ public class MoveOperator extends Move {
String address=null;
try {
address=InetAddress.getLocalHost().getCanonicalHostName().toString();
sourcePathMetaCollection.put("callerIP", address);
f.put("callerIP", address);
} catch (UnknownHostException e) { }
if(from == null) {
sourcePathMetaCollection.put("lastAccess", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z"));
sourcePathMetaCollection.put("lastUser", owner);
sourcePathMetaCollection.put("lastOperation", op.toString());
sourcePathMetaCollection.put("callerIP", address);
}else {
sourcePathMetaCollection.put("lastAccess", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z"));
sourcePathMetaCollection.put("lastUser", owner);
sourcePathMetaCollection.put("lastOperation", op.toString());
sourcePathMetaCollection.put("callerIP", address);
sourcePathMetaCollection.put("from", from);
}
return sourcePathMetaCollection;
if(from == null)
f.append("lastAccess", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z")).append("lastUser", owner).append("lastOperation", op.toString()).append("callerIP", address);
else
f.append("lastAccess", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z")).append("lastUser", owner).append("lastOperation", op.toString()).append("callerIP", address).append("from", from);
return f;
}
}

View File

@ -5,7 +5,6 @@ package org.gcube.contentmanagement.blobstorage.transport.backend.operation;
import java.io.InputStream;
import java.net.UnknownHostException;
import java.util.Objects;
import org.bson.types.ObjectId;
import org.gcube.contentmanagement.blobstorage.resource.MemoryType;
@ -14,6 +13,7 @@ import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.LOCA
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPERATION;
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.service.operation.Operation;
import org.gcube.contentmanagement.blobstorage.service.operation.SoftCopy;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoOperationManager;
@ -26,7 +26,6 @@ import org.slf4j.LoggerFactory;
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.DuplicateKeyException;
import com.mongodb.gridfs.GridFSDBFile;
/**
@ -86,7 +85,6 @@ public class SoftCopyOperator extends SoftCopy {
// if it contains a link field, then I'm going to retrieve the related payload
sourceObject = mongoPrimaryInstance.retrieveLinkPayload(sourceObject);
ObjectId sourceId=(ObjectId)sourceObject.getId();
logger.debug("source id is "+sourceId);
InputStream is= sourceObject.getInputStream();
resource.setInputStream(is);
GridFSDBFile dest = null;
@ -105,18 +103,11 @@ public class SoftCopyOperator extends SoftCopy {
ObjectId removedId=null;
// if the destination location is not empty
if (dest != null){
String destId=dest.getId().toString();
logger.debug("destination id is "+destId);
// in this case the source and dest are the same object
if(sourceId.toString().equals(destId)) {
logger.info("source and destination are pointing to the same object. The copy operation will have no effects");
return destId;
}
// remove the destination file. The third parameter to true replace the file otherwise the remote id is returned
if(resource.isReplace()){
removedId = mongoPrimaryInstance.removeFile(resource, null, resource.isReplace(), null, dest);
}else{
return destId;
return dest.getId().toString();
}
}
// get metacollection instance
@ -126,7 +117,7 @@ public class SoftCopyOperator extends SoftCopy {
ObjectId md5Id=getDuplicatesMap(md5);
// check if the source object is already a map
if(isMap(sourceObject)){
logger.debug("the sourceObject with the following id: "+sourceId+" is already a map");
logger.debug("the sourceObject with the following id: "+mapId+" is already a map");
mapId=sourceId;
// then it's needed to add only the destObject to the map
//first: create link object to destination place
@ -217,10 +208,10 @@ public class SoftCopyOperator extends SoftCopy {
ObjectId id=null;
if(newId == null){
id=new ObjectId();
logger.debug("generated id for new object link "+id);
logger.debug("generated id for new object link"+id);
}else{
id=newId;
logger.debug("restored id for new object link "+id);
logger.debug("restored id for new object link"+id);
}
document.put("_id", id);
@ -234,20 +225,8 @@ public class SoftCopyOperator extends SoftCopy {
document.put("length", sourceObject.getLength());
// set chunkSize inherited from original object
document.put("chunkSize", sourceObject.getChunkSize());
try {
metaCollectionInstance.insert(document);
metaCollectionInstance.save(document);
}catch (DuplicateKeyException e) {
logger.warn("key already present or not completely removed. Wait few seconds and retry");
try {
Thread.sleep(2000);
} catch (InterruptedException e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
}
metaCollectionInstance.insert(document);
metaCollectionInstance.save(document);
}
metaCollectionInstance.insert(document);
metaCollectionInstance.save(document);
return document;
}
@ -274,21 +253,11 @@ public class SoftCopyOperator extends SoftCopy {
searchQuery.put("_id" ,mapId);
DBObject mapObject=mongoPrimaryInstance.findCollectionObject(metaCollectionInstance, searchQuery);
// BasicDBObject updateObject= new BasicDBObject().append("$inc",new BasicDBObject().append("count", 1));;
if(!Objects.isNull(mapObject)) {
Object counting=mapObject.get("count");
if(Objects.nonNull(counting)) {
int count=(int)counting;
count++;
mapObject.put("count", count);
}else {
mapObject.put("count", 1);
}
// metaCollectionInstance.update(mapObject, updateObject);
metaCollectionInstance.save(mapObject);
}else {
logger.error("no object found associated to the following id: "+mapId);
}
int count=(int)mapObject.get("count");
count++;
mapObject.put("count", count);
// metaCollectionInstance.update(mapObject, updateObject);
metaCollectionInstance.save(mapObject);
}
private ObjectId getDuplicatesMap(String md5){
@ -302,11 +271,8 @@ public class SoftCopyOperator extends SoftCopy {
*/
private boolean isMap(GridFSDBFile sourceObject) {
String type=sourceObject.get("type").toString();
logger.debug("object type: "+type);
if(type.equals("map")) {
logger.debug("sourceFile is a map: "+sourceObject.toString());
if(type.equals("map"))
return true;
}
return false;
}

View File

@ -11,6 +11,7 @@ import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.OPER
import org.gcube.contentmanagement.blobstorage.resource.OperationDefinition.REMOTE_RESOURCE;
import org.gcube.contentmanagement.blobstorage.service.operation.Monitor;
import org.gcube.contentmanagement.blobstorage.service.operation.Unlock;
import org.gcube.contentmanagement.blobstorage.service.operation.Upload;
import org.gcube.contentmanagement.blobstorage.transport.backend.MongoIOManager;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

View File

@ -31,8 +31,8 @@ public class MongoInputStream extends ProxyInputStream{
} catch (IOException e) {
e.printStackTrace();
}
// if (mongo!=null)
// mongo.close();
if (mongo!=null)
mongo.close();
setClosed(true);
}
}

View File

@ -66,7 +66,7 @@ public class MongoOutputStream extends ProxyOutputStream {
// TODO Auto-generated catch block
e.printStackTrace();
}
// mongo.close();
mongo.close();
setClosed(true);
}
}

25
target/profile.xml Normal file
View File

@ -0,0 +1,25 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- gCube software-archive service profile for storage-manager-core.
     NOTE(review): this file lives under target/, so it appears to be a build
     artifact; ${description} is presumably substituted by Maven resource
     filtering at build time - confirm before editing by hand. -->
<Resource xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<!-- ID intentionally left empty: assigned by the infrastructure on registration - TODO confirm -->
<ID />
<Type>Service</Type>
<Profile>
<Description>${description}</Description>
<Class>ContentManagement</Class>
<Name>storage-manager-core</Name>
<!-- NOTE(review): this profile Version (1.0.0) differs from the package
     version below (2.9.0-SNAPSHOT); this may be intentional (profile-schema
     version vs. artifact version) - verify against the gCube resource model. -->
<Version>1.0.0</Version>
<Packages>
<Software>
<Name>storage-manager-core</Name>
<Version>2.9.0-SNAPSHOT</Version>
<!-- Maven GAV coordinates of the packaged artifact; must match the jar listed in <Files> -->
<MavenCoordinates>
<groupId>org.gcube.contentmanagement</groupId>
<artifactId>storage-manager-core</artifactId>
<version>2.9.0-SNAPSHOT</version>
</MavenCoordinates>
<Files>
<File>storage-manager-core-2.9.0-SNAPSHOT.jar</File>
</Files>
</Software>
</Packages>
</Profile>
</Resource>