merge master into branch 3.1.0-SNAP

Roberto Cirillo 2021-08-04 09:49:10 +02:00
commit fbfa3a7667
26 changed files with 124 additions and 130 deletions


@@ -6,23 +6,14 @@
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry excluding="**" kind="src" output="target/classes" path="src/main/resources">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="src" output="target/test-classes" path="src/test/java">
<attributes>
<attribute name="optional" value="true"/>
<attribute name="maven.pomderived" value="true"/>
<attribute name="test" value="true"/>
</attributes>
</classpathentry>
<classpathentry excluding="**" kind="src" output="target/test-classes" path="src/test/resources">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>
</classpathentry>
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.7">
<classpathentry kind="con" path="org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8">
<attributes>
<attribute name="maven.pomderived" value="true"/>
</attributes>

.gitignore (new file, vendored)

@@ -0,0 +1 @@
/target/


@@ -1,12 +1,13 @@
eclipse.preferences.version=1
org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.7
org.eclipse.jdt.core.compiler.codegen.targetPlatform=1.8
org.eclipse.jdt.core.compiler.codegen.unusedLocal=preserve
org.eclipse.jdt.core.compiler.compliance=1.7
org.eclipse.jdt.core.compiler.compliance=1.8
org.eclipse.jdt.core.compiler.debug.lineNumber=generate
org.eclipse.jdt.core.compiler.debug.localVariable=generate
org.eclipse.jdt.core.compiler.debug.sourceFile=generate
org.eclipse.jdt.core.compiler.problem.assertIdentifier=error
org.eclipse.jdt.core.compiler.problem.enumIdentifier=error
org.eclipse.jdt.core.compiler.problem.forbiddenReference=warning
org.eclipse.jdt.core.compiler.source=1.7
org.eclipse.jdt.core.compiler.release=disabled
org.eclipse.jdt.core.compiler.source=1.8


@@ -1,17 +1,19 @@
# Changelog for storage-manager-core
## [v3.1.0-SNAPSHOT]
* upgrade mongo-java-driver to version 3.12.0
* removed close method for the mongo client: the connection pool is now managed by the Java driver
* deprecated getUrl method
## [v2.13.0-SNAPSHOT]
* add close operation on IClient interface
## [v2.12.1-SNAPSHOT]
* add check on the transport layer instance: if the memory type is not the same, a new transportLayer is instantiated
* move the memoryType var to the super class TransportManager
* convert the return type used for metadata collections from BasicDBObject to DBObject
## [v3.0.1-SNAPSHOT]
* deprecated Http methods used for returning http url
## [v2.12.0-SNAPSHOT]
* a single pool shared by every operation (static Operation class); no mongo close operation
## [v3.0.0-SNAPSHOT]
* added token and region parameters in order to be compliant with S3 object storage
* refactoring code
## [v2.10.0-SNAPSHOT]
* upgrade mongo-java-driver to 3.12.0
## [v2.11.0-SNAPSHOT]
* upgrade mongo-java-driver to 3.12.0
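The practical effect of the v3.1.0 entries above is a lifecycle change for client code: per-operation close() calls disappear, the MongoDB Java driver owns the connection pool, and the new forceClose() tears the pool down explicitly. A minimal sketch of the intended usage, assuming an IClient obtained elsewhere (obtainClient() and the import path are illustrative, not part of this commit):

```java
import org.gcube.contentmanagement.blobstorage.service.IClient; // package assumed

public class ShutdownSketch {

    // hypothetical helper: stands in for however the application builds its IClient
    static IClient obtainClient() {
        throw new UnsupportedOperationException("wire up a ServiceEngine here");
    }

    public static void main(String[] args) {
        IClient client = obtainClient();
        // ... perform storage operations; no per-operation close() is needed,
        // because the MongoDB Java driver manages the connection pool internally ...

        // only at application shutdown, to tear the whole pool down:
        client.forceClose();
    }
}
```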


@@ -9,6 +9,9 @@
<groupId>org.gcube.contentmanagement</groupId>
<artifactId>storage-manager-core</artifactId>
<version>3.1.0-SNAPSHOT</version>
<!-- <properties> -->
<!-- <distroDirectory>${project.basedir}/distro</distroDirectory> -->
<!-- </properties> -->
<scm>
<connection>scm:git:https://code-repo.d4science.org/gCubeSystem/${project.artifactId}.git</connection>
<developerConnection>scm:git:https://code-repo.d4science.org/gCubeSystem/${project.artifactId}.git</developerConnection>


@@ -1,15 +0,0 @@
log4j.rootLogger=INFO, A1, stdout
log4j.appender.A1=org.apache.log4j.RollingFileAppender
log4j.appender.A1.File=log.txt
log4j.appender.A1.layout=org.apache.log4j.PatternLayout
log4j.appender.A1.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n
# ***** Max file size is set to 100KB
log4j.appender.A1.MaxFileSize=100MB
# ***** Keep one backup file
log4j.appender.A1.MaxBackupIndex=1
#CONSOLE
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.Threshold=INFO
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=[%t] %-5p %c %d{dd MMM yyyy ;HH:mm:ss.SSS} - %m%n


@@ -206,7 +206,7 @@ public RemoteResourceComplexInfo getMetaFile();
/**
* close the connections to backend storage system
*/
public void close();
public void forceClose();
public RemoteResource getUrl(boolean forceCreation);


@@ -29,6 +29,7 @@ public class DirectoryBucket {
String path;
String[] server;
String user, password;
TransportManager tm;
public DirectoryBucket(String[] server, String user, String password, String path, String author){
if(logger.isDebugEnabled())
logger.debug("DirectoryBucket PATH: "+path);
@@ -91,7 +92,7 @@ public class DirectoryBucket {
String[] bucketList=null;
bucketList=retrieveBucketsName(path, rootArea);
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, resource.getGcubeMemoryType(), dbNames, resource.getWriteConcern(), resource.getReadPreference(), resource.getToken(), resource.getRegion());
tm=tmf.getTransport(tm, backendType, resource.getGcubeMemoryType(), dbNames, resource.getWriteConcern(), resource.getReadPreference());
// TerrastoreClient client=new TerrastoreClient( new OrderedHostManager(Arrays.asList(server)), new HTTPConnectionFactory());
for(int i=0;i<bucketList.length;i++){
if(logger.isDebugEnabled())
@@ -124,7 +125,7 @@ public class DirectoryBucket {
logger.debug("bucketDir Coded: "+bucketDirCoded);
bucketList=retrieveBucketsName(bucket, rootArea);
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, resource.getGcubeMemoryType(), dbNames, resource.getWriteConcern(),resource.getReadPreference(), resource.getToken(), resource.getRegion());
tm=tmf.getTransport(tm, backendType, resource.getGcubeMemoryType(), dbNames, resource.getWriteConcern(),resource.getReadPreference());
for(int i=0;i<bucketList.length;i++){
if(logger.isDebugEnabled())
logger.debug("REMOVE: check "+bucketList[i]+" bucketDirCoded: "+bucketDirCoded );


@@ -27,7 +27,7 @@ import org.gcube.contentmanagement.blobstorage.resource.StorageObject;
*/
public class RemoteResource extends Resource{
TransportManager tm;
public RemoteResource(RequestObject file, ServiceEngine engine) {
super(file, engine);
@@ -112,7 +112,7 @@ public class RemoteResource extends Resource{
if(engine.getCurrentOperation().equalsIgnoreCase("showdir")){
dir = new BucketCoding().bucketDirCoding(dir, engine.getContext());
TransportManagerFactory tmf= new TransportManagerFactory(engine.primaryBackend, engine.getBackendUser(), engine.getBackendPassword());
TransportManager tm=tmf.getTransport(engine.getBackendType(), engine.getGcubeMemoryType(), engine.getDbNames(), engine.getWriteConcern(), engine.getReadConcern(), engine.getToken(), engine.getRegion());
tm=tmf.getTransport(tm, engine.getBackendType(), engine.getGcubeMemoryType(), engine.getDbNames(), engine.getWriteConcern(), engine.getReadConcern());
Map<String, StorageObject> mapDirs=null;
try {
mapDirs = tm.getValues(getMyFile(), dir, DirectoryEntity.class);
@@ -133,7 +133,7 @@ public class RemoteResource extends Resource{
dirBuc.removeDirBucket(getMyFile(), dir, engine.getContext(), engine.getBackendType(), engine.getDbNames());
else{
TransportManagerFactory tmf=new TransportManagerFactory(engine.primaryBackend, engine.getBackendUser(), engine.getBackendPassword());
TransportManager tm=tmf.getTransport(Costants.CLIENT_TYPE, engine.getGcubeMemoryType(), engine.getDbNames(), engine.getWriteConcern(), engine.getReadConcern(), engine.getToken(), engine.getRegion());
tm=tmf.getTransport(tm, Costants.CLIENT_TYPE, engine.getGcubeMemoryType(), engine.getDbNames(), engine.getWriteConcern(), engine.getReadConcern());
dir=new BucketCoding().bucketFileCoding(dir, engine.getContext());
try {
tm.removeDir(dir, getMyFile());


@@ -911,13 +911,13 @@ public class ServiceEngine implements IClient {
}
@Override
public void close(){
currentOperation="close";
public void forceClose(){
currentOperation="forceclose";
file.setOwner(owner);
getMyFile().setRemoteResource(REMOTE_RESOURCE.PATH);
setMyFile(file);
service.setResource(getMyFile());
service.setTypeOperation("close");
service.setTypeOperation("forceclose");
try {
if(((file.getInputStream() != null) || (file.getOutputStream()!=null)) || ((file.getLocalPath() != null) || (file.getRemotePath() != null)))
service.startOperation(file,file.getRemotePath(), owner, primaryBackend, Costants.DEFAULT_CHUNK_OPTION, getContext(), isReplaceOption());


@@ -117,7 +117,7 @@ public class ChunkConsumer implements Runnable {
synchronized(ChunkConsumer.class){
String [] randomServer=randomizeServer(server);
TransportManagerFactory tmf=new TransportManagerFactory(randomServer, null, null);
client.set(tmf.getTransport(Costants.CLIENT_TYPE, null, null, myFile.getWriteConcern(), myFile.getReadPreference(), myFile.getToken(), myFile.getRegion()));
client.set(tmf.getTransport(null, Costants.CLIENT_TYPE, null, null, myFile.getWriteConcern(), myFile.getReadPreference()));
}
if(logger.isDebugEnabled()){
logger.debug("waiting time for upload: "


@@ -7,7 +7,7 @@ import org.gcube.contentmanagement.blobstorage.transport.backend.RemoteBackendEx
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
public class Close extends Operation{
public class ForceClose extends Operation{
/**
* Logger for this class
@@ -15,7 +15,7 @@ public class Close extends Operation{
final Logger logger=LoggerFactory.getLogger(GetSize.class);
// public String file_separator = ServiceEngine.FILE_SEPARATOR;//System.getProperty("file.separator");
public Close(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
public ForceClose(String[] server, String user, String pwd, String bucket, Monitor monitor, boolean isChunk, String backendType, String[] dbs) {
super(server, user, pwd, bucket, monitor, isChunk, backendType, dbs);
}
@@ -24,7 +24,7 @@ public class Close extends Operation{
// TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
TransportManager tm=getTransport(myFile);
try {
tm.close();
tm.forceClose();
} catch (Exception e) {
throw new RemoteBackendException(" Error in GetSize operation ", e.getCause()); }
if (logger.isDebugEnabled()) {


@@ -74,10 +74,8 @@ public class GetHttpUrl extends Operation {
private String getId(String path, boolean forceCreation, MemoryType memoryType, String writeConcern, String readPreference){
String id=null;
if(tm ==null){
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(backendType, memoryType, dbNames, writeConcern, readPreference, null, null);
}
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(tm, backendType, memoryType, dbNames, writeConcern, readPreference);
try {
id = tm.getId(bucket, forceCreation);
} catch (Exception e) {


@@ -75,10 +75,8 @@ public class GetHttpsUrl extends Operation {
private String getId(String path, boolean forceCreation, MemoryType memoryType, String writeConcern, String readPreference){
String id=null;
if(tm ==null){
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(backendType, memoryType, dbNames, writeConcern, readPreference, null, null);
}
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(tm, backendType, memoryType, dbNames, writeConcern, readPreference);
try {
id = tm.getId(bucket, forceCreation);
} catch (Exception e) {


@@ -58,10 +58,8 @@ public class GetUrl extends Operation{
private String getId(String path, boolean forceCreation, MemoryType memoryType, String writeConcern, String readPreference){
String id=null;
if(tm ==null){
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(backendType, memoryType, dbNames, writeConcern, readPreference, null, null);
}
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
tm=tmf.getTransport(tm, backendType, memoryType, dbNames, writeConcern, readPreference);
try {
id = tm.getId(bucket, forceCreation);
} catch (Exception e) {


@@ -382,10 +382,10 @@ public abstract class Operation {
}
protected TransportManager getTransport(RequestObject myFile) {
if(Objects.isNull(transport)) {
// if(Objects.isNull(transport)) {
TransportManagerFactory tmf= new TransportManagerFactory(server, user, password);
transport=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference(), myFile.getToken(), myFile.getRegion());
}
transport=tmf.getTransport(transport, backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
// }
return transport;
}


@@ -52,6 +52,8 @@ public class OperationFactory {
op=new Remove(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("getSize")){
op=new GetSize(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("forceclose")){
op=new ForceClose(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("duplicate")){
op=new DuplicateOperator(server, user, password, bucket, monitor, isChunk, backendType, dbNames);
}else if(operation.equalsIgnoreCase("softcopy")){


@@ -29,7 +29,8 @@ public class SetMetaInfo extends Operation {
} catch (Exception e) {
tm.close();
e.printStackTrace();
throw new RemoteBackendException(" Error in SetMetaInfo operation ", e.getCause()); }
logger.error("Problem setting file property", e);
throw new RemoteBackendException(" Error in SetMetaInfo operation ", e); }
if (logger.isDebugEnabled()) {
logger.debug(" PATH " + bucket);
}


@@ -33,9 +33,10 @@ public class UploadAndUnlock extends Operation {
objectId=put(upload, myFile, isChunk(), false, false, true);
} catch (Exception e) {
TransportManagerFactory tmf=new TransportManagerFactory(server, user, password);
TransportManager tm=tmf.getTransport(backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference(), myFile.getToken(), myFile.getRegion());
TransportManager tm=tmf.getTransport(transport, backendType, myFile.getGcubeMemoryType(), dbNames, myFile.getWriteConcern(), myFile.getReadPreference());
tm.close();
throw new RemoteBackendException(" Error in uploadAndUnlock operation ", e.getCause()); }
throw new RemoteBackendException(" Error in uploadAndUnlock operation ", e);
}
return objectId;
}


@@ -23,7 +23,7 @@ import com.mongodb.MongoException;
public abstract class TransportManager {
protected MemoryType memoryType;
/**
* This method specifies the backend type used for dynamic loading.
* For MongoDB, the default backend, the name is MongoDB.
@@ -38,7 +38,7 @@ public abstract class TransportManager {
* @param user
* @param token api token, if required by the backend
*/
public abstract void initBackend(String[] server, String user, String pass, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern, String token, String region);
public abstract void initBackend(String[] server, String user, String pass, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern);
/**
@@ -326,6 +326,8 @@ public abstract class TransportManager {
public abstract String getField(String remoteIdentifier, String fieldName) throws UnknownHostException ;
public abstract void close();
public abstract void forceClose();
public abstract void setFileProperty(String remotePath, String propertyField, String propertyValue);
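The "dynamic loading" mentioned in the javadoc above is plain java.util.ServiceLoader: the factory iterates the registered TransportManager implementations and matches getName() against the requested backendType, case-insensitively. A sketch of how an alternative backend would plug in; the package, class name, and services path are illustrative, and the omitted abstract methods would still have to be implemented:

```java
// Registered for ServiceLoader via a one-line resource file (path assumed from the package layout):
//   META-INF/services/org.gcube.contentmanagement.blobstorage.transport.TransportManager
// containing the fully qualified implementation name, e.g. my.company.storage.S3TransportManager
package my.company.storage; // hypothetical

import org.gcube.contentmanagement.blobstorage.transport.TransportManager; // package assumed

public class S3TransportManager extends TransportManager {

    @Override
    public String getName() {
        return "S3"; // matched case-insensitively against the requested backendType
    }

    // ... initBackend, close, forceClose and the remaining abstract methods go here ...
}
```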


@@ -27,9 +27,11 @@ public class TransportManagerFactory {
// private static final Logger logger = Logger.getLogger(OperationFactory.class);
final Logger logger = LoggerFactory.getLogger(TransportManagerFactory.class);
// TerrastoreClient client;
String[] server;
String user;
String password;
private String[] server;
private String user;
private String password;
private MemoryType memoryType;
private String dbNames;
TransportManager transport;
public TransportManagerFactory(String server[], String user, String password){
@@ -38,16 +40,24 @@ public class TransportManagerFactory {
this.password=password;
}
public TransportManager getTransport(String backendType, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern, String token, String region){
public TransportManager getTransport(TransportManager tm, String backendType, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern){
if (logger.isDebugEnabled()) {
logger.debug("getOperation(String) - start");
}
if(Objects.isNull(transport))
return load(backendType, memoryType, dbNames, writeConcern, readConcern, token, region);
return transport;
if(logger.isDebugEnabled() && (!Objects.isNull(transport)))
logger.debug("transportLayer with "+transport.memoryType+" already instatiated. New memoryType request is "+memoryType);
// if we haven't any transport layer instantiated or the transport layer is istantiated on another memory type (persistent, volatile),
// then a new transport layer is needed
if(Objects.isNull(tm) || Objects.isNull(tm.memoryType) || (!tm.memoryType.equals(memoryType))) {
logger.info("new transport layer instantiated for "+memoryType+" memory");
return load(backendType, memoryType, dbNames, writeConcern, readConcern);
}else {
logger.debug("new transport layer not instantiated.");
}
return tm;
}
private TransportManager load(String backendType, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern, String token, String region){
private TransportManager load(String backendType, MemoryType memoryType, String[] dbNames, String writeConcern, String readConcern){
ServiceLoader<TransportManager> loader = ServiceLoader.load(TransportManager.class);
Iterator<TransportManager> iterator = loader.iterator();
List<TransportManager> impls = new ArrayList<TransportManager>();
@@ -59,11 +69,11 @@ public class TransportManagerFactory {
// System.out.println("size: "+implementationCounted);
if((implementationCounted==0) || backendType.equals(Costants.DEFAULT_TRANSPORT_MANAGER)){
logger.info(" 0 implementation found. Load default implementation of TransportManager");
return new MongoOperationManager(server, user, password, memoryType, dbNames, writeConcern, readConcern, null, null);
return new MongoOperationManager(server, user, password, memoryType, dbNames, writeConcern, readConcern);
}else if((implementationCounted==1) && Objects.isNull(backendType)){
TransportManager tm = impls.get(0);
logger.info("1 implementation of TransportManager found. Load it. "+tm.getName());
tm.initBackend(server, user, password, memoryType, dbNames, writeConcern, readConcern, token, region);
tm.initBackend(server, user, password, memoryType, dbNames, writeConcern, readConcern);
return tm;
}else{
logger.info("found "+implementationCounted+" implementations of TransportManager");
@@ -71,7 +81,7 @@
for(TransportManager tm : impls){
if(tm.getName().equalsIgnoreCase(backendType)){
logger.info("Found implementation "+backendType);
tm.initBackend(server, user, password, memoryType, dbNames, writeConcern, readConcern, token, region);
tm.initBackend(server, user, password, memoryType, dbNames, writeConcern, readConcern);
return tm;
}
}
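With the new signature, callers cache their TransportManager and hand it back to the factory on every request; the factory returns it unchanged when its memoryType matches the request and loads a fresh transport layer otherwise. A caller-side sketch mirroring what DirectoryBucket does in this commit (the server, user and password fields are assumed to exist on the caller):

```java
private TransportManager tm; // cached across requests, e.g. a field on the caller

private TransportManager transport(String backendType, MemoryType memoryType,
        String[] dbNames, String writeConcern, String readPreference) {
    TransportManagerFactory tmf = new TransportManagerFactory(server, user, password);
    // tm comes back unchanged when it already targets the requested memoryType;
    // otherwise the factory loads and returns a new transport layer
    tm = tmf.getTransport(tm, backendType, memoryType, dbNames, writeConcern, readPreference);
    return tm;
}
```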


@@ -547,10 +547,13 @@ public class MongoIOManager {
destinationFile.put("creationTime", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z"));
}
public BasicDBObject setGenericMoveProperties(RequestObject resource, String filename, String dir,
String name, BasicDBObject f) {
f.append("filename", filename).append("type", "file").append("name", name).append("dir", dir);
return f;
public DBObject setGenericMoveProperties(RequestObject resource, String filename, String dir,
String name, DBObject sourcePathMetaCollection) {
sourcePathMetaCollection.put("filename", filename);
sourcePathMetaCollection.put("type", "file");
sourcePathMetaCollection.put("name", name);
sourcePathMetaCollection.put("dir", dir);
return sourcePathMetaCollection;
}
@@ -778,13 +781,10 @@
return list;
}
public BasicDBObject findMetaCollectionObject(String source) throws UnknownHostException {
//set to null in order to perform a query as BasicDBObject and not GridFSObject
db=null;
mongo=null;
public DBObject findMetaCollectionObject(String source) throws UnknownHostException {
DBCollection fileCollection=getConnectionDB(dbName, false).getCollection(Costants.DEFAULT_META_COLLECTION);
BasicDBObject query = new BasicDBObject();
BasicDBObject obj=null;
DBObject obj=null;
query.put( "filename" ,source);
DBCursor cursor=fileCollection.find(query);
if(cursor != null && !cursor.hasNext()){
@@ -793,7 +793,7 @@
cursor=fileCollection.find(query);
}
if(cursor.hasNext()){
obj=(BasicDBObject) cursor.next();
obj=(DBObject) cursor.next();
String path=(String)obj.get("filename");
logger.debug("path found "+path);
}
@@ -1067,12 +1067,20 @@
public void close() {
// if(mongo!=null)
// mongo.close();
logger.debug(" try to close backend but the close operation is not implemented");
// logger.info("Mongo has been closed");
// mongo=null;
// gfs=null;
// db=null;
}
public void forceClose() {
if(mongo!=null)
mongo.close();
logger.info("Mongo pool closed");
}
public void removeGFSFile(GridFSDBFile f, ObjectId idF){
// this field is an advice for oplog collection reader
f.put("onDeleting", "true");


@@ -26,6 +26,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBObject;
import com.mongodb.MongoException;
import com.mongodb.gridfs.GridFS;
import com.mongodb.gridfs.GridFSDBFile;
@@ -48,15 +49,17 @@ public class MongoOperationManager extends TransportManager{
protected static String[] dbNames;
public MongoOperationManager(String[] server, String user, String password, MemoryType memoryType, String[] dbNames,String writeConcern, String readConcern, String token, String region){
initBackend(server,user,password, memoryType,dbNames, writeConcern, readConcern, token, region);
public MongoOperationManager(String[] server, String user, String password, MemoryType memoryType, String[] dbNames,String writeConcern, String readConcern){
initBackend(server,user,password, memoryType,dbNames, writeConcern, readConcern);
}
@Override
public void initBackend(String[] server, String user, String pass, MemoryType memoryType , String[] dbNames, String writeConcern, String readConcern, String token, String region) {
public void initBackend(String[] server, String user, String pass, MemoryType memoryType , String[] dbNames, String writeConcern, String readConcern) {
logger.debug("init storage backend with "+memoryType+" memory");
try {
this.memoryType=memoryType;
super.memoryType=memoryType;
MongoOperationManager.dbNames=dbNames;
logger.debug("check mongo configuration");
if (dbNames!=null){
@@ -130,6 +133,11 @@ public class MongoOperationManager extends TransportManager{
// mongoSecondaryInstance.close();
}
public void forceClose() {
mongoPrimaryInstance.forceClose();
mongoSecondaryInstance.forceClose();
}
/**
* Unlock the object specified, this method accept the key field for the unlock operation
* @throws FileNotFoundException
@@ -456,7 +464,7 @@
*/
private void updateMetaObject(String remoteIdentifier, String propertyField, String propertyValue)
throws UnknownHostException {
BasicDBObject remoteMetaCollectionObject;
DBObject remoteMetaCollectionObject;
logger.debug("find object...");
remoteMetaCollectionObject = mongoPrimaryInstance.findMetaCollectionObject(remoteIdentifier);
if(remoteMetaCollectionObject!=null){


@@ -65,7 +65,7 @@ public class MoveOperator extends Move {
logger.info("move operation on Mongo backend, parameters: source path: "+source+" destination path: "+destination);
logger.debug("MOVE OPERATION operation defined: "+resource.getOperationDefinition().getOperation());
if((source != null) && (!source.isEmpty()) && (destination != null) && (!destination.isEmpty())){
BasicDBObject sourcePathMetaCollection = mongoPrimaryInstance.findMetaCollectionObject(source);
DBObject sourcePathMetaCollection = mongoPrimaryInstance.findMetaCollectionObject(source);
//check if the file exists in the destination path; if it exists, it will be deleted
if(sourcePathMetaCollection != null){
sourceId=sourcePathMetaCollection.get("_id").toString();
@@ -175,7 +175,7 @@
}
private BasicDBObject setCommonFields(BasicDBObject f, RequestObject resource, OPERATION op) {
private DBObject setCommonFields(DBObject sourcePathMetaCollection, RequestObject resource, OPERATION op) {
String owner=resource.getOwner();
if(op == null){
op=resource.getOperationDefinition().getOperation();
@@ -188,14 +188,23 @@
String address=null;
try {
address=InetAddress.getLocalHost().getCanonicalHostName().toString();
f.put("callerIP", address);
sourcePathMetaCollection.put("callerIP", address);
} catch (UnknownHostException e) { }
if(from == null)
f.append("lastAccess", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z")).append("lastUser", owner).append("lastOperation", op.toString()).append("callerIP", address);
else
f.append("lastAccess", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z")).append("lastUser", owner).append("lastOperation", op.toString()).append("callerIP", address).append("from", from);
return f;
if(from == null) {
sourcePathMetaCollection.put("lastAccess", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z"));
sourcePathMetaCollection.put("lastUser", owner);
sourcePathMetaCollection.put("lastOperation", op.toString());
sourcePathMetaCollection.put("callerIP", address);
}else {
sourcePathMetaCollection.put("lastAccess", DateUtils.now("dd MM yyyy 'at' hh:mm:ss z"));
sourcePathMetaCollection.put("lastUser", owner);
sourcePathMetaCollection.put("lastOperation", op.toString());
sourcePathMetaCollection.put("callerIP", address);
sourcePathMetaCollection.put("from", from);
}
return sourcePathMetaCollection;
}
}


@@ -67,7 +67,7 @@ public class MongoOutputStream extends ProxyOutputStream {
e.printStackTrace();
}
// mongo.close();
// setClosed(true);
setClosed(true);
}
}


@@ -1,25 +0,0 @@
<?xml version="1.0" encoding="UTF-8"?>
<Resource xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<ID />
<Type>Service</Type>
<Profile>
<Description>${description}</Description>
<Class>ContentManagement</Class>
<Name>storage-manager-core</Name>
<Version>1.0.0</Version>
<Packages>
<Software>
<Name>storage-manager-core</Name>
<Version>2.9.0-SNAPSHOT</Version>
<MavenCoordinates>
<groupId>org.gcube.contentmanagement</groupId>
<artifactId>storage-manager-core</artifactId>
<version>2.9.0-SNAPSHOT</version>
</MavenCoordinates>
<Files>
<File>storage-manager-core-2.9.0-SNAPSHOT.jar</File>
</Files>
</Software>
</Packages>
</Profile>
</Resource>